index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
994,300 | 80be46c0370a5a428411e40e7134eadef638f781 | import cv2 as cv
img = cv.imread("Ronaldo-1.jpg",1)
cv.imshow("Image",img)
#Create a new img
cv.imwrite("New_cr7.jpg", img )
cv.waitKey(0) |
994,301 | d16f0baa99ce9ba51d36b82098f3348e29118200 | x = int(input("Enter a number : "))
if x > 0 :
print("+1")
elif x < 0 :
print("-1")
|
994,302 | b14e8fffe40f065448acc8ec244eee001d017aea | from django.contrib import admin
from models import ImagePool
admin.site.register(ImagePool)
|
994,303 | 0e4101fe62e12774e70ca6a21e7a5e7cd5ecab01 | from .base import *
DEBUG = False
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'USER': 'root',
'PASSWORD': os.environ.get('mysqlPassword'),
'HOST': '127.0.0.1',
'PORT': '3306',
'NAME': "hellofamily_db",
'TEST': {
'NAME': 'test_hellofamily_db',
}
}
}
ALLOWED_HOSTS = ['hellofamily.club']
# 百度人工智能key
APP_ID = '14303012'
API_KEY = 't4GyIHmNULqO50d0RlvY86PV'
SECRET_KEY = 'VxKOFYYdvvRuk4MGrlyxlg6asArkRUlR'
APP_GROUP_ID = 'Hello_Project'
EMAIL_HOST = 'smtp.exmail.qq.com'
EMAIL_HOST_USER = os.environ.get('EMAIL_HOST_USER')
EMAIL_HOST_PASSWORD = os.environ.get('EMAIL_HOST_PASSWORD')
DEFAULT_FROM_EMAIL = os.environ.get('DEFAULT_FROM_EMAIL')
EMAIL_SUBJECT_PREFIX = 'hellofamily.club邮件报警'
# CELERY配置
CELERY_BROKER_URL = REDIS_URL
CELERY_ACCEPT_CONTENT = ['json']
CELERY_RESULT_BACKEND = REDIS_URL
CELERY_TASK_SERIALIZER = 'json'
# Websocket设置
CHANNEL_LAYERS = {
'default': {
'BACKEND': 'channels_redis.core.RedisChannelLayer',
'CONFIG': {
'hosts': [REDIS_URL],
}
}
}
|
994,304 | 4229b5a77f09a9d51bb0cb59d4f3b685cd4f282b |
#calss header
class _MICROPHONE():
def __init__(self,):
self.name = "MICROPHONE"
self.definitions = [u'a piece of equipment that you speak into to make your voice louder, or to record your voice or other sounds: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
|
994,305 | 011b2d26341d3cef824331a7716125ccb6bfc904 | # -*- coding: utf-8 -*-
""" Unit test for pyKwalify - Rule """
# python std lib
import unittest
# 3rd party imports
import pytest
# pyKwalify imports
import pykwalify
from pykwalify.errors import RuleError, SchemaConflict
from pykwalify.rule import Rule
from pykwalify.compat import unicode
class TestRule(unittest.TestCase):
def setUp(self):
pykwalify.partial_schemas = {}
def test_schema(self):
# Test that when using both schema; and include tag that it throw an error because schema; tags should be parsed via Core()
with pytest.raises(RuleError) as r:
Rule(schema={"schema;str": {"type": "map", "mapping": {"foo": {"type": "str"}}}, "type": "map", "mapping": {"foo": {"include": "str"}}})
assert str(r.value) == "<RuleError: error code 4: Schema is only allowed on top level of schema file: Path: '/'>"
assert r.value.error_key == 'schema.not.toplevel'
def test_unkknown_key(self):
# Test that providing an unknown key raises exception
with pytest.raises(RuleError) as r:
Rule(schema={"type": "str", "foobar": True})
assert str(r.value) == "<RuleError: error code 4: Unknown key: foobar found: Path: '/'>"
assert r.value.error_key == 'key.unknown'
def test_matching_rule(self):
# Test that exception is raised when a invalid matching rule is used
with pytest.raises(RuleError) as r:
Rule(schema={"type": "map", "matching-rule": "foobar", "mapping": {"regex;(.+)": {"type": "seq", "sequence": [{"type": "str"}]}}})
assert str(r.value) == "<RuleError: error code 4: Specified rule in key: foobar is not part of allowed rule set : ['any', 'all']: Path: '/'>"
assert r.value.error_key == 'matching_rule.not_allowed'
def test_allow_empty_map(self):
r = Rule(schema={"type": "map", "allowempty": True, "mapping": {"name": {"type": "str"}}})
assert r.allowempty_map is True
def test_type_value(self):
# TODO: This test is currently semi broken, partial schemas might be somewhat broken possibly
# # Test that when only having a schema; rule it should throw error
# with pytest.raises(RuleError) as r:
# Rule(schema={"schema;fooone": {"type": "map", "mapping": {"foo": {"type": "str"}}}})
# assert str(r.value) == "<RuleError: error code 4: Key 'type' not found in schema rule: Path: '/'>"
# assert r.value.error_key == 'type.missing'
# Test a valid rule with both "str" and "unicode" types work
r = Rule(schema={"type": str("str")})
r = Rule(schema={"type": unicode("str")})
# Test that type key must be string otherwise exception is raised
with pytest.raises(RuleError) as r:
Rule(schema={"type": 1})
assert str(r.value) == "<RuleError: error code 4: Key 'type' in schema rule is not a string type (found int): Path: '/'>"
assert r.value.error_key == 'type.not_string'
# this tests that the type key must be a string
with pytest.raises(RuleError) as r:
Rule(schema={"type": 1}, parent=None)
assert str(r.value) == "<RuleError: error code 4: Key 'type' in schema rule is not a string type (found int): Path: '/'>"
assert r.value.error_key == 'type.not_string'
def test_name_value(self):
with pytest.raises(RuleError) as r:
Rule(schema={'type': 'str', 'name': {}})
assert str(r.value) == "<RuleError: error code 4: Value: {} for keyword name must be a string: Path: '/'>"
def test_nullable_value(self):
# Test that nullable value must be bool otherwise exception is raised
with pytest.raises(RuleError) as r:
Rule(schema={"type": "str", "nullable": "foobar"})
assert str(r.value) == "<RuleError: error code 4: Value: 'foobar' for nullable keyword must be a boolean: Path: '/'>"
assert r.value.error_key == 'nullable.not_bool'
def test_desc_value(self):
with pytest.raises(RuleError) as r:
Rule(schema={'type': 'str', 'desc': []})
assert str(r.value) == "<RuleError: error code 4: Value: [] for keyword desc must be a string: Path: '/'>"
def test_example_value(self):
with pytest.raises(RuleError) as r:
Rule(schema={'type': 'str', 'example': []})
assert str(r.value) == "<RuleError: error code 4: Value: [] for keyword example must be a string: Path: '/'>"
def test_required_value(self):
# Test that required value must be bool otherwise exception is raised
with pytest.raises(RuleError) as r:
Rule(schema={"type": "str", "required": "foobar"})
assert str(r.value) == "<RuleError: error code 4: Value: 'foobar' for required keyword must be a boolean: Path: '/'>"
assert r.value.error_key == 'required.not_bool'
def test_pattern_value(self):
# this tests a invalid regexp pattern
with pytest.raises(RuleError) as r:
Rule(schema={"type": "str", "pattern": "/@/\\"})
assert str(r.value) == "<RuleError: error code 4: Syntax error when compiling regex pattern: None: Path: '/'>"
assert r.value.error_key == 'pattern.syntax_error'
# Test that pattern keyword is not allowed when using a map
# with self.assertRaisesRegexp(RuleError, ".+map\.pattern.+"):
with pytest.raises(RuleError) as r:
Rule(schema={"type": "map", "pattern": "^[a-z]+$", "allowempty": True, "mapping": {"name": {"type": "str"}}})
assert str(r.value) == "<RuleError: error code 4: Keyword pattern is not allowed inside map: Path: '/'>"
assert r.value.error_key == 'pattern.not_allowed_in_map'
# Test that pattern value must be string otherwise exception is raised
with pytest.raises(RuleError) as r:
Rule(schema={"type": "str", "pattern": 1})
assert str(r.value) == "<RuleError: error code 4: Value of pattern keyword: '1' is not a string: Path: '/'>"
assert r.value.error_key == 'pattern.not_string'
def test_date_and_format_value(self):
r = Rule(schema={"type": "date", "format": "%y"})
assert r.format is not None, "date var not set proper"
assert isinstance(r.format, list), "date format should be a list"
with pytest.raises(RuleError) as r:
Rule(schema={"type": "date", "format": 1})
assert str(r.value) == "<RuleError: error code 4: Value of format keyword: '1' must be a string or list or string values: Path: '/'>"
with pytest.raises(RuleError) as r:
Rule(schema={"type": "map", "format": "%y"})
assert str(r.value) == "<RuleError: error code 4: Keyword format is only allowed when used with the following types: ('date',): Path: '/'>"
def test_enum_value(self):
# this tests the various valid enum types
Rule(schema={"type": "int", "enum": [1, 2, 3]})
Rule(schema={"type": "bool", "enum": [True, False]})
r = Rule(schema={"type": "str", "enum": ["a", "b", "c"]})
assert r.enum is not None, "enum var is not set proper"
assert isinstance(r.enum, list), "enum is not set to a list"
assert len(r.enum) == 3, "invalid length of enum entries"
# this tests the missmatch between the type and the data inside a enum
with pytest.raises(RuleError) as r:
Rule(schema={"type": "str", "enum": [1, 2, 3]})
assert str(r.value).startswith("<RuleError: error code 4: Item: '1' in enum is not of correct class type:")
assert r.value.error_key == 'enum.type.unmatch'
with pytest.raises(RuleError) as r:
Rule(schema={"type": "str", "enum": True})
assert str(r.value) == "<RuleError: error code 4: Enum is not a sequence: Path: '/'>"
assert r.value.error_key == 'enum.not_seq'
def test_assert_value(self):
with pytest.raises(RuleError) as r:
Rule(schema={"type": "seq", "sequence": [{"type": "str", "assert": 1}]})
assert str(r.value) == "<RuleError: error code 4: Value: '1' for keyword 'assert' is not a string: Path: '/sequence/0'>"
assert r.value.error_key == 'assert.not_str'
# Test that invalid characters is not present
with pytest.raises(RuleError) as r:
Rule(schema={"type": "seq", "sequence": [{"type": "str", "assert": "__import__"}]})
assert str(r.value) == "<RuleError: error code 4: Value: '__import__' contain invalid content that is not allowed to be present in assertion keyword: Path: '/sequence/0'>" # NOQA: E501
assert r.value.error_key == 'assert.unsupported_content'
def test_length(self):
r = Rule(schema={"type": "int", "length": {"max": 10, "min": 1}})
assert r.length is not None, "length var not set proper"
assert isinstance(r.length, dict), "range var is not of dict type"
# this tests that the range key must be a dict
with pytest.raises(RuleError) as r:
Rule(schema={"type": "int", "length": []})
assert str(r.value) == "<RuleError: error code 4: Length value is not a dict type: '[]': Path: '/'>"
assert r.value.error_key == 'length.not_map'
with pytest.raises(RuleError) as r:
Rule(schema={"type": "str", "length": {"max": "z"}})
assert str(r.value) == "<RuleError: error code 4: Value: 'z' for 'max' keyword is not a number: Path: '/'>"
assert r.value.error_key == 'length.max.not_number'
# this tests that min is bigger then max that should not be possible
with pytest.raises(RuleError) as r:
Rule(schema={"type": "int", "length": {"max": 10, "min": 11}})
assert str(r.value) == "<RuleError: error code 4: Value for 'max' can't be less then value for 'min'. 10 < 11: Path: '/'>"
assert r.value.error_key == 'length.max_lt_min'
# test that min-ex is bigger then max-ex, that should not be possible
with pytest.raises(RuleError) as r:
Rule(schema={"type": "int", "length": {"max-ex": 10, "min-ex": 11}})
assert str(r.value) == "<RuleError: error code 4: Value for 'max-ex' can't be less then value for 'min-ex'. 10 <= 11: Path: '/'>"
assert r.value.error_key == 'length.max-ex_le_min-ex'
# test that a string has non negative boundaries
with pytest.raises(RuleError) as r:
Rule(schema={"type": "str", "length": {"max": -1, "min": -2}})
assert str(r.value) == "<RuleError: error code 4: Value for 'min' can't be negative in case of type str.: Path: '/'>"
assert r.value.error_key == 'length.min_negative'
# test that a seq has non negative boundaries
with pytest.raises(RuleError) as r:
Rule(schema={"type": "seq", "length": {"max": 3, "min": -2}})
assert str(r.value) == "<RuleError: error code 4: Value for 'min' can't be negative in case of type seq.: Path: '/'>"
assert r.value.error_key == 'length.min_negative'
def test_range_value(self):
r = Rule(schema={"type": "int", "range": {"max": 10, "min": 1}})
assert r.range is not None, "range var not set proper"
assert isinstance(r.range, dict), "range var is not of dict type"
# this tests that the range key must be a dict
with pytest.raises(RuleError) as r:
Rule(schema={"type": "int", "range": []})
assert str(r.value) == "<RuleError: error code 4: Range value is not a dict type: '[]': Path: '/'>"
assert r.value.error_key == 'range.not_map'
with pytest.raises(RuleError) as r:
Rule(schema={"type": "str", "range": {"max": "z"}})
assert str(r.value) == "<RuleError: error code 4: Value: 'z' for 'max' keyword is not a number: Path: '/'>"
assert r.value.error_key == 'range.max.not_number'
# this tests that min is bigger then max that should not be possible
with pytest.raises(RuleError) as r:
Rule(schema={"type": "int", "range": {"max": 10, "min": 11}})
assert str(r.value) == "<RuleError: error code 4: Value for 'max' can't be less then value for 'min'. 10 < 11: Path: '/'>"
assert r.value.error_key == 'range.max_lt_min'
# test that min-ex is bigger then max-ex, that should not be possible
with pytest.raises(RuleError) as r:
Rule(schema={"type": "int", "range": {"max-ex": 10, "min-ex": 11}})
assert str(r.value) == "<RuleError: error code 4: Value for 'max-ex' can't be less then value for 'min-ex'. 10 <= 11: Path: '/'>"
assert r.value.error_key == 'range.max-ex_le_min-ex'
# test that a string has non negative boundaries
with pytest.raises(RuleError) as r:
Rule(schema={"type": "str", "range": {"max": -1, "min": -2}})
assert str(r.value) == "<RuleError: error code 4: Value for 'min' can't be negative in case of type str.: Path: '/'>"
assert r.value.error_key == 'range.min_negative'
# test that a seq has non negative boundaries
with pytest.raises(RuleError) as r:
Rule(schema={"type": "seq", "range": {"max": 3, "min": -2}})
assert str(r.value) == "<RuleError: error code 4: Value for 'min' can't be negative in case of type seq.: Path: '/'>"
assert r.value.error_key == 'range.min_negative'
def test_ident_value(self):
pass
def test_unique_value(self):
# this tests that this cannot be used in the root level
with pytest.raises(RuleError) as r:
Rule(schema={"type": "str", "unique": True})
assert str(r.value) == "<RuleError: error code 4: Keyword 'unique' can't be on root level of schema: Path: '/'>"
assert r.value.error_key == 'unique.not_on_root_level'
# this tests that unique cannot be used at root level
with pytest.raises(RuleError) as r:
Rule(schema={"type": "seq", "unique": True})
assert str(r.value) == "<RuleError: error code 4: Type of the value: 'seq' for 'unique' keyword is not a scalar type: Path: '/'>"
assert r.value.error_key == 'unique.not_scalar'
def test_sequence(self):
# this tests seq type with a internal type of str
r = Rule(schema={"type": "seq", "sequence": [{"type": "str"}]})
assert r.type is not None, "rule not contain type var"
assert r.type == "seq", "type not 'seq'"
assert r.sequence is not None, "rule not contain sequence var"
assert isinstance(r.sequence, list), "rule is not a list"
# Test basic sequence rule
r = Rule(schema={"type": "seq", "sequence": [{"type": "str"}]})
assert r.type == "seq"
assert isinstance(r.sequence, list)
assert isinstance(r.sequence[0], Rule)
assert r.sequence[0].type == "str"
# Test sequence without explicit type
r = Rule(schema={"sequence": [{"type": "str"}]})
assert r.type == "seq"
assert isinstance(r.sequence, list)
assert isinstance(r.sequence[0], Rule)
assert r.sequence[0].type == "str"
# Test short name 'seq'
r = Rule(schema={"seq": [{"type": "str"}]})
assert r.type == "seq"
assert isinstance(r.sequence, list)
assert isinstance(r.sequence[0], Rule)
assert r.sequence[0].type == "str"
# Test error is raised when sequence key is missing
with pytest.raises(SchemaConflict) as ex:
Rule(schema={"type": "seq"})
assert str(ex.value) == "<SchemaConflict: error code 5: Type is sequence but no sequence alias found on same level: Path: '/'>"
# sequence and pattern can't be used at same time
with pytest.raises(SchemaConflict) as ex:
Rule(schema={"type": "seq", "sequence": [{"type": "str"}], "pattern": "..."})
assert str(ex.value) == "<SchemaConflict: error code 5: Sequence and pattern can't be on the same level in the schema: Path: '/'>"
def test_build_sequence_multiple_values(self):
"""
Test with multiple values.
"""
# Test basic sequence rule
r = Rule(schema={'type': 'seq', 'sequence': [{'type': 'str'}, {'type': 'int'}]})
assert r.type == "seq"
assert r.matching == "any"
assert len(r.sequence) == 2
assert isinstance(r.sequence, list)
assert all(isinstance(r.sequence[i], Rule) for i in range(len(r.sequence)))
assert r.sequence[0].type == "str"
assert r.sequence[1].type == "int"
# Test sequence without explicit type
r = Rule(schema={'sequence': [{'type': 'str'}, {'type': 'int'}]})
assert r.type == "seq"
assert r.matching == "any"
assert len(r.sequence) == 2
assert isinstance(r.sequence, list)
assert all(isinstance(r.sequence[i], Rule) for i in range(len(r.sequence)))
assert r.sequence[0].type == "str"
assert r.sequence[1].type == "int"
# Test adding matchin rules
def test_mapping(self):
# This tests mapping with a nested type and pattern
r = Rule(schema={"type": "map", "mapping": {"name": {"type": "str", "pattern": ".+@.+"}}})
assert r.type == "map", "rule type is not map"
assert isinstance(r.mapping, dict), "mapping is not dict"
assert r.mapping["name"].type == "str", "nested mapping is not of string type"
assert r.mapping["name"].pattern is not None, "nested mapping has no pattern var set"
assert r.mapping["name"].pattern == ".+@.+", "pattern is not set to correct value"
# when type is specefied, 'mapping' key must be present
with pytest.raises(SchemaConflict) as ex:
Rule(schema={"type": "map"})
assert str(ex.value) == "<SchemaConflict: error code 5: Type is mapping but no mapping alias found on same level: Path: '/'>"
# 'map' and 'enum' can't be used at same time
# TODO: This do not work because it currently raises RuleError: <RuleError: error code 4: enum.notscalar>
# with pytest.raises(SchemaConflict):
# r = Rule(schema={"type": "map", "enum": [1, 2, 3]})
# Test that 'map' and 'mapping' can't be at the same level
with pytest.raises(RuleError) as r:
Rule(schema={"map": {"stream": {"type": "any"}}, "mapping": {"seams": {"type": "any"}}})
assert str(r.value) == "<RuleError: error code 4: Keywords 'map' and 'mapping' can't be used on the same level: Path: '/'>"
assert r.value.error_key == 'mapping.duplicate_keywords'
# This will test that a invalid regex will throw error when parsing rules
with pytest.raises(RuleError) as r:
Rule(schema={"type": "map", "matching-rule": "any", "mapping": {"regex;(+)": {"type": "seq", "sequence": [{"type": "str"}]}}})
assert str(r.value) == "<RuleError: error code 4: Unable to compile regex '(+)': Path: '/'>"
assert r.value.error_key == 'mapping.regex.compile_error'
# this tests map/dict but with no elements
with pytest.raises(RuleError) as r:
Rule(schema={"type": "map", "mapping": {}})
assert str(r.value) == "<RuleError: error code 4: Mapping do not contain any elements: Path: '/'>"
assert r.value.error_key == 'mapping.no_elements'
# Test that regex with missing parentheses are correctly detected.
with pytest.raises(RuleError) as r:
Rule(schema={"type": "map", "matching-rule": "any", "mapping": {"regex;[a-z]": {"type": "seq", "sequence": [{"type": "str"}]}}})
assert str(r.value) == "<RuleError: Regex '[a-z]' should start and end with parentheses: Path: '/'>"
assert r.value.error_key == 'mapping.regex.missing_parentheses'
def test_default_value(self):
pass
def test_check_conflicts(self):
# TODO: This do not work and enum schema conflict is not raised... RuleError: <RuleError: error code 4: enum.notscalar>
# with pytest.raises(SchemaConflict) as ex:
# r = Rule(schema={"type": "seq", "sequence": [{"type": "str"}], "enum": [1, 2, 3]})
# assert ex.value.msg.startswith("seq.conflict :: enum"), "Wrong exception was raised"
# Test sequence and mapping can't be used at same level
with pytest.raises(SchemaConflict) as ex:
Rule(schema={"type": "seq", "sequence": [{"type": "str"}], "mapping": {"name": {"type": "str", "pattern": ".+@.+"}}})
assert str(ex.value) == "<SchemaConflict: error code 5: Sequence and mapping can't be on the same level in the schema: Path: '/'>"
assert ex.value.error_key == 'seq.conflict.mapping'
# Mapping and sequence can't used at same time
with pytest.raises(SchemaConflict) as ex:
Rule(schema={"type": "map", "mapping": {"foo": {"type": "str"}}, "sequence": [{"type": "str"}]})
assert str(ex.value) == "<SchemaConflict: error code 5: Mapping and sequence can't be on the same level in the schema: Path: '/'>"
assert ex.value.error_key == 'map.conflict.sequence'
# scalar type and sequence can't be used at same time
with pytest.raises(SchemaConflict) as ex:
Rule(schema={"type": "int", "sequence": [{"type": "str"}]})
assert str(ex.value) == "<SchemaConflict: error code 5: Scalar and sequence can't be on the same level in the schema: Path: '/'>"
assert ex.value.error_key == 'scalar.conflict.sequence'
# scalar type and mapping can't be used at same time
with pytest.raises(SchemaConflict) as ex:
Rule(schema={"type": "int", "mapping": {"foo": {"type": "str"}}})
assert str(ex.value) == "<SchemaConflict: error code 5: Scalar and mapping can't be on the same level in the schema: Path: '/'>"
assert ex.value.error_key == 'scalar.conflict.mapping'
# scalar type and enum can't be used at same time
with pytest.raises(SchemaConflict) as ex:
Rule(schema={"type": "int", "enum": [1, 2, 3], "range": {"max": 10, "min": 1}})
assert str(ex.value) == "<SchemaConflict: error code 5: Enum and range can't be on the same level in the schema: Path: '/'>"
assert ex.value.error_key == 'enum.conflict.range'
|
994,306 | b7a3c5ae7828c88f98b603d366552dd418866934 | """ views init """
from flask import Blueprint
from models.user import User
from models.city import City
from models.state import State
from models.review import Review
app_views = Blueprint('app_views', __name__, url_prefix='/api/v1')
"""Wildcard import views"""
from api.v1.views.index import *
from api.v1.views.users import *
from api.v1.views.states import *
from api.v1.views.cities import *
from api.v1.views.places import *
from api.v1.views.amenities import *
from api.v1.views.places_reviews import *
|
994,307 | 8c844468ed2bcb14ca9d175073bcdc999b146550 | # b=0
# c=0
# while b<=9:
# b+=1
# c+=int(input())
# print(c)
# res = 0
# for i in range(10):
# res += int(input())
# print(res)
# a_list = []
# for num in range(10):
# list_num = int(input("Enter a number:"))
# a_list.append(list_num)
# print(sum(a_list))
# ===================================================================
# a=int(input())
# b=0
# c=0
# while b<=a-1:
# b+=1
# c+=int(input())
# print(c)
# n = int(input())
# res = 0
# for i in range(n):
# res += int(input())
# print(res)
# =====================================================================
# factorial
# a=int(input())
# b=1
# for i in range(1,a+1):
# b*=i
# print(b)
# ====================================================================
# a=int(input())
# d=0
# c=0
# for i in range(1,a+1):
# b=int(input())
# if b>0 or 0>b:
# c+=1
# else:
# d+=1
# print(d)
# num_zeroes = 0
# for i in range(int(input())):
# if int(input()) == 0:
# num_zeroes += 1
# print(num_zeroes)
# ==================== SUM of Factorial =================
# a=1
# b=0
# for i in range(1,int(input())+1):
# a*=i
# b+=a
# print(b)
# ============================================================================================================
# a=""
# for i in range (1,int(input())+1):
# a+=str(i)
# print(a)
# n = int(input())
# for i in range(1, n + 1):
# for j in range(1, i + 1):
# print(j, sep='', end='')
# print()
# 1
# 12
# 123
# 1234
# 12345
# =============================================================================================================
# ======================================Lost card
# n=int(input())
# sum_cards = 0
# for i in range(1, n + 1):
# sum_cards += i
# for i in range(n - 1):
# sum_cards-= int(input())
# print(sum_cards)
# ========================whil |
994,308 | 8b3374a5bb1e564384049c517df1b84ae302d221 | # -*- coding: utf-8 -*-
"""
Created on Tue Jan 12 14:44:08 2016
@author: Jonater
"""
import math
import random
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import matplotlib as mpl
import sklearn.preprocessing as preprocessing
from sklearn.datasets import make_classification
from pandas import DataFrame
from sklearn.linear_model import LogisticRegression, SGDClassifier
from plot_learning_curve import plot_learning_curve
mpl.rcParams['font.sans-serif'] = ['SimHei'] #指定默认字体
mpl.rcParams['axes.unicode_minus'] = False #解决保存图像是负号'-'显示为方块的问题
features = []
labels = []
def parseTestData(fn):
with open(fn, 'rb') as inf:
ll = inf.readlines()
random.shuffle(ll, random.random)
for l in ll:
tmpd = eval(l.strip())
features.append(tmpd['feature'])
labels.append(tmpd['label'])
return np.array(features), np.array(labels)
def algomain(df):
scaler = preprocessing.StandardScaler()
#标准化
popTagsNum_scale_param = scaler.fit(df['popTagsNum'])
df['popTagsNum_scaled'] = scaler.fit_transform(df['popTagsNum'], popTagsNum_scale_param)
liNum_scale_param = scaler.fit(df['liNum'])
df['liNum_scaled'] = scaler.fit_transform(df['liNum'], liNum_scale_param)
codeFragNum_scale_param = scaler.fit(df['codeFragNum'])
df['codeFragNum_scaled'] = scaler.fit_transform(df['codeFragNum'], codeFragNum_scale_param)
bodyLen_scale_param = scaler.fit(df['bodyLength'])
df['bodyLen_scaled'] = scaler.fit_transform(df['bodyLength'], bodyLen_scale_param)
titleLen_scale_param = scaler.fit(df['titleLength'])
df['titleLen_scaled'] = scaler.fit_transform(df['titleLength'], titleLen_scale_param)
train_df = df[['class',
'codeFragNum_scaled', 'liNum_scaled', 'popTagsNum_scaled',
'startWithWh', 'endWithQ',
'bodyLen_scaled', 'titleLen_scaled']]
train_np = train_df.as_matrix()
tX = train_np[:, 1:]
ty = train_np[:, 0]
estm = SGDClassifier(loss='log', penalty='l1', alpha=0.015)
plot_learning_curve(estm, "LogisticRegression(L1), cv=10-fold",
tX, ty, ylim=(0.5, 1.0),
cv=10, train_sizes=np.linspace(.1, 1, 10))
estm.fit(tX, ty)
print pd.DataFrame({'columns': list(train_df.columns[1:]),
'coef': list(estm.coef_.T)})
def pltDataFrame(df):
fig, axes = plt.subplots(nrows=2, ncols=4)
fig.set(alpha=0.2)
ans1onli = df.liNum[df["class"] == 1].value_counts()
ans0onli = df.liNum[df["class"] == 0].value_counts()
DataFrame({u'回答':ans1onli,
u'未回答':ans0onli}) \
.plot(kind='bar', stacked=False,
ax=plt.subplot2grid((2,4),(1,0), colspan=2))
plt.title(u'按li数量看回答情况')
plt.ylabel(u'数量')
plt.xlabel(u'li数量')
# plt.subplot2grid((2,4),(0,2), colspan=2)
# df.avgTI[df["class"] == 0].plot(kind='kde')
# df.avgTI[df["class"] == 1].plot(kind='kde')
# plt.title(u'按平均流行Tag指数看回答情况')
# plt.ylabel(u'密度')
# plt.xlabel(u'平均流行tag指数')
# plt.legend((u'未回答', u'回答'), loc='best')
# ans0onTags = df.popTagsNum[df["class"] == 0].value_counts()
# ans1onTags = df.popTagsNum[df["class"] == 1].value_counts()
# DataFrame({u'回答':ans1onTags,
# u'未回答':ans0onTags}) \
# .plot(kind='bar', stacked=False,
# ax=plt.subplot2grid((2,4),(1,0), colspan=2))
# plt.title(u'按流行tag数量看回答情况')
ans0onWh = df.startWithWh[df["class"] == 0].value_counts()
ans1onWh = df.startWithWh[df["class"] == 1].value_counts()
DataFrame({u'回答':ans1onWh,
u'未回答':ans0onWh}) \
.plot(kind='bar', stacked=False,
ax=plt.subplot2grid((2,4),(1,2)))
plt.title(u'按标题是否以Wh开头看回答情况')
plt.ylabel(u'数量')
ans0onQ = df.endWithQ[df["class"] == 0].value_counts()
ans1onQ = df.endWithQ[df["class"] == 1].value_counts()
DataFrame({u'回答':ans1onQ,
u'未回答':ans0onQ}) \
.plot(kind='bar', stacked=False,
ax=plt.subplot2grid((2,4),(1,3)))
plt.title(u'按标题是否以Q结尾看回答情况')
plt.ylabel(u'数量')
plt.subplot2grid((2,4),(0,0), colspan=2)
df.bodyLength[df["class"] == 0].plot(kind='kde')
df.bodyLength[df["class"] == 1].plot(kind='kde')
plt.title(u'按内容长度看回答情况')
plt.ylabel(u'密度')
plt.xlabel(u'长度')
plt.legend((u'未回答', u'回答'), loc='best')
plt.subplot2grid((2,4),(0,2), colspan=2)
df.titleLength[df["class"] == 0].plot(kind='kde')
df.titleLength[df["class"] == 1].plot(kind='kde')
plt.title(u'按标题长度看回答情况')
plt.ylabel(u'密度')
plt.xlabel(u'长度')
plt.legend((u'未回答', u'回答'), loc='best')
plt.show()
if __name__ == '__main__':
X, y = parseTestData('testData.txt')
featureName= ['codeFragNum', 'liNum', 'popTagsNum',
'totalTI', 'avgTI','bodyLength', 'titleLength',
'startWithWh', 'endWithQ', 'isweekend',
'cntQ', 'cntA', 'aNum', 'strongNum', 'thxNum', 'dayHot']
df = DataFrame(np.hstack((X, y[:, None])), columns=featureName +["class"])
# pltDataFrame(df)
algomain(df)
|
994,309 | 802e0f9c7d3b66f770bc331734a3ed45fe7c3fcc | import sys
import getopt
import os
import tushare as ts
from datetime import datetime, timedelta
def get_dates(start, end):
datefmt = '%Y-%m-%d'
if end:
startdate = datetime.strptime(start, datefmt)
enddate = datetime.strptime(end, datefmt)
date = []
while enddate - startdate >= timedelta(days=0):
weekday = startdate.strftime('%a')
if weekday not in ['Sat', 'Sun']:
date.append(startdate.strftime(datefmt))
startdate += timedelta(days=1)
else:
return [start]
print(date)
return date
def get_tick(code, start, end=None, file=None):
date = get_dates(start, end)
if not file:
file = '-'.join([code, start.replace('-', ''), str(len(date))]) + r'.csv'
for d in date:
df = ts.get_tick_data(code, date=d)
df.to_csv(file, mode='a')
def main(argv):
try:
opts, args = getopt.getopt(argv, 'hc:s:e:o:', ['code=', 'start=', 'end=', 'Ofile='])
except getopt.GetoptError:
print('python gettickdata -c code -s startdate -e enddate -o outfile')
sys.exit(2)
code, startdate, enddate, ofile = None, None, None, None
for opt, arg in opts:
if opt == '-h':
print('python gettickdata -c code -s startdate -e enddate -o outfile')
sys.exit()
elif opt in ['-c', '--code']:
code = arg
elif opt in ['-s', '--start']:
startdate = arg
elif opt in ['-e', '--end']:
enddate = arg
elif opt in ['-o', '--Ofile']:
ofile = arg
else:
print('python gettickdata -c code -s startdate -e enddate -o outfile')
sys.exit()
print(code, startdate, enddate, ofile)
get_tick(code, startdate, enddate, file=ofile)
if __name__ == '__main__':
main(sys.argv[1:]) |
994,310 | bca5c0e15344c7f70a688aa0121fdc69d32fcfb6 | import json
from time import time
import aiohttp_jinja2
from aiohttp_session import get_session
from aiohttp import web
from auth.services import check_user_auth
from auth.models import Token
def redirect(request, router_name):
url = request.app.router[router_name].url()
raise web.HTTPFound(url)
def set_session(session, user_id, request):
session['user'] = str(user_id)
session['last_visit'] = time()
return session
def convert_json(message):
return json.dumps(message)
class AuthTokenView(web.View):
"""
View accept token required information
and return new or previously created auth token
"""
async def post(self):
data = await self.request.post()
if data.get('username') and data.get('password'):
user = await check_user_auth(db=self.request.db,
email=data.get('username'),
password=data.get('password'))
if not user:
return web.json_response(content_type='application/json',
text=convert_json({'login': 'false'}),
status=401)
token = await Token(db=self.request.db, data={'user_id': user['_id']}).get_or_create()
return web.json_response(content_type='application/json', text=convert_json({'token': token['key']}))
class Login(web.View):
@aiohttp_jinja2.template('auth/login.html')
async def get(self):
session = await get_session(self.request)
if session.get('user'):
redirect(self.request, 'home')
return {'conten': 'Please enter login or email'}
async def post(self):
data = await self.request.post()
user = await check_user_auth(db=self.request.db,
email=data.get('username'),
password=data.get('password'))
if isinstance(user, dict):
session = await get_session(self.request)
set_session(session, str(user['_id']), self.request)
token = await Token(db=self.request.db, data={'user_id': user['_id']}).get_or_create()
return web.json_response(content_type='application/json',
data=convert_json({'login': 'true',
'token': token['key']}),
status=200)
else:
return web.json_response(content_type='application/json',
text=convert_json({'login': 'false'}),
status=401)
|
994,311 | 637151bcb128adb9431106dc95a54ea290d87897 | class Solution:
def uniquePaths(self, m: int, n: int) -> int:
if m == n == 0: return 0
# DP 算法,记忆表初始化
table = [[None for _ in range(n)] for _ in range(m)]
# 赋初值
for i in range(n):
table[-1][i] = 1
for i in range(m):
table[i][-1] = 1
# 递推
for row in range(m-2, -1, -1):
for col in range(n-2, -1, -1):
table[row][col] = table[row+1][col] + table[row][col+1]
# 结果
return table[0][0] |
994,312 | ee7de9520745d4619a249fcad126b3262b6c6064 | # Generated by Django 2.1 on 2019-09-12 22:12
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: make ``Profesional.Foto`` optional.

    The image field becomes nullable/blankable and uploads go to ``fotos/``.
    """
    dependencies = [
        ('apps', '0006_auto_20190912_1703'),
    ]
    operations = [
        migrations.AlterField(
            model_name='profesional',
            name='Foto',
            field=models.ImageField(blank=True, null=True, upload_to='fotos/', verbose_name='Cargar imagen'),
        ),
    ]
|
994,313 | 847151e5af6e982577155e2af6c140942f4938ae | __author__ = 'spersinger'
from .generator import Generator
from .verifier import Verifier
def generate(secret = None, message="", iv=None, now=None):
    """Public: generates a fernet token.

    Returns the fernet token as a string.
    :param secret: key used to sign/encrypt the token
    :param message: payload to encode into the token
    :param iv: optional initialization vector (Generator picks one if None)
    :param now: optional timestamp override (Generator uses current time if None)
    """
    return Generator(secret=secret, message=message, iv=iv, now=now).generate()
def verifier(secret, token, enforce_ttl=None, ttl=None, now=None):
    """Build a :class:`Verifier` for *token* signed with *secret*.

    TTL enforcement and the reference time can be overridden through
    ``enforce_ttl``, ``ttl`` and ``now``.
    """
    options = dict(secret=secret, token=token, enforce_ttl=enforce_ttl,
                   ttl=ttl, now=now)
    return Verifier(**options)
994,314 | dd3338b6f1a1e2a66bbe719a3510a1e787e69482 | import ssa
import numpy as np
import matplotlib.pyplot as plt
def run():
    """Simulate the Michaelis-Menten system and save a plot of 20 repetitions."""
    # Reaction stoichiometry; species order is [S, E, SE, P].
    reactants = np.array([[1, 1, 0, 0], [0, 0, 1, 0], [0, 0, 1, 0]])
    products = np.array([[0, 0, 1, 0], [1, 1, 0, 0], [0, 1, 0, 1]])
    volume = 1e-15
    # Initial state: molar concentrations converted to molecule counts.
    x0 = ssa.util.molar_concentration_to_molecule_number(
        np.array([5e-7, 2e-7, 0, 0]), volume=volume)
    # Deterministic rate constants converted to their stochastic equivalents.
    k = ssa.util.k_det_to_k_stoch(
        np.array([1e6, 1e-4, 0.1]), reactants=reactants, volume=volume)
    print(x0, k)
    t_max = 50
    model = ssa.Model(reactants=reactants, products=products, x0=x0, k=k, t_max=t_max)
    result = model.simulate(n_reps=20)
    ssa.plot(result, x_names=["S", "E", "SE", "P"], show=False)
    plt.savefig("michaelis_menten.png")
run()
|
994,315 | feb04c23e7dcfd6a6e0db8896782401672bba551 | # Generated by Django 3.2.6 on 2021-08-30 14:55
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: rename survey fields for clarity.

    ``Question.text`` -> ``question_name`` and ``Survey.name`` -> ``survey_name``.
    """
    dependencies = [
        ('ap', '0001_initial'),
    ]
    operations = [
        migrations.RenameField(
            model_name='question',
            old_name='text',
            new_name='question_name',
        ),
        migrations.RenameField(
            model_name='survey',
            old_name='name',
            new_name='survey_name',
        ),
    ]
|
994,316 | 3911c42dfabb00a8f7a6c70157b9357051bb7cda | #!/usr/bin/env python3
import sys
S = input().strip()
# Count the positions where S deviates from the repeating pattern "SOS":
# index i should hold "SOS"[i % 3] ('S', 'O', 'S', 'S', 'O', 'S', ...).
cnt = sum(1 for i, ch in enumerate(S) if ch != "SOS"[i % 3])
print(cnt)
|
994,317 | 7f9504e874c9f65e7d1981dab80f43455d9bde01 | import maya.cmds as m
from peel_solve import locator
def load():
    """Load the peelsolve debug plug-in from its hard-coded build path."""
    plugin_path = r'E:/git/amazon/peelsolve/build_2020/peelsolve2020/Debug/peelsolve2020d.mll'
    m.loadPlugin(plugin_path)
def unload():
    """Unload the peelsolve debug plug-in by name."""
    m.unloadPlugin("peelsolve2020d.mll")
def simple():
    """Build a test scene: four markers wired to a single root joint."""
    m.file(f=True, new=True)
    # Create the four marker locators, then position them around the origin.
    m1 = m.spaceLocator(name="m1")[0]
    m2 = m.spaceLocator(name="m2")[0]
    m3 = m.spaceLocator(name="m3")[0]
    m4 = m.spaceLocator(name="m4")[0]
    for marker, (tx, ty) in zip((m1, m2, m3, m4),
                                ((3, 3), (-4, 4), (4, -4), (-4, -4))):
        m.setAttr(marker + ".tx", tx)
        m.setAttr(marker + ".ty", ty)
    m.select(cl=True)
    root = m.joint(name="root")
    # Connect every marker to the root; the first uses attr_type 5.
    locator.line(m1, root, attr_type=5)
    locator.line(m2, root)
    locator.line(m3, root)
    locator.line(m4, root)
    # Displace the root so the solver has something to pull back.
    for attr, value in (("tx", 1), ("ty", 1), ("tz", 1),
                        ("rx", 45), ("ry", 40), ("rz", 45)):
        m.setAttr(root + "." + attr, value)
    # Move the first marker to a new target position.
    m.setAttr(m1 + ".tx", 4)
    m.setAttr(m1 + ".ty", 4)
    #m.peelSolve(s=root)
def lendof():
    """Build a two-joint chain with one marker per joint (end-effector test)."""
    m.file(f=True, new=True)
    j1 = m.joint(name="j1")
    j2 = m.joint(name="j2")
    m.setAttr(j2 + ".tx", 4)
    m1 = m.spaceLocator(name="m1")[0]
    m2 = m.spaceLocator(name="m2")[0]
    # Bug fix: the original referenced an undefined name `l2` (NameError);
    # the marker for the end joint is `m2`, placed at the joint's rest pose.
    m.setAttr(m2 + ".tx", 4)
    locator.line(m1, j1, 3)
    locator.line(m2, j2, 3)
    # Pull the end marker away so the chain has to stretch toward it.
    m.setAttr(m2 + ".tx", 6)
|
994,318 | 3ebf870d7467d779e37ef20acccd145b3547add8 | class Solution:
def getDistances(self, arr: List[int]) -> List[int]:
prefix = [0] * len(arr)
suffix = [0] * len(arr)
numToIndices = collections.defaultdict(list)
for i, a in enumerate(arr):
numToIndices[a].append(i)
for indices in numToIndices.values():
for i in range(1, len(indices)):
currIndex = indices[i]
prevIndex = indices[i - 1]
prefix[currIndex] += prefix[prevIndex] + i * (currIndex - prevIndex)
for i in range(len(indices) - 2, -1, -1):
currIndex = indices[i]
prevIndex = indices[i + 1]
suffix[currIndex] += suffix[prevIndex] + \
(len(indices) - i - 1) * (prevIndex - currIndex)
return [p + s for p, s in zip(prefix, suffix)]
|
994,319 | fdfb2ad3406966db586f8c493d4e7cca0dcd837e | # Immobile constructions in a level- walls, doors, windows etc.
# can take damage and be destroyed or rebuilt. Goes on top of floors, networks, and terrain.
# Goes under entities but can be impassible.
from src.world.tiles import Tile
class Structure:
    """An immobile construction (wall, door, window, ...) occupying a map cell."""

    def __init__(self, name, impassible=False, opaque=False, tile=None):
        """Create a structure.

        :param name: display name of the structure.
        :param impassible: blocks movement when True.
        :param opaque: blocks line of sight when True.
        :param tile: visual tile; defaults to a fresh ``Tile()`` per instance.
            Bug fix: the original used the mutable default ``tile=Tile()``,
            which shared one Tile object across every Structure created
            without an explicit tile.
        """
        self.name = name
        self.impassible = impassible
        self.opaque = opaque
        self.tile = tile if tile is not None else Tile()
|
994,320 | 938e0165a6e0a5affa1e29606a5cd23f9c77d2cf | import datetime
import os.path
import warnings
from collections import OrderedDict
import time
import copy
import requests
import gevent
from matrx.actions.object_actions import *
from matrx.logger.logger import GridWorldLogger
from matrx.objects.env_object import EnvObject
from matrx.objects.simple_objects import AreaTile
from matrx.utils.utils import get_all_classes
from matrx.utils.message_manager import MessageManager
from matrx.API import api
from matrx.agents.agent_brain import AgentBrain
class GridWorld:
    """Discrete grid simulation: owns all agents/objects and runs the tick loop."""
    def __init__(self, shape, tick_duration, simulation_goal, rnd_seed=1,
                 visualization_bg_clr="#C2C2C2", visualization_bg_img=None, verbose=False, world_ID=False):
        """Set up an empty world.

        :param shape: (width, height) of the grid.
        :param tick_duration: target wall-clock seconds per tick.
        :param simulation_goal: goal object, or list of goals, that ends the run.
        :param rnd_seed: seed for this world's private random state.
        :param visualization_bg_clr: background color of the visualisation.
        :param visualization_bg_img: background image of the visualisation.
        :param verbose: print progress information when True.
        :param world_ID: identifier of this simulation world.
        """
        self.__tick_duration = tick_duration  # How long each tick should take (process sleeps until that time is passed)
        self.__simulation_goal = simulation_goal  # The simulation goal, the simulation end when this/these are reached
        self.__shape = shape  # The width and height of the GridWorld
        self.__visualization_bg_clr = visualization_bg_clr  # The background color of the visualisation
        self.__visualization_bg_img = visualization_bg_img  # The background image of the visualisation
        self.__verbose = verbose  # Set whether we should print anything or not
        self.world_ID = world_ID  # ID of this simulation world
        self.__teams = {}  # dictionary with team names (keys), and agents in those teams (values)
        self.__registered_agents = OrderedDict()  # The dictionary of all existing agents in the GridWorld
        self.__environment_objects = OrderedDict()  # The dictionary of all existing objects in the GridWorld
        # Get all actions within all currently imported files
        self.__all_actions = get_all_classes(Action, omit_super_class=True)
        # Initialise an empty grid, a simple 2D array with ID's
        self.__grid = np.array([[None for _ in range(shape[0])] for _ in range(shape[1])])
        self.__loggers = []  # a list of GridWorldLogger use to log the data
        self.__is_done = False  # Whether the simulation is done (goal(s) reached)
        self.__rnd_seed = rnd_seed  # The random seed of this GridWorld
        self.__rnd_gen = np.random.RandomState(seed=self.__rnd_seed)  # The random state of this GridWorld
        self.__curr_tick_duration = 0.  # Duration of the current tick
        self.__current_nr_ticks = 0  # The number of tick this GridWorld has ran already
        self.__is_initialized = False  # Whether this GridWorld is already initialized
        self.__message_buffer = {}  # dictionary of messages that need to be send to agents, with receiver ids as keys
        self.message_manager = MessageManager()  # keeps track of all messages and makes them available to the API
    def initialize(self, api_info):
        """One-time setup: fill the grid, init agent brains and prime the API.

        :param api_info: dict with at least ``run_matrx_api`` (bool).
        """
        # Only initialize when we did not already do so
        if not self.__is_initialized:
            # We update the grid, which fills everything with added objects and agents
            self.__update_grid()
            for agent_body in self.__registered_agents.values():
                agent_body.brain_initialize_func()
            # set the API variables
            self.api_info = api_info
            self.__run_matrx_api = self.api_info['run_matrx_api']
            if self.__run_matrx_api:
                # initialize this world in the API
                api.reset_api()
                api.tick_duration = self.__tick_duration
                api.register_world(self.world_ID)
                api.current_tick = self.__current_nr_ticks
                api.grid_size = self.shape
                # point the API towards our message manager, for making messages available via the API
                api.gw_message_manager = self.message_manager
                api.teams = self.__teams
                # init API with world info
                api.MATRX_info = {
                    "nr_ticks": self.__current_nr_ticks,
                    "curr_tick_timestamp": int(round(time.time() * 1000)),
                    "grid_shape": self.__shape,
                    "tick_duration": self.tick_duration,
                    "world_ID": self.world_ID,
                    "vis_settings": {
                        "vis_bg_clr": self.__visualization_bg_clr,
                        "vis_bg_img": self.__visualization_bg_img
                    }
                }
                # start paused
                api.matrx_paused = True
            # fetch the initial state of every agent to display
            self.fetch_initial_states()
            # Set initialisation boolean
            self.__is_initialized = True
            if self.__verbose:
                print(f"@{os.path.basename(__file__)}: Initialized the GridWorld.")
    def fetch_initial_states(self):
        """ MATRX starts paused by default, to prime the API and any connected GUI's, we fetch the first state
        from all agents to send which can be shown while waiting for the experiment leader to press play.
        """
        for agent_id, agent_obj in self.__registered_agents.items():
            # given the agent's capabilities, get everything the agent can perceive
            state = self.__get_agent_state(agent_obj)
            # filter other things from the agent state
            filtered_agent_state = agent_obj.filter_observations(state)
            # save the current agent's state for the API
            api.add_state(agent_id=agent_id, state=filtered_agent_state,
                          agent_inheritence_chain=agent_obj.class_inheritance,
                          world_settings=api.MATRX_info)
        # add god state (the unfiltered, complete world view)
        api.add_state(agent_id="god", state=self.__get_complete_state(), agent_inheritence_chain="god",
                      world_settings=api.MATRX_info)
        # initialize the message manager with all known agents and teams
        self.message_manager.agents = self.__registered_agents.keys()
        self.message_manager.teams = self.__teams
        # make the information of this tick available via the API, after all
        # agents have been updated
        api.next_tick()
    def run(self, api_info):
        """Main loop: initialize, then step until a goal is reached or the
        API stops/pauses the scenario.

        :param api_info: dict with at least ``run_matrx_api`` (bool).
        """
        # initialize the gridworld
        self.initialize(api_info)
        if self.__verbose:
            print(f"@{os.path.basename(__file__)}: Starting game loop...")
        is_done = False
        while not is_done:
            if self.__run_matrx_api and api.matrx_paused:
                print("MATRX paused through API")
                gevent.sleep(1)  # busy-wait (1s poll) while paused
            else:
                is_done, tick_duration = self.__step()
            if self.__run_matrx_api and api.matrx_done:
                print("Scenario stopped through API")
                break
def get_env_object(self, requested_id, obj_type=None):
obj = None
if requested_id in self.__registered_agents.keys():
if obj_type is not None:
if isinstance(self.__registered_agents[requested_id], obj_type):
obj = self.__registered_agents[requested_id]
else:
obj = self.__registered_agents[requested_id]
if requested_id in self.__environment_objects.keys():
if obj_type is not None:
if isinstance(self.__environment_objects[requested_id], obj_type):
obj = self.__environment_objects[requested_id]
else:
obj = self.__environment_objects[requested_id]
return obj
    def get_objects_in_range(self, agent_loc, object_type, sense_range):
        """
        Get all objects of a obj type (normal objects or agent) within a
        certain range around the agent's location

        :param agent_loc: (x, y) location to measure distances from.
        :param object_type: class to match, "*"/None for any.
        :param sense_range: maximum distance (inclusive) to include.
        :return: OrderedDict of id -> object for everything in range.
        """
        env_objs = OrderedDict()
        # loop through all environment objects
        for obj_id, env_obj in self.__environment_objects.items():
            # get the distance from the agent location to the object
            # (get_distance comes from the wildcard object_actions import)
            coordinates = env_obj.location
            distance = get_distance(coordinates, agent_loc)
            # check if the env object is of the specified type, and within range
            if (object_type is None or object_type == "*" or isinstance(env_obj, object_type)) and \
                    distance <= sense_range:
                env_objs[obj_id] = env_obj
        # agents are also environment objects, but stored separably. Also check them.
        for agent_id, agent_obj in self.__registered_agents.items():
            coordinates = agent_obj.location
            distance = get_distance(coordinates, agent_loc)
            # check if the env object is of the specified type, and within range
            if (object_type is None or object_type == "*" or isinstance(agent_obj, object_type)) and \
                    distance <= sense_range:
                env_objs[agent_id] = agent_obj
        return env_objs
def remove_from_grid(self, object_id, remove_from_carrier=True):
"""
Remove an object from the grid
:param object_id: ID of the object to remove
:param remove_from_carrier: whether to also remove from agents which carry the
object or not.
"""
# Remove object first from grid
grid_obj = self.get_env_object(object_id) # get the object
loc = grid_obj.location # its location
self.__grid[loc[1], loc[0]].remove(grid_obj.obj_id) # remove the object id from the list at that location
if len(self.__grid[loc[1], loc[0]]) == 0: # if the list is empty, just add None there
self.__grid[loc[1], loc[0]] = None
# Remove object from the list of registered agents or environmental objects
# Check if it is an agent
if object_id in self.__registered_agents.keys():
# Check if the agent was carrying something, if so remove property from carried item
for obj_id in self.__registered_agents[object_id].is_carrying:
self.__environment_objects[obj_id].carried_by.remove(object_id)
# Remove agent
success = self.__registered_agents.pop(object_id,
default=False) # if it exists, we get it otherwise False
# Else, check if it is an object
elif object_id in self.__environment_objects.keys():
# remove from any agents carrying this object if asked for
if remove_from_carrier:
# If the object was carried, remove this from the agent properties as well
for agent_id in self.__environment_objects[object_id].carried_by:
obj = self.__environment_objects[object_id]
self.__registered_agents[agent_id].is_carrying.remove(obj)
# Remove object
success = self.__environment_objects.pop(object_id,
default=False) # if it exists, we get it otherwise False
else:
success = False # Object type not specified
if success is not False: # if succes is not false, we successfully removed the object from the grid
success = True
if self.__verbose:
if success:
print(f"@{os.path.basename(__file__)}: Succeeded in removing object with ID {object_id}")
else:
print(f"@{os.path.basename(__file__)}: Failed to remove object with ID {object_id}.")
return success
def add_to_grid(self, grid_obj):
if isinstance(grid_obj, EnvObject):
loc = grid_obj.location
if self.__grid[loc[1], loc[0]] is not None:
self.__grid[loc[1], loc[0]].append(grid_obj.obj_id)
else:
self.__grid[loc[1], loc[0]] = [grid_obj.obj_id]
else:
loc = grid_obj.location
if self.__grid[loc[1], loc[0]] is not None:
self.__grid[loc[1], loc[0]].append(grid_obj.obj_id)
else:
self.__grid[loc[1], loc[0]] = [grid_obj.obj_id]
    def _register_agent(self, agent, agent_avatar: AgentBody):
        """ Register human agents and agents to the gridworld environment

        Validates the avatar's placement, stores it, then hands the brain
        its world-facing callbacks via ``_factory_initialise``.
        :return: the new agent's object id.
        """
        # Random seed for agent between 1 and 10000000, might need to be adjusted still
        agent_seed = self.__rnd_gen.randint(1, 1000000)
        # check if the agent can be succesfully placed at that location
        self.__validate_obj_placement(agent_avatar)
        # Add agent to registered agents
        self.__registered_agents[agent_avatar.obj_id] = agent_avatar
        if self.__verbose:
            print(f"@{os.path.basename(__file__)}: Created agent with id {agent_avatar.obj_id}.")
        # Get all properties from the agent avatar
        avatar_props = agent_avatar.properties
        if agent_avatar.is_human_agent is False:
            agent._factory_initialise(agent_name=agent_avatar.obj_name,
                                      agent_id=agent_avatar.obj_id,
                                      action_set=agent_avatar.action_set,
                                      sense_capability=agent_avatar.sense_capability,
                                      agent_properties=avatar_props,
                                      customizable_properties=agent_avatar.customizable_properties,
                                      callback_is_action_possible=self.__check_action_is_possible,
                                      rnd_seed=agent_seed)
        else:  # if the agent is a human agent, we also assign its user input action map
            agent._factory_initialise(agent_name=agent_avatar.obj_name,
                                      agent_id=agent_avatar.obj_id,
                                      action_set=agent_avatar.action_set,
                                      sense_capability=agent_avatar.sense_capability,
                                      agent_properties=avatar_props,
                                      customizable_properties=agent_avatar.customizable_properties,
                                      callback_is_action_possible=self.__check_action_is_possible,
                                      rnd_seed=agent_seed,
                                      key_action_map=agent_avatar.properties["key_action_map"])
        return agent_avatar.obj_id
def _register_env_object(self, env_object: EnvObject):
""" this function adds the objects """
# check if the object can be succesfully placed at that location
self.__validate_obj_placement(env_object)
# Assign id to environment sparse dictionary grid
self.__environment_objects[env_object.obj_id] = env_object
if self.__verbose:
print(f"@{__file__}: Created an environment object with id {env_object.obj_id}.")
return env_object.obj_id
def _register_teams(self):
""" Register all teams and who is in those teams.
An agent is always in a team, if not set by the user, a team is created with name 'agent_id' with only that
agent in it.
"""
# loop through all agents
for agent_id, agent_body in self.registered_agents.items():
# find their team name
team = agent_body.properties['team']
# register the team (if not already done) and the agent in it
if team not in self.__teams:
self.__teams[team] = []
self.__teams[team].append(agent_id)
def _register_logger(self, logger: GridWorldLogger):
if self.__loggers is None:
self.__loggers = [logger]
else:
self.__loggers.append(logger)
    def __validate_obj_placement(self, env_object):
        """
        Checks whether an object can be successfully placed on the grid

        :raises Exception: when *env_object* is intraversable and its target
            cell already holds an intraversable object (AreaTiles excluded).
        """
        obj_loc = env_object.location
        # get the objects at the target object location
        objs_at_loc = self.get_objects_in_range(obj_loc, "*", 0)
        # filter out areaTiles, which don't count
        for key in list(objs_at_loc.keys()):
            if AreaTile.__name__ in objs_at_loc[key].class_inheritance:
                objs_at_loc.pop(key)
        # check how many of these objects are intraversable
        intraversable_objs = []
        for obj in objs_at_loc:
            if not objs_at_loc[obj].is_traversable:
                intraversable_objs.append(objs_at_loc[obj].obj_id)
        # two intraversable objects can't be at the same location
        if not env_object.is_traversable and len(intraversable_objs) > 0:
            raise Exception(f"Invalid placement. Could not place object {env_object.obj_id} in grid, location already "
                            f"occupied by intraversable object {intraversable_objs} at location {obj_loc}")
    def __step(self):
        """Advance the world one tick.

        Queries every agent for an action (respecting action durations),
        collects and routes messages, executes matured actions, updates the
        grid and object states, then sleeps off any remaining tick time.

        :return: (is_done, tick_duration_in_seconds)
        """
        # Set tick start of current tick
        start_time_current_tick = datetime.datetime.now()
        # Check if we are done based on our global goal assessment function
        self.__is_done, goal_status = self.__check_simulation_goal()
        # Log the data if we have any loggers
        for logger in self.__loggers:
            agent_data_dict = {}
            for agent_id, agent_body in self.__registered_agents.items():
                agent_data_dict[agent_id] = agent_body.get_log_data()
            logger._grid_world_log(grid_world=self, agent_data=agent_data_dict,
                                   last_tick=self.__is_done, goal_status=goal_status)
        # If this grid_world is done, we return immediately
        if self.__is_done:
            return self.__is_done, 0.
        # initialize a temporary dictionary in which all states of this tick
        # will be saved. After all agents have been updated, the new tick info
        # will be made accessible via the API.
        if self.__run_matrx_api:
            api.temp_state = {}
            # if this is the first tick, clear the placeholder states
            if self.__current_nr_ticks == 0:
                api.MATRX_info = {}
                api.next_tick_info = {}
        # Go over all agents, detect what each can detect, figure out what actions are possible and send these to
        # that agent. Then receive the action back and store the action in a buffer.
        # Also, update the local copy of the agent properties, and save the agent's state for the GUI.
        # Then go to the next agent.
        # This blocks until a response from the agent is received (hence a tick can take longer than self.tick_
        # duration!!)
        action_buffer = OrderedDict()
        for agent_id, agent_obj in self.__registered_agents.items():
            state = self.__get_agent_state(agent_obj)
            # check if this agent is busy performing an action , if so then also check if it as its last tick of waiting
            # because then we want to do that action. If not busy, call its get_action function.
            if agent_obj._check_agent_busy(curr_tick=self.__current_nr_ticks):
                # only do the filter observation method to be able to update the agent's state to the API
                filtered_agent_state = agent_obj.filter_observations(state)
                # save the current agent's state for the API
                if self.__run_matrx_api:
                    api.add_state(agent_id=agent_id, state=filtered_agent_state,
                                  agent_inheritence_chain=agent_obj.class_inheritance,
                                  world_settings=self.__get_complete_state()['World'])
            else:  # agent is not busy
                # Any received data from the API for this HumanAgent is send along to the get_action function
                if agent_obj.is_human_agent:
                    usrinp = None
                    if self.__run_matrx_api and agent_id in api.userinput:
                        usrinp = api.pop_userinput(agent_id)
                    filtered_agent_state, agent_properties, action_class_name, action_kwargs = \
                        agent_obj.get_action_func(state=state, agent_properties=agent_obj.properties, agent_id=agent_id,
                                                  userinput=usrinp)
                else:  # not a HumanAgent
                    # perform the agent's get_action method (goes through filter_observations and decide_on_action)
                    filtered_agent_state, agent_properties, action_class_name, action_kwargs = agent_obj.get_action_func(
                        state=state, agent_properties=agent_obj.properties, agent_id=agent_id)
                # the Agent (in the OODA loop) might have updated its properties, process these changes in the Avatar
                # Agent
                agent_obj._set_agent_changed_properties(agent_properties)
                # Set the agent to busy, we do this only here and not when the agent was already busy to prevent the
                # agent to perform an action with a duration indefinitely (and since all actions have a duration, that
                # would be killing...)
                self.__set_agent_busy(action_name=action_class_name, action_kwargs=action_kwargs, agent_id=agent_id)
                # Get all agents we have, as we need these to process all messages that are send to all agents
                all_agent_ids = self.__registered_agents.keys()
                # Obtain all communication messages if the agent has something to say to others (only comes here when
                # the agent is NOT busy)
                agent_messages = agent_obj.get_messages_func(all_agent_ids)
                # add any messages received from the API sent by this agent
                if self.__run_matrx_api:
                    if agent_id in api.received_messages:
                        agent_messages += copy.copy(api.received_messages[agent_id])
                        # clear the messages for the next tick
                        del api.received_messages[agent_id]
                # preprocess all messages of the current tick of this agent
                self.message_manager.preprocess_messages(self.__current_nr_ticks, agent_messages,
                                                         all_agent_ids, self.__teams)
                # save the current agent's state for the API
                if self.__run_matrx_api:
                    api.add_state(agent_id=agent_id, state=filtered_agent_state,
                                  agent_inheritence_chain=agent_obj.class_inheritance,
                                  world_settings=self.__get_complete_state()['World'])
            # if this agent is at its last tick of waiting on its action duration, we want to actually perform the
            # action
            if agent_obj._at_last_action_duration_tick(curr_tick=self.__current_nr_ticks):
                # Get the action and arguments
                action_class_name, action_kwargs = agent_obj._get_duration_action()
                # store the action in the buffer
                action_buffer[agent_id] = (action_class_name, action_kwargs)
        # put all messages of the current tick in the message buffer
        if self.__current_nr_ticks in self.message_manager.preprocessed_messages:
            for mssg in self.message_manager.preprocessed_messages[self.__current_nr_ticks]:
                if mssg.to_id not in self.__message_buffer.keys():  # first message for this receiver
                    self.__message_buffer[mssg.to_id] = [mssg]
                else:
                    self.__message_buffer[mssg.to_id].append(mssg)
        # save the god view state
        if self.__run_matrx_api:
            api.add_state(agent_id="god", state=self.__get_complete_state(), agent_inheritence_chain="god",
                          world_settings=self.__get_complete_state()['World'])
            # make the information of this tick available via the API, after all
            # agents have been updated
            api.next_tick()
            api.current_tick = self.__current_nr_ticks
            self.__tick_duration = api.tick_duration
            api.grid_size = self.shape
        # Perform the actions in the order of the action_buffer (which is filled in order of registered agents
        for agent_id, action in action_buffer.items():
            # Get the action class name
            action_class_name = action[0]
            # Get optional kwargs
            action_kwargs = action[1]
            if action_kwargs is None:  # If kwargs is none, make an empty dict out of it
                action_kwargs = {}
            # Actually perform the action (if possible), also sets the result in the agent's brain
            self.__perform_action(agent_id, action_class_name, action_kwargs)
            # Update the grid
            self.__update_grid()
        # Send all messages between agents
        for receiver_id, messages in self.__message_buffer.items():
            # check if the receiver exists
            if receiver_id in self.__registered_agents.keys():
                # Call the callback method that sets the messages
                self.__registered_agents[receiver_id].set_messages_func(messages)
        self.__message_buffer = {}
        # Perform the update method of all objects
        for env_obj in self.__environment_objects.values():
            env_obj.update(self)
        # Increment the number of tick we performed
        self.__current_nr_ticks += 1
        # Check how much time the tick lasted already
        tick_end_time = datetime.datetime.now()
        tick_duration = tick_end_time - start_time_current_tick
        self.sleep_duration = self.__tick_duration - tick_duration.total_seconds()
        # Sleep for the remaining time of self.__tick_duration
        self.__sleep()
        # Compute the total time of our tick (including potential sleep)
        tick_end_time = datetime.datetime.now()
        tick_duration = tick_end_time - start_time_current_tick
        self.__curr_tick_duration = tick_duration.total_seconds()
        if self.__verbose:
            print(
                f"@{os.path.basename(__file__)}: Tick {self.__current_nr_ticks} took {tick_duration.total_seconds()} seconds.")
        return self.__is_done, self.__curr_tick_duration
def __check_simulation_goal(self):
goal_status = {}
if self.__simulation_goal is not None:
if isinstance(self.__simulation_goal, list):
for sim_goal in self.__simulation_goal:
is_done = sim_goal.goal_reached(self)
goal_status[sim_goal] = is_done
else:
is_done = self.__simulation_goal.goal_reached(self)
goal_status[self.__simulation_goal] = is_done
is_done = np.array(list(goal_status.values())).all()
return is_done, goal_status
def __sleep(self):
"""
Sleeps the current python process for the amount of time that is left after self.curr_tick_duration up to
in self.__tick_duration
:return:
"""
if self.sleep_duration > 0:
gevent.sleep(self.sleep_duration)
else:
self.__warn(
f"The average tick took longer than the set tick duration of {self.__tick_duration}. "
f"Program is to heavy to run real time")
def __update_grid(self):
self.__grid = np.array([[None for _ in range(self.__shape[0])] for _ in range(self.__shape[1])])
for obj_id, obj in self.__environment_objects.items():
self.add_to_grid(obj)
for agent_id, agent in self.__registered_agents.items():
self.add_to_grid(agent)
    # get all objects and agents on the grid
    def __get_complete_state(self):
        """
        Compile all objects and agents on the grid in one state dictionary
        :return: state with all objects and agents on the grid, plus a
            "World" entry with generic tick/grid/visualisation settings.
        """
        # create a state with all objects and agents
        state = {}
        for obj_id, obj in self.__environment_objects.items():
            state[obj.obj_id] = obj.properties
        for agent_id, agent in self.__registered_agents.items():
            state[agent.obj_id] = agent.properties
        # Append generic properties (e.g. number of ticks, size of grid, etc.}
        state["World"] = {
            "nr_ticks": self.__current_nr_ticks,
            "curr_tick_timestamp": int(round(time.time() * 1000)),
            "grid_shape": self.__shape,
            "tick_duration": self.tick_duration,
            "world_ID": self.world_ID,
            "vis_settings": {
                "vis_bg_clr": self.__visualization_bg_clr,
                "vis_bg_img": self.__visualization_bg_img
            }
        }
        return state
    def __get_agent_state(self, agent_obj: AgentBody):
        """Build the perceivable state for one agent.

        Only objects within the agent's sense capabilities (per object type
        and range) are included, plus a "World" entry with generic settings
        and the agent's team members.
        """
        agent_loc = agent_obj.location
        sense_capabilities = agent_obj.sense_capability.get_capabilities()
        objs_in_range = OrderedDict()
        # Check which objects can be sensed with the agents' capabilities, from
        # its current position.
        for obj_type, sense_range in sense_capabilities.items():
            env_objs = self.get_objects_in_range(agent_loc, obj_type, sense_range)
            objs_in_range.update(env_objs)
        state = {}
        # Save all properties of the sensed objects in a state dictionary
        for env_obj in objs_in_range:
            state[env_obj] = objs_in_range[env_obj].properties
        # Append generic properties (e.g. number of ticks, fellow team members, etc.}
        team_members = [agent_id for agent_id, other_agent in self.__registered_agents.items()
                        if agent_obj.team == other_agent.team]
        state["World"] = {
            "nr_ticks": self.__current_nr_ticks,
            "curr_tick_timestamp": int(round(time.time() * 1000)),
            "grid_shape": self.__shape,
            "tick_duration": self.tick_duration,
            "team_members": team_members,
            "world_ID": self.world_ID,
            "vis_settings": {
                "vis_bg_clr": self.__visualization_bg_clr,
                "vis_bg_img": self.__visualization_bg_img
            }
        }
        return state
def __check_action_is_possible(self, agent_id, action_name, action_kwargs):
# If the action_name is None, the agent idles
if action_name is None:
result = ActionResult(ActionResult.IDLE_ACTION, succeeded=True)
return result
# Check if the agent still exists (you would only get here if the agent is removed during this tick).
if agent_id not in self.__registered_agents.keys():
result = ActionResult(ActionResult.AGENT_WAS_REMOVED.replace("{AGENT_ID}", agent_id), succeeded=False)
return result
if action_name is None: # If action is None, we send an action result that no action was given (and succeeded)
result = ActionResult(ActionResult.NO_ACTION_GIVEN, succeeded=True)
# action known, but agent not capable of performing it
elif action_name in self.__all_actions.keys() and \
action_name not in self.__registered_agents[agent_id].action_set:
result = ActionResult(ActionResult.AGENT_NOT_CAPABLE, succeeded=False)
# Check if action is known
elif action_name in self.__all_actions.keys():
# Get action class
action_class = self.__all_actions[action_name]
# Make instance of action
action = action_class()
# Check if action is possible, if so we can perform the action otherwise we send an ActionResult that it was
# not possible.
result = action.is_possible(self, agent_id, **action_kwargs)
else: # If the action is not known
warnings.warn(f"The action with name {action_name} was not found when checking whether this action is "
f"possible to perform by agent {agent_id}.")
result = ActionResult(ActionResult.UNKNOWN_ACTION, succeeded=False)
return result
    def __perform_action(self, agent_id, action_name, action_kwargs):
        """Validate and, when possible, execute one agent action.

        Mutates the world through the action's ``mutate`` method, reports the
        ActionResult back to the agent and refreshes its grid location.
        :return: the ActionResult, whether the action succeeded or not.
        """
        # Check if the action will succeed
        result = self.__check_action_is_possible(agent_id, action_name, action_kwargs)
        # If it will succeed, perform it.
        if result.succeeded:
            # If the action is None, nothing has to change in the world
            if action_name is None:
                return result
            # Get action class
            action_class = self.__all_actions[action_name]
            # Make instance of action
            action = action_class()
            # Apply world mutation
            result = action.mutate(self, agent_id, **action_kwargs)
            # Get agent's send_result function
            set_action_result = self.__registered_agents[agent_id].set_action_result_func
            # Send result of mutation to agent
            set_action_result(result)
            # Update the grid
            self.__update_agent_location(agent_id)
        # Whether the action succeeded or not, we return the result
        return result
    def __set_agent_busy(self, action_name, action_kwargs, agent_id):
        """Mark the agent busy for the chosen action's duration (in ticks).

        The duration comes from, in order of precedence: the deprecated
        ``duration_in_ticks`` kwarg, the ``action_duration`` kwarg, or the
        action class's own ``duration_in_ticks``. Idling (None) lasts 1 tick.
        """
        # Check if the action_name is None, in which case we simply idle for one tick
        if action_name is None:
            duration_in_ticks = 1
        else:  # action is not None
            # Get action class
            action_class = self.__all_actions[action_name]
            # Make instance of action
            action = action_class()
            # Obtain the duration of the action, defaults to the one of the action class if not in action_kwargs, and
            # otherwise that of Action
            duration_in_ticks = action.duration_in_ticks
            if "action_duration" in action_kwargs.keys():
                duration_in_ticks = action_kwargs["action_duration"]
            # Older kwarg name, raises deprecation warning
            if "duration_in_ticks" in action_kwargs.keys():
                warnings.warn("'duration_in_ticks' is deprecated for setting an action's duration; use "
                              "'action_duration'.", PendingDeprecationWarning)
                duration_in_ticks = action_kwargs["duration_in_ticks"]
        # The agent is now busy performing this action
        self.__registered_agents[agent_id]._set_agent_busy(curr_tick=self.current_nr_ticks,
                                                           action_duration=duration_in_ticks)
        # Set the action and result in the agent so we know where the agent is busy with. In addition this is appended
        # to its properties so others know what agent did)
        self.__registered_agents[agent_id]._set_current_action(action_name=action_name, action_args=action_kwargs)
def __update_agent_location(self, agent_id):
    """Register the agent's id on the grid cell at its current location."""
    loc = self.__registered_agents[agent_id].location
    # A grid cell is either None or a list of ids occupying that cell.
    cell = self.__grid[loc[1], loc[0]]
    if cell is None:
        self.__grid[loc[1], loc[0]] = [agent_id]
    else:
        cell.append(agent_id)
    # Write the location back so the agent avatar is updated as well.
    self.__registered_agents[agent_id].location = loc
def __update_obj_location(self, obj_id):
    """Register the environment object's id on the grid cell at its location."""
    loc = self.__environment_objects[obj_id].location
    # A grid cell is either None or a list of ids occupying that cell.
    cell = self.__grid[loc[1], loc[0]]
    if cell is None:
        self.__grid[loc[1], loc[0]] = [obj_id]
    else:
        cell.append(obj_id)
def __warn(self, warn_str):
    """Prefix a warning message with the current tick number, e.g. '[@42] ...'."""
    return f"[@{self.__current_nr_ticks}] {warn_str}"
@property
def messages_send_previous_tick(self):
    """Read-only access to the messages sent during the previous tick."""
    return self.__messages_send_previous_tick

@property
def registered_agents(self):
    """Read-only access to the mapping of registered agents."""
    return self.__registered_agents

@property
def environment_objects(self):
    """Read-only access to the mapping of environment objects."""
    return self.__environment_objects

@property
def is_done(self):
    """Whether the simulation has finished."""
    return self.__is_done

@property
def current_nr_ticks(self):
    """The current tick number of the simulation."""
    return self.__current_nr_ticks

@property
def grid(self):
    """Read-only access to the world grid."""
    return self.__grid

@property
def shape(self):
    """Read-only access to the grid's shape."""
    return self.__shape

@property
def simulation_goal(self):
    """Read-only access to the goal(s) that end the simulation."""
    return self.__simulation_goal

@property
def tick_duration(self):
    """Read-only access to the (real-time) duration of one tick."""
    return self.__tick_duration
|
994,321 | 5e6e74536f04699001d4bd47ac0f1ed9c29e56f8 | from threading import Timer,Thread,Event
import numpy as np
from intersec_okr import *
import threading
import time
import socket
import json
class Tracker:
    """Estimates a tracked object's position from per-node distance readings
    and streams the results as JSON over UDP to subscribed clients.

    NOTE(review): this is Python 2 code (print statements); the reconstructed
    indentation below is the most plausible reading of the original layout.
    """

    def __init__(self, nodes):
        # Maximum number of trace points to keep (unused in this chunk).
        self.trace_max = 50
        self.complx = []
        self.comply = []
        # Anchor nodes; each is assumed to expose posX/posY and a distance
        # history -- TODO confirm against the node class.
        self.nodes = nodes
        # Reference "root" poly-line the tracked position is compared against.
        self.rootX=[0, 1200, 2700, 5000, 10000, 15000, 20000, 25000, 30000]
        self.rootY=[3060, 3060, 3060, 3060, 3060, 3060, 3060, 3060, 3060]
        # Window size passed to the median() filter on raw positions.
        self.MEDIAN = 10
        self.current_x = []
        self.current_y = []
        # Kalman filter state vector and covariance used by kalman_xy().
        self.x = np.matrix('0. 0. 0. 0.').T
        self.P = np.matrix(np.eye(4))*1000
        # Measurement noise variance for the Kalman update.
        self.R = 0.01**2
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # UDP
        self.sock.bind(("0.0.0.0", 5005))
        # Addresses of UDP clients that announced themselves with "init".
        self.clients = []
        # self.send_json("init", [map(lambda x: x.posX, self.nodes), map(lambda x: x.posY, self.nodes), self.rootX, self.rootY], ("127.0.0.1", 5006))
        # Background listener that registers clients as they connect.
        thr = threading.Thread(target=self.socket_action)
        thr.setDaemon(True)
        thr.start()

    def clear(self):
        """Reset the accumulated position history."""
        self.current_x = []
        self.current_y = []

    def socket_action(self):
        """Blocking UDP receive loop.

        Answers "init" datagrams with the node and root geometry and records
        the sender so later loop updates are broadcast to it.
        """
        while True:
            data, add = self.sock.recvfrom(1024)
            if data == "init":
                self.send_json("init", [map(lambda x: x.posX, self.nodes), map(lambda x: x.posY, self.nodes), self.rootX, self.rootY], add)
                if not add in self.clients:
                    self.clients.append(add)
                    print add

    def send_ping(self, state):
        """Broadcast a ping carrying the given state to all clients."""
        self.send_json("ping", state)

    def send_json(self, what, data, addr=""):
        """Send {"info": what, "data": data} as JSON.

        With an empty addr the payload is broadcast to every registered
        client; clients whose send fails are dropped from the list.
        """
        if addr == "":
            for c in self.clients:
                try:
                    self.sock.sendto(json.dumps({"info": what, "data": data}), c)
                except:
                    print "ex"
                    self.clients.remove(c)
        else:
            self.sock.sendto(json.dumps({"info": what, "data": data}), addr)

    def node_action(self, nodes):
        """Collect the latest filtered distance per node ('nan' when a node is
        unavailable or stale) and hand them to compute_positions()."""
        L = []
        for n in nodes:
            if n.availible and n.a_counter < 30 and len(n.filtered_history) > 0:
                L.append(n.filtered_history[-1])
            else:
                L.append('nan')
        return self.compute_positions(L, nodes)

    def compute_positions(self, tab, nodes):
        """Trilaterate a position from the two smallest node distances.

        Publishes a "loop" JSON update on every call. Returns False on
        failure, otherwise a dict with the distance to the root line, the
        side ('lewo'/'prawo', Polish for left/right) and the position.
        """
        json_send = {"distances": tab, "time": "aaa"} #nodes[0].current_time}
        L = list(tab)
        index = []
        values = []
        # Pick the two nodes reporting the smallest distances.
        m = min(L)
        i = L.index(m)
        index.append(i)
        values.append(m)
        L[i] = "nan"
        m = min(L)
        i = L.index(m)
        index.append(i)
        values.append(m)
        json_send["indexes"] = index
        # Both minima must be numeric; 'nan' strings mean too few valid reads.
        for m in values:
            if not type(m) is int:
                json_send["error"] = "not enough valid reads"
                self.send_json("loop", json_send)
                return False
        index.sort()
        # Intersect the two distance circles around the chosen anchor nodes.
        i = IntersectPoints(complex(nodes[index[1]].posX,nodes[index[1]].posY),
                            complex(nodes[index[0]].posX,nodes[index[0]].posY),
                            values[1], values[0])
        if type(i) is bool:
            json_send["error"] = "calc error"
            self.send_json("loop", json_send)
            return False
        if len(self.current_x) > 0:
            # Two candidate intersection points: keep the one closer to the
            # running average of previous positions.
            i1=odl_pkt(sum(self.current_x) / len(self.current_x) ,sum(self.current_y) / len(self.current_y),i[0],i[1])
            i2=odl_pkt(sum(self.current_x) / len(self.current_x) ,sum(self.current_y) / len(self.current_y),i[2],i[3])
            if i2 > i1:
                direction = "left"
                searched_x, searched_y = i[0:2]
            else:
                direction = "right"
                searched_x, searched_y = i[2:4]
        else:
            searched_x, searched_y = i[0:2]
        # Median filter, then Kalman smoothing of the raw fix.
        searched_x = median(self.current_x, searched_x, self.MEDIAN)
        searched_y = median(self.current_y, searched_y, self.MEDIAN)
        self.x, self.P = kalman_xy(self.x, self.P, [searched_x, searched_y], self.R)
        searched_x, searched_y = map(lambda m: m[0], self.x[:2].tolist())
        json_send["point"] = [searched_x, searched_y]
        # Find the two nearest vertices of the root poly-line.
        root_length = []
        for n in range(0,len(self.rootX)):
            root_length.append(odl_pkt(self.rootX[n],
                                       self.rootY[n],
                                       searched_x,
                                       searched_y))
        closest_root_point1 = root_length.index(min(root_length))
        root_length[closest_root_point1] = "nan"
        closest_root_point2 = root_length.index(min(root_length))
        root = [closest_root_point1, closest_root_point2]
        root.sort()
        json_send["closest_root"] = root
        # Ensure the two vertices are adjacent on the poly-line.
        if root[1] - root[0] != 1:
            root_length[root[1]] = "nan"
            root[1] = root_length.index(min(root_length))
        # Distance to, and side of, the chosen root segment.
        distance = dist(searched_x, searched_y,
                        self.rootX[root[0]],
                        self.rootY[root[0]],
                        self.rootX[root[1]],
                        self.rootY[root[1]])
        # Cross-product sign decides which side of the segment the point is on.
        sign = ((self.rootX[root[1]] - self.rootX[root[0]]) * (searched_y - self.rootY[root[0]]) - (self.rootY[root[1]] - self.rootY[root[0]]) * (searched_x - self.rootX[root[0]]))
        direction = "lewo" if sign > 0 else "prawo"
        json_send["root_distance"] = distance
        json_send["direction"] = direction
        self.send_json("loop", json_send)
        return {'dist': distance, 'dir': direction, 'pos': [searched_x, searched_y]}
|
994,322 | a2284972feda969222088d75b98e959154d8294e | from mainapp.views import Comment
from rest_framework import generics
from mainapp.serializers import CommentSerializer
from rest_framework import permissions
class IsOwnerOrReadOnly(permissions.BasePermission):
    """Object-level permission: anyone may read, only the owner may write."""

    def has_object_permission(self, request, view, obj):
        # Safe (read-only) methods are always allowed; writes only for owners.
        return (request.method in permissions.SAFE_METHODS
                or obj.owner == request.user)
class CommentList(generics.ListCreateAPIView):
    """List all comments (GET) or create a new one (POST, auth required)."""
    queryset = Comment.objects.all()
    serializer_class = CommentSerializer
    permission_classes = [permissions.IsAuthenticatedOrReadOnly]

    def perform_create(self, serializer):
        # Stamp the requesting user as the comment's owner on creation.
        serializer.save(owner=self.request.user)
class CommentDetail(generics.RetrieveUpdateDestroyAPIView):
    """Retrieve, update or delete a single comment; writes require ownership."""
    queryset = Comment.objects.all()
    serializer_class = CommentSerializer
    permission_classes = [permissions.IsAuthenticatedOrReadOnly,
                          IsOwnerOrReadOnly]
|
994,323 | 0d0a3d1bc9ad4fcc3e2d6d626697b3c7e65fc469 | nums = []
def two_sum_pairs(nums, target):
    """Return every index pair [i, j] (i < j) whose values sum to target.

    Pairs appear in the order the brute-force scan finds them, matching the
    original script's output order.
    """
    pairs = []
    n = len(nums)
    for i in range(n):
        for j in range(i + 1, n):
            if nums[i] + nums[j] == target:
                pairs.append([i, j])
    return pairs


def main():
    """Read integers until STOP, then print all index pairs summing to the target."""
    nums = []
    # Replaces the original boolean-flag loop with a plain break.
    while True:
        x = input("请录入一个整数(输入STOP结束):")
        if x == "STOP":
            break
        nums.append(int(x))
    target = int(input("请录入目标整数:"))
    pairs = two_sum_pairs(nums, target)
    for pair in pairs:
        print(pair)
    if not pairs:
        print("找不到!")


if __name__ == "__main__":
    main()
994,324 | dc1cd79949ff13d1faa22795fe41de2c3a8493e4 | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-03-07 17:52
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: resets the 'book' model's Meta options and
    its explicit db_table back to the Django defaults."""

    dependencies = [
        ('website', '0002_auto_20170307_2321'),
    ]

    operations = [
        # Clear any custom Meta options on 'book'.
        migrations.AlterModelOptions(
            name='book',
            options={},
        ),
        # table=None reverts to the auto-derived table name.
        migrations.AlterModelTable(
            name='book',
            table=None,
        ),
    ]
|
994,325 | b88465f8f9316f77ee60680122f6e068680b77af | from django.shortcuts import render
from django.views.generic import TemplateView
# Create your views here.
class ContinentsView(TemplateView):
    """Renders the continents page with a fixed list of continent entries."""

    template_name = 'continents/continents.html'

    def get_context_data(self, *args, **kwargs):
        """Return the template context: one dict per continent with its
        Spanish name, English translation and display colour."""
        continent_rows = [
            ('america', 'america', '#000000'),
            ('antartida', 'antarctica', '#FFFF00'),
            ('europa', 'europe', '#F1D142'),
            ('africa', 'africa', '#F04261'),
            ('asia', 'asia', '#EE65EE'),
            ('oceania', 'oceania', '#EE65DD'),
        ]
        continents = [
            {'name': name, 'translation': translation, 'color': color}
            for name, translation, color in continent_rows
        ]
        return {'continents': continents}
994,326 | deb43590037743439cc149d8d5202580fb39a51d | class Solution:
# NOTE(review): four definitions of ``minDistance`` follow. In Python, later
# definitions shadow earlier ones, so only the LAST (LCS + memo) version is
# actually bound on the class; the first three are reference implementations
# and would need distinct names to be callable.
def minDistance(self, word1: str, word2: str) -> int:
    """Bottom-up DP over delete-distance: dp[i][j] = deletions needed to make
    word1[:i] and word2[:j] equal (LeetCode 583)."""
    m, n = len(word1), len(word2)
    # If either word is empty, delete every character of the other.
    if m == 0 or n == 0: return (m+n)
    dp = [[0]*(n+1) for _ in range(m+1)]
    for i in range(1,m+1):
        dp[i][0] = i
    for j in range(1,n+1):
        dp[0][j] = j
    for i in range(1, m+1):
        for j in range(1, n+1):
            if word1[i-1] == word2[j-1]:
                dp[i][j] = dp[i-1][j-1]
            else:
                # Delete from either word, whichever is cheaper.
                dp[i][j] = min(dp[i-1][j], dp[i][j-1]) + 1
    return dp[-1][-1]
##memo
def minDistance(self, word1: str, word2: str) -> int:
    """Top-down memoized version of the delete-distance recursion."""
    m, n = len(word1), len(word2)
    memo = {}
    def dfs(i, j):
        # Base case: one word exhausted; delete the (i+1)+(j+1) leftovers.
        if i < 0 or j < 0: return i+j+2
        if (i, j) not in memo:
            if word1[i] == word2[j]:
                res = dfs(i-1, j-1)
            else:
                res = min(dfs(i-1,j), dfs(i, j-1))+1
            memo[(i, j)] = res
        return memo[(i, j)]
    return dfs(m-1, n-1)
##LCS
def minDistance(self, word1: str, word2: str) -> int:
    """Same answer via the longest common subsequence: m + n - 2 * LCS."""
    m, n = len(word1), len(word2)
    if m == 0 or n == 0: return (m+n)
    dp = [[0]*(n+1) for _ in range(m+1)]
    for i in range(m+1):
        for j in range(n+1):
            if i == 0 or j == 0: continue
            if word1[i-1] == word2[j-1]:
                dp[i][j] = dp[i-1][j-1] + 1
            else:
                dp[i][j] = max(dp[i-1][j], dp[i][j-1])
    return (m+n-2*dp[-1][-1])
##LCS + memo
def minDistance(self, word1: str, word2: str) -> int:
    """Memoized LCS variant; this is the definition that wins the shadowing."""
    m, n = len(word1), len(word2)
    memo = {}
    def dfs(i, j):
        # Base case: empty prefix contributes 0 to the LCS.
        if i < 0 or j < 0: return 0
        if (i, j) not in memo:
            if word1[i] == word2[j]:
                res = dfs(i-1, j-1) + 1
            else:
                res = max(dfs(i-1, j), dfs(i, j-1))
            memo[(i, j)] = res
        return memo[(i, j)]
    return m + n - 2*dfs(m-1, n-1)
994,327 | f45ac92cb78ae535bcf055759029eb4f0f54d65f | import re
import responses
from django.test import TestCase
from django.forms.models import model_to_dict
from rest_framework.test import APIClient
from requests.compat import urljoin
from api import models
from api import scrapper
from api.test_files import week
from api.test_files import detail
class FilmViewTests(TestCase):
    """API tests for the film list endpoint."""

    def test_should_return_list_of_films(self):
        """GET /api/ returns 200 and serializes every stored film."""
        client = APIClient()
        film, created = models.Film.objects.update_or_create(
            title="test_title",
            director="test_director",
            year="fake_duration",
            duration="fake_duration"
        )
        response = client.get('/api/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.json(), [model_to_dict(film)])
class WeekScrapperTests(TestCase):
    """Tests for WeekScrapper against canned HTML fixtures served by the
    `responses` HTTP mock."""

    def setUp(self):
        # Weekly programme page and a catch-all for every film detail page.
        responses.add(responses.GET, scrapper.WeekScrapper.FILMO_URL,
                      body=week.WEEK_HTML, status=200)
        responses.add(responses.GET,
                      re.compile(f'{scrapper.DetailScrapper.DETAIL_URL}.*'),
                      body=detail.FILM_DETAIL_HTML, status=200)

    @responses.activate
    def test_should_scrape_film_detail_links(self):
        """The week page yields the relative links to each film's detail page."""
        week_scrapper = scrapper.WeekScrapper('2020-02-24')
        links = week_scrapper.scrape_detail_links()
        self.assertIs(len(responses.calls), 1)
        self.assertIn('/web/ca/film/werk-ohne-autor', links)

    @responses.activate
    def test_should_save_scrapped_film(self):
        """scrape() persists the film parsed from the detail fixture."""
        week_scrapper = scrapper.WeekScrapper('2020-02-24')
        week_scrapper.scrape()
        film = models.Film.objects.get(title='Werk ohne Autor')
        assert film.title == 'Werk ohne Autor'
        assert film.director == 'Florian Henckel von Donnersmarck'
        assert film.year == '2018'
        assert film.duration == "188'"
class DetailScrapperTests(TestCase):
    """Tests for DetailScrapper's parsing of a single film detail page."""

    def setUp(self):
        self.test_film = 'werk-ohne-autor'
        responses.add(responses.GET,
                      urljoin(scrapper.DetailScrapper.DETAIL_URL, self.test_film),
                      body=detail.FILM_DETAIL_HTML, status=200)

    @responses.activate
    def test_should_scrape_film_details(self):
        """One HTTP call; the fixture's title/director/year/duration are parsed."""
        detail_scrapper = scrapper.DetailScrapper(self.test_film)
        film_details = detail_scrapper.scrape_film_details()
        assert len(responses.calls) == 1
        assert film_details['title'] == 'Werk ohne Autor'
        assert film_details['director'] == 'Florian Henckel von Donnersmarck'
        assert film_details['year'] == '2018'
        assert film_details['duration'] == "188'"
class ScrapperIntegrationTests(TestCase):
    """End-to-end test: module-level scrape() goes from the week page through
    the detail pages and stores the films."""

    def setUp(self):
        self.test_film = 'werk-ohne-autor'
        responses.add(responses.GET, scrapper.WeekScrapper.FILMO_URL,
                      body=week.WEEK_HTML, status=200)
        responses.add(responses.GET,
                      re.compile(f'{scrapper.DetailScrapper.DETAIL_URL}.*'),
                      body=detail.FILM_DETAIL_HTML, status=200)

    @responses.activate
    def test_should_save_scrapped_films(self):
        scrapper.scrape()
        film = models.Film.objects.get(title='Werk ohne Autor')
        assert film
|
994,328 | 67394c7c445030f753de808bc88e907cc407c6ea | import time
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.remote.webelement import WebElement
class ChatElement(WebElement):
    """A WebElement that contains a whatsapp web chat.

    Wraps a chat-list entry and offers helpers to read messages from and send
    messages to that chat. NOTE: the CSS class names and XPaths below are
    WhatsApp Web's obfuscated identifiers and are brittle across UI updates.
    """

    def __init__(self, chat: WebElement):
        """Wrap ``chat``; raises TypeError if it is not a chat-list entry."""
        super().__init__(chat.parent, chat.id)
        # Chat list entries carry this (obfuscated) class name.
        if not chat.get_attribute("class") == "_2aBzC":
            raise TypeError("Element is not a chat")
        self.chat = chat

    def get_name(self):
        """Returns the name of the chat"""
        if type(self.chat) == WebElement:
            return self.chat.find_element_by_xpath(".//span[@dir='auto']").text

    def __get_loaded_messages(self):
        """Returns all loaded messages and tags in the current chat"""
        # Every child of the message container, in document order.
        return [MessageElement(message) for message in
                self.chat.find_elements(By.XPATH, """//*[@id="main"]/div[3]/div/div/div[3]/*""")]

    def get_loaded_messages(self):
        """Returns all loaded messages and tags in the current chat.

        Opens the chat first so the message pane being scanned is this one.
        """
        self.chat.click()
        # Fix: delegate to the private helper instead of duplicating its body.
        return self.__get_loaded_messages()

    def get_unread_messages(self):
        """Gets unread messages from the chat"""
        self.chat.click()
        loaded_messages = self.__get_loaded_messages()
        for message in loaded_messages:
            try:
                # This element is the "unread messages" divider; everything
                # after it is unread.
                if message.get_attribute("class") == "XFAMv focusable-list-item":
                    unread_index = loaded_messages.index(message)
                    return loaded_messages[unread_index + 1:]
            except Exception:
                # Narrowed from a bare except; stale/odd elements are skipped.
                continue
        return []

    def send_message(self, message: str):
        """Sends the given message to the chat"""
        self.chat.click()
        text_box = self.chat.find_element_by_xpath("//div[@class='_2_1wd copyable-text selectable-text' and @data-tab='6']")
        text_box.click()
        text_box.send_keys(message)
        # Short pause so the send button is rendered/enabled after typing.
        time.sleep(0.1)
        send_button = self.chat.find_element_by_xpath("//button[@class='_1E0Oz']")
        send_button.click()

    def wait_until_new_message(self):
        """Waits until a new message is received in the current chat and returns a MessageElement"""
        last_message_id = self.get_loaded_messages()[-1].get_attribute("data-id")
        # Busy-poll until the id of the newest message changes.
        while True:
            try:
                new_message = self.get_loaded_messages()[-1]
                if last_message_id != new_message.get_attribute("data-id"):
                    return new_message
            except Exception:
                # Narrowed from a bare except; keep the original best-effort
                # retry behaviour.
                print("Error encountered (0x001)")
                continue
class MessageElement(WebElement):
    """A WebElement that contains a whatsapp web message"""
    def __init__(self, message: WebElement):
        super().__init__(message.parent, message.id)
        self.message = message

    def get_text(self):
        """Converts message/list of message object(s) to text.
        Returns message_deleted if the message was deleted or did not contain text"""
        if type(self.message) == WebElement:
            try:
                return self.message.find_element(By.XPATH,
                    ".//span[@class='_3-8er selectable-text copyable-text']/span").text
            except NoSuchElementException:
                # No text span: deleted message or non-text content.
                return "message_deleted"

    def get_chat(self):
        """Returns a chat element from the message element"""
        # Walk up to the chat container to read the chat title, then find the
        # matching entry in the chat list. NOTE(review): obfuscated class
        # names are brittle across WhatsApp Web updates.
        name = self.message.find_element_by_xpath(".//ancestor::div[@class='_1Flk2 _1sFTb']").find_element_by_xpath(".//span[@class='_35k-1 _1adfa _3-8er']").text
        chat: ChatElement = ChatElement(self.message.find_element_by_xpath("//div[@class='_2aBzC'][.//span[@title='{}']]".format(name)))
        return chat

    def delete(self, everyone=False):
        """Deletes the message passed.
        use everyone=True for deleting messages for everyone"""
        self.message.click()
        # ARROW_RIGHT opens the message's context menu.
        self.message.send_keys(Keys.ARROW_RIGHT)
        self.message.find_element_by_xpath("//div[@aria-label='Delete message']").click()
        try:
            # Simple confirmation dialog (only "delete for me" available).
            self.message.find_element_by_xpath('//div[@class="_1dwBj _3xWLK"]').click()
        except:
            # Two-button dialog: first button deletes for me, third for everyone.
            if not everyone:
                self.message.find_element_by_xpath('//*[@id="app"]/div/span[2]/div/span/div/div/div/div/div/div[3]/div/div[1]/div').click()
            else:
                self.message.find_element_by_xpath('//*[@id="app"]/div/span[2]/div/span/div/div/div/div/div/div[3]/div/div[3]/div').click()

    def reply(self, text=None):
        """Adds reply of the passed message to the input box.
        If text is given, sends the the message with provided text"""
        self.message.click()
        self.message.send_keys(Keys.ARROW_RIGHT)
        try:
            self.message.find_element_by_xpath("//div[@aria-label='Reply']").click()
        except NoSuchElementException:
            raise Exception("Message has been been deleted")
        if text is not None:
            self.get_chat().send_message(text)
|
994,329 | c3b074052c1c9e78d5007bcece81eb21a9fd39ac | import sys
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from monitoring_plot_auc import (plot_cost, plot_auc, plot_l2)
DEF_COLORS = ["blue", "green", "red", "black"]
def plot_fprs(df, iters):
    """Plot each constraint's false-positive rate against training iteration."""
    plt.figure(figsize=(3, 2))
    # Plot only as many points as both the data and the iteration list cover.
    n_points = min(df.shape[0], len(iters))
    for idx, column in enumerate(df.columns):
        plt.plot(iters[:n_points], df[column][:n_points],
                 label="#{}".format(idx), color=DEF_COLORS[idx])
    plt.xlabel("iterations")
    plt.ylabel("FPR")
    plt.legend(title="Constraint")
    plt.grid()
    plt.tight_layout()
def plot_tprs(df, iters):
    """Plot each constraint's true-positive rate against training iteration."""
    plt.figure(figsize=(3, 2))
    # Plot only as many points as both the data and the iteration list cover.
    n_points = min(df.shape[0], len(iters))
    for idx, column in enumerate(df.columns):
        plt.plot(iters[:n_points], df[column][:n_points],
                 label="#{}".format(idx), color=DEF_COLORS[idx])
    plt.xlabel("iterations")
    plt.ylabel("TPR")
    plt.legend(title="Constraint")
    plt.grid()
    plt.tight_layout()
def plot_cs(df, iters):
    """Plot the per-constraint coefficient c over training iterations."""
    plt.figure(figsize=(3, 2))
    # Plot only as many points as both the data and the iteration list cover.
    n_min = min(df.shape[0], len(iters))
    for i, col in enumerate(df.columns):
        plt.plot(iters[:n_min], df[col][:n_min],
                 label="#{}".format(i), color=DEF_COLORS[i])
    plt.xlabel("iterations")
    plt.ylabel("c")
    plt.legend(title="Constraint")
    # c values live in [-1, 1]; fix the axis slightly wider for readability.
    plt.ylim([-1.1, 1.1])
    plt.grid()
    plt.tight_layout()
def plot_thre(df, iters):
    """Plot the per-constraint decision threshold t over training iterations."""
    plt.figure(figsize=(3, 2))
    # Plot only as many points as both the data and the iteration list cover.
    n_min = min(df.shape[0], len(iters))
    for i, col in enumerate(df.columns):
        plt.plot(iters[:n_min], df[col][:n_min],
                 label="#{}".format(i), color=DEF_COLORS[i])
    plt.xlabel("iterations")
    plt.ylabel("Threshold t")
    plt.legend(title="Constraint")
    plt.grid()
    plt.tight_layout()
def main():
    """Render all monitoring plots for one run.

    Reads iteration numbers and CSV metric dumps from
    <outfolder>/dyn_analysis/files/ and writes PDF figures to
    <outfolder>/dyn_analysis/plots_fancy/ (directory assumed to exist).
    """
    outfolder = sys.argv[1]
    # One iteration number per monitoring snapshot; blank lines skipped.
    with open("{}/dyn_analysis/files/iter.txt".format(outfolder), "rt") as f:
        iters = [int(x) for x in f.read().split("\n") if len(x) > 0]
    # Scalar metrics (cost, auc, l2) share one CSV with headers.
    df = pd.read_csv("{}/dyn_analysis/files/data.csv".format(outfolder))
    plot_cost(df, iters)
    plt.savefig("{}/dyn_analysis/plots_fancy/cost.pdf".format(outfolder))
    plt.close()
    plot_auc(df, iters)
    plt.savefig("{}/dyn_analysis/plots_fancy/auc.pdf".format(outfolder))
    plt.close()
    plot_l2(df, iters)
    plt.savefig("{}/dyn_analysis/plots_fancy/l2.pdf".format(outfolder))
    plt.close()
    # Per-constraint traces are header-less CSVs, one column per constraint.
    df = pd.read_csv("{}/dyn_analysis/files/{}.csv".format(
        outfolder, "fpr"), header=None)
    plot_fprs(df, iters)
    plt.savefig("{}/dyn_analysis/plots_fancy/fprs.pdf".format(outfolder))
    plt.close()
    df = pd.read_csv("{}/dyn_analysis/files/{}.csv".format(
        outfolder, "tpr"), header=None)
    plot_tprs(df, iters)
    plt.savefig("{}/dyn_analysis/plots_fancy/tprs.pdf".format(outfolder))
    plt.close()
    df = pd.read_csv("{}/dyn_analysis/files/{}.csv".format(
        outfolder, "c"), header=None)
    plot_cs(df, iters)
    plt.savefig("{}/dyn_analysis/plots_fancy/cs.pdf".format(outfolder))
    plt.close()
    df = pd.read_csv("{}/dyn_analysis/files/{}.csv".format(
        outfolder, "biases"), header=None)
    plot_thre(df, iters)
    plt.savefig("{}/dyn_analysis/plots_fancy/thres.pdf".format(outfolder))
    plt.close()


if __name__ == "__main__":
    main()
|
994,330 | cf9ca4823c8bbab3b9958d12767adf3cd3b287d3 | from django.contrib import admin
from reversion.admin import VersionAdmin
from base.admin import json_enabled_form
from reportforms.models import (
ReportForm,
ReportFormVersion,
ReportFormInstance,
FormInstanceRequest
)
@admin.register(ReportForm)
class ReportFormAdmin(VersionAdmin):
    """Reversion-enabled admin for report forms with a JSON-aware edit form."""
    list_display = ("form_name",)
    form = json_enabled_form(ReportForm)

    def form_name(self, obj):
        # Callable column used by list_display.
        return obj.name
@admin.register(ReportFormVersion)
class ReportFormVersionAdmin(VersionAdmin):
    """Reversion-enabled admin for form versions with a JSON-aware edit form."""
    form = json_enabled_form(ReportFormVersion)
@admin.register(ReportFormInstance)
class ReportFormInstanceAdmin(VersionAdmin):
    """Reversion-enabled admin for filled-in form instances."""
    list_display = ("form_title", "created_at", "state")
    form = json_enabled_form(ReportFormInstance)

    def form_title(self, obj):
        # Callable column: the instance's string representation.
        return str(obj)


admin.site.register(FormInstanceRequest)
994,331 | 19d9eba720eba452ec4cc33e9dd0e75c2f3244ab | #import modules
import os
import csv
# define csv file output columns as parallel lists
date, revenue = ([] for i in range(2))
# input and output files (names only; the real paths are built below)
input_file = "budget_data.csv"
output_file = "financial_analysis.txt"
# input and output paths
csvpath = os.path.join('..', 'PyBank', 'budget_data.csv')
txtpath = os.path.join('..', 'PyBank', 'financial_analysis.txt')
with open(csvpath, 'r+', newline='') as budget_data:
    # r+ opens csv with read/write permissions
    reader = csv.reader(budget_data, delimiter=',')
    next(reader)
    # accounts for header/column titles
    row_num = 0
    for row in reader:
        date.append(row[0])
        revenue.append(row[1])
        row_num += 1
print("\nFinancial Analysis", "\n" + "-"*50)
# Total Months
print("Total Months:", row_num)
# Total Revenue
revenue_sum = 0
for i in revenue:
    revenue_sum += int(i)
print("Total Revenue: $" + str(revenue_sum))
# NOTE(review): at h == 0 this subtracts revenue[-1] (the LAST row) via
# negative indexing; the first_pass correction below undoes exactly that
# wrap-around term.
total_revenue_change = 0
for h in range(row_num):
    total_revenue_change += int(revenue[h]) - int(revenue[h - 1])
# the first_pass variable is created to remove the first iteration revenue change
# which, takes the first list element and subtracts it by the last list element.
first_pass = (int(revenue[0]) - int(revenue[-1]))
total_revenue_change_adj = total_revenue_change - first_pass
# NOTE(review): adding revenue[0] and dividing by row_num looks suspect; the
# usual definition is (last - first) / (row_num - 1). Verify against the
# expected output before relying on this number.
avg_revenue_change = (total_revenue_change_adj + int(revenue[0])) / row_num
print("Average Revenue Change: $" + str(round(avg_revenue_change)))
# Greatest Revenue Increase (same wrap-around at j == 0 as above)
high_revenue = 0
for j in range(len(revenue)):
    if int(revenue[j]) - int(revenue[j - 1]) > high_revenue:
        high_revenue = int(revenue[j]) - int(revenue[j - 1])
        high_month = date[j]
print("Greatest Increase in Revenue:", high_month, "($" + str(high_revenue) + ")")
# Greatest Revenue Decrease (same wrap-around at k == 0 as above)
low_revenue = 0
for k in range(len(revenue)):
    if int(revenue[k]) - int(revenue[k - 1]) < low_revenue:
        low_revenue = int(revenue[k]) - int(revenue[k - 1])
        low_month = date[k]
print("Greatest Decrease in Revenue:", low_month, "($" + str(low_revenue) + ")")
# Write the same summary to the text report, one line per row.
with open(txtpath, 'w', newline='') as financial_analysis_txt:
    writer = csv.writer(financial_analysis_txt)
    writer.writerows([
        ["Financial Analysis for: " + input_file],
        ["-"*25],
        ["Total Months: " + str(row_num)],
        ["Total Revenue: $" + str(revenue_sum)],
        ["Average Revenue Change: $" + str(round(avg_revenue_change))],
        ["Greatest Increase in Revenue: " + str(high_month) + " ($" + str(high_revenue) + ")"],
        ["Greatest Decrease in Revenue: " + str(low_month) + " ($" + str(low_revenue) + ")"]
    ])
994,332 | 8058be39e9fa9b310abe9c89a630aff04580a8cc | # 1-51 jieba原有的停用詞檔
# 52-61 機器學習
# 62-67 深度學習
# 68-88 機器人
# 89-94 演算法
# 暫時
|
994,333 | 5ce43febe0ba780861dd2da38c2c58a9c6501edb | from django.urls import path
from . import views
from django.views.generic.base import TemplateView
# Fix: urlpatterns was declared as a set literal ({...}). Django requires an
# ordered sequence (list) -- URL resolution is order-dependent and sets are
# unordered. Also drops an exact duplicate 'createChatGroup' entry.
urlpatterns = [
    path('add_annotation', views.add_annotation),
    path('getChatGroupPapers', views.getChatGroupPapers),
    path('getChatGroupMembers', views.getChatGroupMembers),
    path('createChatGroup', views.createChatGroup),
    path('uploadChatGroupPaper', views.uploadChatGroupPaper),
    path('getBothStarList', views.getBothStarList),
    path('getMyChatGroupList', views.getMyChatGroupList),
    path('chatGroupPaper.html', TemplateView.as_view(template_name = 'chatGroupPaper.html')),
    path('showpdf.html', TemplateView.as_view(template_name = 'showpdf.html')),
    path('memberInGroupPage.html', TemplateView.as_view(template_name = 'memberInGroupPage.html')),
    path('singleGroupPage.html', TemplateView.as_view(template_name = 'singleGroupPage.html')),
    path('uploadPaperToChatGroup.html', TemplateView.as_view(template_name = 'uploadPaperToChatGroup.html')),
    path('getChatGroupName', views.getChatGroupName),
    path('myChatGroupList.html', TemplateView.as_view(template_name = 'myChatGroupList.html')),
    path('createChatGroup.html', TemplateView.as_view(template_name = 'createChatGroup.html')),
    path('annotation-noicon.svg', views.get_icon),
]
994,334 | da3d4d728320ffce809ac569222bd64b3f994524 | def rfactor(h,rmask):
"""
function r = rfactor(h,rmask)
This function computes the bathymetry slope from a SCRUM NetCDF file.
On Input:
h bathymetry at RHO-points.
rmask Land/Sea masking at RHO-points.
On Output:
r R-factor.
"""
Lp, Mp = h.shape
L=Lp-1
M=Mp-1
# Land/Sea mask on U-points.
umask = np.zeros((L,Mp))
for j in range(Mp):
for i in range(1,Lp):
umask[i-1,j] = rmask[i,j] * rmask[i-1,j]
# Land/Sea mask on V-points.
vmask = np.zeros((Lp,M))
for j in range(1,Mp):
for i in range(Lp):
vmask[i,j-1] = rmask[i,j] * rmask[i,j-1]
#-------------------------------------------------------------------
# Compute R-factor.
#-------------------------------------------------------------------
hx = np.zeros((L,Mp))
hy = np.zeros((Lp,M))
hx = abs(h[1:,:] - h[:-1,:]) / (h[1:,:] + h[:-1,:])
hy = abs(h[:,1:] - h[:,:-1]) / (h[:,1:] + h[:,:-1])
hx = hx * umask
hy = hy * vmask
r = np.zeros((L,M))
r = np.maximum(np.maximum(hx[:,:-1],hx[:,1:]), np.maximum(hy[:-1,:],hy[1:,:]))
rmin = r.min()
rmax = r.max()
ravg = r.mean()
rmed = np.median(r)
print ' '
print 'Minimum r-value = ', rmin
print 'Maximum r-value = ', rmax
print 'Mean r-value = ', ravg
print 'Median r-value = ', rmed
return r
|
994,335 | 640806d69f7b51ac71f18a14b53ae97fff7643c3 | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
    """LeetCode 82: remove every node whose value appears more than once
    from a sorted singly linked list."""

    # Fix: the annotations are quoted because ListNode is only defined in the
    # comment above; unquoted annotations raised NameError at class-definition
    # time when this file was imported standalone.
    def deleteDuplicates(self, head: 'ListNode') -> 'ListNode':
        """Return the list with all duplicated values removed entirely.

        ``cursor`` scans the original list: runs of equal values are skipped
        wholesale, while unique nodes are appended to the result list that
        ``tail`` tracks. Works on any node-like objects with .val/.next.
        """
        new_head = None
        tail = None  # last node of the rebuilt (all-unique) list
        cursor = head
        while cursor is not None:
            if cursor.next is not None and cursor.next.val == cursor.val:
                # Skip the entire run of duplicated values.
                dup_val = cursor.val
                while cursor is not None and cursor.val == dup_val:
                    cursor = cursor.next
            else:
                # Unique node: detach it and append to the result list.
                node = cursor
                cursor = cursor.next
                node.next = None
                if tail is None:
                    new_head = node
                else:
                    tail.next = node
                tail = node
        return new_head
994,336 | dd7fcfc40d53c21e8910dc773aa4792f00360b03 | # -*- coding: utf-8 -*-
"""
用CNN处理脑影像数据
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Load the flattened tumour images (one 61x73 image per row, no header).
df_x_tumour=pd.read_csv('tumour_3.csv',header=None)
# Drop rows whose values are all zero (empty slices).
df_x_tumour_1=df_x_tumour  # working copy so rows can be dropped while iterating
for i in range(0,df_x_tumour.iloc[:,0].size):
    list_row=list(set(list(df_x_tumour.iloc[i])))  # if the only distinct value is 0, the row is all zeros
    if(list_row==[0.0]):
        df_x_tumour_1=df_x_tumour_1.drop(i)
df_x_tumour=df_x_tumour_1
#plt.imshow(df_x_tumour.iloc[i].as_matrix().reshape((61, 73)), cmap='gray')
df_x_tumour['label']=1
# Load the normal-control images and drop all-zero rows the same way.
df_x_NC=pd.read_csv('NC_3.csv',header=None)
df_x_NC_1=df_x_NC
for i in range(0,df_x_NC.iloc[:,0].size):
    list_row=list(set(list(df_x_NC.iloc[i])))  # if the only distinct value is 0, the row is all zeros
    if(list_row==[0.0]):
        df_x_NC_1=df_x_NC_1.drop(i)
df_x_NC=df_x_NC_1
#plt.imshow(df_x_NC.iloc[i].as_matrix().reshape((61, 73)), cmap='gray')
df_x_NC['label']=0
# From here on no image is entirely zero; stack both classes into one array.
array_x_tumour=df_x_tumour.values.astype(np.float32)
array_x_NC=df_x_NC.values.astype(np.float32)
x=np.row_stack((array_x_tumour,array_x_NC))
# Random train/test split; the last column (index 61*73) is the label.
X=np.delete(x,61*73,axis=1)
Y=x[:,61*73]
from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2, random_state=42)
import tensorflow as tf
tf.set_random_seed(1)  # fixed seeds for reproducibility
np.random.seed(1)
BATCH_SIZE = 50
LR = 0.001
# Test set: one-hot encode the labels into test_y.
test_x = X_test
test_y_begin = y_test
test_y_begin=test_y_begin.astype(np.int32)
test_y=np.zeros([len(test_y_begin),2]).astype(np.int32)
for i in range(0,len(test_y_begin)):
    if(test_y_begin[i]==1):
        test_y[(i,1)]=1
    else:
        test_y[(i,0)]=1
# Graph inputs: flattened image and one-hot label.
tf_x = tf.placeholder(tf.float32, [None, 61*73],name='tf_x')
image = tf.reshape(tf_x, [-1, 61, 73, 1])              # (batch, height, width, channel)
tf_y = tf.placeholder(tf.int32, [None, 2],name='tf_y')  # input y
# CNN: three conv+maxpool stages, then a dense softmax head.
conv1 = tf.layers.conv2d(   # shape (61, 73, 1)
    inputs=image,
    filters=16,
    kernel_size=5,
    strides=1,
    padding='valid',
    activation=tf.nn.relu
)           # -> (57, 69, 16)
print(conv1)
pool1 = tf.layers.max_pooling2d(
    conv1,
    pool_size=2,
    strides=2,
)           # -> (28, 34, 16)
print(pool1)
conv2 = tf.layers.conv2d(pool1, 32, 5, 1, 'valid', activation=tf.nn.relu)    # -> (24, 30, 32)
print(conv2)
pool2 = tf.layers.max_pooling2d(conv2, 2, 2)    # -> (12, 15, 32)
print(pool2)
conv3 = tf.layers.conv2d(pool2, 16, 3, 1, 'valid', activation=tf.nn.relu)    # -> (10, 13, 16)
print(conv3)
pool3 = tf.layers.max_pooling2d(conv3, 2, 2)    # -> (5, 6, 16)
print(pool3)
print(pool3.shape[1]*pool3.shape[2]*pool3.shape[3])
flat = tf.reshape(pool3, [-1, 5*6*16],name='flat')          # -> (5*6*16, )
print(flat)
output = tf.layers.dense(flat, 2,name='output')              # output layer
print(output)
loss = tf.losses.softmax_cross_entropy(onehot_labels=tf_y, logits=output)           # compute cost
train_op = tf.train.AdamOptimizer(LR).minimize(loss)
accuracy = tf.metrics.accuracy(          # return (acc, update_op), and create 2 local variables
    labels=tf.argmax(tf_y, axis=1), predictions=tf.argmax(output, axis=1),)[1]
sess = tf.Session()
init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())     # the local var is for accuracy_op
sess.run(init_op)     # initialize var in graph
import datetime
starttime = datetime.datetime.now()
now=datetime.datetime.now()
print(now.strftime('%c'))
# Keep features and labels in one array so shuffling keeps them aligned.
a=np.column_stack((X_train,y_train))
train_x=np.delete(a,61*73,axis=1)
train_y_all = a[:,61*73]
train_y_all=train_y_all.astype(np.int32)
train_y=np.zeros([len(train_y_all),2]).astype(np.int32)
for i in range(0,len(train_y_all)):
    if(train_y_all[i]==1):
        train_y[(i,1)]=1
    else:
        train_y[(i,0)]=1
# Training loop: reshuffle each step and take the first BATCH_SIZE rows.
for step in range(10000):
    np.random.shuffle(a)
    b_x_all=np.delete(a,61*73,axis=1)
    b_y_all = a[:,61*73]
    b_y_all=b_y_all.astype(np.int32)
    b_x=b_x_all[0:BATCH_SIZE,:]
    b_y_simple=b_y_all[0:BATCH_SIZE]
    b_y=np.zeros([len(b_y_simple),2]).astype(np.int32)
    for i in range(0,len(b_y_simple)):
        if(b_y_simple[i]==1):
            b_y[(i,1)]=1
        else:
            b_y[(i,0)]=1
    _, loss_ = sess.run([train_op, loss], {tf_x: b_x, tf_y: b_y})
    if step % 50 == 0:
        # Evaluate on the batch, the full training set and the test set.
        accuracy_batch, flat_representation_batch = sess.run([accuracy, flat], {tf_x: b_x, tf_y: b_y})
        accuracy_train, flat_representation_train = sess.run([accuracy, flat], {tf_x: train_x, tf_y: train_y})
        accuracy_, flat_representation = sess.run([accuracy, flat], {tf_x: test_x, tf_y: test_y})
        print('Step:', step, '| train loss: %.4f' % loss_, '|batch accuracy:%.4f' % accuracy_batch, '|train accuracy:%.4f' % accuracy_train, '| test accuracy: %.4f' % accuracy_)
        # Append the metrics to acc_2.txt as well.
        with open('acc_2.txt','a') as f:
            f.write('step:')
            f.write(str(step))
            f.write(' ')
            f.write('train loss:')
            f.write(str(loss_))
            f.write(' ')
            f.write('batch accuracy:')
            f.write(str(accuracy_batch))
            f.write(' ')
            f.write('train accuracy:')
            f.write(str(accuracy_train))
            f.write(' ')
            f.write('test accuracy:')
            f.write(str(accuracy_))
            f.write(' ')
            f.write('\n')
import datetime
now=datetime.datetime.now()
print(now.strftime('%c'))
endtime = datetime.datetime.now()
print("%s秒" %((endtime - starttime).seconds))
# Save the trained model.
saver=tf.train.Saver()
saver.save(sess, "saved_model/split_model_2")
# Show predictions for the first ten test samples.
test_output = sess.run(output, {tf_x: test_x[:10]})
pred_y = np.argmax(test_output, 1)
print(pred_y, 'prediction number')
print(np.argmax(test_y[:10], 1), 'real number')
994,337 | 6f665051feda967f363aa15617e0e0e56d51abb5 | import commands
import os
import time
import pickle
import json
from Constants import Constants
from Task import Task
from File import File, EventsFile
import Utils
class CondorTask(Task):
    # NOTE(review): legacy Python 2 code -- the file imports the removed
    # ``commands`` module, and ``complete()`` below calls ``len()`` on the
    # result of ``map()``, which only works on Python 2.
    def __init__(self, **kwargs):
        """
        This is a many-to-one workflow.
        In the end, input-output mapping might look like
        [
            [ ["i1.root","i2.root"], "o1.root" ],
            [ ["i3.root","i4.root"], "o2.root" ],
            [ ["i5.root"], "o3.root" ],
        ]
        """
        self.sample = kwargs.get("sample", None)
        self.min_completion_fraction = kwargs.get("min_completion_fraction", 1.0)
        self.open_dataset = kwargs.get("open_dataset", False)
        self.events_per_output = kwargs.get("events_per_output", -1)
        self.files_per_output = kwargs.get("files_per_output", -1)
        self.output_name = kwargs.get("output_name","output.root")
        self.arguments = kwargs.get("arguments","output.root")
        # self.output_dir = kwargs.get("output_dir",None)
        self.scram_arch = kwargs.get("scram_arch","slc6_amd64_gcc530")
        self.tag = kwargs.get("tag","v0")
        self.global_tag = kwargs.get("global_tag")
        self.cmssw_version = kwargs.get("cmssw_version", None)
        self.tarfile = kwargs.get("tarfile",None)
        # LHE, for example, might be large, and we want to use
        # skip events to process event chunks within files
        # in that case, we need events_per_output > 0 and total_nevents > 0
        self.split_within_files = kwargs.get("split_within_files", False)
        self.total_nevents = kwargs.get("total_nevents", -1)
        # If we have this attribute, then we must have gotten it from
        # a subclass (so use that executable instead of just bland condor exe)
        if not hasattr(self, "input_executable"):
            self.input_executable = kwargs.get("executable", self.get_metis_base()+"metis/executables/condor_skim_exe.sh")
        self.read_only = kwargs.get("read_only",False)
        special_dir = kwargs.get("special_dir", "ProjectMetis")
        # If we didn't get an output directory, use the canonical format. E.g.,
        #   /hadoop/cms/store/user/namin/ProjectMetis/MET_Run2017A-PromptReco-v2_MINIAOD_CMS4_V00-00-03
        hadoop_user = os.environ.get("USER") # NOTE, might be different for some weird folks
        self.output_dir = "/hadoop/cms/store/user/{0}/{1}/{2}_{3}/".format(hadoop_user,special_dir,self.sample.get_datasetname().replace("/","_")[1:],self.tag)
        # I/O mapping (many-to-one as described above)
        self.io_mapping = []
        # Some storage params
        self.prepared_inputs = False
        self.job_submission_history = {}
        self.queried_nevents = 0
        # Make a unique name from this task for pickling purposes
        self.unique_name = kwargs.get("unique_name", "{0}_{1}_{2}".format(self.get_task_name(),self.sample.get_datasetname().replace("/","_")[1:],self.tag))
        # Pass all of the kwargs to the parent class
        super(CondorTask, self).__init__(**kwargs)
        self.logger.info("Instantiated task for {0}".format(self.sample.get_datasetname()))
        # Can keep calling update_mapping afterwards to re-query input files
        if not self.read_only:
            do_flush = kwargs.get("flush", False)
            self.update_mapping(flush=do_flush)

    def info_to_backup(self):
        # Declare which variables we want to backup to avoid recalculation
        return ["io_mapping","executable_path",\
                "package_path","prepared_inputs", \
                "job_submission_history","global_tag","queried_nevents"]

    def handle_done_output(self, out):
        """
        Handle outputs that have finished
        (I.e., they exist and are not on condor)
        """
        out.set_status(Constants.DONE)
        self.logger.debug("This output ({0}) exists, skipping the processing".format(out))

    def get_job_submission_history(self):
        # Map of output index -> list of condor cluster ids ever submitted for it.
        return self.job_submission_history

    def get_inputs_for_output(self, output):
        """
        Takes either a File object or a filename
        and returns the list of inputs in io_mapping
        corresponding to that output
        """
        for inps,out in self.io_mapping:
            if type(output) == str:
                if os.path.normpath(output) == os.path.normpath(out.get_name()):
                    return inps
            else:
                if out == output:
                    return inps
        # NOTE(review): falls through to returning the *argument* itself when
        # nothing matches -- confirm callers rely on this rather than None.
        return output

    def update_mapping(self, flush=False, override_chunks=[]):
        """
        Given the sample, make the input-output mapping by chunking
        """
        # NOTE(review): mutable default ``override_chunks=[]`` -- harmless here
        # since it is never mutated, but a known Python pitfall.
        # get set of filenames from File objects that have already been mapped
        already_mapped_inputs = set(map(lambda x: x.get_name(),self.get_inputs(flatten=True)))
        already_mapped_outputs = map(lambda x: x.get_index(),self.get_outputs())
        nextidx = 1
        if already_mapped_outputs:
            nextidx = max(already_mapped_outputs)+1
        original_nextidx = nextidx+0
        # if dataset is "closed" and we already have some inputs, then
        # don't bother doing get_files() again (wastes a DBS query)
        if (len(already_mapped_inputs) > 0 and not self.open_dataset):
            files = []
        else:
            files = [f for f in self.sample.get_files() if f.get_name() not in already_mapped_inputs]
            self.queried_nevents = self.sample.get_nevents()
        # a closed dataset is always flushed (no leftover chunk is kept back)
        flush = (not self.open_dataset) or flush
        prefix, suffix = self.output_name.rsplit(".",1)
        if self.split_within_files:
            # every chunk sees the full file list; jobs differ by skip-events
            if self.total_nevents < 1 or self.events_per_output < 1:
                raise Exception("If splitting within files (presumably for LHE), need to specify total_nevents and events_per_output")
            nchunks = int(self.total_nevents / self.events_per_output)
            chunks = [files for _ in range(nchunks)]
            leftoverchunk = []
        else:
            chunks, leftoverchunk = Utils.file_chunker(files, events_per_output=self.events_per_output, files_per_output=self.files_per_output, flush=flush)
        if len(override_chunks) > 0:
            self.logger.info("Manual override to have {0} chunks".format(len(override_chunks)))
            chunks = override_chunks
            leftoverchunk = []
        for chunk in chunks:
            if not chunk: continue
            # outputs are numbered sequentially: <prefix>_<idx>.<suffix>
            output_path = "{0}/{1}_{2}.{3}".format(self.get_outputdir(),prefix,nextidx,suffix)
            output_file = EventsFile(output_path)
            nevents_in_output = sum(map(lambda x: x.get_nevents(), chunk))
            output_file.set_nevents(nevents_in_output)
            self.io_mapping.append([chunk, output_file])
            nextidx += 1
        if (nextidx-original_nextidx > 0):
            self.logger.info("Updated mapping to have {0} more entries".format(nextidx-original_nextidx))

    def flush(self):
        """
        Convenience function
        """
        self.update_mapping(flush=True)

    def get_sample(self):
        return self.sample

    def get_outputdir(self):
        return self.output_dir

    def get_io_mapping(self):
        """
        Return input-output mapping
        """
        return self.io_mapping

    def reset_io_mapping(self):
        """
        Clear the input-output mapping
        """
        self.io_mapping = []

    def get_inputs(self, flatten=False):
        """
        Return list of lists, but only list if flatten is True
        """
        ret = [x[0] for x in self.io_mapping]
        if flatten: return sum(ret,[])
        else: return ret

    def get_completed_outputs(self):
        """
        Return list of completed output objects
        """
        return [o for o in self.get_outputs() if o.get_status() == Constants.DONE]

    def get_outputs(self):
        """
        Return the list of output File objects (one per mapping entry)
        """
        return [x[1] for x in self.io_mapping]

    def complete(self, return_fraction=False):
        """
        Return bool for completion, or fraction if
        return_fraction specified as True
        """
        # NOTE(review): Python 2 semantics -- on Python 3 ``map`` returns an
        # iterator and ``len(bools)`` would raise TypeError.
        bools = map(lambda output: output.get_status() == Constants.DONE, self.get_outputs())
        if len(bools) == 0: frac = 0.
        else: frac = 1.0*sum(bools)/len(bools)
        if return_fraction:
            return frac
        else:
            return frac >= self.min_completion_fraction

    def run(self, fake=False):
        """
        Main logic for looping through (inputs,output) pairs. In this
        case, this is where we submit, resubmit, etc. to condor
        If fake is True, then we mark the outputs as done and never submit
        """
        condor_job_dicts = self.get_running_condor_jobs()
        condor_job_indices = set([int(rj["jobnum"]) for rj in condor_job_dicts])
        # main loop over input-output map
        for ins, out in self.io_mapping:
            # force a recheck to see if file exists or not
            # in case we delete it by hand to regenerate
            out.recheck()
            index = out.get_index() # "merged_ntuple_42.root" --> 42
            on_condor = index in condor_job_indices
            done = (out.exists() and not on_condor)
            if done:
                self.handle_done_output(out)
                continue
            if fake:
                out.set_fake()
            if not on_condor:
                # Submit and keep a log of condor_ids for each output file that we've submitted
                succeeded, cluster_id = self.submit_condor_job(ins, out, fake=fake)
                if succeeded:
                    if index not in self.job_submission_history: self.job_submission_history[index] = []
                    self.job_submission_history[index].append(cluster_id)
                    self.logger.info("Job for ({0}) submitted to {1}".format(out, cluster_id))
            else:
                # Already queued/running: babysit the job and remove it if stuck.
                this_job_dict = next(rj for rj in condor_job_dicts if int(rj["jobnum"]) == index)
                cluster_id = this_job_dict["ClusterId"]
                running = this_job_dict.get("JobStatus","I") == "R"
                idle = this_job_dict.get("JobStatus","I") == "I"
                held = this_job_dict.get("JobStatus","I") == "H"
                hours_since = abs(time.time()-int(this_job_dict["EnteredCurrentStatus"]))/3600.
                if running:
                    self.logger.debug("Job {0} for ({1}) running for {2:.1f} hrs".format(cluster_id, out, hours_since))
                    # kill jobs that have been running for more than a day
                    if hours_since > 24.0:
                        self.logger.debug("Job {0} for ({1}) removed for running for more than a day!".format(cluster_id, out))
                        Utils.condor_rm([cluster_id])
                elif idle:
                    self.logger.debug("Job {0} for ({1}) idle for {2:.1f} hrs".format(cluster_id, out, hours_since))
                elif held:
                    self.logger.debug("Job {0} for ({1}) held for {2:.1f} hrs with hold reason: {3}".format(cluster_id, out, hours_since, this_job_dict["HoldReason"]))
                    # kill jobs held for more than 5 hours
                    if hours_since > 5.0:
                        self.logger.info("Job {0} for ({1}) removed for excessive hold time".format(cluster_id, out))
                        Utils.condor_rm([cluster_id])

    def process(self, fake=False):
        """
        Prepare inputs
        Execute main logic
        Backup
        """
        # set up condor input if it's the first time submitting
        if not self.prepared_inputs: self.prepare_inputs()
        self.run(fake=fake)
        if self.complete():
            self.finalize()
        self.backup()
        self.logger.info("Ended processing {0}".format(self.sample.get_datasetname()))

    def finalize(self):
        """
        Take care of task-dependent things after
        jobs are completed
        """
        pass

    def get_running_condor_jobs(self):
        """
        Get list of dictionaries for condor jobs satisfying the
        classad given by the unique_name, requesting an extra
        column for the second classad that we submitted the job
        with (the job number)
        I.e., each task has the same taskname and each job
        within a task has a unique job num corresponding to the
        output file index
        """
        return Utils.condor_q(selection_pairs=[["taskname",self.unique_name]], extra_columns=["jobnum"])

    def submit_condor_job(self, ins, out, fake=False):
        # Build the positional argument list handed to the condor executable.
        outdir = self.output_dir
        outname_noext = self.output_name.rsplit(".",1)[0]
        inputs_commasep = ",".join(map(lambda x: x.get_name(), ins))
        index = out.get_index()
        cmssw_ver = self.cmssw_version
        scramarch = self.scram_arch
        executable = self.executable_path
        arguments = [ outdir, outname_noext, inputs_commasep,
                      index, cmssw_ver, scramarch, self.arguments ]
        logdir_full = os.path.abspath("{0}/logs/".format(self.get_taskdir()))
        package_full = os.path.abspath(self.package_path)
        # only ship the package tarball when one was configured
        input_files = [package_full] if self.tarfile else []
        extra = self.kwargs.get("condor_submit_params", {})
        return Utils.condor_submit(executable=executable, arguments=arguments,
                                   inputfiles=input_files, logdir=logdir_full,
                                   selection_pairs=[["taskname",self.unique_name],["jobnum",index]],
                                   fake=fake, **extra)

    def prepare_inputs(self):
        # need to take care of executable, tarfile
        self.executable_path = "{0}/executable.sh".format(self.get_taskdir())
        self.package_path = "{0}/package.tar.gz".format(self.get_taskdir())
        # take care of executable. easy.
        Utils.do_cmd("cp {0} {1}".format(self.input_executable,self.executable_path))
        # take care of package tar file if we were told to. easy.
        if self.tarfile:
            Utils.do_cmd("cp {0} {1}".format(self.tarfile,self.package_path))
        self.prepared_inputs = True

    def supplement_task_summary(self, task_summary):
        """
        To be overloaded by subclassers
        This allows putting extra stuff into the task summary
        """
        return task_summary

    def get_task_summary(self):
        """
        returns a dictionary with mapping and condor job info/history:
        must be JSON seralizable, so don't rely on repr for any classes!
        {
            "jobs": {
                <output_index>: {
                    "output": [outfilename,outfilenevents],
                    "inputs": [[infilename,infilenevents], ...],
                    "output_exists": out.exists(),
                    "condor_jobs": [
                        {
                            "cluster_id": <cluster_id>,
                            "logfile_err": <err_file_path>,
                            "logfile_out": <out_file_path>,
                        },
                        ...
                    ],
                    "current_job": <current_condorq_dict>,
                    "is_on_condor": <True|False>
                },
                ...
            },
            "queried_nevents": <dbsnevents>
            "open_dataset": self.open_dataset,
            "output_dir": self.output_dir,
            "tag": self.tag,
            "global_tag": self.global_tag,
            "cmssw_version": self.cmssw_version,
        }
        """
        # full path to directory with condor log files
        logdir_full = os.path.abspath("{0}/logs/std_logs/".format(self.get_taskdir()))+"/"
        # map from clusterid to condor dict
        d_oncondor = {}
        for job in self.get_running_condor_jobs():
            d_oncondor[int(job["ClusterId"])] = job
        # map from output index to historical list of clusterids
        d_history = self.get_job_submission_history()
        # map from output index to summary dictionaries
        d_jobs = {}
        for ins, out in self.get_io_mapping():
            index = out.get_index()
            d_jobs[index] = {}
            d_jobs[index]["output"] = [out.get_name(),out.get_nevents()]
            d_jobs[index]["output_exists"] = out.exists()
            d_jobs[index]["inputs"] = map(lambda x: [x.get_name(),x.get_nevents()], ins)
            submission_history = d_history.get(index,[])
            is_on_condor = False
            last_clusterid = -1
            if len(submission_history) > 0:
                # only the most recent submission can still be in the queue
                last_clusterid = submission_history[-1]
                is_on_condor = last_clusterid in d_oncondor
            d_jobs[index]["current_job"] = d_oncondor.get(last_clusterid,{})
            d_jobs[index]["is_on_condor"] = is_on_condor
            d_jobs[index]["condor_jobs"] = []
            for clusterid in submission_history:
                d_job = {
                    "cluster_id": clusterid,
                    "logfile_err": "{0}/1e.{1}.0.{2}".format(logdir_full,clusterid,"err"),
                    "logfile_out": "{0}/1e.{1}.0.{2}".format(logdir_full,clusterid,"out"),
                }
                d_jobs[index]["condor_jobs"].append(d_job)
        d_summary = {
            "jobs": d_jobs,
            "queried_nevents": (self.queried_nevents if not self.open_dataset else self.sample.get_nevents()),
            "open_dataset": self.open_dataset,
            "output_dir": self.output_dir,
            "tag": self.tag,
            "global_tag": self.global_tag,
            "cmssw_version": self.cmssw_version,
            "executable": self.input_executable,
        }
        d_summary = self.supplement_task_summary(d_summary)
        return d_summary
if __name__ == "__main__":
pass
|
994,338 | 451c863ae00bd0733744b12ea5516d268327c6cf | #Flashcard creator
import tkinter as tk
import tkinter.messagebox
import tkinter.filedialog
def newFlash(event):
    """Open the flashcard-creation flow (bound to the 'Make new flashcards' label)."""
    # local import so the newFlash module is only loaded on demand
    import newFlash
    newFlash.newFlashCards()
def editFlash(event):
    """Placeholder for the edit flow: currently only shows an info dialog."""
    tkinter.messagebox.showinfo("Edit Flashcards", "Edit Flashcards")
    """
    import editFlash
    editFlash.(function name)
    """
def runFlash(event):
    """Start a flashcard session (bound to the 'Use flashcards' label)."""
    import runFlash
    # NOTE(review): the meaning of the two boolean flags is defined in the
    # runFlash module -- confirm there.
    runFlash.runFlashCards(True,True)
# --- Main window setup ---
root = tk.Tk()
root.title("Flashcard creator")
root.geometry("500x250")
root.config(bg = "white")

btnFrm =tk.Frame(root, bg = "white")
btnFrm.pack(expand = True)

titleLab = tk.Label(btnFrm, text="Flashcard maker", font = ("Courier New", "20"), bg = "white")
titleLab.pack(pady = 10, padx = 10)
# NOTE(review): these "buttons" are Labels with a <Button-1> binding rather
# than tk.Button widgets, so they give no pressed/hover feedback.
makeFCBtn = tk.Label(btnFrm, text="Make new flashcards", font = ("Courier New", "15"), bg = "white")
makeFCBtn.pack(pady = 10, fill = "x")

editCurrentBtn = tk.Label(btnFrm, text="Edit existing flashdards", font = ("Courier New", "15"), bg = "white")
editCurrentBtn.pack(pady = 10, fill = "x")

useFCBtn = tk.Label(btnFrm, text="Use flashcards", font = ("Courier New", "15"), bg = "white")
useFCBtn.pack(pady = 10, fill = "x")

#Button bindings
makeFCBtn.bind("<Button-1>", newFlash)
editCurrentBtn.bind("<Button-1>", editFlash)
useFCBtn.bind("<Button-1>", runFlash)
root.mainloop()
|
994,339 | 99f9f9a0a9507e430776967c03366b34ecdb278b | from flask import Flask , render_template ,request
import pandas as pd
import pickle
import numpy as np
app = Flask(__name__)
# Data and model are loaded once at import time.
# NOTE(review): absolute Windows paths ('E:/...') make this machine-specific.
bikes = pd.read_csv("E:/project_bike/Cleaned_data.csv")
# SECURITY NOTE: pickle.load executes arbitrary code from the file -- only
# load trusted model files.
model = pickle.load(open('E:/project_bike/LinearRegressionModel.pkl', 'rb'))
@app.route('/')
def index():
    """Render the home page with dropdown options derived from the dataset.

    Fix: the original had a second, unreachable ``return render_template``
    after the first return; the dead statement has been removed.
    """
    brands = sorted(bikes['brand'].unique())
    bikenames = sorted(bikes['bike_name'].unique())
    owners = sorted(bikes['owner'].unique())
    ages = sorted(bikes['age'].unique())
    powers = sorted(bikes['power'].unique())
    # placeholder entry shown before the user picks a real company
    brands.insert(0, "Select Company")
    return render_template('index.html', brands=brands, bikenames=bikenames,
                           owners=owners, ages=ages, powers=powers)
@app.route('/predict', methods=['POST'])
def predict():
    """Predict a resale price from the POSTed form and return it as a string."""
    form = request.form
    kms_driven = float(form.get('kms_driven'))
    age = float(form.get('age'))
    power = float(form.get('power'))
    print(kms_driven)
    # single-row frame whose columns match the model's training features
    features = pd.DataFrame(
        [[form.get('bikename'), kms_driven, form.get('owner'), age, power, form.get('brand')]],
        columns=['bike_name', 'kms_driven', 'owner', 'age', 'power', 'brand'])
    prediction = model.predict(features)
    print(prediction)
    return str(np.round(prediction[0], 2))
# Run the Flask dev server when executed directly (debug mode: not for production).
if __name__=='__main__':
    app.run(debug=True)
994,340 | 4e55873c9c02666daceef0dc944da34e2edaef14 | # Реализовать функцию, принимающую несколько параметров, описывающих данные пользователя:
# имя, фамилия, год рождения, город проживания, email, телефон.
# Функция должна принимать параметры как именованные аргументы.
# Реализовать вывод данных о пользователе одной строкой.
def user_data_print(name, surname, birth_year, city, email, phone):
    """Print a two-line summary of a user's details (keyword-friendly arguments)."""
    summary = (
        f'Имя: {name}; Фамилия: {surname}; Год рождения: {birth_year}; Город проживания: {city};'
        f'Адрес электронной почты: {email};\nТелефон: {phone}.'
    )
    print(summary)
# Demo call: arguments are passed as keywords, so order does not matter.
user_data_print(surname='Петров', name='Иван', phone='+7 999 999 9999', email='i.petrov@geekbranis.ru',
                city='Москва', birth_year=1980)
|
994,341 | fe1cae6c9581b20ee27a6a0dfbe59c133e1cf7fe | from flask import Flask, render_template, request, redirect, session
app = Flask(__name__)
# SECURITY NOTE: the session-signing key is hard-coded; load it from the
# environment in production.
app.secret_key = 'X8zad82VwK782GPc'
# NOTE(review): app.count appears unused -- the per-visitor counter lives in
# the session instead.
app.count = 0
def set_session():
    """Reset the per-visitor counter stored in the Flask session."""
    session['count'] = 0
@app.route('/')
def index():
    """Count this page view and render the counter.

    Fix: use ``session.get`` so the very first visit (no 'count' key yet)
    does not raise ``KeyError`` as the original did.
    """
    session['count'] = session.get('count', 0) + 1
    return render_template('index.html', count = session['count'])
@app.route('/increment', methods=['POST'])
def increment_twice():
    """Handle the increment button.

    We only increment by 1 here since the redirect back to '/' increments
    again, for +2 total.  Fix: ``session.get`` guards against a POST arriving
    before the session counter exists (the original raised ``KeyError``).
    """
    session['count'] = session.get('count', 0) + 1
    return redirect('/')
@app.route('/clear', methods=['POST'])
def clear():
    """Reset the visit counter and return to the index page."""
    set_session()
    return redirect('/')
# Start the dev server when run directly (debug mode: not for production).
if __name__ == "__main__":
    app.run(debug = True)
994,342 | d3749f04437307cae57a812e13ab415561404008 | import yaml
import os
from pprint import pprint
## define custom tag handler
## define custom tag handler
def join(loader, node):
    """YAML ``!join`` tag: concatenate the sequence's items as strings."""
    items = loader.construct_sequence(node)
    return ''.join(str(item) for item in items)
def ujoin(loader, node):
    """YAML ``!ujoin`` tag: join the sequence's items with underscores."""
    items = loader.construct_sequence(node)
    return '_'.join(str(item) for item in items)
def join_path(d, root):
    """Recursively prefix every 'path' entry in a nested dict/list structure.

    A dict's own 'path' (if present) is joined onto ``root`` and then serves
    as the root for everything nested beneath it.  Dicts are modified in
    place; the (possibly rebuilt) structure is returned.
    """
    if isinstance(d, list):
        return [join_path(entry, root) for entry in d]
    if not isinstance(d, dict):
        return d  # scalar leaf: nothing to rewrite
    if 'path' in d:
        d['path'] = os.path.join(root, d['path'])
        root = d['path']
    for key in d:
        d[key] = join_path(d[key], root)
    return d
## register the tag handlers on the default Loader
yaml.add_constructor('!join', join)
yaml.add_constructor('!ujoin', ujoin)

# SECURITY NOTE: yaml.load without an explicit Loader is unsafe on untrusted
# input (it can construct arbitrary objects).  Prefer
# yaml.load(stream, Loader=yaml.FullLoader); plain safe_load would not see
# the custom constructors registered above.
with open('./tests/test-data/mock_genomics_server.yml', 'r') as stream:
    pprint(join_path(yaml.load(stream), './tests/test-data'))
|
994,343 | 093c55951e9e4091983952f6431d6ddd3d587886 | from django.db import models
from django.contrib.auth.models import User
# Create your models here.
# (stored value, human-readable label) pairs for Sale.product.
PRODUCT_CHOICES=(
    ('TV','tv'),
    ('IPAD','ipad'),
    ('PLAYSTATION','playstation'),
)
class Sale(models.Model):
    """A single sale of one product line by a salesman; ``total`` is derived."""

    product=models.CharField(max_length=20,choices=PRODUCT_CHOICES)
    # NOTE(review): field name 'seleman' (sic) kept for schema compatibility.
    seleman=models.ForeignKey(User,on_delete=models.CASCADE)
    quantity=models.PositiveIntegerField()
    # computed in save(); not meant to be supplied by callers
    total=models.FloatField(blank=True)
    created_date=models.DateTimeField(auto_now=True)

    # Unit prices per product code.  The original wrote e.g. ``price=558,93``
    # (a decimal comma), which Python parses as the *tuple* ``(558, 93)`` and
    # breaks the ``price * quantity`` arithmetic below -- fixed to floats.
    UNIT_PRICES = {
        'TV': 558.93,
        'IPAD': 298.23,
        'PLAYSTATION': 450.83,
    }

    def __str__(self):
        return f"{self.product}-{self.quantity}"

    def save(self,*args,**kwargs):
        """Recompute ``total`` from the unit price before persisting.

        Raises ValueError for an unknown product code (the original fell
        through with ``price=None`` and crashed on ``None * quantity``).
        """
        price = self.UNIT_PRICES.get(self.product)
        if price is None:
            raise ValueError(f"Unknown product {self.product!r}")
        self.total = price * self.quantity
        super().save(*args,**kwargs)
|
def get_most_freq_indices(list_of_indices, index_map_dict):
    """Map each index through ``index_map_dict``; unmapped indices become 0.

    Emits a warning to stdout whenever an index is missing from the map,
    then substitutes 0 (the unknown tag).
    """
    remapped = []
    for original in list_of_indices:
        if original in index_map_dict:
            remapped.append(index_map_dict[original])
        else:
            print("This shouldn't have happened. An index was not found in the index map. Currently changed to unknown tag but needs to be checked.")
            remapped.append(0)
    assert len(remapped) == len(list_of_indices)
    return remapped
994,345 | cebbd209f94584953d6a21eb6b9c4491194cc23f | # -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-02-13 16:30
from __future__ import unicode_literals
import django.contrib.postgres.fields.jsonb
from django.db import migrations
class Migration(migrations.Migration):
    """Add a nullable JSONB ``timezones`` field to ``FederalSubject``."""

    dependencies = [
        ('regions', '0005_omit_city'),
    ]

    operations = [
        migrations.AddField(
            model_name='federalsubject',
            name='timezones',
            field=django.contrib.postgres.fields.jsonb.JSONField(null=True, verbose_name='Часовые пояса'),
        ),
    ]
|
994,346 | aef5cfa20ea5b6c2829198be7da4201439a9060e | # Generated by Django 2.0.6 on 2018-06-28 23:24
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Point ExamSlot.start_time_slot at TimeSlot with a CASCADE delete and
    the reverse accessor name 'exam_slot'."""

    dependencies = [
        ('registration', '0014_auto_20180628_2314'),
    ]

    operations = [
        migrations.AlterField(
            model_name='examslot',
            name='start_time_slot',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='exam_slot', to='registration.TimeSlot'),
        ),
    ]
|
def heap(arr, n, i):
    """Sift-down heapify: restore the max-heap property for the subtree of
    ``arr`` rooted at index ``i``, considering only the first ``n`` items.

    Fix: the original left-child line was corrupted
    (``largest = l t exists and is``, a syntax error); restored to
    ``largest = l`` with the comment separated out.
    """
    largest = i      # assume the subtree root is largest
    l = 2 * i + 1    # left child index
    r = 2 * i + 2    # right child index
    # See if the left child exists and is greater than the root.
    if l < n and arr[i] < arr[l]:
        largest = l
    # See if the right child exists and is greater than the current largest.
    if r < n and arr[largest] < arr[r]:
        largest = r
    # If a child was larger, swap it up and continue sifting down.
    if largest != i:
        arr[i], arr[largest] = arr[largest], arr[i]
        heap(arr, n, largest)
def heapSort(arr):
    """In-place heap sort: build a max-heap, then repeatedly move the max to the end."""
    size = len(arr)
    # Build the heap from the last internal node up to the root.
    for root in range(size // 2 - 1, -1, -1):
        heap(arr, size, root)
    # Shrink the heap one slot at a time, parking the current max at the boundary.
    for end in range(size - 1, 0, -1):
        arr[end], arr[0] = arr[0], arr[end]  # swap max to its final place
        heap(arr, end, 0)
# Demo: sort a sample array in place and print it.
arr1 = [ 12, 11, 13, 5, 6, 7]
heapSort(arr1)
n = len(arr1)
print ("Sorted array is")
for i in range(n):
    # NOTE(review): the trailing comma is Python 2 print syntax; on Python 3
    # this builds a throwaway one-element tuple.
    print ("%d" % arr1[i]),
# Globals used by findSmallestRange below: N is the per-list length and ptr
# holds one read cursor per input list.
N = 5
ptr = [0 for i in range(501)]
def findSmallestRange(arr, n, k):
    """Print the smallest value range [minel, maxel] that contains at least
    one element from each of the ``k`` sorted lists in ``arr`` (each of
    length ``n``).

    Classic k-pointer sweep: repeatedly advance the cursor of the list whose
    current element is smallest, tracking the best (max - min) window seen.
    Relies on the module-level ``ptr`` list for the cursors and prints the
    result rather than returning it.
    """
    i, minval, maxval, minrange, minel, maxel, flag, minind = 0, 0, 0, 0, 0, 0, 0, 0
    # reset the per-list read cursors
    for i in range(k + 1):
        ptr[i] = 0
    minrange = 10**9
    while(1):
        minind = -1
        minval = 10**9
        maxval = -10**9
        flag = 0
        # scan the current element of every list to find the min and max
        for i in range(k):
            if(ptr[i] == n):
                # one list is exhausted: no further windows are possible
                flag = 1
                break
            if(ptr[i] < n and arr[i][ptr[i]] < minval):
                minind = i # update the index of the list
                minval = arr[i][ptr[i]]
            if(ptr[i] < n and arr[i][ptr[i]] > maxval):
                maxval = arr[i][ptr[i]]
        if(flag):
            break
        # advance the list that held the minimum
        ptr[minind] += 1
        if((maxval-minval) < minrange):
            minel = minval
            maxel = maxval
            minrange = maxel - minel
    print("The smallest range is [", minel, maxel, "]")
# Demo: three sorted lists; print the smallest range covering one element
# from each (expected: [6, 8]).
arr2 = [
    [4, 7, 9, 12, 15],
    [0, 8, 10, 14, 20],
    [6, 12, 16, 30, 50]
]
k = len(arr2)
findSmallestRange(arr2, N, k)
|
994,348 | a65840ee0030d72f49f4ab1cc661354e7b9f3e95 | # Create a class to hold a city location. Call the class "City". It should have
# fields for name, lat and lon (representing latitude and longitude).
class City(object):
    """A named location with latitude/longitude in decimal degrees."""

    def __init__(self, name, lat, lon):
        self.name = name  # city name
        self.lat = lat    # latitude, decimal degrees
        self.lon = lon    # longitude, decimal degrees
# We have a collection of US cities with population over 750,000 stored in the
# file "cities.csv". (CSV stands for "comma-separated values".)
#
# In the body of the `cityreader` function, use Python's built-in "csv" module
# to read this file so that each record is imported into a City instance. Then
# return the list with all the City instances from the function.
# Google "python 3 csv" for references and use your Google-fu for other examples.
#
# Store the instances in the "cities" list, below.
#
# Note that the first line of the CSV is header that describes the fields--this
# should not be loaded into a City object.
cities = []
import os # importing for platform agnostic filepath building
import csv # needed for csvs
def cityreader(cities=None):
    '''
    Read cities.csv (located next to this module) into City instances.

    Appends one City per data row to ``cities`` and returns that list.
    Fixes: the original used a mutable default argument (``cities=[]``),
    which is shared across calls; it also tracked the header row with a
    manual flag and called ``close()`` inside the ``with`` block (redundant).
    Passing a list explicitly behaves exactly as before.
    '''
    if cities is None:
        cities = []
    filepath = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'cities.csv')
    with open(filepath, 'r') as csv_file:
        csv_reader = csv.reader(csv_file, delimiter=',')
        next(csv_reader, None)  # skip the header row
        for row in csv_reader:
            # columns used: 0 = name, 3 = latitude, 4 = longitude
            cities.append(City(row[0], float(row[3]), float(row[4])))
    return cities
# Populate the module-level list once at import time.
cityreader(cities)
# Print the list of cities (name, lat, lon), 1 record per line.
# NOTE(review): City defines no __repr__, so this prints default object reprs.
for c in cities:
    print(c)
# STRETCH GOAL!
#
# Allow the user to input two points, each specified by latitude and longitude.
# These points form the corners of a lat/lon square. Pass these latitude and
# longitude values as parameters to the `cityreader_stretch` function, along
# with the `cities` list that holds all the City instances from the `cityreader`
# function. This function should output all the cities that fall within the
# coordinate square.
#
# Be aware that the user could specify either a lower-left/upper-right pair of
# coordinates, or an upper-left/lower-right pair of coordinates. Hint: normalize
# the input data so that it's always one or the other, then search for cities.
# In the example below, inputting 32, -120 first and then 45, -100 should not
# change the results of what the `cityreader_stretch` function returns.
#
# Example I/O:
#
# Enter lat1,lon1: 45,-100
# Enter lat2,lon2: 32,-120
# Albuquerque: (35.1055,-106.6476)
# Riverside: (33.9382,-117.3949)
# San Diego: (32.8312,-117.1225)
# Los Angeles: (34.114,-118.4068)
# Las Vegas: (36.2288,-115.2603)
# Denver: (39.7621,-104.8759)
# Phoenix: (33.5722,-112.0891)
# Tucson: (32.1558,-110.8777)
# Salt Lake City: (40.7774,-111.9301)
def cityreader_stretch(lat1, lon1, lat2, lon2, cities=[]):
    """Return the cities whose coordinates fall inside (inclusive) the
    lat/lon box spanned by the two corner points.

    The corners may be given in either order (lower-left/upper-right or
    upper-left/lower-right); the bounds are normalised before searching.
    """
    # normalise: lo_* is the smaller bound, hi_* the larger
    lo_lat, hi_lat = min(lat1, lat2), max(lat1, lat2)
    lo_lon, hi_lon = min(lon1, lon2), max(lon1, lon2)
    return [city for city in cities
            if lo_lat <= city.lat <= hi_lat and lo_lon <= city.lon <= hi_lon]
# Prompt until the user supplies two parsable "lat, lon" corner points.
while True:
    print('Please enter two corners to construct a latitude and longitude square.')
    in1 = input('Enter lat1, lon1:')
    in2 = input('Enter lat2, lon2:')
    try:
        # expects the exact separator ", " between the two numbers
        lat1, lon1 = in1.split(', ')
        lat2, lon2 = in2.split(', ')
        lat1 = float(lat1)
        lon1 = float(lon1)
        lat2 = float(lat2)
        lon2 = float(lon2)
        break
    except:
        # NOTE(review): bare except also swallows KeyboardInterrupt;
        # catching ValueError would be safer.
        print('Incorrect input. Please enter latitude and longitude like the following:')
        print('Enter lat1, lon1: 45, -100')
        print('or')
        print('Enter lat2, lon2: 45.68, -99.45')
# Report every city falling inside the requested box.
inside = cityreader_stretch(lat1, lon1, lat2, lon2, cities)
print('The following cities are within the specified square:')
for c in inside:
    print(f'{c.name}: ({c.lat}, {c.lon})')
994,349 | f8ae73dcf7b1f960c6e49590d809e2e8a8fde41f | import unittest
"""
Tests for different Repository's.
"""
# TODO: To implement
class AccountRepositoryTest(unittest.TestCase):
    """Placeholder for Account repository tests (not yet implemented)."""
    pass
class UserRepositoryTest(unittest.TestCase):
    """Placeholder for User repository tests (not yet implemented)."""
    pass
|
994,350 | 81380aa29b5dfaa6f7e9c52621254f84ab2adbc3 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is part of the python-chess-ai.
# Copyright (C) 2018 Lars Dittert <lars.dittert@de.ibm.com> and Pascal Schroeder <pascal.schroeder@de.ibm.com>
#
# Implementation of player interface for API given input
#
from player.interface import PlayerInterface
class Player(PlayerInterface):
    """API-driven player: every operation simply delegates to PlayerInterface."""

    def __init__(self, num, name, ui_status, difficulty=None):
        super().__init__(num, name, ui_status, difficulty)

    def get_move(self, board):
        # NOTE(review): the superclass result is discarded -- if the interface
        # expects the chosen move to be returned, this should be
        # ``return super().get_move(board)``.  Confirm against the interface.
        super().get_move(board)

    def submit_move(self, move):
        super().submit_move(move)
994,351 | 10e415c55d57f9b94287cd8f91a0c7ea07f7e80f | # For all questions, use the following class definitions
class MaxHeap(object):
    """Array-backed max-heap: ``tree[(i - 1) // 2]`` is the parent of ``tree[i]``."""

    # Constructor
    def __init__(self):
        self.tree = []

    # --------------------------------------------------------------------------------------------------------------
    # Problem 19
    # --------------------------------------------------------------------------------------------------------------
    def get_max_sibling_gap(self):
        """Return the largest absolute difference between two siblings
        (children of the same parent), or -1 if no node has two children.

        Fixes two defects in the original: the loop condition tested index
        ``2*i + 1`` twice (never ``2*i + 2``, so the right-child access could
        raise IndexError) and the scan started at parent 1, skipping the
        root's children.
        """
        max_sib_gp = -1
        parent = 0
        # in a complete tree, parents with two children occupy a contiguous prefix
        while 2 * parent + 2 < len(self.tree):
            left_sibling = self.tree[2 * parent + 1]
            right_sibling = self.tree[2 * parent + 2]
            gap = abs(left_sibling - right_sibling)
            if gap > max_sib_gp:
                max_sib_gp = gap
            parent += 1
        return max_sib_gp

    # --------------------------------------------------------------------------------------------------------------
    # Problem 20
    # --------------------------------------------------------------------------------------------------------------
    def is_valid(self):
        """Return True iff every node is <= its parent (max-heap property)."""
        for i in range(1, len(self.tree)):
            parent_value = self.tree[(i - 1) // 2]
            if parent_value < self.tree[i]:
                return False
        return True

    # --------------------------------------------------------------------------------------------------------------
    # Problem 21
    # --------------------------------------------------------------------------------------------------------------
    def is_a_node_equal_to_its_parent(self):
        """Return True iff some node has the same value as its parent."""
        for i in range(1, len(self.tree)):
            parent_value = self.tree[(i - 1) // 2]
            if parent_value == self.tree[i]:
                return True
        return False

    # --------------------------------------------------------------------------------------------------------------
    # Problem 22
    # --------------------------------------------------------------------------------------------------------------
    def print_path(self, i):
        """Print the values from node ``i`` up to the root, one per line.

        Fix: the original guard was inverted (``if i < len(self.tree):
        return``), which returned immediately for every valid index and
        crashed for invalid ones; now out-of-range indices are rejected.
        """
        if i >= len(self.tree) or i < 0:
            return
        while i != 0:
            print(self.tree[i])
            i = (i - 1) // 2
        print(self.tree[0])
        return
|
class Person:
    """A person with public name/age and a private marital-status flag."""

    def __init__(self, name, age, married):
        # Public attributes.
        self.name = name
        self.age = age
        # Name-mangled ("private") attribute; read through isMarried().
        self.__married = married

    def birthday(self):
        """Mutator: advance the person's age by one year."""
        self.age += 1

    def getName(self):
        """Accessor for the public name."""
        return self.name

    def isMarried(self):
        """Accessor for the private married flag."""
        return self.__married
# Demo of the Person class.
bob = Person('Bob', 32, True)
print(bob.getName())
# prints Bob
bob.birthday()
print(bob.age)
# prints 33
print(bob.isMarried())
# prints True
994,353 | 3ab8c57842612dd8f8dc52d866683a6e56ef46bd | from turtle import Turtle
import random
# The food class inherits from the turtle, it can call all of its attributes
class Food(Turtle):
    """The snake's food pellet: a small yellow circle that jumps to a new
    random on-screen position via refresh()."""

    def __init__(self):
        super().__init__()
        self.shape("circle")
        self.penup()  # don't draw a line when the pellet moves
        self.shapesize(stretch_len=0.5, stretch_wid=0.5)  # half the default stamp size
        self.color("yellow")
        self.speed("fastest")  # move instantly, no animation
        self.refresh()  # place the pellet at its first random spot

    def refresh(self):
        """Move the pellet to a random position within the play area."""
        random_x = random.randint(-280, 280)
        random_y = random.randint(-280, 280)
        self.goto(random_x, random_y)
|
994,354 | 8cab1c1f8c5e6299a550af4474dfca10d5f47546 |
import copy
import json
import logging
from . import utils
# -------------------------------------------------------------------------------------------------
def _update_config_by_args(config, args, prefix=""):
    """Recursively overwrite config values with matching non-None CLI args.

    Nested dict keys map to argument names joined with underscores; dashes
    in keys become underscores (mirroring argparse dest naming).
    """
    for key, value in config.items():
        arg_name = prefix + key.replace("-", "_")
        if isinstance(value, dict):
            _update_config_by_args(value, args, arg_name + "_")
        else:
            override = args.get(arg_name)
            if override is not None:
                config[key] = override
# -------------------------------------------------------------------------------------------------
def _add_args_from_config_desc(parser, config_desc, prefix="--"):
    """Register one CLI option per leaf of the (possibly nested) description.

    A leaf is either a plain default value or a ``(default, type, help)``
    tuple; nested dicts contribute dash-joined option names.
    """
    for key, val in config_desc.items():
        arg_name = prefix + key
        if isinstance(val, dict):
            _add_args_from_config_desc(parser, val, arg_name + "-")
        elif isinstance(val, tuple):
            # tuple form: (default value, converter type, help text)
            parser.add_argument(arg_name, type=val[1], metavar="V",
                                help="{}, default: {}".format(val[2], val[0]))
        else:
            # bools need a parser (utils.str2bool) since bool("False") is True
            arg_type = utils.str2bool if isinstance(val, bool) else type(val)
            parser.add_argument(arg_name, type=arg_type, metavar="V",
                                help="{}, default: {}".format(type(val).__name__, val))
# -------------------------------------------------------------------------------------------------
def _update_config(dst, src, config_desc, path=""):
    """Recursively copy values from ``src`` into ``dst``, coercing each leaf
    to the type implied by ``config_desc`` (tuple leaves carry an explicit
    converter; plain leaves use the type of their default)."""
    for key, new_val in src.items():
        field_desc = config_desc.get(key)
        if isinstance(new_val, dict):
            _update_config(dst.get(key), new_val, field_desc, "{}/{}".format(path, key))
            continue
        if (type(field_desc) is tuple) and (type(new_val) is str):
            dst[key] = field_desc[1](new_val)  # explicit converter from the description
        else:
            dst[key] = type(field_desc)(new_val)
        logging.debug("Set {}={} from config file".format(key, dst[key]))
# -------------------------------------------------------------------------------------------------
def _create_config_from_desc(config_desc):
    """Materialise a config dict of default values from a config description."""
    result = {}
    for key, val in config_desc.items():
        if isinstance(val, tuple):
            # (default, type, help): coerce the default through the converter
            result[key] = val[1](val[0])
        elif isinstance(val, dict):
            result[key] = _create_config_from_desc(val)
        else:
            result[key] = val
    return result
# -------------------------------------------------------------------------------------------------
def add_arguments_from_config_desc(parser, config_desc, read_from_file=False):
    """Add the --config/-C option plus one option per config_desc leaf.

    NOTE(review): ``read_from_file`` is currently unused; kept for callers
    that may pass it.
    """
    parser.add_argument("--config", "-C", type=str, metavar="PATH", help="Config path")
    _add_args_from_config_desc(parser, config_desc)
# -------------------------------------------------------------------------------------------------
def get_config_from_args(args, config_desc):
    """Resolve the final configuration from three layers, lowest to highest
    priority: description defaults, the optional JSON config file named by
    ``args.config``, and explicitly supplied CLI arguments.
    """
    config = _create_config_from_desc(config_desc)
    if args.config is not None:
        logging.debug("Update default config by user's one '{}'".format(args.config))
        with open(args.config, "r") as config_fp:
            _update_config(config, json.load(config_fp), config_desc)
    _update_config_by_args(config, vars(args))
    return config
|
994,355 | e7c4c7c2af8c8c9918b1a8385de15350962e8b9d | import get_report
import get_token
import requests
import json
def main():
    """Fetch current conditions for a user-supplied zipcode and print a report.

    Prompts for a zipcode, queries the Wunderground API (only if the API
    usage budget allows), caches the raw JSON response in ``weather.json``
    and prints a formatted report.

    Fix: the output file is now opened with a context manager so the handle
    is closed — the previous version leaked an open file object.
    """
    get_report.print_the_header()
    url = 'http://api.wunderground.com/api/{token}/{features}/{settings}/q/{query}.{format}'
    code = input('What zipcode do you want the weather for (21234)? ')
    payload = {'token': get_token.get_token(),
               'features': 'conditions',
               'settings': 'lang:EN',
               'query': code,
               'format': 'json'
               }
    if get_report.check_api_use_rate():
        data = requests.get(url.format(**payload))
        if data.status_code == requests.codes.ok:
            weather_json = json.loads(data.text)
            # Cache the raw response for debugging/offline inspection.
            with open('weather.json', 'w') as fout:
                print(weather_json, file=fout)
            get_report.print_report(weather_json)
    else:
        print("Please wait until your API calls have reset")


if __name__ == '__main__':
    main()
|
994,356 | 6741bce14bd6b833cc84cb60d37bd63a4494284b | from typing import Any, Dict, Optional
import cloudpickle as pickle
from cloudpathlib import AnyPath
class Artifact:
    """A pickled value stored on disk, addressed by *key* under *path*.

    In run documents the artifact is represented by a small marker dict
    (see ``to_dict``/``is_artifact``) instead of the value itself.
    """

    MAGIC_KEY = "__yamlett_artifact__"

    def __init__(self, path: AnyPath, key: str, value: Optional[Any] = None):
        self.path = path
        self.key = key
        self.value = value

    @staticmethod
    def is_artifact(d: Dict):
        """Return the marker flag when *d* looks like a serialized artifact."""
        return d.get(Artifact.MAGIC_KEY, False) if isinstance(d, dict) else False

    def _pickle_path(self):
        # Location of the pickle file backing this artifact.
        return self.path.joinpath(f"{self.key}.pkl")

    def load(self):
        """Unpickle and return the stored value."""
        with self._pickle_path().open("rb") as fd:
            return pickle.load(fd)

    def save(self):
        """Pickle ``self.value`` to disk; refuse to save a valueless artifact."""
        if self.value is None:
            raise ValueError(
                "Cannot save an artifact that doesn't have "
                "a value. Make sure you passed a value argument "
                "when initializing your artifact if you want "
                "to save it to disk."
            )
        with self._pickle_path().open("wb") as fd:
            pickle.dump(self.value, fd)

    def to_dict(self):
        """Marker dict recorded in the run document in place of the value."""
        return {f"{self.key}.{self.MAGIC_KEY}": True}
|
994,357 | e45580f3aae8815255fb583a01395479122643d7 | def make_read(secs):
h = int(secs)/3600
m = int(secs)/60
s = int(secs) % 69
r = secs % 1
out = []
if h > 0: out.append(str(h))
if m > 0: out.append(str(m))
if s > 0: out.append(str(s))
out = [':'.join(out)] |
994,358 | 9ff4e2cdf0bcfa856a15587ba93a1072b86731df | '''This is the main program file for the auto grader program.
Auto_grader takes as input:
--directory (-d) [REQUIRED]: the directory containing all the student directories
    where their assignment submissions are located in the appropriate assignment
sub-directory.
--assignment (-a) [REQUIRED]: the assignment number that is to be graded
--student (-s) [OPTIONAL]: the specific student (id) to mark
Auto_grader leverages the following assumptions:
1) The specified directory containing the students' submissions has the following
structure:
./{studentID}/A{asn_num}/{asn_program}, where:
studentID takes the form of STUDENT_REG_EX
        asn_num is the assignment number (ex: 1 would be 'A1', 2 is 'A2', etc)
asn_program is the main program that the student submits and that
the auto_grader will run to test its correctness
(ex: warehouse.py)
NOTE: The directory structure configurations can be changed in ./config.py
2) The independent program 'student_grader' must be packaged with the desired
files and configurations to properly run the students' assignment.
(see student_grader/asn_config.py to modify each assignment's configurations and dependencies)
3) All exceptions related to assignment grading are handled and cared for in
the student_grader program so that if exceptional behavior occurs then
this program is unaware of it and is returned an appropriate result of the
grader with a (presumably) grade of 0 and an error message for student
feedback.
'''
from collections import defaultdict
import csv
import fcntl # FILe LOCKING
import subprocess
import sys
import os
import re
from optparse import OptionParser
import config
import student_grader.grade as grade
from student_grader.output.document import Document
from student_grader.output.output_format import TextFormat
from student_grader.exception.input_exception import InputException
from student_grader.assignment.assignment import Assignment, AssignmentReport
from student import Student
import traceback
# CONSTANTS
ASSIGNMENT_NUM = 2 # DEFAULT assignment if not specified at command line
# NOTE(review): name misspells "DELIMITER"; kept because it is referenced throughout.
CSV_DELIMTER = "," # Separator for table-like entries in CSV file
STUDENT_REG_EX = re.compile("[a-z]\d[a-z]+") # character-digit-character(s), e.g. "a1smith"
def check_input(options):
    '''Validate command-line options in place.

    Defaults ``options.assignment_num`` to ASSIGNMENT_NUM when absent and
    raises InputException listing every problem found with the supplied
    student directory.
    '''
    problems = []
    if options.student_directory is None:
        problems.append("-d (--directory) was not provided")
    elif not os.path.isdir(options.student_directory):
        problems.append("The specified student directory is invalid")
    if options.assignment_num is None:
        #LOG: "No assignment specified, defaulting to assignment #" + str(ASSIGNMENT_NUM)
        options.assignment_num = ASSIGNMENT_NUM
    if problems:
        problems.insert(0, "Invalid input arguments:")
        raise InputException('\n\t'.join(problems))
def grade_students_assignment(students, assignment_num):
    '''Runs the student_grader program for each student on the specified assignment number'''
    asn_config = config.assignment(assignment_num)
    for student in students:
        # argv-style list: [0] is the program-name placeholder, the rest are
        # the student's copies of each module the assignment depends on.
        student_grader_args = [None]
        print("size", len(asn_config.relative_paths()))
        for module_path in asn_config.relative_paths():
            print(module_path)
            student_grader_args.append(student.directory_path + '/' + module_path)
        # Run the grader and attach the resulting report to the student.
        assignment_report = grade.main(student_grader_args)
        student.report(assignment_report)
def generate_student_reports(students, assignment_num):
    '''
    Generates the overall reports for all students: appends per-student rows
    to A<assignment_num>.csv (creating the header rows on first use) and
    prints Assignment and TestCase statistics to STDOUT.
    '''
    assignment_reports = []                # Overall grades for all students
    test_case_reports = defaultdict(list)  # Overall grades per test case
    csv_stats = []                         # Rows for the CSV grade table
    csv_filename = "A{0}.csv".format(assignment_num)
    # NOTE(review): return value unused; the call is kept in case it has side effects.
    asn_config = config.assignment(assignment_num)
    if not os.path.isfile(csv_filename):
        # First run for this assignment: write the CSV header rows under an
        # exclusive lock so concurrent auto_grader runs don't interleave.
        with open(csv_filename, 'a', newline='') as fp:
            fcntl.flock(fp, fcntl.LOCK_EX)
            test_cases_fn = []
            test_cases = []
            # Find the first student with an assignment report to get the test
            # cases.  BUG FIX: the original advanced with `++student_number`
            # (a no-op in Python, so the loop either spun forever or raised)
            # and indexed the list before bounds-checking; both fixed here.
            student_number = 0
            while (student_number < len(students) and
                   students[student_number].get_assignment_report() is None):
                student_number += 1
            if student_number >= len(students):
                raise Exception("Assignment failed to run for all students")
            for tc_result in students[student_number].get_assignment_report().test_case_results:
                test_cases_fn.append(tc_result.test_case.function.__name__)
                test_cases.append(tc_result.test_case)
            test_cases_fn.sort()
            test_cases_fn.insert(0, 'cdfid')  # cdfid represents the studentID column
            test_cases_fn.append("total")     # Total/Assignment grade for student
            csv_stats.append(test_cases_fn)   # Columns are the test cases
            # Row of MAX grade for each test case plus the assignment total.
            max_grades = ["max"]
            total_score = 0
            for test_case in test_cases:
                max_grades.append(test_case.max_grade)
                total_score += test_case.max_grade
            max_grades.append(total_score)
            csv_stats.append(max_grades)
            writer = csv.writer(fp, delimiter=CSV_DELIMTER)
            writer.writerows(csv_stats)
            fcntl.flock(fp, fcntl.LOCK_UN)
        # END writing header to CSV
    # END if CSV file already exists
    csv_stats = []
    for student in students:
        assignment_report = student.get_assignment_report()
        student_test_grades = [student.id]
        student_test_total = 0
        if assignment_report is not None:
            assignment_reports.append(assignment_report.grade)
            test_case_results = assignment_report.test_case_results
            # .sort() to ensure test_cases are retrieved in the same order for each student
            test_case_results.sort()
            for test_case_result in test_case_results:
                test_case_reports[test_case_result.test_case].append(test_case_result.grade)
                student_test_grades.append(str(test_case_result.grade))
                student_test_total += test_case_result.grade
        student_test_grades.append(student_test_total)
        csv_stats.append(student_test_grades)
    # Print out test case and assignment stats
    print('Assignment Statistics:')
    print_stats(assignment_reports)
    for test_case in test_case_reports.keys():
        print('TestCase ({0}) Statistics:'.format(test_case))
        print_stats(test_case_reports[test_case])
    print("Generating CSV file")  # BUG FIX: message previously said "CSF"
    with open(csv_filename, 'a', newline='') as fp:
        writer = csv.writer(fp, delimiter=CSV_DELIMTER)
        fcntl.flock(fp, fcntl.LOCK_EX)
        writer.writerows(csv_stats)
        fcntl.flock(fp, fcntl.LOCK_UN)
def print_stats(reports):
    """Print min/max/mean/median of a list of grades, one per indented line."""
    stats = [
        ('Minimum', min(reports)),
        ('Maximum', max(reports)),
        ('Mean', sum(reports) / len(reports)),
        ('Median', median(reports)),
    ]
    for label, value in stats:
        print('\t{0}: {1}'.format(label, value))
def median(lst):
    """Return the median of *lst* (None for an empty list).

    Odd-length lists return the middle element unchanged; even-length lists
    return the float mean of the two middle elements.
    """
    ordered = sorted(lst)
    n = len(ordered)
    if n < 1:
        return None
    mid = n // 2
    if n % 2 == 1:
        return ordered[mid]
    return float((ordered[mid - 1] + ordered[mid]) / 2.0)
def main(argv):
    '''Entry point: parse options, collect student directories, grade them and
    emit the CSV/stdout reports.  All failures surface as InputException.'''
    try:
        (options, args) = parser.parse_args(argv)
        check_input(options)
        # os.walk yields (dirpath, dirnames, filenames) tuples; keep only the
        # dirnames lists from every level of the tree.
        directory_names = [x[1] for x in os.walk(options.student_directory)]
        student_directories = []
        students = []
        # Make sure all students
        if (options.student is None): # Grade ALL students
            for directories in directory_names:
                for directory_name in directories:
                    # Only directories shaped like a student id (STUDENT_REG_EX).
                    if STUDENT_REG_EX.match(directory_name):
                        student_directories.append(directory_name)
                        students.append(Student(directory_name, options.student_directory +
                                        '/' + directory_name))
        else:
            # Grade only the specified student's assignment
            student_directory = options.student_directory + "/" + options.student
            if (os.path.isdir(student_directory)): # Make sure full path to student directory is valid
                student_directories.append(options.student)
                students.append(Student(options.student, student_directory))
            else:
                raise InputException("The supplied student directory and student is not a valid path: "
                                     + student_directory)
        assignment_num = options.assignment_num
        grade_students_assignment(students, assignment_num)
        generate_student_reports(students, assignment_num)
        #generate_test_case_summary(students, assignment_num)
        print("Exiting Auto Grader successfully")
    except InputException as ex:
        print(ex)
        print("Quitting Auto Grader with traceback: {0}".format(traceback.format_exc()))
# Module-level CLI definition (referenced by main()); see the module
# docstring for the meaning of each option.
parser = OptionParser()
parser.add_option("-d", "--directory",
                  action="store", type="string", dest="student_directory")
parser.add_option("-a", "--assignment",
                  action="store", type="string", dest="assignment_num")
parser.add_option("-s", "--student",
                  action="store", type="string", dest="student")
if __name__ == '__main__':
    main(sys.argv)
|
994,359 | f63648b079029268ca745ea5c7b914ec84333d6c | import os
import re
from lxml import html, etree
from logger import *
def get_scraper(url):
    """Return the site-specific Scraper for *url*, or None for unknown sites.

    Raises InvalidArgumentScraperError when *url* is empty/falsy.
    """
    if not url:
        raise InvalidArgumentScraperError()
    if 'amazon.com' in url:
        return AmazonScraper(url)
    if 'newegg.com' in url:
        return NeweggScraper(url)
    if 'buy.com' in url:
        return BuyScraper(url)
    if 'woot.com' in url:
        # Woot scrapes its fixed front page; the URL argument is not needed.
        return WootScraper()
    return None
class InvalidArgumentScraperError(Exception):
    """Raised when a scraper is constructed or queried with missing arguments."""
    pass
class ProductAttribute():
    """One scraping rule parsed from a pipe-separated config line.

    Fields, in order: key, xpath-or-'scraper.attr' path, optional regex to
    post-process the extracted text, optional literal value.
    """

    def __init__(self, split):
        if not split:
            raise InvalidArgumentScraperError()
        self.key = split[0]
        self.path = split[1]
        # Optional third field: a regex applied to the extracted value.
        self.regex = split[2] if len(split) >= 3 and split[2] else None
        # Optional fourth field: a pre-seeded value.
        self.value = split[3] if len(split) >= 4 and split[3] else None
class DataRetriever(object):
    """Base class for pipeline stages that fill in ProductAttribute values.

    Subclasses override process() to mutate and return the attribute list.
    """
    def process(self, attributes):
        # Default implementation is a no-op (returns None).
        pass
class ObjectDataRetriever(DataRetriever):
    """Fills attributes whose path is 'scraper.<attr>' by reading that
    attribute from the bound scraper object."""

    def __init__(self, sender):
        self.scraper = sender

    def process(self, attributes):
        """Resolve every 'scraper.*' path against self.scraper; other paths
        are left untouched.  Falsy resolved values are ignored."""
        for attribute in attributes:
            path = attribute.path
            if not (path and path.startswith('scraper')):
                continue
            attr_name = path.split('.')[1]
            resolved = getattr(self.scraper, attr_name)
            if resolved:
                attribute.value = resolved
        return attributes
class RegexDataRetriever(DataRetriever):
    """Post-processes attribute values by applying each attribute's regex."""

    def process(self, attributes):
        """For attributes with a regex, replace the value with the first
        capture group if the pattern has groups, otherwise with the whole
        (non-empty) match.  Non-matching values are left as-is."""
        import re
        for attribute in attributes:
            if not attribute.regex:
                continue
            match = re.search(attribute.regex, attribute.value)
            if not match:
                continue
            if match.groups():
                attribute.value = match.group(1)
            elif match.group():
                attribute.value = match.group()
        return attributes
class HtmlDataRetriever(DataRetriever):
    """Fills attribute values by evaluating each attribute's XPath against
    the HTML document fetched from *url* (via lxml)."""
    def __init__(self, url):
        self.url = url
    def process(self, attributes):
        """Populate all attributes whose path is an XPath (i.e. does not
        start with 'scraper')."""
        return self.__parse_product_attributes(attributes)
    def getroot(self):
        # lxml fetches and parses the URL in one step.
        return html.parse(self.url).getroot()
    def __parse_product_attributes(self, attributes):
        root = self.getroot()
        for item in attributes:
            if item.path \
                and not item.path.startswith('scraper'):
                value = self.__get_xpath_value(root, item.path)
                if not isinstance(value, str):
                    if isinstance(value, etree._ElementUnicodeResult):
                        # lxml "smart string" results cannot be serialized by
                        # html.tostring; they are already text, so keep as-is.
                        pass
                    else:
                        # Element node: flatten to text and strip whitespace noise.
                        value = html.tostring(value, method='text', encoding='UTF-8').replace(' ', '').replace('\n', '').replace('\r', ' ')
                        #value = re.sub('', '', value)
                item.value = value.strip()
        return attributes
    def __get_xpath_value(self, root, xpath):
        # XPath may match several nodes; keep only the first ('' if none).
        result = root.xpath(xpath)
        value = ''
        if result:
            value = result[0]
        return value
class DataRetrieverCreator(object):
    """Factory for the standard retriever pipeline used by Scraper."""

    def get_html_data_retriever(self, url):
        return HtmlDataRetriever(url)

    def get_object_data_retriever(self, scraper):
        return ObjectDataRetriever(scraper)

    def get_regex_data_retriever(self):
        return RegexDataRetriever()

    def get_retrievers(self, scraper):
        """Pipeline order matters: HTML extraction first, then values taken
        from the scraper object, then regex post-processing."""
        return [
            self.get_html_data_retriever(scraper.url),
            self.get_object_data_retriever(scraper),
            self.get_regex_data_retriever(),
        ]
class Scraper(object):
    """Base scraper: loads this site's attribute rules from *config_file* and
    runs the retriever pipeline over them to produce a product dict.

    Config format: pipe-separated lines; a section starts with a line whose
    first field is 'site' and whose fourth field matches *site_name* (case
    insensitive), and ends at the next blank line.

    Fixes: the config file handle is now closed (it was previously leaked)
    and the default retriever creator is built per call instead of being a
    shared mutable default argument.
    """

    def __init__(self, site_name, url, retriever_creator=None, config_file='scraper.cfg'):
        if retriever_creator is None:
            retriever_creator = DataRetrieverCreator()
        self.url = url
        self.retrievers = retriever_creator.get_retrievers(self)
        self.product_attributes = []
        path = os.path.join(os.path.dirname(__file__), config_file)
        write_trace('site_name:' + site_name)
        write_trace('url:' + url)
        write_trace('scraper path:' + path)
        config_found = False
        with open(path) as config_fp:
            for line in config_fp:
                line = line.rstrip('\r\n')
                if config_found and not line:
                    break  # blank line terminates the matched site section
                if not line:
                    continue
                split = line.split('|')
                # Section header: "site|...|...|<site name>"
                if split[0] == 'site' \
                        and split[3].lower() == site_name.lower():
                    config_found = True
                    self.product_attributes.append(ProductAttribute(split))
                    continue
                if config_found:
                    self.product_attributes.append(ProductAttribute(split))

    def get_product(self):
        """Run every retriever over the attribute list and return the scraped
        {key: value} mapping."""
        for retriever in self.retrievers:
            self.product_attributes = retriever.process(self.product_attributes)
        key_value = {}
        for attr in self.product_attributes:
            key_value[attr.key] = attr.value
        return key_value

    def get_full_url(self):
        """Return the URL to scrape; subclasses may override to compute it."""
        return self.url
class ProductUrlScraper(Scraper):
    """Scraper addressed either by a full product URL or by a product id that
    is substituted into *product_url_format*.

    Fixes: the constructor previously referenced an undefined ``product_id``
    name (NameError on every instantiation) and called Scraper.__init__
    without the required ``url`` argument (TypeError).  ``product_id`` is now
    an explicit keyword parameter — appended last, so existing positional
    callers are unaffected — and the resolved URL is passed to the base class.
    """

    def __init__(self, site_name, product_url='', product_url_format='', product_id=''):
        # Set the URL-building fields before calling the base constructor,
        # because get_full_url() needs them.
        self.product_url = product_url
        self.product_id = product_id
        self.product_url_format = product_url_format
        super(ProductUrlScraper, self).__init__(site_name, self.get_full_url())

    def get_full_url(self):
        """product_url_format %% product_id when an id is set, otherwise the
        explicit product_url.  Raises InvalidArgumentScraperError when an id
        is given without a format string."""
        if self.product_id:
            if not self.product_url_format:
                raise InvalidArgumentScraperError()
            return self.product_url_format % self.product_id
        else:
            return self.product_url
class WootScraper(Scraper):
    """Scraper for woot.com's fixed front-page deal (no per-product URL)."""

    def __init__(self, retriever_creator=None):
        kwargs = {'site_name': 'woot.com', 'url': 'http://www.woot.com'}
        # Only forward retriever_creator when supplied, so the base class
        # default factory is used otherwise.
        if retriever_creator:
            kwargs['retriever_creator'] = retriever_creator
        super(WootScraper, self).__init__(**kwargs)
class NeweggScraper(Scraper):
    """Scraper configured for newegg.com product pages."""

    def __init__(self, product_url, retriever_creator=None):
        kwargs = {'site_name': 'newegg.com', 'url': product_url}
        # Only forward retriever_creator when supplied, so the base class
        # default factory is used otherwise.
        if retriever_creator:
            kwargs['retriever_creator'] = retriever_creator
        super(NeweggScraper, self).__init__(**kwargs)
class AmazonScraper(Scraper):
    """Scraper configured for amazon.com product pages."""

    def __init__(self, product_url, retriever_creator=None):
        kwargs = {'site_name': 'amazon.com', 'url': product_url}
        # Only forward retriever_creator when supplied, so the base class
        # default factory is used otherwise.
        if retriever_creator:
            kwargs['retriever_creator'] = retriever_creator
        super(AmazonScraper, self).__init__(**kwargs)
class BuyScraper(Scraper):
    """Scraper configured for buy.com product pages."""

    def __init__(self, product_url, retriever_creator=None):
        kwargs = {'site_name': 'buy.com', 'url': product_url}
        # Only forward retriever_creator when supplied, so the base class
        # default factory is used otherwise.
        if retriever_creator:
            kwargs['retriever_creator'] = retriever_creator
        super(BuyScraper, self).__init__(**kwargs)
|
994,360 | 05712f51b21adacdd0151eb144f081435aae3257 | """
Analyser module
"""
import threading
import Queue
import time
import cv2
import numpy as np
import numpy.matlib
import DetectChars
import DetectPlates
import RemoteMain as GLOBAL
# BGR color constants used by the OpenCV drawing helpers below.
SCALAR_BLACK = (0.0, 0.0, 0.0)
SCALAR_WHITE = (255.0, 255.0, 255.0)
SCALAR_YELLOW = (0.0, 255.0, 255.0)
SCALAR_GREEN = (0.0, 255.0, 0.0)
SCALAR_RED = (0.0, 0.0, 255.0)
# presumably toggles debug display of intermediate detection steps in the
# Detect* modules — TODO confirm against DetectPlates/DetectChars
showSteps = False
# Region-of-interest vertices
# We want a trapezoid shape, with bottom edge at the bottom of the image
# (fractions of frame width/height; consumed by __lane_assist)
TRAPEZOID_BOTTOM_WIDTH = 1.0
TRAPEZOID_TOP_WIDTH = 0.65
TRAPEZOID_HEIGHT = 0.7
# Hough Transform
HOUGH_DIST_RESOLUTION = 1 # distance resolution in pixels of the Hough grid
ANGULAR_RESOLUTION = 1 * np.pi/180 # angular resolution in radians of the Hough grid
HOUGH_THRESHOLD = 50 # minimum number of votes (intersections in Hough grid cell)
MIN_LINE_LENGHT = 70 #minimum number of pixels making up a line
MAX_LINE_GAP = 60 # maximum gap in pixels between connectable line segments
# Blend weights for cv2.addWeighted when overlaying lane lines on the frame.
ALPHA = 0.8
BETA = 1.
GAMMA = 0.
# Labels for the object-avoidance state machine (stored in __avoidance_state).
AVD_START_AVOIDANCE = 'START_AVOIDANCE'
AVD_PREPARE_AVOIDANCE = "PREPARE_AVOIDANCE"
AVD_AVOID_OBJECT = 'AVOID_OBJECT'
AVD_GO_FORWARD = 'AVOID_GO_FORWARD'
AVD_RETURN_TO_LANE = 'RETURN_TO_LANE'
AVD_PREPARE_TO_FINISH = 'PREPARE_TO_FINISH'
AVD_FINISHED = 'FINISHED_AVD'
class Analyser(object):
    """
    Analyser class
    - responsible for analysing the current camera frame
    - detects lanes, cars and round obstacles
    - sends driving commands to SerialManager via the commands queue
    - sends annotated frames to StreamServer via the analysed-frame queue
    - update user rights about controlling the car
    """
    def __init__(self):
        """Initialise all per-frame, cruise-control and avoidance state."""
        self.__current_frame = None  # latest decoded BGR frame being processed
        self.__encode_parameter = [int(cv2.IMWRITE_JPEG_QUALITY), 60]  # JPEG quality for streaming
        self.__command_timer = 0
        self.__fps_timer = 0
        # Last lane-assist steering decision (consumed by the overlay drawer).
        self.__go_forward = False
        self.__go_left = False
        self.__go_right = False
        self.__fps_counter = 0
        self.__frame_fps = 0
        self.__lanes_coords_list = []  # endpoints of the drawn left/right lane lines
        self.__cruise_ndf_contor = 0  # consecutive frames without a detected plate
        self.__cruise_watch_area = 0  # following-distance setting read from car states
        self.__distance_to_car = 0  # pixels from frame bottom to the detected plate
        self.__plate_coords = []
        self.__cruise_speed = 0
        self.__cruise_preffered_speed = 0
        self.__car_states_timer = 0
        self.__car_data_updated = False
        self.__car_stopped = False
        self.__cruise_timer = 0
        self.__lane_assist_timer = 0
        # object avoidance data
        self.__dp = 1  # HoughCircles accumulator resolution ratio
        self.__min_dist = 50  # HoughCircles minimum distance between centers
        self.__circle_param_1 = 150  # Canny upper threshold (overwritten by __canny)
        self.__circle_param_2 = 50  # HoughCircles accumulator threshold
        self.__objects_coords_list = []
        self.__avoiding_activated = False
        self.__avoidance_go_forward = False
        self.__avoidance_go_left = False
        self.__avoidance_go_right = False
        self.__avoidance_brake = False
        self.__avoidance_timer = 0
        self.__returner_timer = 0
        self.__avoidance_state = ''
    def analyse(self, frame_queue, analysed_frame_queue, autonomous_states_queue, \
        commands_queue, car_states_queue):
        """
        Main worker loop: pull JPEG frames from frame_queue, analyse them and
        push annotated JPEG frames to analysed_frame_queue.

        The loop is controlled via attributes set on the owning thread:
        is_running (keep looping), is_analysing (run detection) and
        is_deciding (issue driving commands).
        """
        current_thread = threading.currentThread()
        self.__command_timer = time.time()
        bln_knn_training_successful = DetectChars.loadKNNDataAndTrainKNN() # attempt KNN training
        if bool(bln_knn_training_successful) is False:
            # Without a trained classifier plate detection cannot run.
            return
        self.__fps_timer = time.time()
        while getattr(current_thread, 'is_running', True):
            # Blocking read of the next JPEG-encoded frame.
            string_data = frame_queue.get(True, None)
            frame = numpy.fromstring(string_data, dtype='uint8')
            self.__current_frame = cv2.imdecode(frame, 1)
            self.__get_cruise_states_data(car_states_queue)
            if getattr(current_thread, 'is_analysing', True):
                self.__car_detection(autonomous_states_queue)
                self.__detect_objects()
                self.__lane_assist()
            if getattr(current_thread, 'is_deciding', True):
                self.__take_cruise_decision(commands_queue)
                self.__avoid_detected_objects(commands_queue)
            # Overlay diagnostics on the outgoing frame.
            self.__draw_rect_around_plate(self.__current_frame)
            self.__draw_distance_to_car()
            self.__draw_car_cruise_watch_area()
            self.__draw_lane_assist_decision()
            self.__draw_detected_objects()
            self.__draw_fps()
            result, encrypted_image = \
                cv2.imencode('.jpg', self.__current_frame, self.__encode_parameter)
            if bool(result) is False:
                break
            analysed_frame = numpy.array(encrypted_image)
            analysed_frame_queue.put(str(analysed_frame.tostring()), True, None)
            frame_queue.task_done()
            # Refresh the FPS counter once per second.
            self.__fps_counter = self.__fps_counter + 1
            if time.time() - self.__fps_timer > 1:
                self.__frame_fps = self.__fps_counter
                self.__fps_counter = 0
                self.__fps_timer = time.time()
def __draw_fps(self):
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(self.__current_frame, str(self.__frame_fps), \
(0, 21), font, 1, (0, 255, 255), 2, cv2.LINE_AA)
    def __get_cruise_states_data(self, car_states_queue):
        """Poll car_states_queue (at most every 200 ms, non-blocking) and
        refresh the cached cruise distance/speed values.

        Expected message format: "name,value;name,value;..." where names
        match the GLOBAL.CSQ_* constants.
        """
        if time.time() - self.__car_states_timer > 0.2:
            try:
                car_states = car_states_queue.get(False)
            except Queue.Empty:
                # Nothing new from the car; try again on a later frame.
                return
            if car_states:
                car_states = car_states.split(';')
                for elem in car_states:
                    current_state = elem.split(',')
                    if len(current_state) > 1:
                        if GLOBAL.CSQ_CRUISE_DISTANCE in current_state[0]:
                            self.__cruise_watch_area = int(current_state[1])
                        elif GLOBAL.CSQ_CRUISE_SPEED in current_state[0]:
                            self.__cruise_speed = int(current_state[1])
                            # Marks fresh speed data for __take_cruise_decision.
                            self.__car_data_updated = True
                        elif GLOBAL.CSQ_CRUISE_PREFFERED_SPEED in current_state[0]:
                            self.__cruise_preffered_speed = int(current_state[1])
            car_states_queue.task_done()
            self.__car_states_timer = time.time()
def __draw_rect_around_plate(self, current_scene):
if len(self.__plate_coords) > 3:
if self.__distance_to_car < 100:
cv2.line(current_scene, tuple(self.__plate_coords[0]), \
tuple(self.__plate_coords[1]), SCALAR_RED, 2)
cv2.line(current_scene, tuple(self.__plate_coords[1]), \
tuple(self.__plate_coords[2]), SCALAR_RED, 2)
cv2.line(current_scene, tuple(self.__plate_coords[2]), \
tuple(self.__plate_coords[3]), SCALAR_RED, 2)
cv2.line(current_scene, tuple(self.__plate_coords[3]), \
tuple(self.__plate_coords[0]), SCALAR_RED, 2)
def __draw_distance_to_car(self):
if self.__distance_to_car < 100:
frame_shape = self.__current_frame.shape
distance_position = (2 * frame_shape[1] / 100, 95 * frame_shape[0] / 100)
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(self.__current_frame, 'Distance:' + str(self.__distance_to_car) + 'cm', \
distance_position, font, 1, (0, 255, 255), 2, cv2.LINE_AA)
    def __take_cruise_decision(self, commands_queue):
        """Adaptive cruise control: brake when too close to the car ahead,
        otherwise trim the speed towards the driver's preferred speed.
        Speed-change commands are rate-limited to one per 200 ms.
        Suspended entirely while object avoidance is active.
        """
        if bool(self.__avoiding_activated) is True:
            return
        dist_padding = 10
        low_thresh_area = self.__cruise_watch_area * 5  # NOTE(review): unused
        high_thresh_area = self.__cruise_watch_area * 5 + dist_padding
        if self.__distance_to_car < high_thresh_area:
            # Too close: brake immediately (drop the command if queue is full).
            try:
                commands_queue.put(GLOBAL.CMD_BRAKE, False)
            except Queue.Full:
                return
        else:
            if self.__cruise_preffered_speed == 0:
                # No target speed set: keep the car stopped.
                try:
                    commands_queue.put(GLOBAL.CMD_BRAKE, False)
                except Queue.Full:
                    return
            if self.__cruise_speed > self.__cruise_preffered_speed:
                if time.time() - self.__cruise_timer > (200.0 / 1000.0):
                    try:
                        commands_queue.put(GLOBAL.CMD_DECREASE_SPEED, False)
                    except Queue.Full:
                        return
                    self.__cruise_timer = time.time()
        # scenarios where the speed should be increased
        if self.__distance_to_car > (high_thresh_area + dist_padding):
            if self.__cruise_speed < self.__cruise_preffered_speed:
                if time.time() - self.__cruise_timer > (200.0 / 1000.0):
                    # Only accelerate on fresh speed data from the car.
                    if bool(self.__car_data_updated) is True:
                        try:
                            commands_queue.put(GLOBAL.CMD_INCREASE_SPEED, False)
                        except Queue.Full:
                            return
                        self.__cruise_timer = time.time()
                        self.__car_data_updated = False
    def __populate_autonomous_states(self, autonomous_states_queue):
        # TODO: not implemented — presumably intended to push autonomous-mode
        # state snapshots onto the queue for other components; confirm design.
        pass
    def __car_detection(self, autonomous_states_queue):
        """
        Detect the possible car in front of our car (via its licence plate)
        and store the plate corners and the pixel distance to it.
        """
        list_of_possible_plates = DetectPlates.detectPlatesInScene(self.__current_frame)
        list_of_possible_plates = DetectChars.detectCharsInPlates(list_of_possible_plates)
        # Most-recognised plate (longest character string) first.
        list_of_possible_plates.sort(key=lambda possiblePlate: len(possiblePlate.strChars), \
            reverse=True)
        if len(list_of_possible_plates) > 0:
            #at least one car
            lic_plate = list_of_possible_plates[0]
            frame_shape = self.__current_frame.shape
            self.__plate_coords = cv2.boxPoints(lic_plate.rrLocationOfPlateInScene)
            # Distance estimate: pixels from the frame bottom to the plate.
            self.__distance_to_car = frame_shape[0] - self.__plate_coords[3][1] # in pixels
            self.__distance_to_car = self.__distance_to_car  # NOTE(review): no-op assignment
            self.__distance_to_car = float("{0:.2f}".format(self.__distance_to_car))
            self.__cruise_ndf_contor = 0
        else:
            # Debounce: only report "no car" (distance 1000) after more than
            # 5 consecutive frames without a plate, so a single bad frame
            # doesn't reset the cruise logic.
            self.__cruise_ndf_contor = self.__cruise_ndf_contor + 1
            if self.__cruise_ndf_contor > 5:
                self.__distance_to_car = 1000
                self.__cruise_ndf_contor = 0
def __gaussian_blur(self, img, kernel_size=3):
"""Applies a Gaussian Noise kernel"""
return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)
    def __grayscale(self, img):
        """Convert a BGR frame to single-channel grayscale."""
        return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    def __canny(self, image, sigma=0.33):
        """Applies the Canny transform with thresholds derived from the image
        median (lower/upper = median -/+ sigma, clamped to [0, 255]).

        Side effect: stores the upper threshold in self.__circle_param_1 so
        the Hough-circle obstacle detector reuses the same edge threshold.
        """
        median_variable = np.median(image)
        lower = int(max(0, (1.0 - sigma) * median_variable))
        upper = int(min(255, (1.0 + sigma) * median_variable))
        self.__circle_param_1 = upper
        edged = cv2.Canny(image, lower, upper)
        return edged
def __region_of_interest(self, img, vertices):
mask = np.zeros_like(img)
if len(img.shape) > 2:
channel_count = img.shape[2]
ignore_mask_color = (255,) * channel_count
else:
ignore_mask_color = 255
cv2.fillPoly(mask, vertices, ignore_mask_color)
masked_image = cv2.bitwise_and(img, mask)
return masked_image
    def __draw_detected_lanes(self, img, lines, color=None, thickness=5):
        """Reduce the raw Hough segments to one left and one right lane line
        (least-squares fits), draw them on *img*, and cache their endpoints in
        self.__lanes_coords_list as [left_bottom, left_top, right_bottom,
        right_top] (sides that were not drawn are simply absent).
        """
        if color is None:
            color = [255, 0, 0]
        if lines is None:
            return
        if len(lines) == 0:
            return
        draw_right = True
        draw_left = True
        # Discard near-horizontal segments (|slope| below the threshold).
        slope_threshold = 0.5
        slopes = []
        new_lines = []
        for line in lines:
            x1, y1, x2, y2 = line[0] # line = [[x1, y1, x2, y2]]
            if x2 - x1 == 0.: # corner case, avoiding division by 0
                slope = 999. # practically infinite slope
            else:
                slope = (y2 - y1) / (x2 - x1)
            if abs(slope) > slope_threshold:
                slopes.append(slope)
                new_lines.append(line)
        lines = new_lines
        # Split the kept segments into right-lane candidates (positive slope,
        # right half of the image) and left-lane candidates (negative slope,
        # left half).
        right_lines = []
        left_lines = []
        for i, line in enumerate(lines):
            x1, y1, x2, y2 = line[0]
            img_x_center = img.shape[1] / 2 # x coordinate of center of image
            if slopes[i] > 0 and x1 > img_x_center and x2 > img_x_center:
                right_lines.append(line)
            elif slopes[i] < 0 and x1 < img_x_center and x2 < img_x_center:
                left_lines.append(line)
        # Least-squares fit y = m*x + b through all right-segment endpoints.
        right_lines_x = []
        right_lines_y = []
        for line in right_lines:
            x1, y1, x2, y2 = line[0]
            right_lines_x.append(x1)
            right_lines_x.append(x2)
            right_lines_y.append(y1)
            right_lines_y.append(y2)
        if len(right_lines_x) > 0:
            right_m, right_b = np.polyfit(right_lines_x, right_lines_y, 1) # y = m*x + b
        else:
            right_m, right_b = 1, 1
            draw_right = False
        # Same least-squares fit for the left side.
        left_lines_x = []
        left_lines_y = []
        for line in left_lines:
            x1, y1, x2, y2 = line[0]
            left_lines_x.append(x1)
            left_lines_x.append(x2)
            left_lines_y.append(y1)
            left_lines_y.append(y2)
        if len(left_lines_x) > 0:
            left_m, left_b = np.polyfit(left_lines_x, left_lines_y, 1) # y = m*x + b
        else:
            left_m, left_b = 1, 1
            draw_left = False
        # Evaluate both fitted lines at the bottom of the image (y1) and at
        # the top of the region of interest (y2), solving x = (y - b) / m.
        y1 = img.shape[0]
        y2 = img.shape[0] * (1 - TRAPEZOID_HEIGHT)
        right_x1 = (y1 - right_b) / right_m
        right_x2 = (y2 - right_b) / right_m
        left_x1 = (y1 - left_b) / left_m
        left_x2 = (y2 - left_b) / left_m
        y1 = int(y1)
        y2 = int(y2)
        right_x1 = int(right_x1)
        right_x2 = int(right_x2)
        left_x1 = int(left_x1)
        left_x2 = int(left_x2)
        self.__lanes_coords_list = []
        # Draw each lane only when both of its endpoints fall inside the frame.
        if draw_left:
            if left_x1 < img.shape[1] and left_x2 < img.shape[1]:
                if left_x1 > 0 and left_x2 > 0:
                    cv2.line(img, (left_x1, y1), (left_x2, y2), color, thickness)
                    self.__lanes_coords_list.append((left_x1, y1))
                    self.__lanes_coords_list.append((left_x2, y2))
        if draw_right:
            if right_x1 < img.shape[1] and right_x2 < img.shape[1]:
                if right_x1 > 0 and right_x2 > 0:
                    cv2.line(img, (right_x1, y1), (right_x2, y2), color, thickness)
                    self.__lanes_coords_list.append((right_x1, y1))
                    self.__lanes_coords_list.append((right_x2, y2))
    def __hough_lines(self, img):
        """Run the probabilistic Hough transform over an edge image and return
        a new blank 3-channel image with only the fitted lane lines drawn."""
        lines = cv2.HoughLinesP(img, HOUGH_DIST_RESOLUTION, ANGULAR_RESOLUTION, \
            HOUGH_THRESHOLD, np.array([]), minLineLength=MIN_LINE_LENGHT, maxLineGap=MAX_LINE_GAP)
        (height, width) = img.shape
        datatype = np.dtype(np.uint8)
        line_img = np.zeros((height, width, 3), datatype)
        self.__draw_detected_lanes(line_img, lines)
        return line_img
    def __draw_car_cruise_watch_area(self):
        """Visualise the cruise 'watch area' as a stack of translucent green
        trapezoid tiles rising from the bottom-center of the frame — one tile
        per configured distance unit, each narrower than the one below."""
        if self.__cruise_watch_area == 0:
            return
        height, width, channels = self.__current_frame.shape
        tile_bottom_x_coord = width / 2
        tile_bottom_y_coord = height
        tile_width = 25 * width / 100
        tile_height = 25
        tile_padding = 5
        for contor in range(0, self.__cruise_watch_area):
            # Bottom edge of the tile, then the (narrower) top edge.
            car_cruise_area_vertex = []
            car_cruise_area_vertex.append([tile_bottom_x_coord + tile_width, tile_bottom_y_coord])
            car_cruise_area_vertex.append([tile_bottom_x_coord - tile_width, tile_bottom_y_coord])
            tile_bottom_y_coord = tile_bottom_y_coord - tile_height
            tile_width = 80 * tile_width / 100  # shrink 20% per tile towards the horizon
            car_cruise_area_vertex.append([tile_bottom_x_coord - tile_width, tile_bottom_y_coord])
            car_cruise_area_vertex.append([tile_bottom_x_coord + tile_width, tile_bottom_y_coord])
            # Blend the filled tile at 50% opacity onto the frame.
            overlay = self.__current_frame.copy()
            pts = np.array(car_cruise_area_vertex, np.int32)
            pts = pts.reshape((-1, 1, 2))
            cv2.fillConvexPoly(overlay, pts, (0, 200, 0))
            alpha = 0.5
            cv2.addWeighted(overlay, alpha, self.__current_frame, 1-alpha, 0, self.__current_frame)
            tile_bottom_y_coord = tile_bottom_y_coord - tile_padding
    def __lane_assist(self):
        """Lane-detection pipeline: grayscale -> Gaussian blur -> Canny ->
        trapezoidal region-of-interest mask -> Hough lane fitting, then blend
        the detected lane lines back onto the current frame."""
        current_frame = self.__grayscale(self.__current_frame)
        current_frame = self.__gaussian_blur(current_frame)
        current_frame = self.__canny(current_frame)
        imshape = self.__current_frame.shape
        # Trapezoid ROI spanning the lower TRAPEZOID_HEIGHT of the frame
        # (vertices: bottom-left, top-left, top-right, bottom-right).
        vertices = np.array([[\
            ((imshape[1] * (1 - TRAPEZOID_BOTTOM_WIDTH)) // 2, imshape[0]),\
            ((imshape[1] * (1 - TRAPEZOID_TOP_WIDTH)) // 2, imshape[0] - imshape[0] * TRAPEZOID_HEIGHT),\
            (imshape[1] - (imshape[1] * (1 - TRAPEZOID_TOP_WIDTH)) // 2, \
            imshape[0] - imshape[0] * TRAPEZOID_HEIGHT),\
            (imshape[1] - (imshape[1] * (1 - TRAPEZOID_BOTTOM_WIDTH)) // 2, imshape[0])]], dtype=np.int32)
        current_frame = self.__region_of_interest(current_frame, vertices)
        current_frame = self.__hough_lines(current_frame)
        # Blend the lane-line image over the original frame (ALPHA/BETA/GAMMA).
        final_image = self.__current_frame.astype('uint8')
        cv2.addWeighted(self.__current_frame, ALPHA, current_frame, BETA, GAMMA, final_image)
        final_image = final_image.astype('uint8')
        self.__current_frame = final_image
    def __maintain_between_lanes(self, commands_queue):
        """Lane keeping: steer so the frame's horizontal center stays between
        the two detected lane lines; brake when no lane pair is visible.
        Commands are rate-limited to one per 200 ms; the chosen direction is
        also recorded in __go_forward/__go_left/__go_right for the overlay."""
        # Treat a zero target or actual speed as "car stopped": no steering.
        if self.__cruise_preffered_speed == 0 or self.__cruise_speed == 0:
            self.__car_stopped = True
        else:
            self.__car_stopped = False
        if bool(self.__car_stopped) is True:
            return
        self.__go_forward = False
        self.__go_left = False
        self.__go_right = False
        frame_shape = self.__current_frame.shape
        car_head = (frame_shape[1] / 2, 0)
        distance_padding = 10 * frame_shape[1] / 100
        if len(self.__lanes_coords_list) > 3:
            # both lanes detected
            # lanes_coords_list[1] is the left lane's upper endpoint and
            # lanes_coords_list[3] the right lane's upper endpoint
            # (see __draw_detected_lanes).
            if car_head[0] - distance_padding > self.__lanes_coords_list[1][0]:
                if car_head[0] + distance_padding < self.__lanes_coords_list[3][0]:
                    if time.time() - self.__lane_assist_timer > 200.0/1000.0:
                        print '[Both] GO FORWARD'
                        try:
                            commands_queue.put(GLOBAL.CMD_GO_FORWARD, False)
                        except Queue.Full:
                            pass
                        self.__lane_assist_timer = time.time()
                        self.__go_forward = True
            if car_head[0] - distance_padding < self.__lanes_coords_list[1][0]:
                # Drifting over the left lane: steer right.
                if time.time() - self.__lane_assist_timer > 200.0/1000.0:
                    print '[Both] GO RIGHT'
                    try:
                        commands_queue.put(GLOBAL.CMD_GO_RIGHT, False)
                    except Queue.Full:
                        pass
                    self.__lane_assist_timer = time.time()
                    self.__go_right = True
            if car_head[0] + distance_padding > self.__lanes_coords_list[3][0]:
                # Drifting over the right lane: steer left.
                if time.time() - self.__lane_assist_timer > 200.0/1000.0:
                    print '[Both] GO LEFT'
                    try:
                        commands_queue.put(GLOBAL.CMD_GO_LEFT, False)
                    except Queue.Full:
                        pass
                    self.__lane_assist_timer = time.time()
                    self.__go_left = True
        else:
            print '[None] BRAKE'
            try:
                commands_queue.put(GLOBAL.CMD_BRAKE, False)
            except Queue.Full:
                pass
def __draw_lane_assist_decision(self):
    """Overlay a semi-transparent arrow showing the current lane-assist
    decision (left / right / forward) onto self.__current_frame.

    Fix: when no decision flag is set (e.g. the car is stopped) the
    original fed an empty polygon to cv2.fillConvexPoly, which raises;
    we now return early in that case.
    """
    (height, width, channels) = self.__current_frame.shape
    padding = 10
    arrow_vertex_list = []
    if bool(self.__go_left) is True:
        # Left-pointing triangle anchored at the left frame edge.
        arrow_vertex_list.append([padding, height/2])
        arrow_vertex_list.append([padding * 3, height/3])
        arrow_vertex_list.append([padding * 3, 3 * height/4])
    elif bool(self.__go_right) is True:
        # Right-pointing triangle anchored at the right frame edge.
        arrow_vertex_list.append([width - padding, height/2])
        arrow_vertex_list.append([width - padding * 3, height/3])
        arrow_vertex_list.append([width - padding * 3, 3 * height/4])
    elif bool(self.__go_forward) is True:
        # Upward triangle centred at the top of the frame.
        arrow_vertex_list.append([width/2, padding])
        arrow_vertex_list.append([width/2 - padding * 3, padding * 2])
        arrow_vertex_list.append([width/2 + padding * 3, padding * 2])
    if not arrow_vertex_list:
        # No decision to draw -- avoid an empty polygon (fillConvexPoly
        # fails on an empty point array).
        return
    overlay = self.__current_frame.copy()
    pts = np.array(arrow_vertex_list, np.int32)
    pts = pts.reshape((-1, 1, 2))
    cv2.fillConvexPoly(overlay, pts, (0, 200, 200))
    # Alpha-blend the arrow overlay onto the live frame in place.
    alpha = 0.5
    cv2.addWeighted(overlay, alpha, self.__current_frame, 1-alpha, 0, self.__current_frame)
def __detect_objects(self):
    """Detect circular objects (obstacles) in the current frame.

    Runs a Hough circle transform on the blurred grayscale frame and keeps
    only circles whose centre lies in the lower 2/5 of the image (close to
    the car), storing them in self.__objects_coords_list as (x, y, radius).

    Fix: the original referenced `frame_shape` without ever defining it
    (NameError whenever a circle was found); it is now taken from the frame.
    """
    frame = self.__current_frame
    frame_shape = frame.shape  # was referenced below but never defined
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    blur_grey = self.__gaussian_blur(gray)
    circles = cv2.HoughCircles(blur_grey, cv2.HOUGH_GRADIENT, dp=self.__dp,
                               minDist=self.__min_dist, param1=self.__circle_param_1,
                               param2=self.__circle_param_2,
                               minRadius=0, maxRadius=0)
    self.__objects_coords_list = []
    # ensure at least some circles were found
    if circles is not None:
        # convert the (x, y) coordinates and radius of the circles to integers
        circles = np.round(circles[0, :]).astype("int")
        # loop over the (x, y) coordinates and radius of the circles
        for element in circles:
            # keep only circles in the lower part of the frame
            if element[1] > (3 * frame_shape[0] / 5):
                self.__objects_coords_list.append(element)
def __avoid_detected_objects(self, commands_queue):
    """Obstacle-avoidance state machine.

    When objects are detected, decides whether to dodge left or right (only
    possible when every object sits in the opposite half of the frame) or to
    brake, then drives a timed sequence of steering commands through the
    phases AVD_START_AVOIDANCE -> AVD_PREPARE_AVOIDANCE -> AVD_AVOID_OBJECT
    -> AVD_GO_FORWARD -> AVD_RETURN_TO_LANE -> AVD_PREPARE_TO_FINISH ->
    AVD_FINISHED. A command that cannot be queued leaves the machine in the
    same state so it is retried on the next frame.

    NOTE(review): indentation reconstructed from a whitespace-stripped
    source; the final GO_FORWARD block is placed at method level so it can
    fire after the machine resets -- confirm against the original file.
    """
    if bool(self.__avoiding_activated) is False:
        # Decide the dodge direction from where the detected objects are.
        frame_shape = self.__current_frame.shape
        go_left = True
        go_right = True
        for elem in self.__objects_coords_list:
            if elem[0] < frame_shape[1] / 2:
                go_left = False    # object in the left half blocks a left dodge
            else:
                go_right = False   # object in the right half blocks a right dodge
        if bool(go_left) is True:
            self.__avoiding_activated = True
            self.__avoidance_go_left = True
        elif bool(go_right) is True:
            self.__avoiding_activated = True
            self.__avoidance_go_right = True
        else:
            # Objects on both sides -- nowhere to dodge, just brake.
            self.__avoiding_activated = True
            self.__avoidance_brake = True
    if bool(self.__avoiding_activated) is True:
        if bool(self.__avoidance_brake) is True:
            try:
                commands_queue.put(GLOBAL.CMD_BRAKE, False)
                self.__avoiding_activated = False
                self.__avoidance_brake = False
            except Queue.Full:
                # Queue full: stay in the brake state and retry next frame.
                self.__avoiding_activated = True
                self.__avoidance_brake = True
        elif self.__avoidance_state == '':
            # Fresh avoidance run -- enter the first phase.
            if bool(self.__avoidance_go_left) is True:
                self.__avoidance_state = AVD_START_AVOIDANCE
            elif bool(self.__avoidance_go_right) is True:
                self.__avoidance_state = AVD_START_AVOIDANCE
        #P1 - start avoidance (change direction)
        elif self.__avoidance_state == AVD_START_AVOIDANCE:
            if bool(self.__avoidance_go_left) is True:
                try:
                    commands_queue.put(GLOBAL.CMD_GO_LEFT, False)
                    self.__avoidance_state = AVD_PREPARE_AVOIDANCE
                except Queue.Full:
                    self.__avoidance_state = AVD_START_AVOIDANCE
            elif bool(self.__avoidance_go_right) is True:
                try:
                    commands_queue.put(GLOBAL.CMD_GO_RIGHT, False)
                    self.__avoidance_state = AVD_PREPARE_AVOIDANCE
                except Queue.Full:
                    self.__avoidance_state = AVD_START_AVOIDANCE
            self.__avoidance_timer = time.time()  # phase stopwatch restarts here
        #P2 - prepare avoidance
        elif self.__avoidance_state == AVD_PREPARE_AVOIDANCE:
            # After ~0.9 s of turning away, counter-steer back.
            if time.time() - self.__avoidance_timer > 0.9:
                if bool(self.__avoidance_go_left) is True:
                    try:
                        commands_queue.put(GLOBAL.CMD_GO_RIGHT, False)
                        self.__avoidance_state = AVD_AVOID_OBJECT
                    except Queue.Full:
                        self.__avoidance_state = AVD_PREPARE_AVOIDANCE
                elif bool(self.__avoidance_go_right) is True:
                    try:
                        commands_queue.put(GLOBAL.CMD_GO_LEFT, False)
                        self.__avoidance_state = AVD_AVOID_OBJECT
                    except Queue.Full:
                        self.__avoidance_state = AVD_PREPARE_AVOIDANCE
                self.__avoidance_timer = time.time()
        #P3 - start avoid object
        elif self.__avoidance_state == AVD_AVOID_OBJECT:
            # Straighten out and drive past the obstacle.
            if time.time() - self.__avoidance_timer > 1.1:
                if bool(self.__avoidance_go_left) is True:
                    try:
                        commands_queue.put(GLOBAL.CMD_GO_FORWARD, False)
                        self.__avoidance_state = AVD_GO_FORWARD
                    except Queue.Full:
                        self.__avoidance_state = AVD_AVOID_OBJECT
                elif bool(self.__avoidance_go_right) is True:
                    try:
                        commands_queue.put(GLOBAL.CMD_GO_FORWARD, False)
                        self.__avoidance_state = AVD_GO_FORWARD
                    except Queue.Full:
                        self.__avoidance_state = AVD_AVOID_OBJECT
                self.__avoidance_timer = time.time()
        #P3 BIS - avoiding object
        elif self.__avoidance_state == AVD_GO_FORWARD:
            if time.time() - self.__avoidance_timer > 0.7:
                if bool(self.__avoidance_go_left) is True:
                    try:
                        commands_queue.put(GLOBAL.CMD_INCREASE_SPEED, False)
                        self.__avoidance_state = AVD_RETURN_TO_LANE
                    except Queue.Full:
                        self.__avoidance_state = AVD_GO_FORWARD
                elif bool(self.__avoidance_go_right) is True:
                    try:
                        commands_queue.put(GLOBAL.CMD_INCREASE_SPEED, False)
                        self.__avoidance_state = AVD_RETURN_TO_LANE
                    except Queue.Full:
                        self.__avoidance_state = AVD_GO_FORWARD
                self.__avoidance_timer = time.time()
        #P4 - return to lane
        elif self.__avoidance_state == AVD_RETURN_TO_LANE:
            if time.time() - self.__avoidance_timer > 1:
                if bool(self.__avoidance_go_left) is True:
                    try:
                        commands_queue.put(GLOBAL.CMD_GO_RIGHT, False)
                        self.__avoidance_state = AVD_PREPARE_TO_FINISH
                    except Queue.Full:
                        self.__avoidance_state = AVD_RETURN_TO_LANE
                elif bool(self.__avoidance_go_right) is True:
                    # NOTE(review): this inner re-check of the same timer
                    # condition is redundant (already true) but harmless.
                    if time.time() - self.__avoidance_timer > 1:
                        try:
                            commands_queue.put(GLOBAL.CMD_GO_LEFT, False)
                            self.__avoidance_state = AVD_PREPARE_TO_FINISH
                        except Queue.Full:
                            self.__avoidance_state = AVD_RETURN_TO_LANE
                self.__avoidance_timer = time.time()
        #P5 - prepare to finish
        elif self.__avoidance_state == AVD_PREPARE_TO_FINISH:
            if time.time() - self.__avoidance_timer > 0.8:
                if bool(self.__avoidance_go_left) is True:
                    try:
                        commands_queue.put(GLOBAL.CMD_GO_LEFT, False)
                        self.__avoidance_state = AVD_FINISHED
                    except Queue.Full:
                        self.__avoidance_state = AVD_PREPARE_TO_FINISH
                elif bool(self.__avoidance_go_right) is True:
                    try:
                        commands_queue.put(GLOBAL.CMD_GO_RIGHT, False)
                        self.__avoidance_state = AVD_FINISHED
                    except Queue.Full:
                        self.__avoidance_state = AVD_PREPARE_TO_FINISH
                self.__avoidance_timer = time.time()
        #P6 - finish
        elif self.__avoidance_state == AVD_FINISHED:
            # Reset the machine; one final GO_FORWARD is issued below.
            self.__avoiding_activated = False
            self.__avoidance_go_forward = True
            self.__avoidance_go_left = False
            self.__avoidance_go_right = False
            self.__avoidance_state = ''
    if bool(self.__avoidance_go_forward) is True:
        if time.time() - self.__avoidance_timer > 1:
            try:
                commands_queue.put(GLOBAL.CMD_GO_FORWARD, False)
                self.__avoidance_go_forward = False
            except Queue.Full:
                self.__avoidance_go_forward = True
def __draw_detected_objects(self):
    """Draw every detected object as a green circle outline on the current
    frame, then clear the detection list so stale circles are not redrawn."""
    for (centre_x, centre_y, radius) in self.__objects_coords_list:
        cv2.circle(self.__current_frame, (centre_x, centre_y), radius, (0, 255, 0), 4)
    self.__objects_coords_list = []
|
994,361 | cd7e1d0a3c275cc7520c81c92f7b5a21a53dc4c5 | #! /usr/bin/python
import sys
import re
def sum_line(numbers):
    """Return the sum of an iterable of integer strings.

    Each element is converted with int(), so signed entries such as '-3'
    or '+7' are handled; an empty iterable sums to 0.
    """
    # Builtin sum() over a generator replaces the manual accumulator loop.
    return sum(int(n) for n in numbers)
def parse_line(line):
    """Extract all (optionally signed) integer substrings from *line*."""
    # Raw string fixes the invalid escape sequence \d in a normal string
    # (DeprecationWarning today, a SyntaxError in future Python versions).
    return re.findall(r'[+-]?\d+', line)
def parse_input():
    """Read every line from stdin, stripped of its trailing newline."""
    # split("\n")[0] keeps only the text before the first newline, which
    # for stdin lines simply drops the line terminator.
    return [raw.split("\n")[0] for raw in sys.stdin]
# main()
def main():
    """Read lines from stdin and print the sum of the integers on each line."""
    data = parse_input()
    for line in data:
        # Python 2 print statement: one total per input line.
        print sum_line(parse_line(line))
if __name__ == '__main__':
main()
|
994,362 | c3ab702262a70c1cef031bcf29813e6c8e7be82f | import os
MyOS = os.uname()
print(MyOS)
|
994,363 | 76213e1c731aaee24d58fb2d7d6b12d81f6108b1 | #!/usr/bin/env python
import socket
import struct
import sys
import os
import time
import binascii
import math
from random import randint
import rospy
from nav_msgs.msg import Odometry
from geometry_msgs.msg import PoseWithCovarianceStamped
import tf
#PC_IP = "192.168.0.145" #127.0.0.1"
# Network endpoints: poses are forwarded to the controller at
# CONTROL_IP:CONTROL_PORT through the single shared UDP socket below.
PC_IP = "192.168.0.102" #127.0.0.1"
LIS_PORT = 64417
CONTROL_IP = "192.168.0.145" #"127.0.0.1"
CONTROL_PORT = 41010
UDPSOCKS = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # UDP Send
def callbackProcessedPose(msg):
    """ROS subscriber callback: forward the AMCL pose over UDP.

    Packs (x, y, yaw) from the PoseWithCovarianceStamped message -- yaw is
    recovered from the orientation quaternion -- plus zeroed wheel speeds
    into a binary datagram for the controller.
    """
    x = msg.pose.pose.position.x
    y = msg.pose.pose.position.y
    quat = (msg.pose.pose.orientation.x, msg.pose.pose.orientation.y, msg.pose.pose.orientation.z, msg.pose.pose.orientation.w)
    euler = tf.transformations.euler_from_quaternion(quat)
    yaw = euler[2]  # (roll, pitch, yaw) -- only yaw is used
    # Wheel velocities are not known here; send zeros.
    vl = 0
    vr = 0
    # Packet layout: header bytes 0xF1 0xA1, then five little-endian doubles.
    _fvalues = (int('F1', 16), int('A1', 16), x, y, yaw, vl, vr)
    _fstructure = struct.Struct('< ' + '2B d d d d d')
    _fdata = _fstructure.pack(*_fvalues)
    UDPSOCKS.sendto(_fdata, (CONTROL_IP, CONTROL_PORT))
    print "sending", x, y, yaw, vl, vr
def startRosNode(node_name):
    """Initialise the ROS node, subscribe to /amcl_pose and block forever."""
    rospy.init_node(node_name, anonymous=False)
    rospy.Subscriber('/amcl_pose', PoseWithCovarianceStamped, callbackProcessedPose)
    rospy.spin()  # hand control to the ROS event loop until shutdown
if __name__ == '__main__':
    try:
        startRosNode('send_loc_to_control_node')
    except rospy.ROSInterruptException:
        # Normal shutdown (Ctrl-C / rospy shutdown) -- exit quietly.
        pass
|
994,364 | 94ef38d957acdd0975eac84d069750e3ff1771e0 | a = int("1c0111001f010100061a024b53535009181c",16)
b = int("686974207468652062756c6c277320657965",16)
print(hex(a^b)) |
994,365 | e26efea8cc37636b575322bfbd3e3e2c4ddf1d0a | import os
import random
import pytz as pytz
from auth_operations import r
from datetime import datetime, timedelta
from pymongo import MongoClient
# MONGO_CONNECTION = 'mongodb://localhost:27017/'
MONGO_CONNECTION = "mongodb+srv://{}:{}@cluster0.yx4fl.azure.mongodb.net/DigestAppDB?retryWrites=true&w=majority".format(
os.getenv('MONGO_USERNAME'), os.getenv('MONGO_PW'))
DATETIME_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ'
collections = {}
client = MongoClient(MONGO_CONNECTION)
db = client.DigestAppDB
def add_collection(collection):
    """Insert or refresh a Raindrop collection document in MongoDB.

    *collection* is a dict from the Raindrop API. When no document with this
    collection_id exists it is inserted with an empty bookmark list;
    otherwise last_update/count are refreshed, but only when the incoming
    lastUpdate is strictly newer than the stored one.
    """
    found = db.raindrops.find_one({"collection_id": collection['_id']})
    new_datetime = to_datetime(collection['lastUpdate'])
    if not found:
        db.raindrops.insert_one({
            "collection_id": collection['_id'],
            "name": collection['title'],
            "created": to_datetime(collection['created']),
            "last_update": new_datetime,
            "count": collection['count'],
            "bookmarks": []   # filled later by add_bookmarks()
        })
    else:
        existing_last_update = found['last_update']
        if new_datetime > existing_last_update:
            db.raindrops.update_one(
                {"collection_id": found['collection_id']},
                {"$set": {"last_update": new_datetime, "count": collection["count"]}}
            )
def read_collections():
    """Return a cursor over every stored collection document."""
    return db.raindrops.find({})
def read_bookmarks_of_last_days(no_of_days):
    """Return bookmarks created on/after (roughly) midnight *no_of_days*
    days ago, one aggregation document per bookmark under key 'bookmark'.
    """
    # NOTE(review): replace() zeroes hours/minutes/seconds but not
    # microseconds -- presumably intended as "start of day"; confirm.
    date = datetime.today().replace(hour=0, minute=0, second=0) - timedelta(days=no_of_days)
    # $elemMatch pre-filters collections, $unwind flattens their bookmark
    # arrays, the second $match drops old bookmarks from kept collections.
    aggregate = db.raindrops.aggregate(
        [{"$match": {"bookmarks": {"$elemMatch": {"created": {"$gte": date}}}}}, {"$unwind": "$bookmarks"},
         {"$match": {"bookmarks.created": {"$gte": date}}},
         {"$project": {"bookmark": "$bookmarks"}}])
    return aggregate
def add_bookmarks(collection_id, bookmarks):
    """Replace (not append to) the stored bookmark list of a collection
    with *bookmarks*, normalised via format_bookmarks()."""
    db.raindrops.update_one(
        {"collection_id": collection_id},
        {"$set": {
            "bookmarks": format_bookmarks(bookmarks)
        }}
    )
def format_bookmarks(bookmarks):
    """Normalise raw Raindrop API bookmark dicts to the stored schema
    (id / title / link / created-as-datetime)."""
    # A filter on get_last_update_time() < to_datetime(b['lastUpdate']) was
    # previously considered here; currently every bookmark is kept.
    return [
        {
            "id": b['_id'],
            "title": b['title'],
            "link": b['link'],
            "created": to_datetime(b['created'])
        }
        for b in bookmarks
    ]
def get_random_bookmark():
    """Pick a random collection, then a random bookmark from it.

    Note: this is not uniform over bookmarks (small collections are
    over-represented), and random.choice raises IndexError when the chosen
    collection has an empty bookmark list.
    """
    random_collection = random.choice(list(read_collections()))
    return random.choice(random_collection['bookmarks'])
def get_random(collection):
    """Return one uniformly random element of *collection*; thin wrapper
    around random.choice."""
    picked = random.choice(collection)
    return picked
def update_time():
    """Record 'now' (converted to UTC) in Redis under key 'last_update',
    formatted with DATETIME_FORMAT."""
    r.set('last_update', datetime.now().astimezone(pytz.UTC).strftime(DATETIME_FORMAT))
def get_last_update_time():
    """Read the Redis 'last_update' marker back as a datetime.

    NOTE(review): assumes r.get() returns str; with a non-decoding Redis
    client it would return bytes and strptime would fail -- confirm.
    """
    return to_datetime(r.get('last_update'))
def to_datetime(time_string):
    """Parse an API timestamp string in DATETIME_FORMAT
    (e.g. '2020-01-31T12:00:00.000Z') into a naive datetime."""
    return datetime.strptime(time_string, DATETIME_FORMAT)
|
994,366 | 093d12e933059359cc33de950bad62a46ccee1cd | import sys
sys.setrecursionlimit(10**8)  # deep recursion needed for the DFS in main()
# Competitive-programming helpers: read from stdin / build DP tables.
def ii(): return int(sys.stdin.readline())                      # one int
def mi(): return map(int, sys.stdin.readline().split())         # ints as map
def li(): return list(map(int, sys.stdin.readline().split()))   # ints as list
def li2(N): return [list(map(int, sys.stdin.readline().split())) for i in range(N)]  # N rows of ints
def dp2(ini, i, j): return [[ini]*i for i2 in range(j)]         # j x i table of ini
def dp3(ini, i, j, k): return [[[ini]*i for i2 in range(j)] for i3 in range(k)]  # k x j x i table
#import bisect #bisect.bisect_left(B, a)
#from collections import defaultdict #d = defaultdict(int) d[key] += value
## DFS
def main():
    """Tree accumulation (AtCoder-style): apply Q operations "add p1 to the
    counter of node p0", then DFS from root node 0 so each node's answer is
    the sum of counters on the path from the root, and print all answers.
    """
    # Read input
    N, Q = mi()
    x = li2(N-1)           # N-1 edges, 1-indexed endpoints
    cnt = [0]*N
    for i in range(Q):
        p0, p1 = mi()
        cnt[p0-1] += p1    # accumulate increments per node (convert to 0-index)
    # Build the adjacency list
    adj = [[] for i in range(N)]
    for i in range(N-1):
        #x[i][0] -= 1
        #x[i][1] -= 1
        adj[x[i][0]-1].append(x[i][1]-1)
        adj[x[i][1]-1].append(x[i][0]-1)
    # DFS: child answer = parent answer + child counter
    global ans
    ans = [0]*N
    def dfs(fr, nx):
        ans[nx] += ans[fr] + cnt[nx]
        for v in adj[nx]:
            if v != fr:
                dfs(nx, v)
    # Root at node 0; fr == nx == 0 is safe because ans[0] starts at 0.
    dfs(0, 0)
    '''
    for i in range(N):
        print(ans[i], '', end='')
    '''
    print(*ans)
if __name__ == "__main__":
main() |
994,367 | 769a5f3905449336a70252b5726db630099dc928 | /home/ayushjain1144/miniconda3/lib/python3.6/operator.py |
994,368 | 8671da76c2508493c1f23294029300916db1d85f | from random import choice
from Core import Statistics
from Lib.IPv4 import IPAddress
is_ipv4 = IPAddress.is_ipv4
class PcapAddressOperations():
def __init__(self, statistics: Statistics, uncertain_ip_mult: int=3):
"""
Initializes a pcap information extractor that uses the provided statistics for its operations.
:param statistics: The statistics of the pcap file
:param uncertain_ip_mult: the mutliplier to create new address space when the remaining observed space has been drained
"""
self.statistics = statistics
self.UNCERTAIN_IPSPACE_MULTIPLIER = uncertain_ip_mult
stat_result = self.statistics.process_db_query("most_used(macAddress)", print_results=False)
if isinstance(stat_result, list):
self.probable_router_mac = choice(stat_result)
else:
self.probable_router_mac = stat_result
self._init_ipaddress_ops()
def get_probable_router_mac(self):
"""
Returns the most probable router MAC address based on the most used MAC address in the statistics.
:return: the MAC address
"""
return self.probable_router_mac
def pcap_contains_priv_ips(self):
"""
Returns if the provided traffic contains private IPs.
:return: True if the provided traffic contains private IPs, otherwise False
"""
return self.contains_priv_ips
def get_local_address_range(self):
"""
Returns a tuple with the start and end of the observed local IP range.
:return: The IP range as tuple
"""
return str(self.min_local_ip), str(self.max_local_ip)
def get_count_rem_local_ips(self):
"""
Returns the number of local IPs in the pcap file that have not aldready been returned by get_existing_local_ips.
:return: the not yet assigned local IPs
"""
return len(self.remaining_local_ips)
def in_remaining_local_ips(self, ip: str) -> bool:
"""
Returns if IP is exists in pcap.
:return: True if the IP is in the remaining local ips, False if not
"""
return ip in self.remaining_local_ips
def get_existing_local_ips(self, count: int=1):
"""
Returns the given number of local IPs that are existent in the pcap file.
:param count: the number of local IPs to return
:return: the chosen local IPs
"""
if count <= 0:
return []
if count > len(self.remaining_local_ips):
print("Warning: There are no more {} local IPs in the .pcap file. Returning all remaining local IPs.".format(count))
total = min(len(self.remaining_local_ips), count)
retr_local_ips = []
local_ips = self.remaining_local_ips
for _ in range(0, total):
random_local_ip = choice(sorted(local_ips))
retr_local_ips.append(str(random_local_ip))
local_ips.remove(random_local_ip)
return retr_local_ips
def get_new_local_ips(self, count: int=1):
"""
Returns in the pcap not existent local IPs that are in proximity of the observed local IPs. IPs can be returned
that are either between the minimum and maximum observed IP and are therefore considered certain
or that are above the observed maximum address, are more likely to not belong to the local network
and are therefore considered uncertain.
:param count: the number of new local IPs to return
:return: the newly created local IP addresses
"""
if count <= 0:
return []
# add more unused local ips to the pool, if needed
while len(self.unused_local_ips) < count and self.expand_unused_local_ips() == True:
pass
unused_local_ips = self.unused_local_ips
uncertain_local_ips = self.uncertain_local_ips
count_certain = min(count, len(unused_local_ips))
retr_local_ips = []
for _ in range(0, count_certain):
random_local_ip = choice(sorted(unused_local_ips))
retr_local_ips.append(str(random_local_ip))
unused_local_ips.remove(random_local_ip)
# retrieve uncertain local ips
if count_certain < count:
count_uncertain = count - count_certain
# check if new uncertain IPs have to be created
if len(uncertain_local_ips) < count_uncertain:
ipspace_multiplier = self.UNCERTAIN_IPSPACE_MULTIPLIER
max_new_ip = self.max_uncertain_local_ip.to_int() + ipspace_multiplier * count_uncertain
count_new_ips = max_new_ip - self.max_uncertain_local_ip.to_int()
# create ipspace_multiplier * count_uncertain new uncertain local IP addresses
last_gen_ip = None
for i in range(1, count_new_ips + 1):
ip = IPAddress.from_int(self.max_uncertain_local_ip.to_int() + i)
# exclude the definite broadcast address
if self.priv_ip_segment:
if ip.to_int() >= self.priv_ip_segment.last_address().to_int():
break
uncertain_local_ips.add(ip)
last_gen_ip = ip
self.max_uncertain_local_ip = last_gen_ip
# choose the uncertain IPs to return
total_uncertain = min(count_uncertain, len(uncertain_local_ips))
for _ in range(0, total_uncertain):
random_local_ip = choice(sorted(uncertain_local_ips))
retr_local_ips.append(str(random_local_ip))
uncertain_local_ips.remove(random_local_ip)
return retr_local_ips
def get_existing_external_ips(self, count: int=1):
"""
Returns the given number of external IPs that are existent in the pcap file.
:param count: the number of external IPs to return
:return: the chosen external IPs
"""
if not (len(self.external_ips) > 0):
print("Warning: .pcap does not contain any external ips.")
return []
total = min(len(self.remaining_external_ips), count)
retr_external_ips = []
external_ips = self.remaining_external_ips
for _ in range(0, total):
random_external_ip = choice(sorted(external_ips))
retr_external_ips.append(str(random_external_ip))
external_ips.remove(random_external_ip)
return retr_external_ips
def _init_ipaddress_ops(self):
"""
Load and process data needed to perform functions on the IP addresses contained in the statistics
"""
# retrieve local and external IPs
all_ips_str = set(self.statistics.process_db_query("all(ipAddress)", print_results=False))
# external_ips_str = set(self.statistics.process_db_query("ipAddress(macAddress=%s)" % self.get_probable_router_mac(), print_results=False)) # including router
# local_ips_str = all_ips_str - external_ips_str
external_ips = set()
local_ips = set()
all_ips = set()
self.contains_priv_ips = False
self.priv_ip_segment = None
# convert IP strings to IPv4.IPAddress representation
for ip in all_ips_str:
if is_ipv4(ip):
ip = IPAddress.parse(ip)
# exclude local broadcast address and other special addresses
if (not str(ip) == "255.255.255.255") and (not ip.is_localhost()) and (not ip.is_multicast()) and (
not ip.is_reserved()) and (not ip.is_zero_conf()):
all_ips.add(ip)
for ip in all_ips:
if ip.is_private():
local_ips.add(ip)
external_ips = all_ips - local_ips
# save the certain unused local IPs of the network
# to do that, divide the unused local Addressspace into chunks of (chunks_size) Addresses
# initally only the first chunk will be used, but more chunks can be added to the pool of unused_local_ips if needed
self.min_local_ip, self.max_local_ip = min(local_ips), max(local_ips)
local_ip_range = (self.max_local_ip.to_int()) - (self.min_local_ip.to_int() + 1)
if local_ip_range < 0:
# for min,max pairs like (1,1), (1,2) there is no free address in between, but for (1,1) local_ip_range may be -1, because 1-(1+1)=-1
local_ip_range = 0
# chunk size can be adjusted if needed
self.chunk_size = 200
self.current_chunk = 1
if local_ip_range < self.chunk_size:
# there are not more than chunk_size unused IP Addresses to begin with
self.chunks = 0
self.chunk_remainder = local_ip_range
else:
# determine how many chunks of (chunk_size) Addresses there are and the save the remainder
self.chunks = local_ip_range // self.chunk_size
self.chunk_remainder = local_ip_range % self.chunk_size
# add the first chunk of IP Addresses
self.unused_local_ips = set()
self.expand_unused_local_ips()
# save the gathered information for efficient later use
self.external_ips = frozenset(external_ips)
self.remaining_external_ips = external_ips
self.max_uncertain_local_ip = self.max_local_ip
self.local_ips = frozenset(local_ips)
# print("External IPS: " + str(external_ips))
# print("LOCAL IPS: " + str(local_ips))
self.remaining_local_ips = local_ips
self.uncertain_local_ips = set()
def expand_unused_local_ips(self):
"""
expands the set of unused_local_ips by one chunk_size
to illustrate this algorithm: suppose we have a chunksize of 100 and an Address space of 1 to 1000 (1 and 1000 are unused too), we then have 10 chunks
every time this method is called, one chunk (100 Addresses) is added, each chunk starts at the base_address + the number of its chunk
then, every chunk_amounth'th Address is added. Therefore for 10 chunks, every 10th address is added
For the above example for the first, second and last call, we get the following IPs, respectively:
first Call: 1+0, 1+10, 1+20, 1+30, ..., 1+990
second Call: 2+0, 2+10, 2+20, 2+30, ..., 2+990
ten'th Call: 10+0, 10+10, 10+20, 10+30, ..., 10+990
:return: False if there are no more available unusd local IP Addresses, True otherwise
"""
if self.current_chunk == self.chunks+1:
# all chunks are used up, therefore add the remainder
remainder_base_addr = self.min_local_ip.to_int() + self.chunks*self.chunk_size + 1
for i in range(0,self.chunk_remainder):
ip = IPAddress.from_int(remainder_base_addr + i)
self.unused_local_ips.add(ip)
self.current_chunk = self.current_chunk + 1
return True
elif self.current_chunk <= self.chunks:
# add another chunk
# choose IPs from the whole address space, that is available
base_address = self.min_local_ip.to_int() + self.current_chunk
for i in range(0,self.chunk_size):
ip = IPAddress.from_int(base_address + i*self.chunks)
self.unused_local_ips.add(ip)
self.current_chunk = self.current_chunk + 1
return True
else:
# no free IPs remaining
return False |
994,369 | 110f1f19af116269a258207a91bb9db1a324447b | import sys
import time
sys.stdin = open("최소이동거리.txt")
# 단방향임에 주의할것!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
str_time = time.time()
T = int(input())
def dijkstra(v):
    """Naive O(V^2) Dijkstra from source *v*.

    Operates on module-level globals: `arr` (adjacency matrix, 999999 =
    no edge), `ga` (tentative distances), `visited`. Loops only until the
    target node V (the highest-numbered node) has been finalised.
    """
    ga[v] = 0
    while not visited[V]:
        # pick the unvisited node with the smallest tentative distance
        tem = 99999999
        u = 0
        for i in range(V+1):
            if visited[i]==0 and ga[i] < tem:
                tem = ga[i]
                u = i
        visited[u] = 1
        # relax every edge out of u
        for j in range(V+1):
            if ga[j] > arr[u][j]+ga[u]:
                ga[j] = arr[u][j]+ga[u]
for tc in range(1,T+1):
    V,G = map(int,input().split())
    # arr[i][j]: weight of directed edge i -> j (999999 = no edge); nodes 0..V
    arr = [[999999 for _ in range(V+1)] for _ in range(V+1)]
    ga =[9999999 for _ in range(V+1)]          # tentative distances
    visited = [0 for _ in range(V+1)]
    for i in range(G):
        A = list(map(int, input().split()))
        arr[A[0]][A[1]] = A[2]
        # arr[A[1]][A[0]] = A[2]               # reverse edge intentionally omitted (directed)
    dijkstra(0)
    print("#{} {}" .format(tc, ga[V]))         # shortest distance 0 -> V
print(time.time()-str_time)                    # total runtime for all test cases
994,370 | ab0e47e19e4ca0b66fa4a5fd803a016866f8217b | import unittest, match_word_non_word
class TestMatchWordNonWord(unittest.TestCase):
    """Unit test for the match_word_non_word module."""

    def test_match_word_non_word(self):
        # The function is expected to report the string 'true' (not bool)
        # for input containing a word/non-word boundary.
        self.assertEqual(
            match_word_non_word.match_word_non_word('www.hackerrank.com'), 'true')
if __name__ == '__main__':
unittest.main()
|
994,371 | e1d86f299d5d685124956c9f5bdb1e0268d649df | #!/usr/bin/env python3
"""
--- Day 13: Shuttle Search ---
https://adventofcode.com/2020/day/13
Part 1: Find the coset -d + k * ZZ with the smallest nonnegative element
for a fixed integer d and a list of candidates for k.
Part 2: Solving multiple congruence relations (Chinese remainder theorem)
Solution exists as input bus IDs are all prime, hence pairwise coprime
References:
https://en.wikipedia.org/wiki/Chinese_remainder_theorem
https://en.wikipedia.org/wiki/Extended_Euclidean_algorithm
"""
def main():
    """Solve both parts of AoC 2020 day 13 from input.txt.

    Line 1: earliest possible departure time. Line 2: comma-separated bus
    IDs with 'x' marking out-of-service slots -- the index of each real ID
    still matters, as it becomes the required offset in part 2.
    """
    # Read data from input file into memory
    bus_ids = []
    delays = []
    with open("input.txt", 'r') as file:
        earliest_departure = int(file.readline())
        for i, bus_id in enumerate(file.readline().split(',')):
            if bus_id != 'x':
                bus_ids.append(int(bus_id))
                delays.append(i)  # list position == required departure offset
    earliest_bus = min(bus_ids, key=lambda bus_id: wait_until_next_bus(bus_id, earliest_departure))
    min_wait = wait_until_next_bus(earliest_bus, earliest_departure)
    print(f"Part 1: The first bus departing after {earliest_departure} has ID {earliest_bus}.")
    print(f"The waiting time is {min_wait} and the product with bus ID is {earliest_bus * min_wait}.")
    # For part 2, we want to find the smallest nonnegative integer timestamp `t` satisfying
    # (t + delay) % bus_id == 0 for all matching pairs of bus_ids and delays
    # We may express this through congruence relations `t = -delay (mod bus_id)` for bus_ids, delays
    # The solution is bounded above by the product of the input (pairwise coprime) bus IDs
    timestamp, _ = solve_multiple_congruence([-d for d in delays], bus_ids)
    print("Part 2: By solving the corresponding system of congruence equations, we find that")
    print("the earliest timestamp such that all of the listed bus IDs depart at offsets matching")
    print(f"their positions in the list is {timestamp}.")
    return 0
def wait_until_next_bus(bus_id, start):
    """Return how long after *start* the next bus on route *bus_id*
    departs (0 when a bus departs exactly at *start*). Buses depart every
    bus_id time units starting at t = 0.
    """
    # Latest departure at or before `start`.
    previous_departure = bus_id * (start // bus_id)
    if previous_departure == start:
        return 0
    # Otherwise wait for the departure immediately after `start`.
    return previous_departure + bus_id - start
def extended_euclidean_algorithm(a, b):
    """
    Calculate the highest common factor (hcf) of integers `a` and `b` and the
    coefficients of Bezout's identity: integers x, y with a*x + b*y = hcf(a, b).
    Returns (x, y, hcf).
    """
    # Invariant maintained for both rows: a*row_x + b*row_y == row_r.
    prev_r, prev_x, prev_y = a, 1, 0
    curr_r, curr_x, curr_y = b, 0, 1
    while curr_r:
        quotient = prev_r // curr_r
        prev_r, curr_r = curr_r, prev_r - quotient * curr_r
        prev_x, curr_x = curr_x, prev_x - quotient * curr_x
        prev_y, curr_y = curr_y, prev_y - quotient * curr_y
    # The last nonzero remainder is the highest common factor of a and b.
    assert a % prev_r == 0 and b % prev_r == 0
    # The corresponding coefficients satisfy Bezout's identity.
    assert a * prev_x + b * prev_y == prev_r
    return prev_x, prev_y, prev_r
def solve_congruence_pair(a_i, n_i):
    """
    Solve the system x = a1 (mod n1), x = a2 (mod n2) for coprime n1, n2.
    With Bezout coefficients m1, m2 satisfying m1*n1 + m2*n2 = 1, one
    solution is x = a2*m1*n1 + a1*m2*n2 (not reduced modulo n1*n2).
    """
    a1, a2 = a_i
    n1, n2 = n_i
    m1, m2, _ = extended_euclidean_algorithm(n1, n2)
    solution = a2 * m1 * n1 + a1 * m2 * n2
    return solution
def solve_multiple_congruence(a_i, n_i):
    """
    Solve the system of congruence equations, x = a_{i} (mod n_{i}), i = 1, 2, ..., k,
    where n_{i} are pairwise coprime. The Chinese remainder theorem states that
    there is a solution x which is unique modulo N = prod(n_i).
    Returns the tuple (solution, modulus).
    """
    solution, modulus = a_i[0], n_i[0]
    # Solve the system by folding each further congruence into the
    # accumulated (solution, modulus) pair, one at a time.
    for a, n in zip(a_i[1:], n_i[1:]):
        solution = solve_congruence_pair((solution, a), (modulus, n))
        modulus *= n
        solution %= modulus   # keep the nonnegative canonical representative
    # Verify that our calculated solution satisfies all of the required congruence equations
    assert all((solution - a) % n == 0 for a, n in zip(a_i, n_i))
    return solution, modulus
if __name__ == "__main__":
main()
|
994,372 | 778d27e8aa034053fb0b1f5cdaa62df8736fc843 | from invoke import task
@task
def genpsw(content, certificate_file, file_save=True):
    """Invoke task placeholder.

    NOTE(review): the body is unimplemented -- intended behaviour (generate
    a password from *content* using *certificate_file*, optionally saving
    to file when *file_save* is True) is inferred from the names only.
    """
    pass
|
994,373 | 851005660c424815a9297e6e17db5b32afe23454 | #! /usr/bin/env python
#coding:utf-8
class Me(object):
    """Tiny demo class used to exercise the reflection builtins below."""

    def test(self):
        # Single-argument print with parentheses behaves identically on
        # Python 2 and Python 3.
        print("Hello")
def new_test():
    """Replacement attribute used to demonstrate setattr() below."""
    print("New Hello")
me = Me()
# Reflection demo. Fix: `hasattr(me. "test")` was a SyntaxError (dot
# instead of comma); arguments are now passed separately. Single-argument
# print(...) works on both Python 2 and 3.
print(hasattr(me, "test"))             # does `me` expose a `test` attribute?
print(getattr(me, "test"))             # fetch the bound `test` attribute
print(setattr(me, "test", new_test))   # rebind; setattr returns None
print(delattr(me, "test"))             # remove the instance attribute; returns None
print(isinstance(me, Me))              # is `me` an instance of Me?
print(issubclass(Me, object))          # is Me a subclass of object?
|
994,374 | ae851b0947639558638690e35073f786092fe741 | Marketing 311
Tony
10 questions about marketing
1. Agree
2. Agree
3. Not sure
4. Not sure
5. Disagree
6. Agree
7. Disagree
8. Agree
9. Not sure
10. Disagree
point: most people have distorted perceptions of what marketing is
Notes:
drink link
create an extension on the patron model for DrinkLink ambassadors
maybe another enum on the models
|
994,375 | d6ed14adc2fe03a11562c4e95d4598c6612043b7 | #!/usr/bin/env python
print("importing libraries")
import kalibr_common as kc
import cv2
import csv
import os
import sys
import argparse
import sm
try:
    import cv
    png_flag = cv.CV_IMWRITE_PNG_COMPRESSION
except ImportError:
    # Newer OpenCV releases dropped the legacy `cv` module; fall back to
    # the cv2 spelling of the same imwrite flag.
    png_flag = cv2.IMWRITE_PNG_COMPRESSION
#setup the argument list
parser = argparse.ArgumentParser(description='Extract a ROS bag containing a image and imu topics.')
parser.add_argument('--bag', metavar='bag', help='ROS bag file')
parser.add_argument('--image-topics', metavar='image_topics', nargs='+', help='Image topics %(default)s')
parser.add_argument('--imu-topics', metavar='imu_topics', nargs='+', help='Imu topics %(default)s')
parser.add_argument('--output-folder', metavar='output_folder', nargs='?', default="output", help='Output folder %(default)s')
#print help if no argument is specified
if len(sys.argv)<2:
parser.print_help()
sys.exit(0)
#parse the args
parsed = parser.parse_args()
if parsed.image_topics is None and parsed.imu_topics is None:
print("ERROR: Need at least one camera or IMU topic.")
sys.exit(-1)
#create output folder
try:
os.makedirs(parsed.output_folder)
except:
pass
# prepare progess bar
iProgress = sm.Progress2(1)
#extract images
#extract images
if parsed.image_topics is not None:
    for cidx, topic in enumerate(parsed.image_topics):
        dataset = kc.BagImageDatasetReader(parsed.bag, topic)
        # One sub-folder per camera: <output>/cam0, <output>/cam1, ...
        os.makedirs("{0}/cam{1}".format(parsed.output_folder, cidx))
        numImages = dataset.numImages()
        #progress bar
        print("Extracting {0} images from topic {1}".format(numImages, dataset.topic))
        iProgress.reset(numImages)
        iProgress.sample()
        for timestamp, image in dataset:
            params = list()
            params.append(png_flag)
            params.append(0) #0: loss-less
            # File name is the full ROS timestamp: <secs><nsecs zero-padded to 9>.png
            filename = "{0}{1:09d}.png".format(timestamp.secs, timestamp.nsecs)
            cv2.imwrite( "{0}/cam{1}/{2}".format(parsed.output_folder, cidx, filename), image, params )
            iProgress.sample()
        print("\r done. ")
        print("")
    print("")
#extract imu data
#extract imu data
if parsed.imu_topics is not None:
    for iidx, topic in enumerate(parsed.imu_topics):
        dataset = kc.BagImuDatasetReader(parsed.bag, topic)
        filename = "imu{0}.csv".format(iidx)   # one CSV per IMU topic
        #progress bar
        numMsg = dataset.numMessages()
        print("Extracting {0} IMU messages from topic {1}".format(numMsg, dataset.topic))
        iProgress.reset(numMsg)
        iProgress.sample()
        # CSV layout: timestamp [ns], gyro x/y/z, accel x/y/z.
        with open( "{0}/{1}".format(parsed.output_folder, filename), 'w') as imufile:
            spamwriter = csv.writer(imufile, delimiter=',')
            spamwriter.writerow(["timestamp", "omega_x", "omega_y", "omega_z", "alpha_x", "alpha_y", "alpha_z"])
            for timestamp, omega, alpha in dataset:
                # Convert ROS time (float seconds) to integer nanoseconds.
                timestamp_int = int(timestamp.toSec()*1e9)
                spamwriter.writerow([timestamp_int, omega[0],omega[1],omega[2], alpha[0],alpha[1],alpha[2] ])
                iProgress.sample()
        print("\r done. ")
        print("")
|
994,376 | fed4e88bc1cb18da20b57d591fe5c1871052e8a0 | # A Python Example
from random import shuffle, randint
class Card(object):
    """One playing card: numeric suit 0-3 (see printSuitAsString) and a
    value in 1..13."""

    def __init__(self, suit, value):
        self.suit, self.value = suit, value
class Deck(object):
    """A full 52-card deck, shuffled at construction time."""

    def __init__(self):
        # 4 suits x 13 values; values run 1..13.
        cards = [Card(suit, value + 1)
                 for suit in range(4)
                 for value in range(13)]
        shuffle(cards)
        self.deck = cards
class Player(object):
    """A named player holding a hand of cards (dealt externally)."""

    def __init__(self, name):
        self.name = name
        # Fresh per-instance list; filled by the dealing code outside.
        self.cards = []
def printSuitAsString(suit):
    """Map a numeric suit (0-3) to its letter: Hearts, Clubs, Diamonds,
    Spades. Returns None for any other value, matching the original
    if/elif fall-through behaviour.
    """
    # Dict lookup replaces the if/elif chain; .get() preserves the
    # implicit None for unknown suits.
    return {0: "H", 1: "C", 2: "D", 3: "S"}.get(suit)
# Demo: build a deck, print it, then deal five cards to each of two players.
deck = Deck()
for card in deck.deck:
    print str(card.value) + "-" + printSuitAsString(card.suit)
print "-" * 20
player1 = Player("Stephen")
player2 = Player("Steward")
# Deal consecutive blocks: cards 0-4 to player1, cards 5-9 to player2.
for i in range(5):
    player1.cards.append(deck.deck[i])
    player2.cards.append(deck.deck[i + 5])
# Human-readable "value-suit" strings per hand (built but never printed).
stack1 = []
for card in player1.cards:
    stack1.append(str(card.value) + "-" + printSuitAsString(card.suit))
stack2 = []
for card in player2.cards:
    stack2.append(str(card.value) + "-" + printSuitAsString(card.suit))
print "-" * 20
994,377 | 75aef422358dffd847da08e996f6866c58e86aad | # Odd/Even
def odd_even():
    """Print 1..2000, labelling each number odd or even (Python 2 print)."""
    for count in range(1, 2001):
        oddoreven = "";
        if count % 2 == 0:
            oddoreven = "even"
        else:
            oddoreven = "odd"
        print "Number is %s. This is an %s number." % (count, oddoreven)
# Runs on import: prints all 2000 lines.
odd_even()
# Multiply
def multiply(list, multiplier):
    """Scale every element of *list* in place and return the same list.

    Note: the parameter name shadows the builtin ``list``; it is kept for
    keyword-argument compatibility with existing callers.
    """
    for idx, value in enumerate(list):
        list[idx] = value * multiplier
    return list
# Demonstrate in-place scaling (Python 2 print statement).
answer = multiply([2, 4, 10, 16], 5)
print answer #[10, 20, 50, 80]
# Hacker Challenge
def layered_multiples(arr):
    """Return a list whose i-th element is a list of arr[i] ones.

    e.g. layered_multiples([2, 4]) -> [[1, 1], [1, 1, 1, 1]].
    An empty input yields []; a zero entry yields an empty inner list,
    exactly as the original nested loops did.
    """
    # Comprehension + list repetition replace the manual append loops.
    return [[1] * n for n in arr]
# multiply doubles in place -> [2, 4, 6]; layered_multiples expands each
# count into a row of ones (Python 2 print statement).
x = layered_multiples(multiply([1,2,3],2)) #[2, 4, 6]
print x #[[1,1],[1,1,1,1],[1,1,1,1,1,1]]
|
994,378 | 0b4d93ce123506efd51ae3591262e0f8d55ed93d | import utils
from django.utils import simplejson
import model
class Main(utils.Handler):
    """Request handler for '/': renders the home page with ministry averages."""
    def get(self):
        # Averages are serialized to JSON so the template can hand them to JS.
        self.render('templates/home.html',
            params=self.params,
            data=simplejson.dumps(self.get_ministry_avg()))
    def get_ministry_avg(self):
        """Return per-ministry average data (currently hard-coded stub)."""
        #results = self.model.query('agrr')
        #return self.model.get_issue(99636)
        return [
            {
                'name': 'foo',
                'avg': 0.5
            },
            {
                'name': 'bar',
                'avg': 0.5
            }
        ]
if __name__ == '__main__':
    utils.run([('/', Main)], debug=True)
|
994,379 | 6d4ba0c6c5eb5a7d42c3c6d443eb1c099d152c56 | from django.shortcuts import render
# Create your views here.
from django.conf import settings
from django.http import HttpResponse
import time as tm
from TwitterSearch import *
import datetime
import json
from pymongo import MongoClient
from textblob import TextBlob
from textblob import Word
search_time = ""
track = []
import numpy
import re
from collections import Counter
import pandas as pd
client = MongoClient()
client = MongoClient('localhost', 27017)
db = client.twitter_db
tweets = db.tweets
def tstream(track, tweets = tweets):
    """Fetch up to 300 English tweets matching *track*, sentiment-score each
    with TextBlob and persist them to the Mongo collection *tweets*.

    Returns the UTC timestamp string tagging this search batch, or None if
    the Twitter client raised (the exception is only printed).
    """
    # SECURITY NOTE(review): API credentials are hardcoded below — move them
    # to configuration / environment variables and rotate the keys.
    # c.execute("TRUNCATE TABLE data;")
    search_time = str(datetime.datetime.utcnow())
    try:
        tso = TwitterSearchOrder() # create a TwitterSearchOrder object
        tso.set_keywords(track) # let's define all words we would like to have a look for
        tso.set_language('en') # English tweets only
        tso.set_include_entities(False) # and don't give us all those entity information
        # it's about time to create a TwitterSearch object with our secret tokens
        ts = TwitterSearch(
            consumer_key = "FOczd9TJM1MjvO53mNLjfNUl6",
            consumer_secret = "MIkYJ10mp6zeqP9Jn94jQxkho3aZI1jY4gF5srPxAgMRPx31jz" ,
            access_token = "2996993270-YP7iqsqvB8LdVB0nzCA4ZTocuIQ60VtjBgs0gOf",
            access_token_secret = "aKcWVVz3CkEtU7uufJs5HyBZpzEzZT3g2800VQ9vijJ40"
        )
        # this is where the fun actually starts :)
        i = 0
        for all_data in ts.search_tweets_iterable(tso):
            if i < 300:
                tweet = all_data["text"]
                time = tm.strftime('%Y-%m-%d %H:%M:%S', tm.strptime(all_data['created_at'],'%a %b %d %H:%M:%S +0000 %Y'))
                blob = TextBlob(tweet)
                senti = blob.sentiment
                username = all_data["user"]["screen_name"].encode('utf-8')
                geo = ""
                if all_data["geo"] == None:
                    geo = ""
                else:
                    geo = str(all_data["geo"]["coordinates"])
                # c.execute("INSERT INTO data (user,time,geo, tweet) VALUES (%s,%s,%s,%s)",
                # ( username,time,geo, tweet))
                # conn.commit()
                #print(username,time,geo,tweet)
                post = {
                    "track":track,
                    "search_time":search_time,
                    "name":username,
                    "polarity": senti.polarity,
                    "subjectivity": senti.subjectivity,
                    "tweet":str(tweet.encode('utf-8')),
                    "geo":geo,
                    "time":time
                }
                #print post
                # NOTE(review): insert_many with a single document per call —
                # batching the posts would cut round trips.
                tweets.insert_many([post])
                i = i +1
            else:
                break
        #print tweets.find_one({"track":track})
        return search_time
    except TwitterSearchException as e: # take care of all those ugly errors if there are some
        print(e)
def index(request, search_time_=search_time, tweets = tweets , track=track):
    """Single dispatcher view (Python 2 code).

    POST with "search_box": run a fresh Twitter search, store results, render
    the scatter plot.  POST with "name" in {scatter, donat, table}: re-render
    the stored batch identified by (search_time, track) in that widget.
    GET / anything else: render the landing page.
    """
    track_ = ""
    if request.method == 'POST':
        if request.POST.get("search_box") is not None :
            track_ = request.POST.get("search_box")
            client = MongoClient()
            client = MongoClient('localhost', 27017)
            db = client.twitter_db
            tweets = db.tweets
            # Split the query box into individual keywords for the search API.
            track_ = str(request.POST.get("search_box")).split(" ")
            print track_
            search_time = tstream(track_)
            # global track
            # global search_time
            track = " ".join(track_)
            a = list(tweets.find({"search_time":search_time ,"track":track},{"time":1,"tweet":1 ,"polarity":1,"subjectivity":1,"_id":0}))
            # data = [{ "key":"series",
            #           "values":[]
            #         }
            #        ]
            # length = len(a)
            # for i in range(length):
            #     time_ = datetime.datetime.strptime(a[i]["time"], "%Y-%m-%d %H:%M:%S")
            #     time_ = tm.mktime(time_.timetuple())
            #     c = [time_ , a[i]['polarity']]
            #     data[0]["values"].append(c)
            # data = [ ['Task', 'Hours per Day']]
            # length_a = len(a)
            # positive = 0
            # negative = 0
            # neutral = 0
            # for i in range(length_a):
            #     if a[i]["polarity"] >0:
            #         positive += 1
            #     elif a[i]["polarity"] == 0:
            #         neutral += 1
            #     else:
            #         negative += 1
            # data.append(["positive",positive])
            # data.append(["negative",negative])
            # data.append(["neutral",neutral])
            # search_time = "2016-01-19 07:11:49.584101"
            # track = "india"
            # dist = numpy.unique(word_pos)
            # dist_count = []
            # for c in dist:
            #     temp = {"text":"","size":0}
            #     count =
            context = { "scatter":json.dumps(a) , "search_time":search_time , "track":track}
            return render(request, 'app/scatter.html', context )
        elif request.POST.get("name") == "scatter":
            search_time = str(request.POST.get("search_time"))
            track = request.POST.get("track")
            a = list(tweets.find({"search_time":search_time ,"track":track },{"polarity":1,"subjectivity":1 ,"_id":0}))
            context = {"scatter": json.dumps(a) , "search_time": search_time , "track":track}
            return render(request, 'app/scatter.html' , context )
        elif request.POST.get("name") == "donat":
            search_time = str(request.POST.get("search_time"))
            track = request.POST.get("track")
            a = list(tweets.find({"search_time":search_time ,"track":track },{"polarity":1 ,"_id":0}))
            # Bucket tweets by polarity sign for the donut chart.
            data = [ ['Task', 'Hours per Day']]
            length_a = len(a)
            positive = 0
            negative = 0
            neutral = 0
            for i in range(length_a):
                if a[i]["polarity"] >0:
                    positive += 1
                elif a[i]["polarity"] == 0:
                    neutral += 1
                else:
                    negative += 1
            data.append(["positive",positive])
            data.append(["negative",negative])
            data.append(["neutral",neutral])
            context = {"donat": json.dumps(data) , "search_time": search_time , "track":track}
            return render(request, 'app/donat.html' , context )
        elif request.POST.get("name") == "table":
            search_time = str(request.POST.get("search_time"))
            track = request.POST.get("track")
            pd.set_option('display.max_colwidth', -1)
            a = pd.DataFrame(list(tweets.find({"search_time":search_time ,"track":track },{"_id":0,"name":1,"polarity":1,"subjectivity":1,"tweet":1,"time":1 })))
            print list(tweets.find({"search_time":search_time ,"track":track },{"_id":0,"name":1,"polarity":1,"subjectivity":1,"tweet":1,"time":1 }))
            context = {"table": a.to_html(index= False).replace('class="dataframe"','id="example1" class="table table-striped table-bordered"') , "search_time": search_time , "track":track}
            return render(request, 'app/table.html' , context )
    return render(request, 'app/index.html', context = {})
# client = MongoClient()
# client = MongoClient('localhost', 27017)
# db = client.twitter_db
# tweets = db.tweets
#2016-01-19 07:11:49.584101
# 2016-01-18 14:27:52.500655
|
994,380 | bc1782d1c91f0d2ee62e2442149e1a4d0a09009c | import json
filePath = "C://W//Python//Notes/json_1.txt"
f=open(filePath, "r")
s = f.read()
f.close()
print(s) |
def Search(array, value):
    """
    Iterative binary search.

    Question 1
    :param array: Sorted array to search
    :param value: Value we are searching for
    :return: True when *value* occurs in *array*, else False
    """
    lo, hi = 0, len(array) - 1
    while lo <= hi:
        middle = (lo + hi) // 2
        candidate = array[middle]
        if candidate < value:
            lo = middle + 1
        elif candidate > value:
            hi = middle - 1
        else:
            return True
    return False
def sortedHasSum(array, value):
    """
    Two-pointer scan over a sorted array for a pair summing to *value*.

    Question 5a
    :param array: Sorted array in which we are searching
    :param value: Target sum
    :return: True when array[i] + array[j] == value for some i < j
    """
    lo, hi = 0, len(array) - 1
    while lo < hi:
        pair_sum = array[lo] + array[hi]
        if pair_sum == value:
            return True
        if pair_sum < value:
            lo += 1
        else:
            hi -= 1
    return False
def hasSum(array, value):
    """
    Same pair-sum check as sortedHasSum, but for unsorted input: the array
    is first merge-sorted (in place — the caller's list IS mutated), then
    scanned with the two-pointer technique.

    Question 5b
    :param array: Unsorted array (sorted in place as a side effect)
    :param value: Target sum
    :return: True when some pair of elements sums to *value*
    """
    array = mergeSort(array)
    return sortedHasSum(array, value)
def mergeSort(array):
    """
    In-place top-down merge sort; returns the same (now sorted) list.

    :param array: List to sort (mutated in place)
    :return: The sorted list object
    """
    # Guard clause: zero or one element is already sorted.
    if len(array) <= 1:
        return array
    middle = len(array) // 2
    left_half = array[:middle]
    right_half = array[middle:]
    mergeSort(left_half)
    mergeSort(right_half)
    li = ri = write = 0
    # Merge the two sorted halves back into the original list.
    while li < len(left_half) and ri < len(right_half):
        if left_half[li] <= right_half[ri]:
            array[write] = left_half[li]
            li += 1
        else:
            array[write] = right_half[ri]
            ri += 1
        write += 1
    # One half is exhausted; splice in whatever remains of the other.
    array[write:] = left_half[li:] + right_half[ri:]
    return array
def quickSortforlogn(array, left, right):
    """
    Lomuto partition of array[left..right] around the pivot array[right].

    :param array: Array being sorted (partitioned in place)
    :param left: Leftmost index of the slice
    :param right: Rightmost index of the slice (pivot position on entry)
    :return: The pivot's FINAL sorted position
    """
    pivot = array[right]
    pivotIndex = left - 1
    for i in range(left, right):
        if array[i] <= pivot:
            pivotIndex += 1
            array[pivotIndex], array[i] = array[i], array[pivotIndex]
    # Bug fix: the pivot was never moved into place and the returned index
    # was not its position, so quickSortHelp could loop forever (e.g. on
    # [2, 1]).  Standard Lomuto finishes by swapping the pivot just past the
    # <=-pivot region and returning that slot.
    pivotIndex += 1
    array[pivotIndex], array[right] = array[right], array[pivotIndex]
    return pivotIndex
def quickSortHelp(array, left, right):
    """
    Quick sort helper: recurses into the smaller partition and loops on the
    larger one, keeping the recursion depth O(log n).

    :param array: Array sorted in place
    :param left: Leftmost index
    :param right: Rightmost index
    :return: The (same) sorted array
    """
    while left < right:
        # NOTE(review): correctness depends on quickSortforlogn returning
        # the pivot's final position — verify the partition step does so.
        pivotIndex = quickSortforlogn(array, left, right)
        if pivotIndex - left < right - pivotIndex:
            quickSortHelp(array, left, pivotIndex - 1)
            left = pivotIndex + 1
        else:
            quickSortHelp(array, pivotIndex + 1, right)
            right = pivotIndex - 1
    return array
def quickSort(array):
    """
    Quicksort entry point: delegates to the helper over the full index
    range.  An empty list is handled (the helper's while guard fails).

    Question 6
    :param array: Unsorted array (sorted in place)
    :return: The sorted array
    """
    return quickSortHelp(array, 0, len(array) - 1)
def main():
    """Demo driver: prints the results of the search/sort routines."""
    array = [2, 3, 5, 8, 10, 13, 17, 18]
    array1 = [5, 4, 8, 2, 9, 10, 15, 70, 81]
    print(Search(array, 14))
    print(sortedHasSum(array, 20))
    # hasSum merge-sorts array1 in place, so quickSort below receives an
    # already-sorted list.
    print(hasSum(array1, 90))
    print(quickSort(array1))
if __name__ == '__main__':
    main()
|
994,382 | 022d424ca83b61e0d10d947958c2f4ac1e06e17d | # Generated by Django 2.0.4 on 2018-10-29 23:07
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated schema migration: makes both foreign keys on Salad
    # nullable with SET_NULL, so deleting an Event or Order keeps its salads.
    dependencies = [
        ('salads', '0003_auto_20181029_2136'),
    ]
    operations = [
        migrations.AlterField(
            model_name='salad',
            name='event',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='salads', to='events.Event'),
        ),
        migrations.AlterField(
            model_name='salad',
            name='order',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='salads', to='orders.Order'),
        ),
    ]
|
class Utils():
    """Holds the server's shared connection state."""
    def __init__(self):
        # List of currently active (connected) clients.
        self.clients = []
|
994,384 | b70b991cada65fd44e370fd36820814cdd6cd1e2 | '''
Logic for server.
'''
from datetime import datetime
from sqlalchemy import and_
from db import get_data_of_model, create_session
from models import Subdivision, Storage, Act, ActTable, MainThing, \
Storekeeper, DATABASES
from serializer import dumps, loads
from synch_db import update_subdivisions_data, update_storages_data, \
update_remains_data, update_main_things_data, update_downloads
from remains import get_calculated_remains
from uploads import upload, check_uploaded_acts
from acts import genereate_acts
from settings import UPLOAD_ATTEMPTS_COUNT
def get_storekeepers():
    # Flatten the Storekeeper rows to a plain list of name strings.
    return [str(storekeeper.name) for storekeeper in get_data_of_model(Storekeeper)]
def append_storekeeper(storekeeper):
    # Deserialize *storekeeper* into a new row using a fresh session.
    session = create_session(DATABASES['main'].metadata.bind)
    return loads(storekeeper, Storekeeper, session)
def delete_storekeeper(storekeeper_name):
    # Delete by name, commit, then return the remaining storekeeper names.
    session = create_session(DATABASES['main'].metadata.bind)
    session.query(Storekeeper).filter(Storekeeper.name == storekeeper_name).delete()
    session.commit()
    return [str(storekeeper.name) for storekeeper in get_data_of_model(Storekeeper)]
def get_subdivisions():
    '''
    Get serialized data of all subdivisions.
    '''
    return dumps(get_data_of_model(Subdivision))
def get_storages_for_subdivision():
    '''
    Get serialized data of all storages.

    (Doc fix: the former ":param subdivision_id" did not exist — the
    function takes no arguments and returns every Storage row.)
    '''
    return dumps(get_data_of_model(Storage))
def get_remains_for_storage(storage_id):
    '''
    Get data of remains for storage.

    :param storage_id: ID of storage.
    '''
    return get_calculated_remains(storage_id)
def get_acts_for_storage(count, act_type, upload_start, upload_end, start_date, end_date):
    '''
    Get the most recent acts (newest id first), filtered by optional
    inclusive date ranges.

    :param count: maximum number of acts to return
    :param act_type: NOTE(review): currently unused — confirm intent.
    :param upload_start: lower bound on Act.upload_date (falsy = no bound)
    :param upload_end: upper bound on Act.upload_date
    :param start_date: lower bound on Act.act_date
    :param end_date: upper bound on Act.act_date
    '''
    filters = []
    if upload_start:
        filters.append(Act.upload_date >= upload_start)
    if upload_end:
        filters.append(Act.upload_date <= upload_end)
    if start_date:
        filters.append(Act.act_date >= start_date)
    if end_date:
        filters.append(Act.act_date <= end_date)
    return dumps(get_data_of_model(Act, limit=count, order_by=Act.id.desc(), filters=and_(*filters) if filters else None))
def get_table_of_acts(act_id):
    '''
    Get data of table rows belonging to an act.

    :param act_id: ID of act.
    '''
    return dumps(get_data_of_model(ActTable, filters=ActTable.act == act_id))
def get_main_things():
    '''
    Get serialized data of main things.
    '''
    return dumps(get_data_of_model(MainThing))
def save_act(act):
    '''
    Save information of act from python collections.

    :param act: Data of act; an optional 'date' key in '%d.%m.%y' format is
                parsed into an 'act_date' date before deserialization.
    '''
    session = create_session(DATABASES['main'].metadata.bind)
    if act.get('date'):
        date = datetime.strptime(act.get('date'), '%d.%m.%y').date()
        act['act_date'] = date
    return loads(act, Act, session)
def save_act_table(act_table, act_id):
    '''
    Save table of act from python collections, replacing existing rows.

    :param act_table: Data of act table.
    :param act_id: Id of the act whose rows are replaced; a falsy id skips
                   the deletion step.
    '''
    # Bug fix: the session was only created inside the ``if`` branch, so a
    # falsy act_id made the ``loads`` call below raise NameError.
    session = create_session(DATABASES['main'].metadata.bind)
    if act_id:
        session.query(ActTable).filter(ActTable.act == act_id).delete()
    return loads(act_table, ActTable, session)
def update_subdivisions():
    '''
    View function for updating subdivisions (delegates to synch_db).
    '''
    update_subdivisions_data()
def update_storages():
    '''
    View function for updating storages.

    (Doc fix: takes no arguments; the former ":param subdivision_id" was
    stale.)
    '''
    update_storages_data()
def update_remains(storage_id):
    '''
    View function for updating remains.

    :param storage_id: Id of storage whose remains are refreshed.
    '''
    update_remains_data(storage_id)
def update_main_things():
    '''
    View function for updating main things.
    '''
    update_main_things_data()
def activate_act(act_id):
    '''
    Toggle the is_active flag of an act and commit.

    :param act_id: Id of act; falsy ids are ignored.
    '''
    if act_id:
        session = create_session(DATABASES['main'].metadata.bind)
        act = session.query(Act).filter(Act.id == act_id).first()
        # NOTE(review): .first() may return None for an unknown id, which
        # would raise AttributeError here — confirm callers pass valid ids.
        act.is_active = not act.is_active
        session.commit()
def upload_acts(acts_ids, storage_id, try_count = 0):
    '''
    Upload acts, retrying on failure up to UPLOAD_ATTEMPTS_COUNT times.

    :param acts_ids: List of acts ids.
    :param storage_id: Id of storage
    :param try_count: Current retry number (internal).
    :return: The check_uploaded_acts result on success; False when the id
             list is empty or the retry budget is exhausted.
    '''
    if acts_ids and try_count < UPLOAD_ATTEMPTS_COUNT:
        try:
            upload(acts_ids, storage_id)
            return check_uploaded_acts(acts_ids)
        # Bug fix: BaseException also swallowed KeyboardInterrupt and
        # SystemExit, making the process uninterruptible during retries;
        # retry only on ordinary runtime errors.
        except Exception:
            return upload_acts(acts_ids, storage_id, try_count=try_count + 1)
    else:
        return False
def genereate_acts_views(subdivision_id):
    # Thin wrapper; note the "genereate" spelling is part of the public name.
    genereate_acts(subdivision_id)
def update_downloads_view():
    # Thin wrapper over synch_db.update_downloads.
    update_downloads()
|
994,385 | 6906916bc8edf9053aef84a576a7935ff1168a2e | import torch
import math
from model import depthnet, losses, data
# hyperparameter
batch_size = 32
learning_rate = 0.001
total_epoch = 4
report_rate = 20        # print a loss line every N training steps
# Device configuration
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
#Datasets and loader
dataset = data.DepthDataset("/dl/data/nyu-depth/")
# 80/20 split; floor/ceil keeps the two lengths summing to len(dataset).
lengths = [int(math.floor(len(dataset) * 0.8)), int(math.ceil(len(dataset) * 0.2))]
train_dataset, test_dataset = torch.utils.data.random_split(dataset, lengths)
# NOTE(review): shuffle=False on the TRAIN loader and True on the TEST
# loader is unusual — confirm this is intentional.
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                           batch_size=batch_size,
                                           shuffle=False)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                          batch_size=batch_size,
                                          shuffle=True)
# load model
model = depthnet.DepthNet().to(device)
# Loss and optimizer
criterion = losses.BerHuLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
# learning rate decay
def update_lr(opt, lr):
    """Overwrite the learning rate of every parameter group on *opt*."""
    for group in opt.param_groups:
        group['lr'] = lr
# validation
def validate(model, test_loader):
    """Print the average test loss; restores train mode before returning."""
    model.eval()
    with torch.no_grad():
        loss = 0.0
        for t_image, t_depth in test_loader:
            t_image = t_image.to(device)
            t_depth = t_depth.to(device)
            t_outputs = model(t_image)
            # NOTE(review): argument order (target, output) differs from the
            # training loop's criterion(outputs, depth) — confirm BerHuLoss
            # is symmetric, or unify the order.
            curr_loss = criterion(t_depth, t_outputs)
            loss += curr_loss.item()
        print("Validation Loss: {:.4f}"
              .format(loss/(len(test_loader) * batch_size)))
    model.train()
# train
total_step = len(train_dataset)
curr_lr = learning_rate
for epoch in range(total_epoch):
    running_loss = 0.0
    epoch_loss = 0.0
    for i, (image, depth) in enumerate(train_loader):
        image = image.to(device)
        depth = depth.to(device)
        # forward pass
        outputs = model(image)
        loss = criterion(outputs, depth)
        # Backward and optimize
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # Calculate loss
        running_loss += loss.item()
        # NOTE(review): adding the cumulative running_loss each step counts
        # every batch loss multiple times; `epoch_loss += loss.item()` is
        # likely what was intended.
        epoch_loss += running_loss
        if (i + 1) % report_rate == 0:
            print("Epoch: [{}/{}] Step [{}/{}] Loss: {:.4f}"
                  .format((epoch+1), total_epoch, (i+1), total_step, (running_loss/batch_size)))
            running_loss = 0.0
    #Decay learning rate
    # NOTE(review): with total_epoch = 4 this decay never fires.
    if (epoch + 1) % 5 == 0:
        curr_lr /= 3
        update_lr(optimizer, curr_lr)
    # Report epoch loss
    print("Epoch: [{}/{}] Epoch Loss: {:.4f}\n"
          .format((epoch+1), total_epoch, (epoch_loss / (len(train_loader) * batch_size))))
    validate(model, test_loader)
# Save the model checkpoint
torch.save(model.state_dict(), 'depthnet.ckpt')
994,386 | d48ed7ad952fc1155eb75d3fd0ca2e7a5e6fadf8 | import os
from xml.dom import minidom
import json
from pdf2image import convert_from_path
from tqdm import tqdm
if __name__ == '__main__':
    # Convert per-image table detections into ICDAR-2013 region XML files,
    # one per PDF, mapping image-pixel boxes back to 72-dpi PDF coordinates.
    json_dir = r'D:\Company\Projects\ICDAR_2013_table_evaluate\prediction_results\icdar2013_table_epoch100'
    pdf_dir = r'D:\Company\Projects\ICDAR_2013_table_evaluate\icdar2013-competition-dataset-with-gt\pdf'
    result = json.load(open(os.path.join(json_dir, 'evaluate_result.json'), 'r'))
    img_size = json.load(open(r'D:\Company\Projects\ICDAR_2013_table_evaluate\img_size.json', 'r'))
    for pdf_file in tqdm(os.listdir(pdf_dir)):
        if not pdf_file.endswith('.pdf'):
            continue
        basename = pdf_file.replace('.pdf', '')
        # Per-page scale factor between the default-dpi render and the
        # 72-dpi render, cached beside the PDF to avoid re-rasterizing.
        if not os.path.exists(os.path.join(pdf_dir, basename + '_rate_cache.json')):
            big_ims = convert_from_path(os.path.join(pdf_dir, pdf_file))
            small_ims = convert_from_path(os.path.join(pdf_dir, pdf_file), dpi=72)
            rates = {}
            for page, (big_im, small_im) in enumerate(zip(big_ims, small_ims)):
                rates[str(page)] = (big_im.size[0] / small_im.size[0] + big_im.size[1] / small_im.size[1]) / 2
            json.dump(rates, open(os.path.join(pdf_dir, basename + '_rate_cache.json'), 'w'))
        else:
            rates = json.load(open(os.path.join(pdf_dir, basename + '_rate_cache.json'), 'r'))
        img_files = []
        for img_file in result.keys():
            if img_file.startswith(basename):
                img_files.append(img_file)
        root = minidom.Document()
        xml = root.createElement('document')
        xml.setAttribute('filename', pdf_file)
        root.appendChild(xml)
        table_id = 0
        for filename in sorted(img_files):
            im_width, im_height = img_size[filename]
            # Page number is embedded between the first '_' and last '_'.
            page = int(filename[filename.find('_') + 1: filename.rfind('_')])
            rate = rates[str(page)]
            page += 1
            for bbox in result[filename]:
                xmin, y0, xmax, y1 = bbox
                # Flip the y axis: detections are top-origin, XML is bottom-origin.
                ymin = im_height - y1
                ymax = im_height - y0
                xmin, ymin, xmax, ymax = [int(v / rate) for v in [xmin, ymin, xmax, ymax]]
                table_id += 1
                table = root.createElement('table')
                table.setAttribute('id', str(table_id))
                region = root.createElement('region')
                region.setAttribute('id', '1')
                region.setAttribute('page', str(page))
                boundingbox = root.createElement('bounding-box')
                boundingbox.setAttribute('x1', str(xmin))
                boundingbox.setAttribute('y1', str(ymin))
                boundingbox.setAttribute('x2', str(xmax))
                boundingbox.setAttribute('y2', str(ymax))
                region.appendChild(boundingbox)
                table.appendChild(region)
                xml.appendChild(table)
        xml_str = root.toprettyxml(indent='\t')
        # print(xml_str)
        with open(os.path.join(json_dir, basename + '-reg-result.xml'), 'w') as fp:
            fp.write(xml_str)
|
994,387 | 4b0380a46a84d814dc6f7398036d2ba9f036a2ec | from fedot.api.api_utils.composer import ApiComposerHelper
from ..api.test_main_api import get_dataset
from fedot.core.repository.tasks import Task, TaskTypesEnum
from fedot.core.log import default_log
from testfixtures import LogCapture
composer_helper = ApiComposerHelper()
def test_compose_fedot_model_with_tuning():
    # Smoke test: composing with tuning enabled and no tuner metric must log
    # the fallback to roc_auc_score.
    train, test, _ = get_dataset(task_type='classification')
    generations = 1
    with LogCapture() as logs:
        _, _, history = composer_helper.compose_fedot_model(api_params=dict(train_data=train,
                                                                            task=Task(
                                                                                task_type=TaskTypesEnum.classification),
                                                                            logger=default_log('test_log'),
                                                                            timeout=0.1,
                                                                            initial_pipeline=None),
                                                            composer_params=dict(max_depth=1,
                                                                                 max_arity=1,
                                                                                 pop_size=2,
                                                                                 num_of_generations=generations,
                                                                                 available_operations=None,
                                                                                 composer_metric=None,
                                                                                 validation_blocks=None,
                                                                                 cv_folds=None,
                                                                                 genetic_scheme=None),
                                                            tuning_params=dict(with_tuning=True,
                                                                               tuner_metric=None))
    expected = ('test_log', 'INFO', 'Tuner metric is None, roc_auc_score was set as default')
    logs.check_present(expected, order_matters=False)
|
994,388 | 7d3b7905926ad29f7c19f98231f2abd523f7121a | # -*- encoding: utf-8 -*-
"""
Modulo que controla las operaciones sobre los B{User Stories} de los clientes.
@author: Samuel Ruiz,Melissa Bogado,Rafael Ricardo
"""
from IS2_R09.apps.Notificaciones.views import notificar_asignacion_us,\
notificar_eli_proyecto, notificar_eli_us
from django.http.response import HttpResponse
#from keyring.backend import json
from django.views.decorators.csrf import csrf_exempt
from argparse import Action
from IS2_R09.apps.Sprint.models import sprint
import json
__docformat__ = "Epytext"
from django.shortcuts import render_to_response
from IS2_R09.apps.US.models import us
from IS2_R09.apps.US.forms import us_form,buscar_us_form, modificar_form,consultar_form
from django.template.context import RequestContext
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from IS2_R09.settings import URL_LOGIN
from IS2_R09.apps.Proyecto.models import Equipo, proyecto
from django.contrib.auth.models import User,Group
from IS2_R09.apps.Flujo.forms import kanban_form
from IS2_R09.apps.Flujo.models import flujo,kanban
@login_required(login_url= URL_LOGIN)
def adm_us_view(request):
    """
    adm_us_view(request)

    Renders the B{User Story} administration screen: staff see every story,
    other users see only the stories assigned to them, ordered by priority.

    @param request: Request of the logged-in user.
    @requires: Being logged into the system.
    """
    ust= us()
    if request.user.is_staff:
        """Si el usuario es administrador se le listan todos los User Story """
        ust = us.objects.all().order_by('prioridad')
        ctx={'uss':ust,'form':buscar_us_form()}
        return render_to_response('US/adm_us.html',ctx,context_instance=RequestContext(request))
    """En caso contrario se le lista solo los User Stories al que esta asignado"""
    ust = us.objects.filter(usuario_asignado=request.user).order_by('prioridad')
    ctx={'uss':ust,'form':buscar_us_form()}
    return render_to_response('US/adm_us.html',ctx,context_instance=RequestContext(request))
#----------------------------------------------------------------------------------------------------------------------------------------------------------
@login_required(login_url= URL_LOGIN)
def crear_us_view(request):
    """
    crear_us_view(request)

    Handles creation of a B{User Story}: on valid POST saves it and shows
    the admin list; otherwise renders the creation form.

    @param request: Request of the user creating the story.
    @requires: The logged-in user must be an admin or the project's Scrum
               Master (Python 2 code — note the `print` statement below).
    """
    form= us_form()
    if request.method == 'POST':
        form = us_form(request.POST)
        if form.is_valid():
            form.save()
            if request.user.is_staff:
                """Si el usuario es administrador se le listan todos los User Stories"""
                ust = us.objects.all().order_by('prioridad')
                ctx={'uss':ust,'form':buscar_us_form()}
                return render_to_response('US/adm_us.html',ctx,context_instance=RequestContext(request))
            else:
                """En caso contrario se le lista solo los user stories que tiene asignado"""
                ust = us.objects.filter(usuario_asignado=request.user).order_by('prioridad')
                ctx={'uss':ust,'form':buscar_us_form()}
                return render_to_response('US/adm_us.html',ctx,context_instance=RequestContext(request))
    # NOTE(review): `equipo` is computed but unused; the role check only
    # prints the user — confirm whether enforcement was intended here.
    equipo = Equipo.objects.filter(miembro=request.user.id)
    role = Group.objects.get(name='Scrum')
    if Equipo.objects.filter(miembro=request.user.id, rol=role).exists():
        print request.user
    #form.fields['usuario_asignado'].queryset= proyecto.objects.filter(id__in=form['proyecto_asociado'].value()).equipo
    ctx = {'form':form}
    return render_to_response('US/crear_us.html',ctx,context_instance=RequestContext(request))
#------------------------------------
@login_required(login_url= URL_LOGIN)
def modificar_us_view(request,id_us):
    """
    modificar_us_view(request, id_us)

    Handles modification of a B{User Story}: updates its kanban entry (or
    creates one), adjusts the associated sprint's estimated time, notifies
    the assigned users and re-renders the admin list; GET renders the
    pre-filled edit form.

    @param request: Request of the user modifying the story.
    @param id_us: Primary key of the story to modify.
    @requires: Admin, Scrum Master of the project, or an assignee of the
               story.
    """
    k= kanban_form()
    if request.method == 'POST':
        user_story = us.objects.get(id=id_us)
        form = us_form(request.POST,instance=user_story)
        ua= user_story.usuario_asignado.all()
        if form.is_valid():
            # NOTE(review): this try/except uses the kanban lookup failure to
            # branch between "update existing kanban" and "create one", but
            # the bare except also hides any other error — narrow it to
            # kanban.DoesNotExist.
            try:
                kan = kanban.objects.get(us=user_story)
                k = kanban_form(request.POST,instance=kan)
                if k.is_valid():
                    print 'aaaaaaa'
                    kan.us=user_story
                    kan.prioridad = user_story.prioridad
                    f=k.cleaned_data['fluj']
                    fj = flujo.objects.get(id=f.id)
                    sp = form.cleaned_data['sprint_asociado']
                    spu = sprint.objects.get(id=sp.id)
                    # Replace the old estimate with the new one in the sprint total.
                    spu.tiempo_estimado -= user_story.tiempo_estimado
                    spu.tiempo_estimado += int(form.cleaned_data['tiempo_estimado'])
                    spu.save()
                    form.save()
                    # Reset the story to the flow's first activity, state "to do".
                    act = fj.actividades.all()[:1].get()
                    kan.actividad = act
                    kan.fluj=fj
                    kan.estado= 'td'
                    notificar_asignacion_us(ua,user_story.nombre)
                    kan.save()
                    #kan.fluj= k.cleaned_data['fluj']
                    #kan.save()
                    if request.user.is_staff:
                        '''Si el usuario es administrador se le listan todos los us'''
                        ust = us.objects.all().order_by('prioridad')
                        ctx={'uss':ust,'form':buscar_us_form()}
                        return render_to_response('US/adm_us.html',ctx,context_instance=RequestContext(request))
                    else:
                        ust = us.objects.filter(usuario_asignado=request.user).order_by('prioridad')
                        ctx={'uss':ust,'form':buscar_us_form()}
                        return render_to_response('US/adm_us.html',ctx,context_instance=RequestContext(request))
            except:
                k = kanban_form(request.POST)
                if k.is_valid():
                    f = k.cleaned_data['fluj']
                    sp = form.cleaned_data['sprint_asociado']
                    spu = sprint.objects.get(id=sp.id)
                    spu.tiempo_estimado -= user_story.tiempo_estimado
                    spu.tiempo_estimado += int(form.cleaned_data['tiempo_estimado'])
                    spu.save()
                    fj = flujo.objects.get(id=f.id)
                    act = fj.actividades.all()[:1].get()
                    kan = kanban.objects.create(us=user_story,fluj=k.cleaned_data['fluj'],actividad=act,prioridad = user_story.prioridad)
                    notificar_asignacion_us(ua,user_story.nombre)
                    form.save()
                    #k.save()
                    #kan.fluj= k.cleaned_data['fluj']
                    #kan.save()
                    if request.user.is_staff:
                        '''Si el usuario es administrador se le listan todos los us'''
                        ust = us.objects.all().order_by('prioridad')
                        ctx={'uss':ust,'form':buscar_us_form()}
                        return render_to_response('US/adm_us.html',ctx,context_instance=RequestContext(request))
                    else:
                        ust = us.objects.filter(usuario_asignado=request.user).order_by('prioridad')
                        ctx={'uss':ust,'form':buscar_us_form()}
                        return render_to_response('US/adm_us.html',ctx,context_instance=RequestContext(request))
    if request.method=='GET':
        # Same try/except pattern: with an existing kanban, restrict the
        # flow choices; otherwise offer the project's flows.
        try:
            user_story = us.objects.get(id=id_us)
            kan = kanban.objects.get(us=user_story)
            p= proyecto.objects.get(id=user_story.proyecto_asociado.id)
            form =us_form(instance= user_story)
            form.fields['usuario_asignado'].queryset= p.miembro.all()
            #k.fields['us'].queryset = us.objects.get(id=id_us)
            #form.fields['flujo_asignado'].queryset= p.flujos.all()
            k =kanban_form(instance=kan)
            k.fields['fluj'].queryset = kan.fluj.all()
            ctx = {'form':form,'k':k}
            return render_to_response('US/modificar_us.html',ctx,context_instance=RequestContext(request))
        except:
            user_story = us.objects.get(id=id_us)
            p= proyecto.objects.get(id=user_story.proyecto_asociado.id)
            form =us_form(instance= user_story)
            form.fields['usuario_asignado'].queryset= p.miembro.all()
            form.fields['sprint_asociado'].queryset= sprint.objects.filter(proyect=p)
            #k.fields['us'].queryset = us.objects.get(id=id_us)
            k.fields['fluj'].queryset = p.flujos.all()
            #form.fields['flujo_asignado'].queryset= p.flujos.all()
            ctx = {'form':form,'k':k}
            return render_to_response('US/modificar_us.html',ctx,context_instance=RequestContext(request))
#---------------------------------------------------------------------------------------------------------------
@login_required(login_url= URL_LOGIN)
def eliminar_us_view(request,id_us):
    """
    eliminar_us_view(request, id_us)

    Handles deletion of a B{User Story}: on POST notifies the assignees,
    deletes the story and re-renders the admin list; otherwise renders the
    confirmation page.

    @param request: Request of the user deleting the story.
    @param id_us: Primary key of the story to delete.
    @requires: Admin or Scrum Master of the project owning the story.
    """
    user_story = us.objects.get(id=id_us)
    if request.method == 'POST':
        ua= user_story.usuario_asignado.all()
        nombre=user_story.nombre
        notificar_eli_us(ua,nombre)
        user_story.delete()
        if request.user.is_staff:
            '''Si el usuario es administrador se le listan todos los us'''
            ust = us.objects.all().order_by('prioridad')
            ctx={'uss':ust,'form':buscar_us_form()}
            return render_to_response('US/adm_us.html',ctx,context_instance=RequestContext(request))
        else:
            ust = us.objects.filter(usuario_asignado=request.user).order_by('prioridad')
            ctx={'uss':ust,'form':buscar_us_form()}
            return render_to_response('US/adm_us.html',ctx,context_instance=RequestContext(request))
    ctx = {'user_story': user_story}
    print user_story
    return render_to_response('US/eliminar_us.html', ctx, context_instance=RequestContext(request))
#-------------------------------------------------------------------------------------------------------
@login_required(login_url= URL_LOGIN)
def consultar_us_view(request,id_us):
    """
    consultar_us_view(request, id_us)

    Renders a read-only view of a B{User Story}; if the story has a kanban
    entry its flow is shown as well.

    @param request: Request of the user consulting the story.
    @param id_us: Primary key of the story to show.
    @requires: The user must be logged in.
    """
    if request.method=='GET':
        # NOTE(review): the bare except is meant to catch a missing kanban
        # entry, but hides all other errors — narrow to kanban.DoesNotExist.
        try:
            user_story = us.objects.get(id=id_us)
            kan = kanban.objects.get(us=user_story)
            p= proyecto.objects.get(id=user_story.proyecto_asociado.id)
            form =consultar_form(instance= user_story)
            form.fields['usuario_asignado'].queryset= user_story.usuario_asignado.all()
            #k.fields['us'].queryset = us.objects.get(id=id_us)
            form.fields['proyecto_asociado'].queryset= proyecto.objects.filter(id=user_story.proyecto_asociado.id)
            #form.fields['flujo_asignado'].queryset= p.flujos.all()
            k =kanban_form(instance=kan)
            fluj= kan.fluj
            #k.fields['fluj'].queryset = kan.fluj.all()
            ctx = {'form':form,'k':fluj,'p':p}
            return render_to_response('US/consultar_us.html',ctx,context_instance=RequestContext(request))
        except:
            user_story = us.objects.get(id=id_us)
            p= proyecto.objects.get(id=user_story.proyecto_asociado.id)
            form =consultar_form(instance= user_story)
            form.fields['usuario_asignado'].queryset= user_story.usuario_asignado.all()
            #form.fields['flujo_asignado'].queryset= p.flujos.all()
            form.fields['proyecto_asociado'].queryset= proyecto.objects.filter(id=user_story.proyecto_asociado.id)
            ctx = {'form':form,'p':p}
            return render_to_response('US/consultar_us.html',ctx,context_instance=RequestContext(request))
@login_required(login_url= URL_LOGIN)
def buscar_us_view(request):
    """Search B{User Stories} by name or by client username and render the
    admin list with the matches; unknown usernames fall back to a full (or
    per-user) listing with an error message."""
    form = buscar_us_form()
    if(request.method=='POST'):
        form = buscar_us_form(request.POST)
        form2 = buscar_us_form()
        if form.is_valid():
            busqueda= form.cleaned_data['opciones']
            parametro = form.cleaned_data['busqueda']
            if busqueda== 'nombre':
                p = us.objects.filter(nombre=parametro)
                ctx = {'mensaje': 'uss con nombre %s' %(parametro),'uss':p,'form':form2}
                return render_to_response('US/adm_us.html', ctx, context_instance=RequestContext(request))
            elif busqueda== 'cliente':
                # NOTE(review): bare except also hides errors other than
                # User.DoesNotExist — narrow the exception type.
                try:
                    u = User.objects.get(username=parametro)
                    p = us.objects.filter(cliente=u)
                    ctx = {'mensaje': 'uss con cliente %s' %(u),'ussk':p,'form':form2}
                    return render_to_response('US/adm_us.html', ctx, context_instance=RequestContext(request))
                except:
                    if request.user.is_staff:
                        ust = us.objects.all()
                        ctx={'ussk':ust,'mensaje':'Cliente con username %s no existe'%(parametro),'form':buscar_us_form()}
                        return render_to_response('US/adm_us.html',ctx,context_instance=RequestContext(request))
                    else:
                        ust = us.objects.filter(miembro=request.user)
                        ctx={'ussk':ust,'mensaje':'Cliente con username %s no existe'%(parametro),'form':buscar_us_form()}
                        return render_to_response('US/adm_us.html',ctx,context_instance=RequestContext(request))
    ctx = {'form': form}
    return render_to_response('US/adm_us.html', ctx, context_instance=RequestContext(request))
def info_us(request):
    """
    AJAX endpoint returning a JSON summary of a single User Story.

    Expects the User Story id in the ``k`` GET parameter and answers with
    its name, estimated/worked time, description, display priority and the
    names of its sprint and flow ('No asignado' when not assigned yet).

    @param request: AJAX GET request carrying the ``k`` parameter.
    @return: HttpResponse with the JSON-encoded summary.
    """
    if request.is_ajax():
        k = request.GET['k']
        ust = us.objects.get(id=k)
        if ust.sprint_asociado is None:
            Sprint = 'No asignado'
        else:
            Sprint = ust.sprint_asociado.nombre
        try:
            kanban_sprint = kanban.objects.get(us=ust)
            Flujo = kanban_sprint.fluj.nombre
        except Exception:
            # No kanban entry (or no flow on it) means no flow is assigned.
            # Broad on purpose to preserve the original fallback, but no
            # longer swallows SystemExit/KeyboardInterrupt.
            Flujo = 'No asignado'
        l = {'nombre':ust.nombre,'test':ust.tiempo_estimado,'tt':ust.tiempo_trabajado,'des':ust.descripcion,'priori':ust.get_prioridad_display(),'sprint':Sprint,'flujo':Flujo}
        return HttpResponse(json.dumps(l))
@csrf_exempt
def asignar_ust(request):
    """
    AJAX endpoint that assigns a User Story to a sprint and a flow.

    POST parameters:
      - ``k``: id of the User Story to (re)assign.
      - ``f``: id of the flow to assign.
      - ``s``: id of the target sprint.

    Moves the story's estimated/worked times between the old and the new
    sprint when the sprint changes, and places the story on the first
    activity of the selected flow when the flow changes.

    @return: HttpResponse with a JSON status message.
    """
    if request.method == 'POST':
        a = request.POST.get('k')
        b = request.POST.get('f')
        c = request.POST.get('s')
        sp = sprint.objects.get(id=c)
        # Default message when neither the sprint nor the flow changes.
        mensaje='User Story ya asignado al Sprint seleccionado'
        f = flujo.objects.get(id=b)  # flow to assign
        act = f.actividades.first()  # first activity of that flow
        g =us.objects.get(id=a)  # User Story to reassign
        aux= kanban.objects.filter(us=g).count()
        # Sprint change: either a first assignment or a move between sprints.
        if g.sprint_asociado is None:
            # First assignment: the story starts with zero worked time.
            g.tiempo_trabajado = 0
            sp.tiempo_estimado += g.tiempo_estimado
            sp.tiempo_total += g.tiempo_trabajado
            g.sprint_asociado=sp
            g.save()
            sp.save()
            mensaje='Sprint Cambiado'
        elif g.sprint_asociado != sp:
            # Move: subtract the story's times from the old sprint first.
            sprint_viejo = sprint.objects.get(id=g.sprint_asociado.id)
            sprint_viejo.tiempo_estimado-= g.tiempo_estimado
            sprint_viejo.tiempo_total -= g.tiempo_trabajado
            sprint_viejo.save()
            sp.tiempo_estimado += g.tiempo_estimado
            sp.tiempo_total += g.tiempo_trabajado
            g.sprint_asociado=sp
            g.save()
            sp.save()
            mensaje='Sprint Cambiado'
        if aux == 0 :
            # User Story without any flow yet: create its kanban entry.
            k= kanban.objects.get_or_create(us=g,fluj=f,actividad=act,prioridad=g.prioridad)
            mensaje += ' - Flujo asignado correctamente'
        else:
            # Story already on a flow: switch only if a different one was chosen.
            k = kanban.objects.get(us=g)
            if f != k.fluj:
                k.fluj=f
                k.actividad=act
                k.prioridad=g.prioridad
                k.save()
                # Changing flow restarts the story's worked time.
                g.tiempo_trabajado= 0
                g.save()
                mensaje += ' - Cambio de Flujo realizado correctamente'
            else:
                mensaje += ' - User Story ya asignado al flujo seleccionado'
        l = {'mensaje':mensaje}
        return HttpResponse(json.dumps(l))
994,389 | d2c3c3ecd6c974cea8f2a8fea888cdbd258a601d | import readOpenFoam as rof
class Mesh(object):
    """In-memory view of an OpenFOAM mesh read from a case directory."""
    def __init__(self, directory):
        """Load the mesh found under *directory* and populate all fields."""
        self.directory = directory
        # Mesh properties, filled in by _update_mesh().
        self.nNodes = None
        self.nodes = None
        self.nFaces = None
        self.nIFaces = None
        self.faces = None
        self.nCells = None
        self.cells = None
        self._update_mesh()
    def _update_mesh(self):
        """(Re)read the mesh files and refresh every mesh attribute."""
        data = rof.readOpenFoamMesh(self.directory)
        (self.nNodes, self.nodes,
         self.nFaces, self.nIFaces, self.faces,
         self.nCells, self.cells) = data
# NOTE(review): hard-coded case directory — adjust to the local checkout.
mesh = Mesh('/home/numguy/Projects/cfd-py/cavity')
|
994,390 | 9f360ce9f74f05829f309406440b17948a9076c7 | import sys
import time
import threading
import pygame
import pygame.camera
import RPi.GPIO as GPIO
from pyimagesearch.facedetector import FaceDetector
from pyimagesearch import imutils
from picamera.array import PiRGBArray
from picamera import PiCamera
import argparse
import cv2
import numpy
# Define some colors
BLACK = ( 0, 0, 0)
WHITE = ( 255, 255, 255)
GPIO.setmode(GPIO.BOARD)
#LASER
laser = 36
GPIO.setup(laser, GPIO.OUT)
GPIO.output(laser, 1)
class Camera(object):
    """Continuously captures Pi camera frames, runs Haar-cascade face
    detection, switches the laser off while a face is visible, and blits
    the annotated frame onto the global pygame screen."""
    def __init__(self):
        self.size = (640,480)
        # initialize the camera and grab a reference to the raw camera
        # capture
        self.camera = PiCamera()
        self.camera.resolution = self.size
        self.camera.framerate = 32
        self.rawCapture = PiRGBArray(self.camera, size=self.size)
        # construct the face detector and allow the camera to warm
        # up
        self.fd = FaceDetector("cascades/haarcascade_frontalface_default.xml")
        time.sleep(0.1)
    def show_camera(self):
        """Blocking capture loop; meant to run on a background thread."""
        # capture frames from the camera
        for f in self.camera.capture_continuous(self.rawCapture, format="bgr", use_video_port=True):
            # grab the raw NumPy array representing the image
            self.frame = f.array
            # resize the frame and convert it to grayscale
            self.frame = imutils.resize(self.frame, width = 300)
            self.gray = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)
            # detect faces in the image and then clone the frame
            # so that we can draw on it
            self.faceRects = self.fd.detect(self.gray, scaleFactor = 1.1, minNeighbors = 5,
                minSize = (30, 30))
            self.frameClone = self.frame.copy()
            # loop over the face bounding boxes and draw them
            for (fX, fY, fW, fH) in self.faceRects:
                cv2.rectangle(self.frameClone, (fX, fY), (fX + fW, fY + fH), (0, 255, 0), 2)
            # Turn the laser off while at least one face is detected.
            if len(self.faceRects) > 0:
                GPIO.output(laser, 0)
            else:
                GPIO.output(laser, 1)
            # show our detected faces, then clear the frame in
            # preparation for the next frame
            #cv2.imshow("Face", self.frameClone)
            # rot90 here plus the two rot90s inside rot180() give 270° in
            # total — NOTE(review): presumably to match pygame surfarray's
            # orientation; confirm on hardware.
            self.frame2 = self.rot180(numpy.rot90(self.frameClone))
            self.frame2 = pygame.surfarray.make_surface(self.frame2)
            screen.blit(self.frame2, (400,10))
            pygame.display.update()
            #pygame.display.flip()
            self.rawCapture.truncate(0)
    def rot180(self, frame):
        """Return *frame* rotated 180° (two rot90s).
        NOTE(review): also rebinds self.frame as a side effect."""
        self.frame = frame
        return numpy.rot90(numpy.rot90(self.frame))
class Stepper(object):
    """Drives a four-wire stepper motor over GPIO with a half-step
    excitation sequence; the axis name selects the pin set."""
    def __init__(self, eix_rotacio):
        # "yaw" and "pitch" map to two different GPIO pin groups.
        self.eix_rotacio = eix_rotacio
        self.controlPin = []
        if self.eix_rotacio == "yaw":
            self.controlPin = [11, 12, 13, 15]
        elif self.eix_rotacio == "pitch":
            self.controlPin = [31, 32, 33, 35]
        for pin in self.controlPin:
            GPIO.setup(pin, GPIO.OUT)
            GPIO.output(pin, 0)
        # Half-step excitation sequence: 8 sub-steps per electrical cycle.
        self.seq = [ [1,0,0,0],
            [1,1,0,0],
            [0,1,0,0],
            [0,1,1,0],
            [0,0,1,0],
            [0,0,1,1],
            [0,0,0,1],
            [1,0,0,1] ]
    def rotate(self, sentit_rotacio):
        """Step the motor; *sentit_rotacio* is +1/-1 for direction, 0 = hold."""
        self.sentit_rotacio = sentit_rotacio
        if sentit_rotacio != 0:
            for i in range(64):
                for halfstep in range(8):
                    for pin in range(4):
                        # Multiplying the pin index by the direction keeps
                        # order 0,1,2,3 for +1 and, via Python's negative
                        # indexing (0,-1,-2,-3 -> pins 0,3,2,1), walks the
                        # pins in reverse for -1, reversing rotation.
                        GPIO.output(self.controlPin[pin * self.sentit_rotacio], self.seq[halfstep][pin])
                    time.sleep(0.001)
# This is a simple class that will help us print to the screen
# It has nothing to do with the joysticks, just outputing the
# information.
class TextPrint:
    """Renders successive lines of text onto a pygame surface, tracking
    the current cursor position and indentation level."""
    def __init__(self):
        self.reset()
        self.font = pygame.font.Font(None, 20)
    def Print(self, screen, textString):
        """Draw one line at the cursor and advance to the next line."""
        rendered = self.font.render(textString, True, BLACK)
        screen.blit(rendered, [self.x, self.y])
        self.y += self.line_height
    def reset(self):
        """Move the cursor back to the top-left margin."""
        self.x, self.y, self.line_height = 10, 10, 15
    def indent(self):
        """Shift subsequent lines 10 px to the right."""
        self.x += 10
    def unindent(self):
        """Undo one level of indent()."""
        self.x -= 10
pygame.init()
# Set the width and height of the screen [width,height]
size = [710, 700]
screen = pygame.display.set_mode(size)
pygame.display.set_caption("Robopot")
#Loop until the user clicks the close button.
done = False
# Used to manage how fast the screen updates
clock = pygame.time.Clock()
# Initialize the joysticks
pygame.joystick.init()
# Get ready to print
textPrint = TextPrint()
cam = Camera()
#cam.show_camera()
# Face detection runs on a daemon thread so the joystick loop below
# stays responsive.
def cam_thread():
    cam.show_camera()
t_cam = threading.Thread(target=cam_thread)
t_cam.daemon = True # thread dies when main thread (only non-daemon thread) exits.
t_cam.start()
yaw, pitch = 0, 0
stepperYaw = Stepper("yaw")
stepperPitch = Stepper("pitch")
# -------- Main Program Loop -----------
while done==False:
    # EVENT PROCESSING STEP
    for event in pygame.event.get(): # User did something
        if event.type == pygame.QUIT: # If user clicked close
            done=True # Flag that we are done so we exit this loop
        # Possible joystick actions: JOYAXISMOTION JOYBALLMOTION JOYBUTTONDOWN JOYBUTTONUP JOYHATMOTION
        if event.type == pygame.JOYBUTTONDOWN:
            print("Joystick button pressed.")
        if event.type == pygame.JOYBUTTONUP:
            print("Joystick button released.")
    # DRAWING STEP
    # First, clear the screen to white. Don't put other drawing commands
    # above this, or they will be erased with this command.
    screen.fill(WHITE)
    textPrint.reset()
    # Get count of joysticks
    joystick_count = pygame.joystick.get_count()
    textPrint.Print(screen, "Number of joysticks: {}".format(joystick_count) )
    textPrint.indent()
    # For each joystick:
    for i in range(joystick_count):
        joystick = pygame.joystick.Joystick(i)
        joystick.init()
        textPrint.Print(screen, "Joystick {}".format(i) )
        textPrint.indent()
        # Get the name from the OS for the controller/joystick
        name = joystick.get_name()
        textPrint.Print(screen, "Joystick name: {}".format(name) )
        # Usually axis run in pairs, up/down for one, and left/right for
        # the other.
        axes = joystick.get_numaxes()
        textPrint.Print(screen, "Number of axes: {}".format(axes) )
        textPrint.indent()
        # NOTE(review): the inner loops reuse `i`, shadowing the joystick
        # index — harmless because `i` is not used after them, but confusing.
        for i in range( axes ):
            axis = joystick.get_axis( i )
            textPrint.Print(screen, "Axis {} value: {:>6.3f}".format(i, axis) )
        textPrint.unindent()
        buttons = joystick.get_numbuttons()
        textPrint.Print(screen, "Number of buttons: {}".format(buttons) )
        textPrint.indent()
        for i in range( buttons ):
            button = joystick.get_button( i )
            textPrint.Print(screen, "Button {:>2} value: {}".format(i,button) )
        textPrint.unindent()
        # Hat switch. All or nothing for direction, not like joysticks.
        # Value comes back in an array.
        hats = joystick.get_numhats()
        textPrint.Print(screen, "Number of hats: {}".format(hats) )
        textPrint.indent()
        for i in range( hats ):
            hat = joystick.get_hat( i )
            textPrint.Print(screen, "Hat {} value: {}".format(i, str(hat)) )
            # The hat yields (-1, 0, 1) per axis; feed it straight to the
            # yaw/pitch steppers (0 means "don't move" in Stepper.rotate).
            yaw = hat[0]
            pitch = hat[1]
            stepperYaw.rotate(yaw)
            stepperPitch.rotate(pitch)
        textPrint.unindent()
        textPrint.unindent()
    # ALL CODE TO DRAW SHOULD GO ABOVE THIS COMMENT
    # Go ahead and update the screen with what we've drawn.
    #pygame.display.flip()
    #pygame.display.update()
    # Limit to 20 frames per second
    clock.tick(20)
# Close the window and quit.
# If you forget this line, the program will 'hang'
# on exit if running from IDLE.
pygame.quit ()
GPIO.cleanup()
994,391 | 69ba2bb79d4a01154bcd00d63ca909a358d379f5 | s = str(input())
# Minimum number of flips to make the binary string s alternating.
# ans1 counts positions already matching "0101...", which equals the
# number of flips needed to reach "1010..." (and vice versa for ans2),
# so the answer is min(ans1, ans2).
n = len(s)
ans1 = 0
ans2 = 0
# Matches against the pattern starting with '0'.
for i in range(n):
    if i%2 == 0 and s[i] == '0':
        ans1 +=1
    elif i%2 == 1 and s[i] == '1':
        ans1 += 1
# Matches against the pattern starting with '1'.
for i in range(n):
    if i%2 == 0 and s[i] == '1':
        ans2 +=1
    elif i%2 == 1 and s[i] == '0':
        ans2 += 1
print(min(ans1, ans2))
|
994,392 | 7daf55c57ae604eb8e78e1d62cf6c0ce1c3a7d36 | #!/usr/bin/env python3
# Program use Luhn algorithm to check for credit card number and return boolean.
# https://github.com/sagarapatel/Python-Number-Projects/blob/master/credit_card_check.py
# IDE PyCharm
# Python 3.8 compatible
import sys
# Check credit card number Luhn algorithm
def luhn(n):
    """Return True when *n* passes the Luhn checksum, False otherwise.

    *n* may be an int or a string of digits. On non-digit input the
    int() conversion raises ValueError; the script then prints an error
    and exits (behavior kept for the CLI driver below).
    """
    try:
        # Reverse the digits; double every second one and sum the digit
        # pair produced by the doubling (divmod splits e.g. 14 -> 1 + 4).
        r = [int(ch) for ch in str(n)][::-1]
        return (sum(r[0::2]) + sum(sum(divmod(d * 2, 10)) for d in r[1::2])) % 10 == 0
    except ValueError:
        print("Only digits expected for check.")
        sys.exit(0)
# Simple CLI driver: read a number from stdin and report the Luhn result.
card_number = input("Please enter credit card number to check ")
print(luhn(card_number))
|
994,393 | a40685634cb05a860e351742b1d75d4697e4095b | #!/usr/bin/python3
import argparse
import javalang
def parse_args():
    """Build and parse the CLI arguments for the line parser.

    NOTE(review): --debug is declared as a string option but is only used
    as a truthy flag downstream — presumably action='store_true' was meant.
    """
    parser = argparse.ArgumentParser(description='Parse Layout Files')
    parser.add_argument('-d','--debug', type=str, help='Turn on debug mode.')
    parser.add_argument('-l','--line', type=str, help='line to parse.')
    return parser.parse_args()
class LineParser:
    """Wraps javalang to extract UI-widget assignments (e.g.
    ``x = findViewById(R.id.foo)``) from a single line of Java source."""
    def __init__(self, *args, **kwargs):
        # args[0] is the argparse Namespace produced by parse_args().
        self.line = args[0].line
        self.debug = args[0].debug
    def _getParser(self):
        """Return a fresh javalang parser over self.line.
        A new parser is built per call because parsing consumes tokens."""
        tokens = javalang.tokenizer.tokenize(self.line)
        parser = javalang.parser.Parser(tokens)
        return parser
    def getValidParsers(self):
        """Brute-force probe: try every known javalang parse_* entry point
        on the line and print the ones that succeed."""
        parsers = [
            'parse',
            'parse_annotation',
            'parse_annotation_element',
            'parse_annotation_method_or_constant_rest',
            'parse_annotation_type_body',
            'parse_annotation_type_declaration',
            'parse_annotation_type_element_declaration',
            'parse_annotation_type_element_declarations',
            'parse_annotations',
            'parse_arguments',
            'parse_array_creator_rest',
            'parse_array_dimension',
            'parse_array_initializer',
            'parse_basic_type',
            'parse_block',
            'parse_block_statement',
            'parse_catch_clause',
            'parse_catches',
            'parse_class_body',
            'parse_class_body_declaration',
            'parse_class_creator_rest',
            'parse_class_or_interface_declaration',
            'parse_compilation_unit',
            'parse_constant_declarator',
            'parse_constant_declarator_rest',
            'parse_constant_declarators_rest',
            'parse_constructor_declarator_rest',
            'parse_created_name',
            'parse_creator',
            'parse_element_value',
            'parse_element_value_array_initializer',
            'parse_element_value_pair',
            'parse_element_value_pairs',
            'parse_element_values',
            'parse_enum_body',
            'parse_enum_constant',
            'parse_enum_declaration',
            'parse_explicit_generic_invocation',
            'parse_explicit_generic_invocation_suffix',
            'parse_expression',
            'parse_expression_2',
            'parse_expression_2_rest',
            'parse_expression_3',
            'parse_expressionl',
            'parse_field_declarators_rest',
            'parse_for_control',
            'parse_for_init_or_update',
            'parse_for_var_control',
            'parse_for_var_control_rest',
            'parse_for_variable_declarator_rest'
        ]
        for parser in parsers:
            try:
                print(parser,'worked:',getattr(self._getParser(),parser)())
            except:
                # Most entry points fail on an arbitrary line; skip them.
                continue
    def parseUIAssignment(self,line=None):
        """Parse self.line (or *line* when given) and print the variable
        name plus UI resource id of an assignment, if one is found."""
        if line:
            self.line = line
        if not self.line:
            print("No line to parse given.")
            return
        if self.debug:
            print("Parsing:",self.line)
        try:
            # Two parse strategies: a block statement (declaration form)
            # and a bare expression (plain assignment form).
            parser = self._getParser().parse_block_statement()
            parser2 = self._getParser().parse_expression()
        except:
            print("\nCouldnt get parser for:",self.line)
            return
        try:
            # Declaration form: Type x = findViewById(R.id.foo);
            print('\nVar assigned:', parser.declarators[0].name, '\nUI ID:',parser.declarators[0].initializer.expression.arguments[0].member) #init.value.expression.arguments[0].member)
        except:
            try:
                # Assignment form: x = findViewById(R.id.foo);
                print('\nVar assigned:', parser2.expressionl.selectors[0].member,'\n',"UI ID:",parser2.value.expression.arguments[0].member)
            except:
                print('\nUnable to parse:',self.line,'\nUsing:\n1)',parser,'\n\n2)',parser2)
        #print(self.expression)
        #init = parser.parse_statement()
        #init = parser.parse_variable_initializer()
        #print(type(init),'\n',init)
        #if type(init) == 'javalang.tree.Statement':
        #    print(
        #print(init.value.type.name, init.expressionl.member, '=', init.value.expression.arguments[0].member)
if __name__ == '__main__':
    # Parse CLI args and try to extract the UI-widget assignment from the
    # provided Java source line.
    args = parse_args()
    lp = LineParser(args)
    lp.parseUIAssignment()
|
994,394 | d44751d7d71c73bb9edaa8a3056c5f780c2e9bd3 | from collections import defaultdict
def solve1(n_players=479, last_marble_worth=7103500):
current = Node(0)
current.right = current
current.left = current
scores = defaultdict(int)
current_player = 1
for m in range(1, last_marble_worth + 1):
if m % 23 != 0:
first = current.right
new_marble = Node(m)
first.insert_right(new_marble)
current = first.right
else:
scores[current_player] += m
to_remove = current
for i in range(7):
to_remove = to_remove.left
scores[current_player] += to_remove.marble
current = to_remove.right
to_remove.remove_self()
current_player = (current_player + 1) % n_players
return max(scores.values())
class Node:
    """One marble in a circular doubly linked list."""
    def __init__(self, marble):
        self.marble = marble
        self.left = None
        self.right = None
    def insert_right(self, node):
        """Splice *node* in immediately clockwise of this node."""
        successor = self.right
        node.left, node.right = self, successor
        self.right = node
        successor.left = node
    def remove_self(self):
        """Unlink this node, joining its two neighbours directly."""
        self.left.right = self.right
        self.right.left = self.left
    def __str__(self):
        return f'Node({self.marble})'
994,395 | 3cb553f2dbbd40aa4f87c471bd4cc6ff842ba8be | from enum import Enum, auto
class WorldType(Enum):
    """Closed set of world-scenario identifiers."""
    # auto() assigns sequential integer values 1, 2, 3.
    FOOD = auto()
    REPRO = auto()
    SURVIVAL = auto()
|
994,396 | 95af97cd7ca93a20a79f0e45aeae069c41c395a3 | # ---------------------------------------------------------------- #
# Median of Three
# Add short description
# (List)
# ---------------------------------------------------------------- #
# ---------------------------------------------------------------- #
# ---------------------------------------------------------------- #
# ---------------------------------------------------------------- #
# ---------------------------------------------------------------- #
# ---------------------------------------------------------------- #
# ---------------------------------------------------------------- #
|
994,397 | 2df9f491e3ae042b77d1900bbb7e47a085ce875c | import json
import os
import sys
import dcoscli
import docopt
import pkg_resources
from dcos import (cmds, cosmospackage, emitting, http, options, package,
subcommand, util)
from dcos.errors import DCOSException
from dcoscli import tables
from dcoscli.subcommand import default_command_info, default_doc
from dcoscli.util import decorate_docopt_usage
logger = util.get_logger(__name__)
emitter = emitting.FlatEmitter()
def main(argv):
    """Top-level entry point: run the package subcommand and map any
    DCOSException to a published error message plus a non-zero status.

    :param argv: command line arguments
    :type argv: [str]
    :returns: process status
    :rtype: int
    """
    try:
        return _main(argv)
    except DCOSException as e:
        emitter.publish(e)
        return 1
@decorate_docopt_usage
def _main(argv):
    """Parse *argv* with docopt and dispatch to the matching command.

    :param argv: command line arguments
    :type argv: [str]
    :returns: process status
    :rtype: int
    """
    args = docopt.docopt(
        default_doc("package"),
        argv=argv,
        version='dcos-package version {}'.format(dcoscli.version))
    http.silence_requests_warnings()
    return cmds.execute(_cmds(), args)
def _cmds():
    """Build the table of supported `dcos package` subcommands.

    :returns: All of the supported commands
    :rtype: dcos.cmds.Command
    """
    # NOTE(review): the bare ['package'] entry is listed last — presumably
    # dispatch prefers the more specific hierarchies; confirm in
    # dcos.cmds.execute before reordering.
    return [
        cmds.Command(
            hierarchy=['package', 'update'],
            arg_keys=[],
            function=_update),
        cmds.Command(
            hierarchy=['package', 'repo', 'list'],
            arg_keys=['--json'],
            function=_list_repos),
        cmds.Command(
            hierarchy=['package', 'repo', 'add'],
            arg_keys=['<repo-name>', '<repo-url>', '--index'],
            function=_add_repo),
        cmds.Command(
            hierarchy=['package', 'repo', 'remove'],
            arg_keys=['<repo-name>'],
            function=_remove_repo),
        cmds.Command(
            hierarchy=['package', 'describe'],
            arg_keys=['<package-name>', '--app', '--cli', '--options',
                      '--render', '--package-versions', '--package-version',
                      '--config'],
            function=_describe),
        cmds.Command(
            hierarchy=['package', 'install'],
            arg_keys=['<package-name>', '--package-version', '--options',
                      '--app-id', '--cli', '--app', '--yes'],
            function=_install),
        cmds.Command(
            hierarchy=['package', 'list'],
            arg_keys=['--json', '--app-id', '<package-name>'],
            function=_list),
        cmds.Command(
            hierarchy=['package', 'search'],
            arg_keys=['--json', '<query>'],
            function=_search),
        cmds.Command(
            hierarchy=['package', 'uninstall'],
            arg_keys=['<package-name>', '--all', '--app-id', '--cli', '--app'],
            function=_uninstall),
        cmds.Command(
            hierarchy=['package'],
            arg_keys=['--config-schema', '--info'],
            function=_package),
    ]
def _package(config_schema, info):
    """Handle the bare `dcos package` command (no subcommand given).

    :param config_schema: Whether to output the config schema
    :type config_schema: boolean
    :param info: Whether to output a description of this subcommand
    :type info: boolean
    :returns: Process status
    :rtype: int
    """
    if config_schema:
        # Load the JSON config schema bundled inside the dcoscli package.
        schema = json.loads(
            pkg_resources.resource_string(
                'dcoscli',
                'data/config-schema/package.json').decode('utf-8'))
        emitter.publish(schema)
    elif info:
        _info()
    else:
        # No recognised flag: show generic usage and report failure.
        doc = default_doc("package")
        emitter.publish(options.make_generic_usage_message(doc))
        return 1
    return 0
def _info():
    """Print package cli information.

    :returns: Process status
    :rtype: int
    """
    emitter.publish(default_command_info("package"))
    return 0
def _update():
    """Deprecated `dcos package update` command.

    :raises: DCOSException always, carrying the deprecation notice
    """
    # Still resolve the package manager first so unsupported clusters get
    # the proper "not supported" error instead of the deprecation notice.
    _get_package_manager()
    notice = ("This command has been deprecated. "
              "Repositories will be automatically updated after they are added"
              " by `dcos package repo add`")
    raise DCOSException(notice)
def _list_repos(is_json):
    """List configured package repositories.

    :param is_json: output json if True
    :type is_json: bool
    :returns: Process status
    :rtype: int
    """
    package_manager = _get_package_manager()
    repos = package_manager.get_repos()
    if is_json:
        # NOTE(review): returns emitter.publish()'s result rather than 0 —
        # presumably None; confirm callers treat that as success.
        return emitter.publish(repos)
    elif repos.get("repositories"):
        repos = ["{}: {}".format(repo.get("name"), repo.get("uri"))
                 for repo in repos.get("repositories")]
        emitter.publish("\n".join(repos))
    else:
        msg = ("There are currently no repos configured. "
               "Please use `dcos package repo add` to add a repo")
        raise DCOSException(msg)
    return 0
def _add_repo(repo_name, repo_url, index):
    """Add package repo and update repo with new repo

    :param repo_name: name to call repo
    :type repo_name: str
    :param repo_url: location of repo to add
    :type repo_url: str
    :param index: index to add this repo
    :type index: int
    :returns: Process status
    :rtype: int
    """
    package_manager = _get_package_manager()
    package_manager.add_repo(repo_name, repo_url, index)
    return 0
def _remove_repo(repo_name):
    """Remove a configured package repository by name.

    :param repo_name: name of the repo to remove
    :type repo_name: str
    :returns: Process status
    :rtype: int
    """
    package_manager = _get_package_manager()
    package_manager.remove_repo(repo_name)
    return 0
def _describe(package_name,
              app,
              cli,
              options_path,
              render,
              package_versions,
              package_version,
              config):
    """Describe the specified package.

    :param package_name: The package to describe
    :type package_name: str
    :param app: If True, marathon.json will be printed
    :type app: boolean
    :param cli: If True, command.json | resource.json's cli property should
                be printed
    :type cli: boolean
    :param options_path: Path to json file with options to override
                         config.json defaults.
    :type options_path: str
    :param render: If True, marathon.json will be rendered
    :type render: boolean
    :param package_versions: If True, a list of all package versions will
                             be printed
    :type package_versions: boolean
    :param package_version: package version
    :type package_version: str | None
    :param config: If True, config.json will be printed
    :type config: boolean
    :returns: Process status
    :rtype: int
    """
    # If the user supplied template options, they definitely want to
    # render the template
    if options_path:
        render = True
        # Expand ~ in the options file path
        options_path = os.path.expanduser(options_path)
    # --package-versions is exclusive with every other display option.
    if package_versions and \
       (app or cli or options_path or render or package_version or config):
        raise DCOSException(
            'If --package-versions is provided, no other option can be '
            'provided')
    package_manager = _get_package_manager()
    pkg = package_manager.get_package_version(package_name, package_version)
    pkg_json = pkg.package_json()
    if package_version is None:
        # No explicit version requested: advertise every available version
        # instead of a single one.
        pkg_versions = pkg.package_versions()
        del pkg_json['version']
        pkg_json['versions'] = pkg_versions
    if package_versions:
        emitter.publish(pkg.package_versions())
    elif cli or app or config:
        user_options = _user_options(options_path)
        options = pkg.options(user_options)
        if cli:
            emitter.publish(pkg.cli_definition())
        if app:
            if render:
                app_output = pkg.marathon_json(options)
            else:
                app_output = pkg.marathon_template()
                # Drop a single trailing newline from the raw template.
                if app_output and app_output[-1] == '\n':
                    app_output = app_output[:-1]
            emitter.publish(app_output)
        if config:
            config_output = pkg.config_json()
            emitter.publish(config_output)
    else:
        emitter.publish(pkg_json)
    return 0
def _user_options(path):
    """Load user-supplied package options from a JSON file.

    :param path: file path, or None when no options file was given
    :type path: str
    :returns: the options (empty when *path* is None)
    :rtype: dict
    """
    if path is None:
        return {}
    with util.open_file(path) as options_file:
        return util.load_json(options_file)
def _confirm(prompt, yes):
"""
:param prompt: message to display to the terminal
:type prompt: str
:param yes: whether to assume that the user responded with yes
:type yes: bool
:returns: True if the user responded with yes; False otherwise
:rtype: bool
"""
if yes:
return True
else:
while True:
sys.stdout.write('{} [yes/no] '.format(prompt))
sys.stdout.flush()
response = sys.stdin.readline().strip().lower()
if response == 'yes' or response == 'y':
return True
elif response == 'no' or response == 'n':
return False
else:
emitter.publish(
"'{}' is not a valid response.".format(response))
def _install(package_name, package_version, options_path, app_id, cli, app,
             yes):
    """Install the specified package.

    :param package_name: the package to install
    :type package_name: str
    :param package_version: package version to install
    :type package_version: str
    :param options_path: path to file containing option values
    :type options_path: str
    :param app_id: app ID for installation of this package
    :type app_id: str
    :param cli: indicates if the cli should be installed
    :type cli: bool
    :param app: indicate if the application should be installed
    :type app: bool
    :param yes: automatically assume yes to all prompts
    :type yes: bool
    :returns: process status
    :rtype: int
    """
    if cli is False and app is False:
        # Install both if neither flag is specified
        cli = app = True
    # Expand ~ in the options file path
    if options_path:
        options_path = os.path.expanduser(options_path)
    user_options = _user_options(options_path)
    package_manager = _get_package_manager()
    pkg = package_manager.get_package_version(package_name, package_version)
    pkg_json = pkg.package_json()
    pre_install_notes = pkg_json.get('preInstallNotes')
    if app and pre_install_notes:
        # Show the package's warning text and give the user a chance to
        # abort (skipped when --yes was passed).
        emitter.publish(pre_install_notes)
        if not _confirm('Continue installing?', yes):
            emitter.publish('Exiting installation.')
            return 0
    if app and pkg.has_mustache_definition():
        # render options before start installation
        options = pkg.options(user_options)
        # Install in Marathon
        msg = 'Installing Marathon app for package [{}] version [{}]'.format(
            pkg.name(), pkg.version())
        if app_id is not None:
            msg += ' with app id [{}]'.format(app_id)
        emitter.publish(msg)
        package_manager.install_app(
            pkg,
            options,
            app_id)
    if cli and pkg.has_cli_definition():
        # Install subcommand
        msg = 'Installing CLI subcommand for package [{}] version [{}]'.format(
            pkg.name(), pkg.version())
        emitter.publish(msg)
        subcommand.install(pkg)
        # Tell the user which new `dcos <cmd>` invocations became available.
        subcommand_paths = subcommand.get_package_commands(package_name)
        new_commands = [os.path.basename(p).replace('-', ' ', 1)
                        for p in subcommand_paths]
        if new_commands:
            commands = ', '.join(new_commands)
            plural = "s" if len(new_commands) > 1 else ""
            emitter.publish("New command{} available: {}".format(plural,
                                                                 commands))
    post_install_notes = pkg_json.get('postInstallNotes')
    if app and post_install_notes:
        emitter.publish(post_install_notes)
    return 0
def _list(json_, app_id, package_name):
    """List installed apps.

    :param json_: output json if True
    :type json_: bool
    :param app_id: App ID of app to show
    :type app_id: str
    :param package_name: The package to show
    :type package_name: str
    :returns: process return code
    :rtype: int
    """
    package_manager = _get_package_manager()
    if app_id is not None:
        app_id = util.normalize_app_id(app_id)
    results = package.installed_packages(
        package_manager, app_id, package_name)
    # only emit those packages that match the provided package_name and app_id
    if results or json_:
        emitting.publish_table(emitter, results, tables.package_table, json_)
    else:
        msg = ("There are currently no installed packages. "
               "Please use `dcos package install` to install a package.")
        raise DCOSException(msg)
    return 0
def _matches_package_name(name, pkg_info):
"""
:param name: the name of the package
:type name: str
:param pkg_info: the package description
:type pkg_info: dict
:returns: True if the name is not defined or the package matches that name;
False otherwise
:rtype: bool
"""
return name is None or pkg_info['name'] == name
def _matches_app_id(app_id, pkg_info):
"""
:param app_id: the application id
:type app_id: str
:param pkg_info: the package description
:type pkg_info: dict
:returns: True if the app id is not defined or the package matches that app
id; False otherwize
:rtype: bool
"""
return app_id is None or app_id in pkg_info.get('apps')
def _search(json_, query):
    """Search for matching packages.

    :param json_: output json if True
    :type json_: bool
    :param query: The search term
    :type query: str
    :returns: Process status
    :rtype: int
    """
    # Normalize a missing/None query to an empty string (match everything).
    if not query:
        query = ''
    package_manager = _get_package_manager()
    results = package_manager.search_sources(query)
    if json_ or results['packages']:
        emitting.publish_table(emitter,
                               results,
                               tables.package_search_table,
                               json_)
    else:
        raise DCOSException('No packages found.')
    return 0
def _uninstall(package_name, remove_all, app_id, cli, app):
    """Uninstall the specified package.

    :param package_name: The package to uninstall
    :type package_name: str
    :param remove_all: Whether to remove all instances of the named package
    :type remove_all: boolean
    :param app_id: App ID of the package instance to uninstall
    :type app_id: str
    :param cli: passed through to package.uninstall (CLI subcommand selector)
    :type cli: bool
    :param app: passed through to package.uninstall (Marathon app selector)
    :type app: bool
    :returns: Process status
    :rtype: int
    """
    package_manager = _get_package_manager()
    err = package.uninstall(
        package_manager, package_name, remove_all, app_id, cli, app)
    if err is not None:
        emitter.publish(err)
        return 1
    return 0
def _get_cosmos_url():
    """
    :returns: cosmos base url
    :rtype: str
    """
    config = util.get_config()
    cosmos_url = config.get("package.cosmos_url")
    # Fall back to the cluster base URL when no explicit cosmos URL is set.
    if cosmos_url is None:
        cosmos_url = util.get_config_vals(['core.dcos_url'], config)[0]
    return cosmos_url
def _get_package_manager():
    """Returns type of package manager to use

    :returns: PackageManager instance
    :rtype: PackageManager
    :raises: DCOSException when the cluster's Cosmos service is unavailable
    """
    cosmos_url = _get_cosmos_url()
    cosmos_manager = cosmospackage.Cosmos(cosmos_url)
    if cosmos_manager.enabled():
        return cosmos_manager
    else:
        msg = ("This version of the DCOS CLI is not supported for your "
               "cluster. Please downgrade the CLI to an older version: "
               "https://dcos.io/docs/usage/cli/update/#downgrade"
               )
        raise DCOSException(msg)
994,398 | d37fcbf07a7c9daf4dd21b4c8024a6bf7f45825d |
import arabic_reshaper
from bidi.algorithm import get_display
from reportlab.platypus import *
from reportlab.platypus.flowables import Image
from reportlab.lib.utils import ImageReader
from reportlab.lib.styles import getSampleStyleSheet
from reportlab.lib.styles import ParagraphStyle
from reportlab.rl_config import defaultPageSize
from reportlab.lib.units import inch, cm, mm
from reportlab.lib.pagesizes import letter, A4, landscape
from reportlab.platypus import BaseDocTemplate, Frame, PageTemplate, Paragraph
from reportlab.lib.enums import TA_LEFT, TA_RIGHT, TA_CENTER, TA_JUSTIFY
from textwrap import wrap
from reportlab.lib import colors
from uuid import uuid4
from cgi import escape
from functools import partial
import os
from reportlab.pdfgen import canvas
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.ttfonts import TTFont
import locale
# NOTE(review): `request` is presumably the web2py controller global —
# this module is meant to run inside a web2py app; confirm before reuse.
tmpfilename=os.path.join(request.folder,'private',str(uuid4()))
# A4 document with large top/bottom margins reserved for the invoice
# header and footer drawn on the canvas.
doc = SimpleDocTemplate(tmpfilename,pagesize=A4, rightMargin=20,leftMargin=20, topMargin=2.5 * inch,bottomMargin=1.5 * inch)#, showBoundary=1)
# Paragraph styles. NOTE(review): assumes an 'Arabic' TTF has been
# registered via pdfmetrics — registration is not visible in this chunk.
style=ParagraphStyle(name='Normal',fontName='Arabic',fontSize=15)
style.alignment=TA_CENTER
_style = ParagraphStyle('Courier',fontName="Courier", fontSize=8, leading = 10)
item_style=ParagraphStyle(name='Normal',fontName='Arabic',fontSize=8)
item_style.alignment=TA_RIGHT
heading_style=ParagraphStyle(name='Normal',fontName='Arabic',fontSize=20)
heading_style.alignment=TA_CENTER
arabic_text = u'إذا أخذنا بعين'
# Arabic must be reshaped (joined glyph forms) and reordered (bidi)
# before ReportLab can draw it correctly.
arabic_text = arabic_reshaper.reshape(arabic_text) # join characters
arabic_text = get_display(arabic_text) # change orientation by using bidi
# canvas.setFont('Arabic', 32)
# canvas.drawString(x - 100, y, ar)
def sales_return_canvas(canvas, doc_invoice):
    """Page-decoration callback for the Sales Return PDF.

    Passed to ``doc.build`` as ``onFirstPage``/``onLaterPages``, so it runs
    once per page to draw the fixed header (title, sales-return metadata,
    customer address) and a placeholder footer table.

    Args:
        canvas: reportlab canvas for the current page. NOTE(review): this
            parameter shadows the ``reportlab.pdfgen.canvas`` module
            imported at the top of the file.
        doc_invoice: document template reportlab hands to page callbacks;
            unused -- geometry is taken from the module-level ``doc``.

    Side effects: issues three DB lookups per page (web2py globals ``db``
    and ``request``) and draws directly on the canvas.
    """
    # Save canvas state so absolute-position drawing here does not leak
    # into the flowable story rendered after this callback.
    canvas.saveState()
    # Sales return record being printed, its master account, and (if it
    # exists) the matching Customer row for the address block.
    _id = db(db.Sales_Return.id == request.args(0)).select().first()
    _ma = db(db.Master_Account.id == _id.customer_code_id).select().first()
    _cu = db(db.Customer.customer_account_no == str(_ma.account_code)).select().first()
    if _cu:
        _pobox = 'P.O. Box ' + str(_cu.po_box_no)
        _area = str(_cu.area_name) + '\n' + str(_cu.country.upper())
    else:
        # No matching Customer row: leave the address lines blank.
        _pobox = _area = ''
    # Header grid: a title row spanning all columns, then label/value rows.
    _so = [
    ['SALES RETURN DRAFT'],
    ['Sales Return No. ', ':',str(_id.transaction_prefix_id.prefix)+str(_id.sales_return_no),'','Sales Return Date ',':',_id.sales_return_date.strftime('%d/%b/%Y')],
    ['Customer Code',':',_id.customer_code_id.account_code,'','Transaction Type',':','Credit'],
    [_id.customer_code_id.account_name,'','','','Department',':',_id.dept_code_id.dept_name],
    [_pobox,'','','', 'Location', ':',_id.location_code_id.location_name],
    [_area,'','','', 'Sales Man',':',str(_id.sales_man_id.employee_id.first_name.upper()) + ' ' + str(_id.sales_man_id.employee_id.last_name.upper())],
    ['','','','','','','']]
    header = Table(_so, colWidths=['*',10,'*',10,'*',10,'*'])
    header.setStyle(TableStyle([
        ('SPAN',(0,0),(-1,0)),
        ('ALIGN',(0,0),(0,0),'CENTER'),
        ('FONTNAME', (0, 0), (-1, -1), 'Courier'),
        # NOTE(review): FONTNAME takes only a font name; the trailing 12
        # appears to be ignored by reportlab -- confirm before relying on it.
        ('FONTNAME', (0, 0), (0, 0), 'Courier-Bold', 12),
        ('FONTSIZE',(0,0),(0,0),15),
        ('FONTSIZE',(0,1),(-1,1),8),
        ('FONTSIZE',(0,2),(-1,-1),8),
        ('VALIGN',(0,1),(-1,-1),'TOP'),
        ('BOTTOMPADDING',(0,1),(-1,-1),0),
        ('TOPPADDING',(0,1),(-1,-1),0),
        ('BOTTOMPADDING',(0,0),(-1,0),20),
        ]))
    # Size the header against the document geometry, then paint it just
    # below the top edge of the page.
    header.wrap(doc.width, doc.topMargin)
    header.drawOn(canvas, doc.leftMargin, doc.height + doc.topMargin - .9 * inch)
    # Footer is currently a single empty cell -- kept as a placeholder
    # (e.g. for page numbers or totals).
    _page = [['']]
    footer = Table(_page, colWidths=['*',10,'*',10,'*',10,'*'])
    footer.setStyle(TableStyle([
        ('FONTNAME', (0, 0), (-1, -1), 'Courier-Bold'),
        ('FONTSIZE',(0,0),(-1,-1),8),
        ('ALIGN', (0,0), (-1,-1), 'CENTER')]))
    footer.wrap(doc.width, doc.bottomMargin)
    footer.drawOn(canvas, doc.leftMargin, doc.bottomMargin + .1 * cm)
    # Restore the canvas for the flowable content.
    canvas.restoreState()
def get_sales_return_reports_id():
    """Build and return the Sales Return PDF for ``request.args(0)``.

    Collects the non-deleted Sales_Return_Transaction rows of the given
    sales return, renders them as an item table plus a remarks row, builds
    the PDF into the module-level temp file, and returns the raw PDF bytes
    with the response Content-Type set to application/pdf.

    Relies on web2py globals ``db``, ``request``, ``response`` and the
    module-level ``doc``/``tmpfilename``/``_style`` objects, plus the
    ``card`` helper defined below.
    """
    story = []
    _id = db(db.Sales_Return.id == request.args(0)).select().first()
    ctr = 0
    # Item table; the header row repeats on every page (repeatRows=1).
    _st = [['#','Item Code','Item Description','UOM','Cat','Qty']]
    for t in db((db.Sales_Return_Transaction.sales_return_no_id == request.args(0)) &
                (db.Sales_Return_Transaction.delete == False)).select(
                    orderby = db.Sales_Return_Transaction.id,
                    left = db.Item_Master.on(db.Item_Master.id == db.Sales_Return_Transaction.item_code_id)):
        ctr += 1
        # Items sold in packs are rendered as "cartons - pieces/pack".
        if t.Item_Master.uom_value == 1:
            _qty = t.Sales_Return_Transaction.quantity
        else:
            _qty = card(t.Item_Master.id, t.Sales_Return_Transaction.quantity, t.Sales_Return_Transaction.uom)
        # Category 4 rows print a blank category cell; the unused
        # _net_price computation from the draft layout was removed.
        if t.Sales_Return_Transaction.category_id != 4:
            _category = t.Sales_Return_Transaction.category_id.mnemonic
        else:
            _category = ''
        _st.append([ctr,
                    Paragraph(t.Item_Master.item_code,style = _style),
                    t.Item_Master.brand_line_code_id.brand_line_name+ '\n' + t.Item_Master.item_description,
                    t.Sales_Return_Transaction.uom, _category, _qty])
    _st.append(['---* nothing to follows *---'])
    _st_tbl = Table(_st, colWidths=[20,60,'*',50,50,50],repeatRows=1)
    _st_tbl.setStyle(TableStyle([
        ('SPAN',(0,-1),(-1,-1)),
        ('LINEABOVE', (0,0), (-1,0), 0.25, colors.black,None, (2,2)),
        ('LINEBELOW', (0,0), (-1,0), 0.25, colors.black,None, (2,2)),
        ('LINEABOVE', (0,-1), (-1,-1), 0.25, colors.black,None, (2,2)),
        ('LINEBELOW', (0,2), (-1,-5), 0.5, colors.Color(0, 0, 0, 0.2)),
        ('FONTSIZE',(0,0),(-1,-1),8),
        ('FONTNAME', (0, 0), (-1, -1), 'Courier'),
        ('ALIGN',(0,-1),(-1,-1),'CENTER'),
        ('VALIGN',(0,0),(-1,-1),'TOP')]))
    _sr_rem = Table([['Remarks: ', _id.remarks]], colWidths=[80,'*'])
    _sr_rem.setStyle(TableStyle([
        ('FONTSIZE',(0,0),(-1,-1),8),
        ('FONTNAME', (0, 0), (-1, -1), 'Courier'),
        ('VALIGN',(0,0),(-1,-1),'TOP')]))
    story.append(_st_tbl)
    story.append(Spacer(1,.5*cm))
    story.append(_sr_rem)
    doc.build(story, onFirstPage=sales_return_canvas, onLaterPages = sales_return_canvas)
    # Read the generated PDF back and always remove the temp file, even if
    # the read fails (the original leaked the handle via open().read()).
    try:
        with open(tmpfilename, "rb") as pdf_file:
            pdf_data = pdf_file.read()
    finally:
        os.unlink(tmpfilename)
    response.headers['Content-Type']='application/pdf'
    return pdf_data
# ---- C A R D Function -----
def card(item, quantity, uom_value):
    """Format a pack quantity as "<whole packs> - <loose pieces>/<pack size>".

    Args:
        item: Item_Master row id used to look up the item's uom_value.
        quantity: total piece count (int or int-like string).
        uom_value: pieces per pack (int or int-like string).

    Returns:
        ``quantity`` unchanged when the item's uom_value is 1 (sold per
        piece); otherwise the formatted "packs - remainder/pack_size"
        string.
    """
    _itm_code = db(db.Item_Master.id == item).select().first()
    if _itm_code.uom_value == 1:
        return quantity
    pieces = int(quantity)
    pack_size = int(uom_value)
    # divmod keeps integer semantics on both Python 2 and 3; the original
    # used '/', which yields float strings (e.g. "2.0") under Python 3.
    packs, loose = divmod(pieces, pack_size)
    return str(packs) + ' - ' + str(loose) + '/' + str(pack_size)
|
994,399 | 6990cb753f9cbe02f2323f380ad6f07e8d381c46 | from django.contrib import admin
from .models import Attendee, Event
# Register your models here.
# Expose the event models in the Django admin with default ModelAdmin
# behaviour.
for _model in (Attendee, Event):
    admin.site.register(_model)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.