index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
14,100 | beb5392d30e4e9516e51d10981ca420936b64ed6 | from rest_framework import serializers
from . import models
class CreateOrderItemSerializer(serializers.ModelSerializer):
    """OrderItem serializer exposing every model field (write-oriented: the
    related drink is serialized as its raw primary key, unlike
    OrderItemSerializer below)."""
    class Meta:
        model = models.OrderItem
        fields = '__all__'
class OrderItemSerializer(serializers.ModelSerializer):
    """Read-side OrderItem serializer: the related drink is rendered via
    str(drink) instead of its primary key."""
    # StringRelatedField is read-only and serializes the relation as its __str__
    drink = serializers.StringRelatedField()
    class Meta:
        model = models.OrderItem
        fields = '__all__'
class OrderSerializer(serializers.ModelSerializer):
    """Order serializer with nested items and a computed order total."""
    items = OrderItemSerializer(many=True)
    total = serializers.SerializerMethodField()

    class Meta:
        model = models.Order
        fields = '__all__'
        depth = 1

    def get_total(self, order):
        """Sum of the prices of the drinks on every item of *order*.

        :param order: the Order instance being serialized
        :return: total price across order.items
        """
        # generator expression instead of a throwaway list inside sum()
        return sum(item.drink.price for item in order.items.all())
|
14,101 | d26c4d3f15ea6b5f38fc4d3e45795b4886ac7d67 | '''
Authors: Connor Finn, Josh Katz
Summer 2020
Description:
    This script dictates a finite state machine that will be used to control the robot's actions and
transitions between different operating modes. Each state exists as a separate file in the states directory.
These states each have their own update step, as well as enter and exit functions which describe how the robot
will operate while within this state or while transitioning in or out of the state. The specific motions of the
robot are determined in the TrajectoryExecutor and LegTrajetoryGenorator files. The os currently assumes 100 Hz
Transitions:
    Transitions between states are invoked through a PS4 controller. The controller commands are indicated in
robot_controller.py. It is possible to use a different controller, However, it will be necessary to update
the settings in both the robot_controller and PygameController files.
Currently:
+ R3 Toggles between Laying and Standing
+ x is used to reset the feet while standing
+ x is also used to recover when in a fallen position
+ L3 starts a march
+ the Left Joystick indicates the direction and speed of motion
+ R1 is the turbo button
+ The right Joystick indicates the turning direction and speed
'''
from states.state import State
from states.idle import Idle
from states.move import Move
from states.dance import Dance
from states.march import March
from states.lay import Lay
from states.stand import Stand
from states.standing_up import Standing_Up
from states.laying_down import Laying_Down
from states.reset_position import Reset_Position
from states.recovering import Recovering
from states.fallen import Fallen
import time
class StateMachine:
    """Finite state machine governing the robot's operating modes.

    Owns one instance of every State subclass and forwards each control
    tick to the currently active state; states request transitions by
    returning the next state's name from their update() method.
    """

    def __init__(self, robot):
        # IDIOM/SECURITY FIX: the states were previously instantiated via
        # eval(name) on each name string; an explicit class list produces the
        # same objects with no string evaluation, and a typo now fails
        # loudly at import time instead of inside eval().
        state_classes = [Idle, Move, March, Lay, Dance, Stand,
                         Standing_Up, Laying_Down, Reset_Position,
                         Fallen, Recovering]
        self.state_names = [cls.__name__ for cls in state_classes]
        # one instance of each state, keyed by its class name
        self.states = {cls.__name__: cls() for cls in state_classes}
        self.current_state = self.states['Stand']
        self.previous_state = None
        self.robot = robot

    def change_state(self, state_name):
        """Transition the robot into the state named *state_name*."""
        # remember where we came from, switch, then run the new state's
        # enter() hook so it can configure the robot for its mode
        self.previous_state = self.current_state
        self.current_state = self.states[state_name]
        self.current_state.enter(self.robot)

    def process_step(self, controller_state):
        """Run one control tick against the active state.

        :param controller_state: latest PS4 controller reading
        """
        robot = self.robot
        # the active state returns the next state's name, or None to stay put
        state_name = self.current_state.update(robot, controller_state)
        if state_name is not None:
            self.change_state(state_name)
|
14,102 | e03dbd7507474c80a0bf87b4d90abd0e4f94e968 | # Copyright Contributors to the Amundsen project.
# SPDX-License-Identifier: Apache-2.0
from abc import abstractmethod
from typing import (
Iterator, List, Union,
)
from amundsen_common.utils.atlas import AtlasCommonParams, AtlasTableTypes
from amundsen_rds.models import RDSModel
from amundsen_rds.models.column import ColumnLineage as RDSColumnLineage
from amundsen_rds.models.table import TableLineage as RDSTableLineage
from databuilder.models.atlas_entity import AtlasEntity
from databuilder.models.atlas_relationship import AtlasRelationship
from databuilder.models.atlas_serializable import AtlasSerializable
from databuilder.models.graph_node import GraphNode
from databuilder.models.graph_relationship import GraphRelationship
from databuilder.models.graph_serializable import GraphSerializable
from databuilder.models.table_metadata import ColumnMetadata, TableMetadata
from databuilder.models.table_serializable import TableSerializable
from databuilder.serializers.atlas_serializer import get_entity_attrs
from databuilder.utils.atlas import AtlasRelationshipTypes, AtlasSerializedEntityOperation
class BaseLineage(GraphSerializable, AtlasSerializable, TableSerializable):
    """
    Generic Lineage Interface

    Serializes lineage to three targets - graph nodes/relations, Atlas
    entities/relationships, and RDS records - via one lazily consumed
    iterator per target; each create_next_* call drains one item.
    """
    LABEL = 'Lineage'
    # relation names as seen from origin -> dependency and the reverse edge
    ORIGIN_DEPENDENCY_RELATION_TYPE = 'HAS_DOWNSTREAM'
    DEPENDENCY_ORIGIN_RELATION_TYPE = 'HAS_UPSTREAM'
    def __init__(self) -> None:
        # one iterator per serialization target; see create_next_* below
        self._node_iter = self._create_node_iterator()
        self._relation_iter = self._create_rel_iterator()
        self._atlas_entity_iterator = self._create_next_atlas_entity()
        self._atlas_relation_iterator = self._create_next_atlas_relation()
        self._record_iter = self._create_record_iterator()
    def create_next_node(self) -> Union[GraphNode, None]:
        """Return the next graph node, or None when the iterator is exhausted."""
        # return the string representation of the data
        try:
            return next(self._node_iter)
        except StopIteration:
            return None
    def create_next_relation(self) -> Union[GraphRelationship, None]:
        """Return the next graph relationship, or None when exhausted."""
        try:
            return next(self._relation_iter)
        except StopIteration:
            return None
    def _create_node_iterator(self) -> Iterator[GraphNode]:
        """
        It won't create any node for this model
        :return:
        """
        # `return` before `yield` makes this a generator that yields nothing
        return
        yield
    @abstractmethod
    def _create_rel_iterator(self) -> Iterator[GraphRelationship]:
        # subclasses yield the upstream/downstream graph relations
        pass
    def _create_next_atlas_entity(self) -> Iterator[AtlasEntity]:
        """Yield the single Atlas 'process' entity representing this lineage."""
        attrs_mapping = [
            (AtlasCommonParams.qualified_name, self._get_atlas_process_key()),
            ('name', self._get_atlas_process_key())
        ]
        entity_attrs = get_entity_attrs(attrs_mapping)
        entity = AtlasEntity(
            typeName=AtlasTableTypes.process,
            operation=AtlasSerializedEntityOperation.CREATE,
            attributes=entity_attrs,
            relationships=None
        )
        yield entity
    def create_next_atlas_entity(self) -> Union[AtlasEntity, None]:
        """Return the next Atlas entity, or None when exhausted."""
        try:
            return next(self._atlas_entity_iterator)
        except StopIteration:
            return None
    def create_next_atlas_relation(self) -> Union[AtlasRelationship, None]:
        """Return the next Atlas relationship, or None when exhausted."""
        try:
            return next(self._atlas_relation_iterator)
        except StopIteration:
            return None
    def _create_next_atlas_relation(self) -> Iterator[AtlasRelationship]:
        """Yield one upstream relation plus one downstream relation per dep."""
        # NOTE(review): entityQualifiedName2 of the upstream relation reuses
        # the process key rather than a distinct entity key - confirm intended
        upstream = AtlasRelationship(
            relationshipType=AtlasRelationshipTypes.lineage_upstream,
            entityType1=AtlasTableTypes.process,
            entityQualifiedName1=self._get_atlas_process_key(),
            entityType2=self._get_atlas_entity_type(),
            entityQualifiedName2=self._get_atlas_process_key(),
            attributes={}
        )
        yield upstream
        # downstream_deps is provided by the concrete subclass's __init__
        for downstream_key in self.downstream_deps:  # type: ignore
            downstream = AtlasRelationship(
                relationshipType=AtlasRelationshipTypes.lineage_downstream,
                entityType1=AtlasTableTypes.process,
                entityQualifiedName1=self._get_atlas_process_key(),
                entityType2=self._get_atlas_entity_type(),
                entityQualifiedName2=downstream_key,
                attributes={}
            )
            yield downstream
    @abstractmethod
    def _get_atlas_process_key(self) -> str:
        # subclasses return the qualified name of the Atlas process entity
        pass
    @abstractmethod
    def _get_atlas_entity_type(self) -> str:
        # subclasses return the Atlas type of the lineage endpoints
        pass
    def create_next_record(self) -> Union[RDSModel, None]:
        """Return the next RDS record, or None when exhausted."""
        try:
            return next(self._record_iter)
        except StopIteration:
            return None
    @abstractmethod
    def _create_record_iterator(self) -> Iterator[RDSModel]:
        # subclasses yield the RDS lineage rows
        pass
class TableLineage(BaseLineage):
    """
    Table Lineage Model. It won't create nodes but create upstream/downstream rels.
    """
    def __init__(self,
                 table_key: str,
                 downstream_deps: List = None,  # List of table keys
                 ) -> None:
        self.table_key = table_key
        # downstream tables, each keyed the same way as table_key
        self.downstream_deps = downstream_deps or []
        super().__init__()

    def _create_rel_iterator(self) -> Iterator[GraphRelationship]:
        """Yield one HAS_DOWNSTREAM/HAS_UPSTREAM relation per downstream table."""
        yield from (
            GraphRelationship(
                start_key=self.table_key,
                start_label=TableMetadata.TABLE_NODE_LABEL,
                end_label=TableMetadata.TABLE_NODE_LABEL,
                end_key=dep_key,
                type=TableLineage.ORIGIN_DEPENDENCY_RELATION_TYPE,
                reverse_type=TableLineage.DEPENDENCY_ORIGIN_RELATION_TYPE,
                attributes={}
            )
            for dep_key in self.downstream_deps
        )

    def _get_atlas_process_key(self) -> str:
        """The Atlas process key for a table lineage is the table key itself."""
        return self.table_key

    def _get_atlas_entity_type(self) -> str:
        """Endpoints of this lineage are table entities."""
        return AtlasTableTypes.table

    def _create_record_iterator(self) -> Iterator[RDSModel]:
        """Yield one RDS lineage row per downstream table."""
        yield from (
            RDSTableLineage(
                table_source_rk=self.table_key,
                table_target_rk=dep_key
            )
            for dep_key in self.downstream_deps
        )

    def __repr__(self) -> str:
        return f'TableLineage({self.table_key!r})'
class ColumnLineage(BaseLineage):
    """
    Column Lineage Model. It won't create nodes but create upstream/downstream rels.
    """
    def __init__(self,
                 column_key: str,
                 downstream_deps: List = None,  # List of column keys
                 ) -> None:
        self.column_key = column_key
        # downstream columns, each keyed the same way as column_key
        self.downstream_deps = downstream_deps or []
        super().__init__()

    def _create_rel_iterator(self) -> Iterator[GraphRelationship]:
        """Yield one HAS_DOWNSTREAM/HAS_UPSTREAM relation per downstream column."""
        yield from (
            GraphRelationship(
                start_key=self.column_key,
                start_label=ColumnMetadata.COLUMN_NODE_LABEL,
                end_label=ColumnMetadata.COLUMN_NODE_LABEL,
                end_key=dep_key,
                type=ColumnLineage.ORIGIN_DEPENDENCY_RELATION_TYPE,
                reverse_type=ColumnLineage.DEPENDENCY_ORIGIN_RELATION_TYPE,
                attributes={}
            )
            for dep_key in self.downstream_deps
        )

    def _get_atlas_process_key(self) -> str:
        """The Atlas process key for a column lineage is the column key itself."""
        return self.column_key

    def _get_atlas_entity_type(self) -> str:
        """Endpoints of this lineage are column entities."""
        return AtlasTableTypes.column

    def _create_record_iterator(self) -> Iterator[RDSModel]:
        """Yield one RDS lineage row per downstream column."""
        yield from (
            RDSColumnLineage(
                column_source_rk=self.column_key,
                column_target_rk=dep_key
            )
            for dep_key in self.downstream_deps
        )

    def __repr__(self) -> str:
        return f'ColumnLineage({self.column_key!r})'
|
14,103 | 0de66eefe392bc00f394fad8ff856a385bd68a65 | import argparse
from bs4 import BeautifulSoup
from bs4.diagnose import diagnose
from bs4.element import Tag
from io import StringIO
import requests
import sys
from xml.etree import ElementTree
EDM_NAMESPACE = "http://docs.oasis-open.org/odata/ns/edm"
EDMX_NAMESPACE = "http://docs.oasis-open.org/odata/ns/edmx"
EDM_TAGS = ['Action', 'Annotation', 'Collection', 'ComplexType', 'EntityContainer', 'EntityType', 'EnumType', 'Key',
'Member', 'NavigationProperty', 'Parameter', 'Property', 'PropertyRef', 'PropertyValue', 'Record',
'Schema', 'Singleton', 'Term', 'TypeDefinition']
EDMX_TAGS = ['DataServices', 'Edmx', 'Include', 'Reference']
default_doc = """<?xml version="1.0" encoding="UTF-8"?>
<!-- Copyright 2014-2016 Distributed Management Task Force, Inc. (DMTF). All rights reserved.-->
<edmx:Edmx xmlns:edmx="http://docs.oasis-open.org/odata/ns/edmx" Version="4.0">
<edmx:Reference Uri="/redfish/v1/Schemas/ServiceRoot_v1.xml">
<edmx:Include Namespace="ServiceRoot"/>
<edmx:Include Namespace="ServiceRoot.v1_0_0"/>
<edmx:Include Namespace="ServiceRoot.v1_0_2"/>
</edmx:Reference>
</edmx:Edmx>
"""
def exercise_soup(soup):
    """
    Sandbox function to test out walking and searching XML document
    :param soup: BS4 soup instance to navigate
    :return:
    """
    if soup.is_xml:
        # XML parse: tag names keep their case, so attribute access works
        tag = soup.Edmx
        if tag is not None:
            print('Found tag named "Edmx"')
            print('tag name = {}'.format(tag.name))
            print('tag namespace = {}'.format(tag.namespace))
            print('tag prefix = {}'.format(tag.prefix))
        else:
            print('Did not find tag named "Edmx"')
    else:
        # HTML parse: names are lower-cased and the prefix folded into the
        # name, so search for the literal 'edmx:edmx'
        tag = soup.find('edmx:edmx')
        if tag is not None:
            print('Found tag named "edmx:edmx"')
            print('tag name = {}'.format(tag.name))
            print('tag namespace = {}'.format(tag.namespace))
            print('tag prefix = {}'.format(tag.prefix))
        else:
            print('Did not find tag named "edmx:edmx"')
        # HTML parsers wrap the document in html/body; report the first
        # child of body when it is a real Tag
        html = soup.html
        if html is not None:
            body = html.body
            if body is not None:
                if len(body.contents) > 0:
                    if isinstance(body.contents[0], Tag):
                        print('Found tag named "{}"'.format(body.contents[0].name))
                        print('len(html.body.contents) = {}'.format(len(body.contents)))
                        print('html.body.contents[0].name = {}'.format(body.contents[0].name))
                        print('html.body.contents[0].namespace = {}'.format(body.contents[0].namespace))
                        print('html.body.contents[0].prefix = {}'.format(body.contents[0].prefix))
    # look for any tags in the first 10 children of the document
    print()
    print('len(soup.contents) = {}'.format(len(soup.contents)))
    for n in range(10):
        if len(soup.contents) > n:
            print('child element [{}] found, type = {}'.format(n, type(soup.contents[n])))
            if isinstance(soup.contents[n], Tag):
                print(' soup.contents[{}].name = {}'.format(n, soup.contents[n].name))
                print(' soup.contents[{}].namespace = {}'.format(n, soup.contents[n].namespace))
                print(' soup.contents[{}].prefix = {}'.format(n, soup.contents[n].prefix))
def run_bs4_diagnose(doc):
    """Run the BeautifulSoup4 diagnose() helper over *doc* and dump its report."""
    banner = 'Option "--diagnose" option specified; running document through bs4 diagnose() function'
    # trailing blank line between our banner and the diagnose output
    print(banner, end='\n\n')
    diagnose(doc)
def bs4_parse(doc, bs4_parser):
    """Parse *doc* with BeautifulSoup4 using *bs4_parser* and pretty-print it."""
    try:
        print('Parsing document with BeautifulSoup4 and parser "{}"'.format(bs4_parser))
        soup = BeautifulSoup(doc, bs4_parser)
        print()
        print('Parsed document (BeautifulSoup4 {}):'.format(bs4_parser))
        print()
        print(soup.prettify())
        # report whether BS4 decided the document is XML
        print()
        print('is_xml = {}'.format(soup.is_xml))
        # exercise_soup(soup)
    except Exception as parse_err:
        print('Error parsing document with BeautifulSoup4, error: {}'.format(parse_err))
def et_parse(doc):
    """Parse the file-like *doc* with xml.etree.ElementTree and echo it to stdout."""
    try:
        print('Parsing document with ElementTree')
        tree = ElementTree.parse(doc)
        print()
        print('Parsed document (ElementTree):')
        print()
        # round-trip the tree back out as text, declaration included
        tree.write(sys.stdout, encoding='unicode', xml_declaration=True, method='xml')
        print()
    except ElementTree.ParseError as parse_err:
        print('Error parsing document with ElementTree, error: {}'.format(parse_err))
def bad_edm_tags(tag):
    """True for tags in the edm namespace whose name is not a known edm tag."""
    in_edm_ns = tag.namespace == EDM_NAMESPACE
    return in_edm_ns and tag.name not in EDM_TAGS


def bad_edmx_tags(tag):
    """True for tags in the edmx namespace whose name is not a known edmx tag."""
    in_edmx_ns = tag.namespace == EDMX_NAMESPACE
    return in_edmx_ns and tag.name not in EDMX_TAGS


def other_ns_tags(tag):
    """True for tags that sit outside both the edm and edmx namespaces."""
    return tag.namespace not in (EDM_NAMESPACE, EDMX_NAMESPACE)
def _print_tag(tag):
    """Print a tag as [prefix:]name with its namespace."""
    if tag.prefix is None:
        print('{} (ns={})'.format(tag.name, tag.namespace))
    else:
        print('{}:{} (ns={})'.format(tag.prefix, tag.name, tag.namespace))


def check_edmx(doc, bs4_parser):
    """Parse *doc* with BS4 and report tags that are not valid edm/edmx tags.

    :param doc: file-like object containing the XML document
    :param bs4_parser: name of the BS4 parser to use (typically 'xml')
    """
    try:
        soup = BeautifulSoup(doc, bs4_parser)
        # each (header, predicate) pair drives one report section; this
        # replaces three copies of the same header/find_all/print loop
        sections = [
            ('Bad edm tags:', bad_edm_tags),
            ('Bad edmx tags:', bad_edmx_tags),
            ('Tags not in edm or edmx namespaces:', other_ns_tags),
        ]
        for header, predicate in sections:
            print(header)
            for tag in soup.find_all(predicate):
                _print_tag(tag)
            print()
    except Exception as e:
        print('Error parsing document with BeautifulSoup4, error: {}'.format(e))
def main():
    """Entry point: pick a parser via CLI flags and run it over a document.

    The document comes from --document (local file), --url (HTTP GET), or a
    built-in sample edmx snippet when neither is given.
    """
    # For BeautifulSoup4:
    #   XML parsers: xml, lxml-xml
    #   HTML parsers: html.parser, lxml, html5lib
    # For ElementTree:
    #   Uses a default XML parser
    valid_parsers = ['html.parser', 'lxml', 'lxml-xml', 'xml', 'html5lib']
    # Parse args
    arg_parser = argparse.ArgumentParser(description='Tool to test various parsers')
    # exactly one parsing mode must be selected
    group1 = arg_parser.add_mutually_exclusive_group(required=True)
    group1.add_argument('--diagnose', action='store_true',
                        help='dump the results of the beautiful4 diagnose() function')
    group1.add_argument('--bs4', help='parse with specified BeautifulSoup4 parser; list of valid parsers: {}'
                        .format(valid_parsers))
    group1.add_argument('--etree', action='store_true', help='parse with ElementTree parser')
    group1.add_argument('--edmx', action='store_true', help='use BS4 xml parser and check for valid edm/edmx tags')
    # the document source is optional; at most one of file/url may be given
    group2 = arg_parser.add_mutually_exclusive_group()
    group2.add_argument('-d', '--document', help='file name of document to parse')
    group2.add_argument('-u', '--url', help='URL of document to parse')
    args = arg_parser.parse_args()
    bs4_diagnose = args.diagnose
    doc_file = args.document
    bs4_parser = args.bs4
    use_etree = args.etree
    edmx_check = args.edmx
    url = args.url
    # Get the doc to parse as a file object
    doc = None
    if doc_file is not None:
        # TODO: add exception handling
        # NOTE(review): this handle is never closed; consider a with-block
        doc = open(doc_file)
    elif url is not None:
        # TODO: add exception handling and better response checking
        # NOTE(review): verify=False disables TLS certificate verification
        r = requests.get(url, verify=False)
        if r.status_code == requests.codes.ok:
            doc = StringIO(r.text)
        else:
            print('Request to get doc at URL {} did not return expected OK status code; status returned: {}'
                  .format(url, r.status_code))
            exit(1)
    else:
        # fall back to the built-in sample edmx document
        doc = StringIO(default_doc)
    # Do the parsing
    if edmx_check:
        check_edmx(doc, 'xml')
    if bs4_diagnose:
        run_bs4_diagnose(doc)
    elif bs4_parser is not None:
        # Parse with BeautifulSoup4
        bs4_parse(doc, bs4_parser)
    elif use_etree:
        # Parse with ElementTree
        et_parse(doc)
if __name__ == "__main__":
main()
|
14,104 | 7cd7e35cf5e6ccbe04013365b889afd24d886351 | import pandas as pd
import math
from Record import Record
class Iris:
    """Summary statistics over the iris dataset loaded from iris.csv."""

    # Maps the public attribute names accepted by every method to the name
    # of the Record getter used to read that attribute. This replaces the
    # identical 4-way if/elif ladder that was duplicated in every method.
    _GETTERS = {
        "Sepal Length": "getSepalLength",
        "Sepal Width": "getSepalWidth",
        "Petal Length": "getPedalLength",
        "Petal Width": "getPedalWidth",
    }

    def __init__(self):
        columns = ["SepalLength", "SepalWidth", "PedalLength", "PedalWidth", "Classification"]
        self._data = pd.read_csv("iris.csv", names=columns)
        self._records = self.getRecordList()

    def getRecordList(self):
        """Convert every DataFrame row into a Record object."""
        records = []
        for index, row in self._data.iterrows():
            record = Record(row.SepalLength, row.SepalWidth, row.PedalLength, row.PedalWidth, row.Classification)
            records.append(record)
        return records

    def _values(self, attr):
        """Collect the values of *attr* from all records.

        Returns None (after printing a message) for an unknown attribute
        name, mirroring the original "Invalid Attribute Name" handling.
        """
        getter_name = self._GETTERS.get(attr)
        if getter_name is None:
            print("Invalid Attribute Name")
            return None
        return [getattr(record, getter_name)() for record in self._records]

    def calculate_average(self, attr):
        """Mean of *attr* rounded to 2 decimals; 0 for an unknown attribute."""
        values = self._values(attr)
        if not values:
            return 0
        return round(sum(values) / len(values), 2)

    def calculate_median(self, attr):
        """Median of *attr*; 0 for an unknown attribute.

        BUG FIX: the original indexed sorted[size//2 + 1], one past the
        middle. Use the middle element for an odd count and the mean of the
        two middle elements for an even count.
        """
        values = self._values(attr)
        if not values:
            return 0
        values.sort()
        mid = len(values) // 2
        if len(values) % 2 == 1:
            return values[mid]
        return (values[mid - 1] + values[mid]) / 2

    def _percentile(self, attr, fraction):
        """Nearest-rank percentile of *attr*; 0 for an unknown attribute."""
        values = self._values(attr)
        if not values:
            return 0
        values.sort()
        # nearest-rank: the ceil(fraction*n)-th smallest (1-based), clamped
        index = max(0, math.ceil(fraction * len(values)) - 1)
        return values[index]

    def calculate_25(self, attr):
        """First quartile (25th percentile) of *attr*.

        BUG FIX: the original indexed sorted[size//4 + 1], one past the
        nearest-rank position (and out of range for very small lists).
        """
        return self._percentile(attr, 0.25)

    def calculate_75(self, attr):
        """Third quartile (75th percentile) of *attr* (same fix as calculate_25)."""
        return self._percentile(attr, 0.75)

    def calculate_variance(self, attr):
        """Sample variance (n-1 denominator) of *attr*, rounded to 2 decimals.

        BUG FIX: the original computed deviations from the mean *after* it
        had been rounded to 2 decimals; use the exact mean instead.
        """
        values = self._values(attr)
        if not values or len(values) < 2:
            return 0
        mean = sum(values) / len(values)
        squared = [(v - mean) ** 2 for v in values]
        return round(sum(squared) / (len(squared) - 1), 2)

    def get_max_range(self, attr):
        """Largest value of *attr*; 0 for an unknown attribute."""
        values = self._values(attr)
        if not values:
            return 0
        return max(values)

    def get_min_range(self, attr):
        """Smallest value of *attr*; 0 for an unknown attribute."""
        values = self._values(attr)
        if not values:
            return 0
        return min(values)

    def value_in_range(self, lower, upper, attr):
        """All values of *attr* with lower <= value <= upper; 0 if attr unknown."""
        values = self._values(attr)
        if values is None:
            return 0
        return [v for v in values if lower <= v <= upper]
14,105 | f1580afbfb45f8c1ba8c00ddec75b03e007f1a12 | import pulsar, re
client = pulsar.Client('pulsar://127.0.0.1:6650')
consumer = client.subscribe(topic=['persistent://public/default/check-click', 'persistent://public/default/check-impression'], subscription_name='ew')
# Consume messages forever, acknowledging each one after it is handled.
while True:
    msg = consumer.receive()
    try:
        print("Received message '{}' id='{}'".format(msg.value(), msg.message_id()))
        # Acknowledge successful processing of the message
        consumer.acknowledge(msg)
    except Exception:
        # BUG FIX: the bare `except:` here also swallowed KeyboardInterrupt
        # and SystemExit, making the consumer impossible to stop cleanly.
        # Message failed to be processed; ask the broker to redeliver it.
        consumer.negative_acknowledge(msg)
client.close()  # NOTE(review): unreachable - the loop above never breaks
14,106 | 0c6568b3f20c73727993ff62de08f0999544843f | # 12.23
# TLE
class Solution(object):
    """First attempt at LeetCode 752 (Open the Lock) via a DP sweep.

    Labelled TLE by the original author. NOTE(review): a single
    forward-then-backward sweep over the four wheel digits is not a correct
    shortest-path relaxation for every input; prefer the BFS Solution
    defined below this one.
    """
    def openLock(self, deadends, target):
        """
        :type deadends: List[str]
        :type target: str
        :rtype: int
        """
        # record deadends as plain ints: "0201" -> 201, matching how states
        # are encoded as 1000*i + 100*j + 10*m + n below
        dead = set()
        for d in deadends:
            dead.add(int(d))
        # dp[i][j][m][n] = tentative move count for wheel state (i, j, m, n)
        dp = [[[[10000 for col in range(10)] for row in range(10)] for x in range(10)] for y in range(10)]
        dp[0][0][0][0] = 0
        # BUG FIX (Python 3): `range(10) + range(9, 0, -1)` was Python 2
        # list arithmetic and raises TypeError on range objects; build the
        # forward-then-backward sweep order as an explicit list.
        sweep = list(range(10)) + list(range(9, 0, -1))
        for i in sweep:
            for j in sweep:
                for m in sweep:
                    for n in sweep:
                        if 1000*i + 100*j + 10*m + n in dead:
                            dp[i][j][m][n] = -1
                        else:
                            # relax from the fourth wheel's two neighbours
                            if not 1000*i + 100*j + 10*m + (10 + n - 1) % 10 in dead:
                                dp[i][j][m][n] = min(dp[i][j][m][n], dp[i][j][m][(10 + n - 1) % 10] + 1)
                            if not 1000*i + 100*j + 10*m + (n + 1) % 10 in dead:
                                dp[i][j][m][n] = min(dp[i][j][m][n], dp[i][j][m][(n + 1) % 10] + 1)
                            # third wheel
                            if not 1000*i + 100*j + 10*((10 + m - 1) % 10) + n in dead:
                                dp[i][j][m][n] = min(dp[i][j][m][n], dp[i][j][(10 + m - 1) % 10][n] + 1)
                            if not 1000*i + 100*j + 10*((m + 1) % 10) + n in dead:
                                dp[i][j][m][n] = min(dp[i][j][m][n], dp[i][j][(m + 1) % 10][n] + 1)
                            # second wheel
                            if not 1000*i + 100*((10 + j - 1) % 10) + 10*m + n in dead:
                                dp[i][j][m][n] = min(dp[i][j][m][n], dp[i][(10 + j - 1) % 10][m][n] + 1)
                            if not 1000*i + 100*((j + 1) % 10) + 10*m + n in dead:
                                dp[i][j][m][n] = min(dp[i][j][m][n], dp[i][(j + 1) % 10][m][n] + 1)
                            # first wheel
                            if not 1000*((10 + i - 1) % 10) + 100*j + 10*m + n in dead:
                                dp[i][j][m][n] = min(dp[i][j][m][n], dp[(10 + i - 1) % 10][j][m][n] + 1)
                            if not 1000*((i + 1) % 10) + 100*j + 10*m + n in dead:
                                dp[i][j][m][n] = min(dp[i][j][m][n], dp[(i + 1) % 10][j][m][n] + 1)
                        if str(i) + str(j) + str(m) + str(n) == target:
                            if (dp[i][j][m][n] == -1 or dp[i][j][m][n] > 9999):
                                return -1
                            return dp[i][j][m][n]
class Solution:
    """BFS solution to LeetCode 752 (Open the Lock)."""
    def openLock(self, deadends, target):
        """Minimum wheel turns from '0000' to *target* avoiding *deadends*.

        :param deadends: iterable of forbidden 4-digit states
        :param target: 4-digit target state string
        :return: minimum number of single-wheel turns, or -1 if unreachable
        """
        deadends = set(deadends)
        if '0000' in deadends or target in deadends:
            return -1
        if target == '0000':
            # already at the target: zero turns. The BFS below can only
            # report states discovered *after* at least one move.
            return 0
        # BUG FIX: `set('0000')` builds {'0'} (a set of characters), so the
        # start state was never actually marked visited; use a set literal.
        visited = {'0000'}
        nLevel = 0
        level = ['0000']
        while level:
            nLevel += 1
            newLevel = []
            for curr in level:  # every state on the current frontier
                for i in range(4):  # every wheel position
                    for j in [(int(curr[i]) - 1) % 10, (int(curr[i]) + 1) % 10]:  # turn -1 / +1
                        candidate = curr[:i] + str(j) + curr[i + 1:]
                        if candidate not in visited and candidate not in deadends:
                            newLevel.append(candidate)
                            visited.add(candidate)
                            if candidate == target:
                                return nLevel
            level = newLevel
        return -1
14,107 | be689e386cc51ba6d94e0e24aa540b506e312373 | from collections import Counter
import numpy as np
import matplotlib.pyplot as plt
from core.tweet_reader import TweetReader
import statistics
import operator
# Load one day of tweets; column 1 of the pipe-separated file holds the text.
reader = TweetReader('data/need/full-day-need/09_02.csv', text_column=1, separator='|', encoding='utf8')
# reader.add_file_content_to_corpus('data/need/full-day-need/08_24.csv', text_column=1, separator='|', encoding='utf8')
# reader.add_file_content_to_corpus('data/need/full-day-need/08_25.csv', text_column=1, separator='|', encoding='utf8')
# reader.add_file_content_to_corpus('data/need/full-day-need/08_26.csv', text_column=1, separator='|', encoding='utf8')
# reader.add_file_content_to_corpus('data/need/full-day-need/08_27.csv', text_column=1, separator='|', encoding='utf8')
# reader.add_file_content_to_corpus('data/need/full-day-need/08_28.csv', text_column=1, separator='|', encoding='utf8')
# reader.add_file_content_to_corpus('data/need/full-day-need/08_29.csv', text_column=1, separator='|', encoding='utf8')
# reader.add_file_content_to_corpus('data/need/full-day-need/08_30.csv', text_column=1, separator='|', encoding='utf8')
# reader.add_file_content_to_corpus('data/need/full-day-need/08_31.csv', text_column=1, separator='|', encoding='utf8')
# reader.add_file_content_to_corpus('data/need/full-day-need/09_01.csv', text_column=1, separator='|', encoding='utf8')
# reader.add_file_content_to_corpus('data/need/full-day-need/09_02.csv', text_column=1, separator='|', encoding='utf8')
# word frequencies after stop-word removal, ascending order
wfreq = reader.extract_words_frequency(num_words=None, min_threshold=None, stop_word_file='input/harvey_stopwords.txt', ordered='asc')
# need-related vocabulary used to filter the day's word frequencies
needs = TweetReader('/home/long/TTU-SOURCES/harvey-need/data/daily-need/needs.csv', text_column=0, separator='|', encoding='utf8')
need_corpus = needs.get_corpus()
words_names = []
words_count = []
need_wfreq = dict()
frequencies = []
total_frequency = 0
# keep only words that appear in the need corpus
for (word, freq) in wfreq:
    if word not in need_corpus:
        continue
    need_wfreq[word] = freq
    total_frequency += freq
    frequencies.append(freq)
median_freq = statistics.median(frequencies)
## remove low frequency items (strictly below/at the median frequency)
final_wfreq = dict()
for word, freq in need_wfreq.items():
    if freq > median_freq:
        final_wfreq[word] = freq
# sort ascending by frequency for the horizontal bar chart
sorted_wfreq = sorted(final_wfreq.items(), key=operator.itemgetter(1))
for word, freq in sorted_wfreq:
    words_names.append(word)
    words_count.append(freq)
print(final_wfreq)
show_plot = True
if show_plot == True:
    # horizontal bar chart of the surviving word frequencies
    fig, ax = plt.subplots()
    width = 0.56  # the width of the bars
    ind = np.arange(len(words_count))  # the x locations for the groups
    ax.barh(ind, words_count, width, color="blue")
    ax.set_yticks(ind+width/2)
    ax.set_yticklabels(words_names, minor=False)
    plt.title('Word Frequency')
    plt.xlabel('Frequencies')
    plt.ylabel('Words')
    # annotate each bar with its count
    for i, v in enumerate(words_count):
        ax.text(v + 0.2, i - .15, str(v), color='blue', fontweight='bold')
    plt.show()
|
14,108 | ac7a0c2aa6d6cf68fbaaf205a3908a28fce7d87c | import random, math
def clamp_value(val):
    """Clamp a single colour channel into the representable range [0, 0.999]."""
    if val < 0:
        return 0
    if val > 1.0:
        return 0.999
    return val


def clamp_color(rgb):
    """Clamp each channel of *rgb* into [0, 0.999] and return them as a list.

    BUG FIX: the original was ``map(rgb, clamp_value)`` - the arguments were
    swapped, so iterating the result raised TypeError (a list is not
    callable). It also produced a lazy ``map`` object under Python 3;
    return a concrete list instead.
    """
    return [clamp_value(channel) for channel in rgb]
def clamp_circular(val):
    """Wrap *val* onto the unit interval [0, 1), treating it as circular."""
    wrapped = val % 1.0
    return wrapped
def sine(x):
    """Fast parabolic approximation of sin(x), x in radians."""
    # wrap x into (-pi, pi]
    x %= 6.28318531
    if x > 3.14159265:
        x -= 6.28318531
    # same-sign parabola on each half-period
    linear = 1.27323954 * x
    quadratic = .405284735 * x * x
    return linear + quadratic if x < 0 else linear - quadratic
def circular_mean(angles):
if len(angles) == 0:
return 0
elif len(angles) == 1:
return angles[0]
else:
angles_radians = [(p % 1.0) * math.pi * 2 for p in angles]
vectors = [[math.cos(a), math.sin(a)] for a in angles_radians]
vectors_t = list(zip(*vectors))
#print(vectors_t)
angle = math.atan2(sum(vectors_t[1]), sum(vectors_t[0]))
return (angle / (2 * math.pi)) % 1
def generate_distributed_values(l, thresh):
    """Draw *l* random values in [0, 1) whose circular neighbour gaps are all >= thresh.

    Rejection-samples whole candidate lists until none of the wrap-around
    adjacent differences falls below the threshold.
    """
    values = []
    rejected = [1, 1]
    while any(rejected):
        values = [random.random() for _ in range(l)]
        gaps = [values[(k + 1) % l] - values[k] for k in range(l)]
        rejected = [abs(gap) < thresh for gap in gaps]
    return values
def sine_phase(x):
    # Table-driven sine: map the fractional phase x (in turns) onto the
    # precomputed sine_table.
    # NOTE(review): sine_table and sine_range are defined elsewhere in this
    # module (not visible here) - presumably a list of sine samples and its
    # length; confirm before relying on resolution/accuracy.
    # print x, int((x % 1.0) * sine_range), sine_table[int((x % 1.0) * sine_range)]
    return sine_table[int((x % 1.0) * sine_range)]
    # earlier (unreachable) experiments kept for reference:
    # return 4 * abs(x % 1.0 - 0.5) - 1 # actually slow
    # x %= 1.0
    # if x > 0.5:
    # x -= 1.0
    # return 8 * x + 16 * x * x
    #
    # else:
    # return 8 * x - 16 * x * x
def sign(x):
    """Return +1/-1/0 for the sign of *x*; NaN propagates unchanged."""
    if x > 0:
        return 1
    if x < 0:
        return -1
    if x == 0:
        return 0
    # none of the comparisons held, so x is NaN - pass it through
    return x
14,109 | 3853c674ce21e697a26a79d7f5837b5c395451f1 | import sys
freq = {} # frequency of words in text
# read one line of text from stdin
line = input()
print(line.split())
# count each whitespace-separated word, printing the running count
for word in line.split():
    freq[word] = freq.get(word,0)+1
    print(freq[word])
words = freq.keys()
print(words)
# final report: one word:count pair per line
for w in words:
    print ("%s:%d" % (w,freq[w]))
# split is for taking words in place as a single one before any space
# getk |
14,110 | 84836e365d3e59372a3b85ed8a5e546206d4c9b3 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import smart_selects.db_fields
class Migration(migrations.Migration):
    """Auto-generated schema migration for the jobportal app.

    Alters the organization-type choice fields on Company/CompanyReg and
    rewires Student dept/prog as chained foreign keys (smart_selects).
    Generated code - do not hand-edit the operations.
    """
    dependencies = [
        ('jobportal', '0003_auto_20160313_0121'),
    ]
    operations = [
        migrations.AlterField(
            model_name='company',
            name='organization_type',
            field=models.CharField(default=b'PSU', max_length=20, verbose_name=b'Type of Organization', blank=True, choices=[(b'Private', b'Private'), (b'Government', b'Government'), (b'PSU', b'PSU'), (b'MNC(Indian Origin)', b'MNC(Indian Origin)'), (b'MNC(Foreign Origin)', b'MNC(Foreign Origin)'), (b'NGO', b'NGO'), (b'Other', b'Other')]),
        ),
        migrations.AlterField(
            model_name='companyreg',
            name='organization_type_reg',
            field=models.CharField(default=b'PSU', max_length=20, verbose_name=b'Type of Organization', blank=True, choices=[(b'Private', b'Private'), (b'Government', b'Government'), (b'PSU', b'PSU'), (b'MNC(Indian Origin)', b'MNC(Indian Origin)'), (b'MNC(Foreign Origin)', b'MNC(Foreign Origin)'), (b'NGO', b'NGO'), (b'Other', b'Other')]),
        ),
        migrations.AlterField(
            model_name='student',
            name='dept',
            field=smart_selects.db_fields.ChainedForeignKey(chained_model_field=b'year', to='jobportal.Department', chained_field=b'year'),
        ),
        migrations.AlterField(
            model_name='student',
            name='nationality',
            field=models.CharField(default=b'INDIAN', max_length=15, blank=True),
        ),
        migrations.AlterField(
            model_name='student',
            name='prog',
            field=smart_selects.db_fields.ChainedForeignKey(chained_model_field=b'dept', to='jobportal.Programme', chained_field=b'dept'),
        ),
        migrations.AlterField(
            model_name='student',
            name='year',
            field=models.ForeignKey(to='jobportal.Year'),
        ),
    ]
|
14,111 | 015afce36c219ff778bce3af2f866c4b0b39731f | import numpy as np
from scipy.integrate import ode
from scipy.optimize import curve_fit
from lmfit import minimize, Parameters
import json
from tqdm import tqdm
from bfmplot import pl
from bfmplot import brewer_qualitative, simple_cycler, markers
import bfmplot as bp
#brewer_qualitative[6] = brewer_qualitative[1]
# Marker/line color cycle built from the brewer qualitative palette.
colors = simple_cycler(brewer_qualitative)
class REPL(dict):
    """Lookup table that falls back to the key itself.

    Wraps a mapping *items*; ``repl[k]`` returns ``items[k]`` when the key
    is present and returns ``k`` unchanged otherwise (used below to map
    internal province names to display titles).
    """

    def __init__(self, items):
        self.items = items

    def __getitem__(self, key):
        if key in self.items:
            return self.items[key]
        return key
# Load per-province confirmed-case time series (with population metadata).
with open('../data/all_confirmed_cases_with_population.json','r') as f:
    data = json.load(f)
# Keep provinces with at least 20 total cases and at least 8 nonzero
# data points within the first 12 days.
tuplelist = [ (p, d) for p, d in data.items()\
              if max(d['cases']) >= 20\
              and len(np.where(np.logical_and(np.array(d['times'])<=12,
                                              np.array(d['cases'])>0))[0])\
              >= 8
            ]
colors  # NOTE(review): bare expression statement, has no effect.
# Largest outbreaks first.
tuplelist = sorted([ t for t in tuplelist ],key=lambda x: -max(x[1]['cases']))
n_fits = len(tuplelist)
n_col = int(np.ceil(np.sqrt(n_fits)))
# Fixed 2x4 subplot grid (overrides the sqrt-based layout computed above).
n_row = 2
n_col = 4
fig, ax = pl.subplots(n_row,n_col,figsize=(8,3))
ax = ax.flatten()
titlemap = REPL({'mainland_china':'All w/o Hubei'})
letter = "abcdefg"
roman = [ "i", "ii", "iii", "iv", "v", "vi", "vii", "viii", "ix"]
i = -1
# Fit a power law Q_I(t) = B * t^mu to the provinces ranked 3..10 by
# outbreak size and plot data + fit on log-log axes.
for province, pdata in tqdm(tuplelist[2:10]):
    i += 1
    print(province)
    t = np.array(pdata['times'])
    cases = np.array(pdata['cases'])
    # Shift the time axis when the series starts at noon on Jan 22.
    if pdata['dates'][0] == "2020-01-22 12:00:00":
        t += 14/24
    else:
        dt = 0  # NOTE(review): dt is never read afterwards.
    if max(cases) <= 20:
        continue
    # Drop leading days with zero cases.
    i0 = np.where(cases>0)[0][0]
    t = t[i0:]
    cases = cases[i0:]
    # Fit only the first 12 days of data.
    i1 = np.where(t<=12)[0][-1]
    _t = t[:i1+1]
    _cases = cases[:i1+1]
    print(t, cases)
    if len(t) < 8:
        continue
    # Power-law model; initial guess mu=1.5, B=4.5.
    f = lambda x, mu, B: B*(x)**mu
    p,_ = curve_fit(f, _t, _cases, [1.5,4.5])
    print(p)
    # Log-spaced abscissa for a smooth fit curve.
    tt = np.logspace(np.log(t[0]), np.log(t[-1]), base=np.exp(1))
    pl.sca(ax[i])
    # Fitted exponent label.
    ax[i].text(0.7,0.2,
            "$\mu={0:4.2f}$".format(p[0]),
            transform=ax[i].transAxes,
            ha='right',
            va='bottom',
            bbox={'facecolor':'w','edgecolor':'w','pad':0},
            zorder = -1000,
            )
    # Province title (mapped through titlemap).
    ax[i].text(0.7,0.03,
            titlemap[province],
            transform=ax[i].transAxes,
            ha='right',
            va='bottom',
            bbox={'facecolor':'w','edgecolor':'w','pad':0}
            )
    pl.plot(t, cases,marker=markers[i+2],c=colors[i+2],label='data',mfc='None')
    pl.plot(tt, f(tt,*p),c='k',lw=1,label='$Q_I$')
    _c = i % (n_col)
    _r = i // (n_col)
    # Only label the bottom row / top-left panel.
    if _r == n_row-1:
        pl.xlabel('days since Jan. 20th')
    if _c == 0 and _r == 0:
        pl.ylabel('confirmed cases',)
        pl.gca().yaxis.set_label_coords(-0.3,-0.2)
    pl.xlim([1,30])
    pl.xscale('log')
    pl.yscale('log')
    ylim = ax[i].set_ylim([1,2e3])
    ylim = ax[i].get_ylim()
    # Vertical marker at day 12 (end of the fit window).
    ax[i].plot([12,12],ylim,':')
    # Panel numeral.
    ax[i].text(0.03,0.97,
            "{}".format(roman[i]),
            transform=ax[i].transAxes,
            ha='left',
            va='top',
            fontweight='bold',
            fontsize=10,
            bbox={'facecolor':'w','edgecolor':'w','pad':0}
            )
    if i == 0:
        ax[i].text(0.75,0.45,
                "Feb. 2nd".format(p[0]),
                transform=ax[i].transAxes,
                ha='center',
                va='bottom',
                fontsize=9,
                bbox={'facecolor':'w','edgecolor':'w','pad':0}
                )
    # Hide x tick labels everywhere but the bottom row.
    if _r < n_row-1:
        [ x.set_visible(False) for x in ax[i].xaxis.get_major_ticks() ]
    ax[i].set_yticks([1,10,100,1000])
    bp.strip_axis(pl.gca())
# Figure-level panel letter.
ax[0].text(-0.4,1.1,
        'C',
        transform=ax[0].transAxes,
        ha='left',
        va='top',
        fontweight='bold',
        fontsize=14,
        bbox={'facecolor':'w','edgecolor':'w','pad':0}
        )
pl.gcf().tight_layout()
pl.gcf().subplots_adjust(wspace=0.3,hspace=0.3)
pl.gcf().savefig("powerlaw_fit_figures/fit_powerlaw_500.png",dpi=300)
pl.show()
|
14,112 | 33df834116d7671605d001780a1784835a610f46 | #immutable data structures, can be unpacked
# Sequence unpacking: any iterable (here a list) can be unpacked into
# exactly as many names as it has elements.
inform = ['Hi', 4, 'yay']
v1, v2, v3 = inform
print(v1, v2, v3)
# A one-element tuple needs the trailing comma; (4) would just be the int 4.
one_elem_tuple = (4, ) #you need the comma
print(one_elem_tuple)
|
14,113 | 6890ea0859c49f215e7d8e2929333a1dbf9b8568 |
# Importando pacotes
# Import
import urllib.request
# From packages
from bs4 import BeautifulSoup as bf4
# Obtenção de dados do ZAP
def find_substring(substring, string):
    """Return the start indices of every occurrence of *substring* in *string*.

    Overlapping occurrences are counted: the scan resumes one character
    after each hit.
    """
    hits = []
    pos = string.find(substring)
    while pos != -1:
        hits.append(pos)
        pos = string.find(substring, pos + 1)
    return hits
#Url's de destino:
# Target URLs: ZAP Imóveis search results (apartments for sale in Rio).
# A request URL is assembled as principal + seletor + <page number> + final.
url_principal = 'https://www.zapimoveis.com.br/venda/apartamentos/rj+rio-de-janeiro/'
url_seletor = '#{"precomaximo":"2147483647","parametrosautosuggest":[{"Bairro":"","Zona":"","Cidade":"RIO%20DE%20JANEIRO","Agrupamento":"","Estado":"RJ"}],"pagina":'
url_final=',"ordem":"Valor","paginaOrigem":"ResultadoBusca","semente":"702111458","formato":"Lista"}'
def searchPrice(url_principal, url_seletor, url_final, pgs):
    """Scrape listing prices from the first *pgs* ZAP result pages.

    Args:
        url_principal: base search URL.
        url_seletor: URL fragment preceding the page number.
        url_final: URL fragment following the page number.
        pgs: number of result pages to fetch.

    Returns:
        List of price strings (12 characters sliced after each price tag).
    """
    # BUG FIX: the accumulator was reset (and returned) inside the page
    # loop, so only one page's prices ever survived.
    p = []
    for k in range(0, pgs):
        # BUG FIX: the page number was built from ``pgs`` (the total page
        # count), so the same page was requested on every iteration; use
        # the current 1-based page index instead.
        url_pag = str(k + 1)
        url = url_principal + url_seletor + url_pag + url_final
        req = urllib.request.Request(
            url,
            data=None,
            headers={
                'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.47 Safari/537.36'
            }
        )
        f = urllib.request.urlopen(req)
        content = str(bf4(f.read()))
        positionI = find_substring('<div class="simple-card__prices simple-card__listing-prices"><p class="simple-card__price js-price heading-regular heading-regular__bolder align-left">', content)
        for tag_start in positionI:
            # The price text begins 156 characters into the matched tag.
            inicio = tag_start + 156
            fim = inicio + 12
            p.append(content[inicio:fim])
    return p
# Fetch prices from the first 2 result pages and show them.
ans = searchPrice(url_principal,url_seletor,url_final,2)
print(ans)
|
14,114 | 1b07cfd76a583d3605cd448998c678205ad89e91 | from ef_common.take_screenshot import take_screenshot
from hamcrest import assert_that, equal_to
import settings
from android.pages.evc.login_page import LoginPage
from settings import UAT_Account
from teacher_common.teacher_common_web_page import TeacherCommonWebPage
from android.tests.abstract_base_android_testcase import AbstractBaseAndroidTestCase
class TestChats(AbstractBaseAndroidTestCase):
    """End-to-end check that a chat message sent by a student in an EVC
    classroom is visible to the teacher who joins the same room."""

    @take_screenshot
    def runTest(self):
        # Student enter class.
        student_name = UAT_Account.EFEC_BEG_IN_WHITE_LIST_Account['username']
        login_page = LoginPage(self.browser)
        debug_page = login_page.go_to_debug_option_page()
        class_room_entry_page = debug_page.go_to_class_room_entry_page()
        room_name = class_room_entry_page.get_class_room_name()
        duration_time = class_room_entry_page.get_class_room_duration()
        evc_class = class_room_entry_page.go_to_evc_room(student_name, room_name)
        # Student send message
        evc_class.select_chat_button()
        evc_class.send_text_through_chat_dialog(33)
        # Teacher enter class
        teacher_name = settings.TEACHER_NAME_1
        evc_room = TeacherCommonWebPage()
        evc_room.join_specific_class_room(teacher_name, room_name=room_name, duration=duration_time)
        evc_room.chat.send_chat_messages("T")
        # Verify the message sent by student.
        evc_room.chat.get_messages_by_user_name(student_name)
        # NOTE(review): expects the student's first message to be 'e',
        # which looks unrelated to send_text_through_chat_dialog(33) -
        # verify against the page-object implementation.
        assert_that(evc_room.chat.get_messages_by_user_name(student_name)[0], equal_to('e'))


if __name__ == "__main__":
    # Standalone run: drive the test without a test runner.
    test = TestChats()
    test.create_browser_driver()
    test.runTest()
    test.close_browser()
14,115 | 39144f720bee1595a756be7b4b6563f797eaef32 | import numpy as np
# One-off generation of a 10x10 random integer matrix, kept for reference:
#x = np.random.randint(100, size=(10,10))
#np.savetxt(delimiter=';',fname='ran.csv',fmt='%1.1i', X=x)
# Load the matrix back from disk.
y = np.loadtxt(delimiter=';',fname='ran.csv')
# Find the minimum and maximum values and the positions of every occurrence.
x_minim = np.min(y)
x_maxim = np.max(y)
tmp_min = np.where(y ==x_minim)
tmp_max = np.where(y ==x_maxim)
# Swap them in place (all occurrences, if an extreme value repeats).
y[tmp_min] = x_maxim
y[tmp_max] = x_minim
np.savetxt(delimiter=';',fname='ran_izm.csv',fmt='%1.1i', X=y)
|
14,116 | a3db0710c7c46c635e99c2afaca1d108a9153cde | from django.contrib.auth import get_user_model
from model_mommy import mommy
from pos_app.account.models import Doctor
from pos_app.category.models import Category, SubCategory
from pos_app.factory.models import Factory
from pos_app.payment.models import Payment, PaymentProduct
from pos_app.product.models import Embalase, UnitType, Product
User = get_user_model()


def create_user(username='username', email='email@user.com', password='password') -> User:
    """
    :param username: login name for the new account
    :param email: email address
    :param password: raw password
    :return: User object
    """
    return User.objects.create_user(username, email, password)


def create_category(**kwargs) -> Category:
    """
    :param kwargs: Category's fields
    :return: Category object
    """
    return mommy.make(Category, **kwargs)


def create_doctor(**kwargs) -> Doctor:
    """
    :param kwargs: Doctor's fields
    :return: Doctor object
    """
    return mommy.make(Doctor, **kwargs)


def create_embalase(**kwargs) -> Embalase:
    """
    :param kwargs: Embalase's fields
    :return: Embalase object
    """
    return mommy.make(Embalase, **kwargs)


def create_factory(**kwargs) -> Factory:
    """
    :param kwargs: Factory's fields
    :return: Factory object
    """
    return mommy.make(Factory, **kwargs)


def create_subcategory(**kwargs) -> SubCategory:
    """
    :param kwargs: SubCategory's fields
    :return: SubCategory object
    """
    return mommy.make(SubCategory, **kwargs)


def create_unit_type(**kwargs) -> UnitType:
    """
    :param kwargs: UnitType's fields
    :return: UnitType object
    """
    return mommy.make(UnitType, **kwargs)


def create_product(**kwargs) -> Product:
    """
    :param kwargs: Product's fields
    :return: Product object
    """
    return mommy.make(Product, **kwargs)


def create_payment(**kwargs) -> Payment:
    """
    :param kwargs: Payment's fields
    :return: Payment object
    """
    return mommy.make(Payment, **kwargs)


def create_payment_product(**kwargs) -> PaymentProduct:
    """
    :param kwargs: PaymentProduct's fields
    :return: PaymentProduct object
    """
    return mommy.make(PaymentProduct, **kwargs)
|
14,117 | 1d13316652cd60a909cb0bf5edc93fc0309f19fe |
# Print the contents of chicken.txt.  The context manager guarantees the
# file handle is closed even if printing raises (the previous open/close
# pair leaked the handle on error).
with open('chicken.txt', 'r', encoding='utf-8') as chicken_file:
    for line in chicken_file:
        print(line, end="")
|
14,118 | a5c85c08965d7fad4f7d37c2ac95de0287dd901c | #!/usr/bin/python
# coding: utf-8
import argparse
import sys
import Uploader
# Command-line interface; parameters may also be read from a file,
# one per line, via @"file name".
my_parser = argparse.ArgumentParser(
    description='XSVF file processor.',
    epilog='Parameters can be in a file, one per line, using @"file name"',
    fromfile_prefix_chars='@')
my_parser.add_argument(
    '-v', '--version',
    action='version',
    version='%(prog)s version 1.0.0')
my_parser.add_argument(
    '-d', '--debug',
    default=1,
    type=int,
    help='Debug verbosity'
         ' (type %(type)s, default=%(default)s)')
my_parser.add_argument(
    '-i', '--iterations',
    default=3,
    type=int,
    help='Number of iterations'
         ' (type %(type)s, default=%(default)s)')


def main():
    """Parse arguments, feed *iterations* dummy input files to the
    uploader, and exit with its error code."""
    Uploader.Uploader.add_arguments(my_parser)
    args = my_parser.parse_args()
    u = Uploader.Uploader(args)
    fileNameList = []
    for i in range(0, args.iterations):
        # NOTE(review): /dev/zero is opened in text mode and never closed;
        # presumably Uploader only needs file-like objects - verify.
        fileNameList.append(open("/dev/zero", mode='r'))
    u.upload_all_files(fileNameList)
    error_code = u.error_code
    sys.exit(error_code)


if __name__ == '__main__':
    main()
|
14,119 | 44523d19990a173cc236fc5d821f9d59146c7177 | class Solution(object):
def findRelativeRanks(self, nums):
"""
:type nums: List[int]
:rtype: List[str]
"""
numsStrs = []
for i in range(len(nums)):
numsStrs.append(str(nums[i])+'-'+str(i))
numsStrs = sorted(numsStrs, lambda x,y: cmp(int(x.split("-")[0]), int(y.split("-")[0])))[::-1]
ranks = [''] * len(nums)
for i in range(len(numsStrs)):
index = int(numsStrs[i].split("-")[1])
if i == 0:
ranks[index] = "Gold Medal"
elif i == 1:
ranks[index] = "Silver Medal"
elif i == 2:
ranks[index] = "Bronze Medal"
else:
ranks[index] = str(i+1)
return ranks
# Smoke tests; these run at import time.
assert Solution().findRelativeRanks([1, 2, 3, 4, 5]) == ["5", "4", "Bronze Medal", "Silver Medal", "Gold Medal"]
assert Solution().findRelativeRanks([10, 3, 8, 9, 4]) == ["Gold Medal", "5", "Bronze Medal", "Silver Medal", "4"]
def halil(start, end):
    """Return the list of primes in the inclusive range [start, end].

    Uses a sieve of Eratosthenes over [0, end].  BUG FIX: the previous
    version only started sieving at ``start``, so for start > 2 composite
    numbers whose smallest factor was below ``start`` (e.g. 10 for
    halil(10, 100)) were reported as prime.  The sieve must always run
    from 2 regardless of the requested lower bound.
    """
    if end < 2:
        return []
    start = max(start, 2)
    is_prime = [True] * (end + 1)
    is_prime[0] = is_prime[1] = False
    for p in range(2, end + 1):
        if is_prime[p]:
            # Multiples below p*p were already struck by smaller primes.
            for multiple in range(p * p, end + 1, p):
                is_prime[multiple] = False
    return [p for p in range(start, end + 1) if is_prime[p]]
print(halil(1, 100)) |
14,121 | c87ead78bd14ca7ec3d015fdaab4213591348bb9 | """ Contains ShoppingList class """
from google.appengine.ext import db
from google.appengine.api import memcache
from model.listitem import ListItem
from model.product import Product
import logging
import json
class ShoppingList(db.Model):
    """ Model class for ShoppingList """
    name = db.StringProperty()
    # JSON string holding the user-defined item ordering; each entry is a
    # dict with at least an 'id' key (see get_items).
    items = db.StringProperty()
    # memcache namespace used for cached serialized lists.
    NAMESPACE = 'ShoppingList'

    def to_dict(self):
        """ For JSON serialization """
        ret = dict([(p, unicode(getattr(self, p))) for p in self.properties()])
        ret['id'] = self.key().id_or_name()
        ret['items'] = self.items
        return ret

    def add_item(self, description, key, quantity):
        """ Add an item to the list.

        If an item for the same product barcode (or, without a product,
        the same description) already exists, its quantity is increased
        instead of creating a duplicate.  The memcache entry for this
        list is invalidated.

        :param description: item text; must be non-empty
        :param key: optional Product id used to link the item to a product
        :param quantity: amount to add
        :raises ValueError: if description is missing
        """
        if description is None or description == '':
            raise ValueError(" description not set")
        product = None
        if key is not None:
            product = Product.get_by_id(int(key))
        query = ListItem.all()
        query.ancestor(self)
        item = None
        if product is not None:
            # Match existing items by product barcode when a product is known.
            query.filter('product_barcode = ', product.barcode)
            item = query.get()
            logging.info('Product: ' + str(product))
            logging.info('Item: ' + str(item))
        else:
            query.filter('description = ', description)
            item = query.get()
        if item is None:
            item = ListItem(parent=self)
            item.description = description
            item.quantity = quantity
            if product:
                item.product_barcode = product.barcode
            item.put()
        else:
            item.quantity += quantity
            item.put()
        memcache.delete(str(self.key().id_or_name()), namespace=ShoppingList.NAMESPACE)
        return item

    def get_items(self):
        """ Get all items, ordered by the ranking stored in self.items.

        Items mentioned in the stored ranking come first, in ranked order;
        items not mentioned there are appended afterwards.
        """
        query = ListItem.all()
        query.ancestor(self)
        list_items = query.fetch(1000)
        ret = []
        if not self.items:
            return list_items
        else:
            ranked_items = json.loads(self.items)
            logging.info('Len: ' + str(len(ranked_items)))
            for ranked_item in ranked_items:
                logging.info('Item: ' + str(ranked_item['id']))
                for real_item in list_items:
                    if ranked_item['id'] == real_item.key().id_or_name():
                        ret.append(real_item)
            # Append items that do not appear in the stored ranking
            # (for/else: the else runs only when no ranked id matched).
            for real_item in list_items:
                for ranked_item in ranked_items:
                    if ranked_item['id'] == real_item.key().id_or_name():
                        break
                else:
                    ret.append(real_item)
            return ret

    def delete_item(self, item_id):
        """ Delete given item and invalidate the cached list. """
        item = ListItem.get_by_id(int(item_id), self)
        item.delete()
        memcache.delete(str(self.key().id_or_name()), namespace=ShoppingList.NAMESPACE)

    @staticmethod
    def create_list(user, list_name):
        """ Create a new list.

        :param user: datastore entity used as the list's ancestor
        :param list_name: unique (per user) non-empty list name
        :raises ValueError: on an empty or duplicate name
        """
        if list_name is None or list_name == "":
            raise ValueError("list_name must not be empty error")
        query = ShoppingList.all()
        query.ancestor(user)
        query.filter('name = ', list_name)
        count = query.count()
        if count > 0:
            raise ValueError("a list with the same name already exists: " + str(list_name))
        new_list = ShoppingList(parent=user)
        new_list.name = list_name
        new_list.put()
        return new_list
|
14,122 | ff99728a2f18831789a492aef5c3a598711874b1 | """
Merge files that are devided out by range number into a single file.
Author: Hayden Elza
Email: hayden.elza@gmail.com
Created: 2019-07-26
"""
import os
# Data Sources
# Paths: the working dir is the repo root (parent of the script's cwd).
wd = os.path.dirname(os.getcwd())
west = os.path.join(wd, 'data/edited/TownshipsWest/')
east = os.path.join(wd, 'data/edited/TownshipsEast/')
sources = [west,east]
output = os.path.join(wd, 'data/edited/township_descriptions.txt')
# Open output
with open(output, 'w') as outfile:
    # Iterate through sources
    for source in sources:
        # Walk directory
        for dir_name, subdirs, files in os.walk(source):
            print('Found directory: %s' % dir_name)
            # Read each line for each file and write to output file.
            # NOTE(review): os.walk yields files in arbitrary order, so the
            # concatenation order is filesystem-dependent - confirm this
            # is acceptable for downstream consumers.
            for file in files:
                print('\t%s' % file)
                with open(os.path.join(dir_name, file)) as infile:
                    for line in infile:
                        outfile.write(line)
|
14,123 | f090e0e880a100068d46815a45088a66bce1839d | import pygame
import os
########################################################################################
pygame.init()  # initialize pygame (required)

# Screen size
screen_width = 640  # width
screen_height = 480  # height
screen = pygame.display.set_mode((screen_width, screen_height))

# Window title
pygame.display.set_caption("pang pang")  # game name

# FPS clock
clock = pygame.time.Clock()
########################################################################################
# 1. Game setup (background, images, coordinates, speeds, fonts, ...)
current_path = os.path.dirname(__file__)  # directory containing this file
image_path = os.path.join(current_path, "images")

# Load the background image
background = pygame.image.load(os.path.join(image_path, "background.png"))

# Create the stage
stage = pygame.image.load(os.path.join(image_path, "stage.png"))
stage_size = stage.get_rect().size
stage_height = stage_size[1]  # stage height; the character stands on top of it

# Load the character
character = pygame.image.load(os.path.join(image_path, "character.png"))
character_size = character.get_rect().size  # image size
character_width = character_size[0]
character_height = character_size[1]
character_x_pos = (screen_width / 2) - (character_width / 2)  # horizontal center
character_y_pos = screen_height - stage_height - character_height  # on top of the stage

# Horizontal movement delta applied each frame
character_to_x = 0
# Movement speed
character_speed = 5

# Create the weapon
weapon = pygame.image.load(os.path.join(image_path, "weapon.png"))
weapon_size = weapon.get_rect().size  # image size
weapon_width = weapon_size[0]
# Several weapons can be in flight at once; each entry is [x, y]
weapons = []
# Weapon movement speed (pixels per frame, upwards)
weapon_speed = 10

# Event loop
running = True  # is the game running?
while running:
    dt = clock.tick(30)  # cap the frame rate at 30 FPS
    # 2. Event handling (keyboard, mouse, ...)
    for event in pygame.event.get():  # which event occurred?
        if event.type == pygame.QUIT:  # window close requested?
            running = False  # stop the game
        if event.type == pygame.KEYDOWN:  # key pressed
            if event.key == pygame.K_LEFT:
                character_to_x -= character_speed
            elif event.key == pygame.K_RIGHT:
                character_to_x += character_speed
            elif event.key == pygame.K_SPACE:
                # Fire: spawn a weapon centered above the character.
                weapon_x_pos = character_x_pos + (character_width / 2) - (weapon_width / 2)
                weapon_y_pos = character_y_pos
                weapons.append([weapon_x_pos, weapon_y_pos])
        if event.type == pygame.KEYUP:  # key released
            if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:
                character_to_x = 0
    # 3. Update the character position, clamped to the screen.
    character_x_pos += character_to_x
    if character_x_pos < 0:
        character_x_pos = 0
    elif character_x_pos > screen_width - character_width:
        character_x_pos = screen_width - character_width
    # Move weapons upwards
    weapons = [ [w[0], w[1] - weapon_speed] for w in weapons]
    # Remove weapons that reached the ceiling
    weapons = [ [w[0], w[1]] for w in weapons if w[1] > 0]
    # 4. Collision handling (not implemented yet)
    # 5. Drawing
    screen.blit(background, (0, 0))
    for weapon_x_pos, weapon_y_pos in weapons:
        screen.blit(weapon, (weapon_x_pos, weapon_y_pos))
    screen.blit(stage, (0, screen_height - stage_height))
    screen.blit(character, (character_x_pos, character_y_pos))
    pygame.display.update()  # redraw the frame (required)

# Quit pygame
pygame.quit()
|
14,124 | e418190dfcf9107c44d15c5aa2e3156e3be24cfe | from ecies.utils import generate_key
from ecies import encrypt, decrypt
import binascii
import coincurve
# Demo secp256k1 key pair (hex-encoded, compressed public key).
# Hard-coded for this example only - never ship real secrets in source.
pub = '0227ffac7d33231086df84e12f0856c0e985c18d3daa2c94c7abcbff9a6aa8b258'
priv = 'ee113297d1fb3c214722aadf59a3d94dff24264ffc5c34b78c903b36eb1aeca8'
def enclave_dec(priv, bytes_data):
    """Decrypt ECIES ciphertext with a hex-encoded secp256k1 private key.

    Args:
        priv: private key as a hex string.
        bytes_data: ciphertext bytes produced by ecies.encrypt/enclave_enc.

    Returns:
        The decrypted plaintext bytes (also printed, as before).
    """
    # Removed the large body of commented-out experimentation and the
    # unused public-key derivation; only the secret is needed here.
    coincurve_privk = coincurve.PrivateKey.from_hex(priv)
    sk_hex = coincurve_privk.secret  # raw 32-byte secret accepted by ecies
    decrypted = decrypt(sk_hex, bytes_data)
    print(decrypted)
    return decrypted
# print(sk_hex.hex())
# print(pk_hex.hex())
# print(pubhex)
def enclave_enc(pub, bytes_data):
    """Encrypt *bytes_data* with ECIES for a hex-encoded secp256k1 public key.

    Args:
        pub: recipient public key as a compressed hex string.
        bytes_data: plaintext bytes.

    Returns:
        The ciphertext bytes (also printed, as before).
    """
    # Removed the commented-out experimentation; ecies.encrypt accepts the
    # hex-encoded public key directly.
    encrypted = encrypt(pub, bytes_data)
    print(encrypted)
    return encrypted
# Round-trip demo: encrypt once, then decrypt a freshly encrypted payload.
enclave_enc(pub, b'this is data')
enclave_dec(priv, enclave_enc(pub, b'thi is data'))
|
14,125 | 860a86d141976a267867ddfc4195c2ab1f05b770 | """JWT Helper"""
import json
import time
from jose import jwt
def create_jwt(config, payload=None):
    """Build a channel-assertion JWT signed with RS256 for the LINE API.

    Args:
        config: dict with "AssertionKeyFile" (path to a JWK JSON file
            containing a "kid") and "ChannelId".
        payload: optional extra claims merged over the defaults.

    Returns:
        The encoded JWT string; expiry is 30 minutes from now.
    """
    # BUG FIX: avoid a mutable default argument; None means "no extras".
    if payload is None:
        payload = {}
    with open(config["AssertionKeyFile"], "r") as f:
        assertion_key = json.load(f)
    header = {
        "alg": "RS256",
        "typ": "JWT",
        "kid": assertion_key["kid"],
    }
    _payload = {
        "iss": config["ChannelId"],
        "sub": config["ChannelId"],
        "aud": "https://api.line.me/",
        "exp": int(time.time()) + 60 * 30,
    }
    # Caller-supplied claims win over the defaults above.
    _payload.update(payload)
    return jwt.encode(_payload,
                      assertion_key,
                      algorithm='RS256',
                      headers=header)
|
14,126 | 263ac8e7ede2eba6a293300ee40329b0a9cb161f | import socket
from crypto import CryptoBox
class Client(CryptoBox):
    """Thin TCP client with helpers for sending/receiving exact byte counts."""

    def __init__(self):
        CryptoBox.__init__(self)
        self.sock = None

    def connect(self, host, port, timeout=None):
        """Open a TCP connection; return True on success, False on failure."""
        try:
            socket.setdefaulttimeout(timeout)
            self.sock = socket.create_connection((host, port),
                                                 timeout)
            if self.sock is None:
                return False
            self.sock.settimeout(timeout)
            return True
        except socket.error:
            # Narrowed from a bare `except:` so that KeyboardInterrupt and
            # SystemExit are no longer swallowed.
            return False

    def sendall(self, buffer, nbytes=0):
        """Send *nbytes* (default: all) of *buffer*; return bytes sent."""
        nb = 0
        if nbytes == 0:
            nbytes = len(buffer)
        while nb < nbytes:
            nb += self.sock.send(buffer[nb:nbytes])
        return nb

    def recvall(self, nbytes):
        """Receive up to *nbytes* bytes, stopping early if the peer closes.

        BUG FIX: the buffer is now bytes (``socket.recv`` returns bytes on
        Python 3, so concatenating onto ``''`` raised TypeError), and an
        empty recv - the peer closing the connection - breaks the loop
        instead of spinning forever.
        """
        buffer = b''
        while len(buffer) < nbytes:
            chunk = self.sock.recv(1024)
            if not chunk:
                break
            buffer += chunk
        return buffer[:nbytes]

    def close(self):
        """Close the underlying socket."""
        self.sock.close()
|
14,127 | 6967426cd26d29b34f6cfa0872b2b3c8f52d5659 | """Common test tools."""
import asyncio
from unittest.mock import MagicMock, patch
from dsmr_parser.clients.protocol import DSMRProtocol
from dsmr_parser.clients.rfxtrx_protocol import RFXtrxDSMRProtocol
from dsmr_parser.obis_references import (
EQUIPMENT_IDENTIFIER,
EQUIPMENT_IDENTIFIER_GAS,
LUXEMBOURG_EQUIPMENT_IDENTIFIER,
P1_MESSAGE_TIMESTAMP,
Q3D_EQUIPMENT_IDENTIFIER,
)
from dsmr_parser.objects import CosemObject
import pytest
@pytest.fixture
async def dsmr_connection_fixture(hass):
    """Fixture that mocks serial connection."""
    transport = MagicMock(spec=asyncio.Transport)
    protocol = MagicMock(spec=DSMRProtocol)

    async def connection_factory(*args, **kwargs):
        """Return mocked out Asyncio classes."""
        return (transport, protocol)

    # Wrap in a MagicMock so tests can assert on the call arguments.
    connection_factory = MagicMock(wraps=connection_factory)
    with patch(
        "homeassistant.components.dsmr.sensor.create_dsmr_reader", connection_factory
    ), patch(
        "homeassistant.components.dsmr.sensor.create_tcp_dsmr_reader",
        connection_factory,
    ):
        yield (connection_factory, transport, protocol)


@pytest.fixture
async def rfxtrx_dsmr_connection_fixture(hass):
    """Fixture that mocks RFXtrx connection."""
    transport = MagicMock(spec=asyncio.Transport)
    protocol = MagicMock(spec=RFXtrxDSMRProtocol)

    async def connection_factory(*args, **kwargs):
        """Return mocked out Asyncio classes."""
        return (transport, protocol)

    connection_factory = MagicMock(wraps=connection_factory)
    with patch(
        "homeassistant.components.dsmr.sensor.create_rfxtrx_dsmr_reader",
        connection_factory,
    ), patch(
        "homeassistant.components.dsmr.sensor.create_rfxtrx_tcp_dsmr_reader",
        connection_factory,
    ):
        yield (connection_factory, transport, protocol)


@pytest.fixture
async def dsmr_connection_send_validate_fixture(hass):
    """Fixture that mocks serial connection."""
    transport = MagicMock(spec=asyncio.Transport)
    protocol = MagicMock(spec=DSMRProtocol)
    # Default telegram exposed by the mocked protocol.
    protocol.telegram = {
        EQUIPMENT_IDENTIFIER: CosemObject([{"value": "12345678", "unit": ""}]),
        EQUIPMENT_IDENTIFIER_GAS: CosemObject([{"value": "123456789", "unit": ""}]),
        P1_MESSAGE_TIMESTAMP: CosemObject([{"value": "12345678", "unit": ""}]),
    }

    async def connection_factory(*args, **kwargs):
        """Return mocked out Asyncio classes."""
        # args[1] selects the telegram variant ("5L"/"5S"/"Q3D").
        if args[1] == "5L":
            protocol.telegram = {
                LUXEMBOURG_EQUIPMENT_IDENTIFIER: CosemObject(
                    [{"value": "12345678", "unit": ""}]
                ),
                EQUIPMENT_IDENTIFIER_GAS: CosemObject(
                    [{"value": "123456789", "unit": ""}]
                ),
            }
        if args[1] == "5S":
            protocol.telegram = {
                P1_MESSAGE_TIMESTAMP: CosemObject([{"value": "12345678", "unit": ""}]),
            }
        if args[1] == "Q3D":
            protocol.telegram = {
                Q3D_EQUIPMENT_IDENTIFIER: CosemObject(
                    [{"value": "12345678", "unit": ""}]
                ),
            }
        return (transport, protocol)

    connection_factory = MagicMock(wraps=connection_factory)

    async def wait_closed():
        # Pick the telegram callback out of the recorded factory call; its
        # position differs between the TCP and serial reader signatures.
        if isinstance(connection_factory.call_args_list[0][0][2], str):
            # TCP
            telegram_callback = connection_factory.call_args_list[0][0][3]
        else:
            # Serial
            telegram_callback = connection_factory.call_args_list[0][0][2]
        telegram_callback(protocol.telegram)

    protocol.wait_closed = wait_closed
    with patch(
        "homeassistant.components.dsmr.config_flow.create_dsmr_reader",
        connection_factory,
    ), patch(
        "homeassistant.components.dsmr.config_flow.create_tcp_dsmr_reader",
        connection_factory,
    ):
        yield (connection_factory, transport, protocol)


@pytest.fixture
async def rfxtrx_dsmr_connection_send_validate_fixture(hass):
    """Fixture that mocks serial connection."""
    transport = MagicMock(spec=asyncio.Transport)
    protocol = MagicMock(spec=RFXtrxDSMRProtocol)
    protocol.telegram = {
        EQUIPMENT_IDENTIFIER: CosemObject([{"value": "12345678", "unit": ""}]),
        EQUIPMENT_IDENTIFIER_GAS: CosemObject([{"value": "123456789", "unit": ""}]),
        P1_MESSAGE_TIMESTAMP: CosemObject([{"value": "12345678", "unit": ""}]),
    }

    async def connection_factory(*args, **kwargs):
        return (transport, protocol)

    connection_factory = MagicMock(wraps=connection_factory)

    async def wait_closed():
        # Same TCP/serial callback-position handling as above.
        if isinstance(connection_factory.call_args_list[0][0][2], str):
            # TCP
            telegram_callback = connection_factory.call_args_list[0][0][3]
        else:
            # Serial
            telegram_callback = connection_factory.call_args_list[0][0][2]
        telegram_callback(protocol.telegram)

    protocol.wait_closed = wait_closed
    with patch(
        "homeassistant.components.dsmr.config_flow.create_rfxtrx_dsmr_reader",
        connection_factory,
    ), patch(
        "homeassistant.components.dsmr.config_flow.create_rfxtrx_tcp_dsmr_reader",
        connection_factory,
    ):
        yield (connection_factory, transport, protocol)
|
14,128 | fd839fba65c1f49d57ce7c956cf7c8980fdeb9d2 | #!/usr/bin/env python3
# -*- coding: u8 -*-
# File : config.py
# Author : Hai-Yong Jiang <haiyong.jiang1990@hotmail.com>
# Date : 26.01.2020
# Last Modified Date: 20.02.2020
# Last Modified By : Hai-Yong Jiang <haiyong.jiang1990@hotmail.com>
import yaml
from torch import optim
from torch import nn
from torch.optim.lr_scheduler import *
from torchvision import transforms
from nn import dataset_dict, method_dict, eval_dict
from libs.trainer import Trainer
import re
import logging
# General config
def load_config(path, default_path=None):
    ''' Loads config file.
    Args:
        path (str): path to config file
        default_path (str): optional fallback config used when the file
            does not declare an 'inherit_from' parent
    Returns:
        dict: merged configuration (file values win over inherited ones)
    '''
    # Load configuration from file itself
    with open(path, 'r') as f:
        cfg_special = yaml.load(f, yaml.FullLoader)
    # Check if we should inherit from a config
    inherit_from = cfg_special.get('inherit_from')
    # If yes, load this config first as default
    # If no, use the default_path
    if inherit_from is not None:
        # Recursive: inherited files may themselves inherit.
        cfg = load_config(inherit_from, default_path)
    elif default_path is not None:
        with open(default_path, 'r') as f:
            cfg = yaml.load(f, yaml.FullLoader)
    else:
        cfg = dict()
    # Include main configuration
    update_recursive(cfg, cfg_special)
    return cfg
def update_recursive(dict1, dict2):
    ''' Merge *dict2* into *dict1* in place, recursing into nested dicts.
    Args:
        dict1 (dict): dictionary receiving the updates
        dict2 (dict): dictionary whose entries take precedence
    '''
    for key, value in dict2.items():
        if key not in dict1:
            dict1[key] = dict()
        if not isinstance(value, dict):
            # Scalars (and lists) simply overwrite.
            dict1[key] = value
            continue
        if dict1[key] is None:
            # A nested dict replaces an explicit null in the base config.
            dict1[key] = dict()
        update_recursive(dict1[key], value)
# index network models
def get_model(cfg, bParallel=True, device="cuda"):
    ''' Returns the model instance.
    Args:
        cfg (dict): config dictionary
        bParallel (bool): use gpu parallel or not
        device (str): pytorch device
    '''
    # Look up the network constructor registered under cfg['method'].
    model = method_dict[cfg['method']](cfg)
    if bParallel:
        model = nn.DataParallel(model)
    return model.to(device)
# Trainer
def get_trainer(cfg, model, evaluator, device):
    ''' Returns a trainer instance.
    Args:
        cfg (dict): config dictionary
        model (nn.Module): the model which is used
        evaluator: evaluation helper (see get_evaluator)
        device (str): pytorch device
    '''
    return Trainer(cfg, model, evaluator, device)
# Evaluator
def get_evaluator(cfg):
    ''' Returns evaluator.
    Args:
        cfg (dict): config dictionary
    '''
    # Dispatch on the evaluation method registered in the config.
    return eval_dict[cfg['model']['eval_method']](cfg)
# index datasets
def get_dataset(mode, cfg):
    ''' Returns the dataset.
    Args:
        mode (enum) : train/val/test
        cfg (dict): config dictionary
    '''
    dataset_name = cfg['data']['dataset']
    try:
        dataset_cls = dataset_dict[dataset_name]
    except KeyError:
        logging.warning("Available datasets: %s" % (",".join(dataset_dict.keys())))
        raise Exception("Error dataset name.")
    return dataset_cls(mode, cfg)
def get_params(model, pattern):
    ''' Returns parameters matching the regex pattern.
    Args:
        model (class) : the network
        pattern (str) : the regex pattern
    '''
    matched = {name: param for name, param in model.named_parameters()
               if re.match(pattern, name) is not None}
    logging.info("Filter parameters with regex (%s): " % pattern)
    logging.info(",".join(matched.keys()))
    return matched.values()
def get_optimizer(cfg, model, patterns=".*"):
    ''' Returns an optimizer and lr scheduler with proper lrs.
    Args:
        cfg (dict): configurations
        model (class) : the network
        patterns (str/dict) : a regex pattern, or a dict mapping regex
            patterns to per-group learning rates
    Returns:
        (optimizer, lr_scheduler) - lr_scheduler is None when the
        configured scheduler name is not recognized.
    Raises:
        ValueError: if cfg["training"]["optimizer"] is not supported.
    '''
    optimizer_func = None
    if cfg["training"]["optimizer"] == "ADAM":
        optimizer_func = lambda x, y: optim.Adam(x, y)
    elif cfg["training"]["optimizer"] == "ADAMW":
        optimizer_func = lambda x, y: optim.AdamW(x, y)
    elif cfg["training"]["optimizer"] == "SGD":
        optimizer_func = lambda x, y: optim.SGD(x, y, momentum=0.9)
    else:
        # BUG FIX: `raise <str>` is a TypeError on Python 3; raise a real
        # exception carrying the same message instead.
        raise ValueError("Unexpected optimizer: " + cfg["training"]["optimizer"])
    ## optimizer
    lr = float(cfg["training"]["lr"])
    if isinstance(patterns, str):
        optimizer = optimizer_func(get_params(model, patterns), lr)
    elif isinstance(patterns, dict):
        param_list = []
        for name, lr in patterns.items():
            param_list.append({"params": get_params(model, name), "lr": lr})
        # NOTE(review): this reuses the *last* per-pattern lr as the
        # optimizer's default lr (the loop variable shadows the base lr) -
        # confirm that is intended.
        optimizer = optimizer_func(param_list, lr)
    ## scheduler
    lr_scheduler = None
    scheduler_name = cfg["training"]["scheduler"]
    scheduler_params = cfg["training"]["scheduler_params"]
    logging.info("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
    logging.info("scheduler: " + str(scheduler_name))
    logging.info("params: " + str(scheduler_params))
    logging.info("optimizer: " + cfg["training"]["optimizer"])
    logging.info("init lr = " + str(cfg["training"]["lr"]))
    logging.info("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
    if scheduler_name == "ReduceLROnPlateau":
        lr_scheduler = ReduceLROnPlateau(optimizer, 'min', **scheduler_params)
    elif scheduler_name == "StepLR":
        lr_scheduler = StepLR(optimizer,
                              scheduler_params["step_size"],
                              gamma=scheduler_params["gamma"]
                              )
    elif scheduler_name == "MultiStepLR":
        lr_scheduler = MultiStepLR(optimizer, **scheduler_params)
    return optimizer, lr_scheduler
|
14,129 | 27665cd1f4986463d2ae3523a6df2f85e1fcf7f1 | number = 1
number2 = 1.0
# BUG FIX: this variable was named `getattr`, which shadowed the built-in
# getattr() for the rest of the module.
greeting = 'こんにちは'
is_ok = True
print(number, type(number))
print(number2, type(number2))
print(greeting, type(greeting))
print(is_ok, type(is_ok))
print(2 > 1)
print(1 < 1)
print(3 > 4)
print(5 * 2)
print('テスト・テスト')
14,130 | 9e7eaa110c52651c1bcc572d79e2cdd66002833a | from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from django.utils.translation import ugettext as _
from accounts.models import UserProfile
from accounts.widgets import CalendarWidget
class NewUserForm(UserCreationForm):
    """Registration form that additionally requires a unique email."""
    email = forms.EmailField(label=_('Email'))

    class Meta(UserCreationForm.Meta):
        fields = (
            'username',
            'email',
            'password1',
            'password2',
        )

    def clean_email(self):
        """Reject emails already used by an existing account."""
        email = self.cleaned_data['email']
        if User.objects.filter(email=email):
            raise forms.ValidationError(
                _('User with the same email is already registered'))
        return email
class UserprofileForm(forms.ModelForm):
    """Profile edit form with a calendar picker for the birthday field."""
    # NOTE(review): Meta declares neither `fields` nor `exclude`; newer
    # Django versions raise ImproperlyConfigured for that - confirm the
    # targeted Django version.
    class Meta:
        model = UserProfile
        widgets = {
            'birthday': CalendarWidget
        }
|
14,131 | 5f5d5c2c3c628ef80f7a37d07b726c2007e96f9d | # Author: Nimesh Ghelani based on code by Mark D. Smucker
from collections import defaultdict
class Judgement:
    """A single relevance judgement for a (query, document) pair."""

    def __init__(self, query_id: str, doc_id: str, relevance: int):
        self.query_id = query_id
        self.doc_id = doc_id
        self.relevance = relevance

    def key(self):
        """Unique key combining query and document ids."""
        return self.query_id + '-' + self.doc_id


class Qrels:
    """Collection of relevance judgements indexed by query and document."""

    class QrelsError(Exception):
        pass

    def __init__(self):
        # key() -> Judgement
        self.judgements = {}
        # query_id -> set of doc_ids with nonzero relevance
        self.query_2_reldoc_nos = defaultdict(set)

    def add_judgement(self, j: Judgement):
        """Register a judgement; duplicates for the same pair are errors."""
        if j.key() in self.judgements:
            # BUG FIX: the nested exception class is not visible as a bare
            # name inside method bodies (raising `QrelsError` was a
            # NameError); it must be reached through the class.
            raise Qrels.QrelsError(
                'Cannot have duplicate queryID and docID data points')
        self.judgements[j.key()] = j
        if j.relevance != 0:
            self.query_2_reldoc_nos[j.query_id].add(j.doc_id)

    def get_query_ids(self):
        """Ids of queries that have at least one relevant document."""
        return self.query_2_reldoc_nos.keys()

    def get_relevance(self, query_id, doc_id):
        """Relevance for the pair, defaulting to 0 when unjudged."""
        key = query_id + '-' + doc_id
        if key in self.judgements:
            return self.judgements[key].relevance
        return 0
|
14,132 | b030d535f2d26df09d3dc341292e37efa04ef958 | #!/usr/bin/env python3
import sys
import time
def timer() -> None:
    """Display an MM:SS stopwatch on stdout, refreshed once per second.

    Runs until interrupted with Ctrl-C (then prints "bye!"); any other
    error is reported and terminates the process with exit status 1.

    Fix: the original called the bare ``exit(1)`` builtin, which is an
    interactive-session helper injected by ``site`` and not guaranteed to
    exist (e.g. under ``python -S`` or frozen apps); ``sys.exit`` is the
    correct API for scripts.
    """
    try:
        start_time = time.perf_counter()
        while True:
            seconds = int(time.perf_counter() - start_time)
            clock = f"{seconds // 60:0>2}:{seconds % 60:0>2}"
            # \r rewinds to the start of the line so the clock updates in place.
            sys.stdout.write("\r")
            sys.stdout.write(clock)
            sys.stdout.flush()
            time.sleep(1)
    except KeyboardInterrupt:
        print("\nbye!")
    except Exception as ex:
        print(ex)
        sys.exit(1)
timer()
|
14,133 | 902b758c274fc2c0fa579a3020ba38785023b175 | # This file is part of Tryton. The COPYRIGHT file at the top level of
# this repository contains the full copyright notices and license terms.
from trytond.i18n import gettext
from trytond.pool import PoolMeta, Pool
from trytond.modules.party.exceptions import EraseError
class Replace(metaclass=PoolMeta):
    # Extend the party "replace" wizard so references held by project works
    # are moved from the replaced party to its replacement.
    __name__ = 'party.replace'

    @classmethod
    def fields_to_replace(cls):
        # Also rewrite the ``party`` field of project.work records.
        return super().fields_to_replace() + [
            ('project.work', 'party'),
            ]
class Erase(metaclass=PoolMeta):
    # Extend the party "erase" wizard: refuse to erase a party that still
    # has unfinished project works in the given company.
    __name__ = 'party.erase'

    def check_erase_company(self, party, company):
        pool = Pool()
        Work = pool.get('project.work')
        super().check_erase_company(party, company)

        # A work with progress != 1 is still open; erasing its party would
        # lose data needed to complete it.
        works = Work.search([
                ('party', '=', party.id),
                ('company', '=', company.id),
                ('progress', '!=', 1),
                ])
        if works:
            raise EraseError(
                gettext('project.msg_erase_party_opened_project',
                    party=party.rec_name,
                    company=company.rec_name))
14,134 | 9f1320843159024ca4b6c42ea25d24ce973bd6d1 | language = ['PYTHON']
languages = ['PYTHON','C','C++','JAVA','PERL']
print(language)
print(languages)
print(languages[0:3])
print(languages[1:4])
print(languages[2])
languages[2] = 'C#'
print(languages[2])
print(languages[-1])
print(languages[-2:-1]) |
14,135 | f61b35dd34444575689b5b53bb5384fa7cfdd027 | #!/usr/bin/python
import sys
'''
**UNDERSTAND THE PROBLEM**
Function returns the number of ways (permutations) Cookie Monster can eat n cookies
Calculating permutations usually == recursion
Recursive base case: When amount of cookies to eat equals 0
Edge case: negative numbers (return 0)
For 5 cookies in jar (13 permutations):
1+1+1+1+1
1+1+1+2
1+1+2+1
1+1+3
1+2+1+1
1+2+2
1+3+1
2+1+1+1
2+1+2
2+2+1
2+3
3+1+1
3+2
I want to find all the ways that the numbers 1, 2, and 3 can be added up and equal n, including duplicate (but reversed) uses
0,1,2,3
'''
# The cache parameter is here for if you want to implement
# a solution that is more efficient than the naive
# recursive solution
def eating_cookies(n, cache=None):
    """Return the number of ordered ways to eat *n* cookies in bites of 1, 2 or 3.

    This is the "tribonacci" recurrence described in the header comment:
    ways(n) = ways(n-1) + ways(n-2) + ways(n-3), with ways(0) = 1 (the single
    empty way) and ways(n < 0) = 0 (edge case from the header: negatives → 0).
    Memoized via *cache*, so the runtime is O(n) instead of exponential.

    Fix: the original body ended with a dangling ``else:`` with no suite
    (a SyntaxError) and never implemented the recursive case at all.
    ways(5) == 13, matching the 13 permutations enumerated above.
    """
    if cache is None:
        cache = {}
    if n < 0:
        return 0
    if n == 0:
        return 1  # exactly one way to eat zero cookies: eat nothing
    if n not in cache:
        cache[n] = (eating_cookies(n - 1, cache)
                    + eating_cookies(n - 2, cache)
                    + eating_cookies(n - 3, cache))
    return cache[n]
if __name__ == "__main__":
if len(sys.argv) > 1:
num_cookies = int(sys.argv[1])
print("There are {ways} ways for Cookie Monster to eat {n} cookies.".format(ways=eating_cookies(num_cookies), n=num_cookies))
else:
print('Usage: eating_cookies.py [num_cookies]') |
14,136 | 30348139072d39167b453cc668788875773477c7 | import camera
from machine import UART
import machine
led = machine.Pin(4, machine.Pin.OUT)
machine.sleep(5000)
led.on()
uart = UART(1, 9600) # init with given baudrate
uart.init(9600, bits=8, parity=None, stop=1) # init with given parameters
camera.init()
buf = camera.capture()
camera.deinit()
led.off() |
14,137 | 389ec221ba8a57dc63e8b0d5b670158100358312 | #!/usr/bin/python
import RPi.GPIO as GPIO
import time
# set the GPIO mode
GPIO.setmode(GPIO.BCM)
# set pin 18 for output
GPIO.setup(18, GPIO.OUT)
# output to pin 18
GPIO.output(18, True)
# sleep for one second
time.sleep(1)
# turn off output for pin 18
GPIO.output(18, False)
# gpio cleanup
GPIO.cleanup()
|
14,138 | 7e2a2186150daa48076851e486d1a97422d0c7f6 | import itertools
class Game():
def __init__(self):
self.Actions = {}
self.numActions = 0
self.Start = None
self.Value = None
self.Goal = None
self.MovesLeft = None
self.TotalMoves = None
def setup(self):
self.Goal = int(input("What is the goal number? "))
self.setTotalMoves(int(input(" How many moves? ")))
self.setStart(int(input("What number to start at? ")))
done = False
while not done:
#Todo: revisit this in relation to the AddMove function, might need cleaning up.
moveName = input("Enter the name of the move: ")
value = input("Enter a value if any, n or nothing if it doesnt have one: ")
if not value.isdigit():
value = None
else:
value = int(value)
self.addMove(moveName,value)
done = input("Another move? Enter if yes, N to stop ")
# mygame.addMove("Add", 2)
# mygame.addMove("Add", 3)
# mygame.printMoves()
def reset(self):
self.Value = self.Start
self.MovesLeft = self.TotalMoves
def AddNum(self,value):
self.Value += value
def SubNum(self,value):
self.Value -= value
def MultNum(self,value):
self.Value *= value
def DivNum(self,value):
self.Value /= value
def DelNum(self,value):
#dont really need the value arg but we keep it for now
strVersion = str(self.Value)
end = len(strVersion)
self.Value = float(strVersion[:end-1]) #take out the last num and store again
def NegNum(self,value):
#do nothing with the value again
self.Value = -self.Value
def AppendNum(self,value):
strVersion = str(self.Value)
self.Value = float(strVersion + str(value))
def setStart(self,value):
self.Value = self.Start = value
def setGoal(self,value):
self.Goal = value
def setTotalMoves(self,value):
self.MovesLeft = self.TotalMoves = value
def addMove(self,moveName,moveValue = None):
self.numActions += 1
if moveName == "Add":
self.Actions[self.numActions] = (self.AddNum,moveValue,"Add")
elif moveName == "Sub":
self.Actions[self.numActions] = (self.SubNum,moveValue, "Subtract")
elif moveName == "Mul":
self.Actions[self.numActions] = (self.MultNum,moveValue, "Multiply")
elif moveName == "Div":
self.Actions[self.numActions] = (self.DivNum,moveValue, "Divide")
elif moveName == "Del":
self.Actions[self.numActions] = (self.DelNum,moveValue,"Delete")
elif moveName == "Neg":
self.Actions[self.numActions] = (self.NegNum,moveValue,"Negate")
elif moveName == "App":
self.Actions[self.numActions] = (self.AppendNum,moveValue,"Append")
def printMoves(self):
print (self.Actions)
def printGameState(self):
print("Action dictionary", self.Actions)
print("Total number of moves",self.TotalMoves)
print("Number of moves left is: ", self.MovesLeft,"\n")
print("The goal is: ", self.Goal)
print("Current value is: ", self.Value,"\n")
def generateMoves(self):
allPlays = itertools.combinations_with_replacement(range(1,self.numActions+1),self.TotalMoves)
return list(allPlays)
def tryMoves(self,someTuple):
for i in list(someTuple):
self.printGameState()
playable = self.Actions[i] #returns a tuple
playable[0](playable[1]) #call that action with its assigned value, if any
self.MovesLeft -= 1
self.printGameState()
def SolveGame(self):
for round in itertools.combinations_with_replacement(range(1, self.numActions + 1), self.TotalMoves):
self.reset()
print("Before: \n")
self.printGameState()
self.tryMoves(round)
print("After: \n")
#Todo: Change this to stop right after the game has been won, not until all moves are exhausted
if(self.Value == self.Goal):
print("Game won! Moves are: " + str(list(round)) )
for i in round:
actionName =self.Actions[i][2]
actionValue = self.Actions[i][1]
if actionValue == None:
actionValue = ""
else:
actionValue = str(actionValue)
print(actionName + ": " + actionValue)
break
else:
print("Tried: " + str(list(round)) + ", any key to try another game")
|
14,139 | 21531a4a4b034cbe4df9bb7d362bb6e93768363b | '''
Integers in each row are sorted in ascending from left to right.
Integers in each column are sorted in ascending from top to bottom.
[
[1, 4, 7, 11, 15],
[2, 5, 8, 12, 19],
[3, 6, 9, 16, 22],
[10, 13, 14, 17, 24],
[18, 21, 23, 26, 30]
]
start from the top right corner,
if target is smaller than current, go left
if target is larger than current, go down
O(m+n) time, O(1) space
'''
class Solution(object):
    def searchMatrix(self, matrix, target):
        """
        :type matrix: List[List[int]]
        :type target: int
        :rtype: bool
        """
        # Start at the bottom-left corner: moving up strictly decreases the
        # value, moving right strictly increases it, so every comparison
        # discards one whole row or column -- O(m+n) time, O(1) space.
        if not matrix or not matrix[0]:
            return False
        row = len(matrix) - 1
        col = 0
        width = len(matrix[0])
        while row >= 0 and col < width:
            current = matrix[row][col]
            if current == target:
                return True
            if current > target:
                row -= 1  # everything in this row from col onward is too big
            else:
                col += 1  # everything in this column up to row is too small
        return False
c = Solution()
print c.searchMatrix([[1,4,7,11,15],
[2,5,8,12,19],
[3,6,9,16,22],
[10,13,14,17,24],
[18,21,23,26,30]], 5)
|
14,140 | 4755c7ce368c36f194361c564fa22a93790c2f57 | def solution(cookie):
N = len(cookie)
best = 0
lsum, rsum = 0, sum(cookie)
for m in range(N):
lsum += cookie[m]
rsum -= cookie[m]
a, b = lsum, rsum
pa, pb = 0, N-1
while pa <= m and m+1 <= pb:
if a == b:
best = max(best, a)
break
if a < b:
b -= cookie[pb]
pb -= 1
else:
a -= cookie[pa]
pa += 1
return best
# print(solution([1,1,2,3]))
# print(solution([1,2,4,5]))
|
14,141 | cf8f7bf7c31a5c747debf539f154af81477da888 | from rest_framework import routers
from .views import EnergyTransductorViewSet
app_name = "transductors"
router = routers.DefaultRouter()
router.register(r'energy_transductors', EnergyTransductorViewSet)
urlpatterns = []
|
14,142 | 13affee18c2d110e8d3878c138350b55ee94b394 | # Returns index of x in arr if present, else -1
def binarySearch(arr, l, r, x):
    """Return the index of x in sorted arr[l..r] (inclusive), or -1 if absent.

    Bug fix: the midpoint used true division (``/``), which yields a float
    in Python 3 and crashes with TypeError when used as a list index. The
    rest of this module already uses ``//`` (see the pivot helpers) and
    Python 3 print calls, so floor division is the correct form here.
    """
    # Check base case
    if r >= l:
        mid = l + (r - l) // 2
        # If element is present at the middle itself
        if arr[mid] == x:
            return mid
        # If element is smaller than mid, then it can only
        # be present in left subarray
        elif arr[mid] > x:
            return binarySearch(arr, l, mid - 1, x)
        # Else the element can only be present in right subarray
        else:
            return binarySearch(arr, mid + 1, r, x)
    else:
        # Element is not present in the array
        return -1
def findPivot(arr):
    # Convenience wrapper: search the whole array for the rotation pivot
    # (the index of the largest element); delegates to the recursive helper.
    return doFindPivot(arr, 0, len(arr) - 1)
def doFindPivot(arr, l, r):
    """Return the pivot index of a rotated sorted array arr[l..r].

    The pivot is the index of the largest element (the point where the
    sorted order wraps around). For an array that is not rotated, the last
    index of the range is returned. Returns -1 for an empty range.

    Bug fix: the original recursed on ``(mid, r)`` without shrinking the
    range, so any non-rotated input (e.g. [1, 2, 3]) recursed forever; it
    could also read ``arr[mid + 1]`` past the end of the range. Rotated
    inputs still return the same index as before.
    """
    if r < l:
        return -1
    if r == l:
        return l
    mid = l + (r - l) // 2
    # The pivot is where a larger element is immediately followed by a smaller.
    if mid < r and arr[mid] > arr[mid + 1]:
        return mid
    if mid > l and arr[mid - 1] > arr[mid]:
        return mid - 1
    if arr[mid] >= arr[l]:
        # Left half is sorted, so the wrap-around must be to the right.
        return doFindPivot(arr, mid + 1, r)
    return doFindPivot(arr, l, mid - 1)
def rotated_array_search(input_list, number):
    """
    Find the index by searching in a rotated sorted array

    Args:
       input_list(array), number(int): Input array to search and the target
    Returns:
       int: Index or -1
    """
    result = -1
    if len(input_list) <= 0:
        print("Please input a non-empty array!")
        return result
    # Locate the rotation pivot (index of the maximum element): the array is
    # sorted on [0..pivot] and on [pivot+1..end].
    # NOTE(review): assumes the input is a rotated sorted array with distinct
    # values; pivotIdx may be -1 for inputs findPivot cannot handle -- confirm.
    pivotIdx = findPivot(input_list)
    if number == input_list[0]:
        return 0
    elif number > input_list[0]:
        # Values larger than the first element can only be in the left
        # sorted segment [0..pivot].
        result = binarySearch(input_list, 0, pivotIdx, number)
    else:
        # Smaller values can only be in the wrapped-around right segment.
        result = binarySearch(input_list, pivotIdx + 1, len(input_list) - 1, number)
    return result
def linear_search(input_list, number):
    """Return the first index of *number* in *input_list*, or -1 if absent."""
    return next(
        (idx for idx, value in enumerate(input_list) if value == number),
        -1,
    )
def test_function(test_case):
    # test_case is a pair: [input array, target number].
    input_list = test_case[0]
    number = test_case[1]
    # Cross-check the pivot/binary-search implementation against the
    # trivially-correct linear scan on the same input.
    if linear_search(input_list, number) == rotated_array_search(input_list, number):
        print("Pass")
    else:
        print("Fail")
# print(findPivot([6, 7, 8, 9, 10, 1, 2, 3, 4]))
# print(findPivot([6, 7, 8, 1, 2, 3, 4]))
# arr = [6, 7, 8, 9, 10, 1, 2, 3, 4]
# print(rotated_array_search(arr, 6))
# print(rotated_array_search(arr, 1))
# Test code below
# Test Case 1 - Normal case
test_function([[6, 7, 8, 9, 10, 1, 2, 3, 4], 6])
test_function([[6, 7, 8, 9, 10, 1, 2, 3, 4], 1])
test_function([[6, 7, 8, 1, 2, 3, 4], 8])
test_function([[6, 7, 8, 1, 2, 3, 4], 1])
test_function([[6, 7, 8, 1, 2, 3, 4], 10])
print(rotated_array_search([6, 7, 8, 9, 10, 1, 2, 3, 4], 1)) # 5
print(rotated_array_search([6, 7, 8, 9, 10, 1, 2, 3, 4], 7)) # 1
# Test Case 2 - Edge case: The target does not exist in the array
print(rotated_array_search([6, 7, 8, 9, 10, 1, 2, 3, 4], 11)) # -1
# Test Case 3 - Edge case: The target does not exist in the array
print(rotated_array_search([], 1)) # -1 Please input a non-empty array!
|
14,143 | 254766fa330eebbaf52327c04f345b30352ad880 | yd = float(91.44) # 정수가 아닌 실수 자료형
inch = float(2.54)
print("2.1yd = ",round(yd*2.1,1), end="cm\n")
print("10.5in = ",round(inch*10.5,1), end="cm\n")
#소수 자리수 정해주기 round(value,digit) digit만큼 표시 |
14,144 | 9bb359ebd2e94484bd756cac995437f68a4118e7 | from rolls import Roll
from players import Player
import random
def print_header():
print(f"-------------------------------------------------------")
print(f" Welcome to")
print(f" Rock, Paper, Scissors")
print(f"-------------------------------------------------------")
def get_user_name():
player_one = Player(input("\nWhat is your name? "))
return player_one
def output_match(player_one, player_two):
print(f"Hello {player_one.name}, you'll be playing against {player_two.name}")
def get_computer_roll():
return random.choice([Roll("Rock"), Roll("Gun"), Roll("Lightning"), Roll("Devil"), Roll("Dragon"),
Roll("Water"), Roll("Air"), Roll("Paper"), Roll("Sponge"), Roll("Wolf"),
Roll("Tree"), Roll("Human"), Roll("Snake"), Roll("Scissors"), Roll("Fire")])
def get_player_roll():
roll_dict = {1: "Rock", 2: "Gun", 3: "Lightning", 4: "Devil", 5: "Dragon", 6: "Water", 7: "Air", 8: "Paper", 9: "Sponge", 10: "Wolf", 11: "Tree", 12: "Human", 13: "Snake", 14: "Scissors", 15: "Fire"}
choice = 0
print("")
while not isinstance(choice, int) or choice not in range(1, 16):
print(f"Choose your roll:")
for roll in sorted(roll_dict.keys()):
print(f" {roll:2d}: {roll_dict[roll]}")
choice = int(input(f"Enter a number between 1 and 15: "))
#if choice == 0:
# print(f'')
return Roll(roll_dict[choice])
def play_game(player_two, score, game_length):
count = 1
while count <= game_length:
p2_roll = get_computer_roll()
p1_roll = get_player_roll()
print(f"You played {p1_roll.name} and {player_two.name} played {p2_roll.name}.")
if p2_roll.name in p1_roll.loses_to:
print(f"You lost that round.")
count += 1
score.append((0, 1))
elif p2_roll.name == p1_roll.name:
print(f"That was a tie. We'll redo that round.")
else:
print(f"You won that round.")
score.append((1, 0))
count += 1
return score
def calculate_score(score):
pone = 0
ptwo = 0
for one, two in score:
pone += one
ptwo += two
if pone == ptwo:
print(f'\nYou tied {pone}-{ptwo}.')
elif pone > ptwo:
print(f'\nYou won that match {pone}-{ptwo}.')
else:
print(f'\nYou lost that match {pone}-{ptwo}.')
def choose_game_size():
while True:
print("")
try:
choice = int(input(f'Enter the number of rounds you would like this match to be: '))
except ValueError:
pass
else:
break
return choice
def main():
print_header()
game_rolls = []
player_one = get_user_name()
player_two = Player()
game_length = choose_game_size()
output_match(player_one, player_two)
raw_score = play_game(player_two, game_rolls, game_length)
calculate_score(raw_score)
if __name__ == '__main__':
main()
|
14,145 | fcac3411475e8a9c5bed870eafaf8e23c79f9445 | #!/usr/bin/env python3
__author__ = 'Ron Li'
class Sha224:
""" A class to calculate SHA224 """
"""
Initialize table of round constants:
first 32 bits of the fractional parts of
the cube roots of the first 64 primes 2..311
"""
k = (0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5,
0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc,
0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7,
0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3,
0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5,
0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2)
"""
Initialize variables:
The second 32 bits of the fractional parts of
the square roots of the 9th through 16th primes 23..53
"""
h = [0xc1059ed8, 0x367cd507, 0x3070dd17, 0xf70e5939,
0xffc00b31, 0x68581511, 0x64f98fa7, 0xbefa4fa4]
def __init__(self):
self.data_len = 0
self.bit_len = 0
self.data_b = [0] * 64
def sha_init(self):
""" Nothing to be initialized in algorithm level so far """
pass
def rotr(self, x, y):
""" Rigth rotate operation """
return ((x >> y) | (x << (32-y))) & 0xFFFFFFFF
def sha_process(self):
""" Process the message in successive 512-bit chunks """
# Break chunk into sixteen 32-bit big-endian words m[0..15]
m = [0] * 64
# Extend the sixteen 32-bit words into sixty-four 32-bit words
j = 0
for i in range(16):
m[i] = self.data_b[j] << 24 | self.data_b[j+1] << 16\
| self.data_b[j+2] << 8 | self.data_b[j+3]
j += 4
for i in range(16, 64):
sig0 = self.rotr(m[i-15], 7) ^ self.rotr(m[i-15], 18)\
^ (m[i-15] >> 3)
sig1 = self.rotr(m[i-2], 17) ^ self.rotr(m[i-2], 19)\
^ (m[i-2] >> 10)
m[i] = (sig1 + m[i-7] + sig0 + m[i-16]) & 0xFFFFFFFF
# Initialize hash value for this chunk
a, b, c, d, e, f, g, h = self.h
for i in range(64):
ep0 = (self.rotr(a, 2) ^ self.rotr(a, 13) ^ self.rotr(a, 22))\
& 0xFFFFFFFF
ep1 = (self.rotr(e, 6) ^ self.rotr(e, 11) ^ self.rotr(e, 25))\
& 0xFFFFFFFF
ch = ((e & f) ^ ((~e) & g))
maj = ((a & b) ^ (a & c) ^ (b & c))
t1 = (h + ep1 + ch + self.k[i] + m[i]) & 0xFFFFFFFF
t2 = (ep0 + maj) & 0xFFFFFFFF
h = g
g = f
f = e
e = (d + t1) & 0xFFFFFFFF
d = c
c = b
b = a
a = (t1 + t2) & 0xFFFFFFFF
# Add this chunk's hash to result so far
self.h[0] = (self.h[0] + a) & 0xFFFFFFFF
self.h[1] = (self.h[1] + b) & 0xFFFFFFFF
self.h[2] = (self.h[2] + c) & 0xFFFFFFFF
self.h[3] = (self.h[3] + d) & 0xFFFFFFFF
self.h[4] = (self.h[4] + e) & 0xFFFFFFFF
self.h[5] = (self.h[5] + f) & 0xFFFFFFFF
self.h[6] = (self.h[6] + g) & 0xFFFFFFFF
self.h[7] = (self.h[7] + h) & 0xFFFFFFFF
def sha_update(self, input, len):
""" Update new data block """
# Convert string input to bytearray, if needed
if isinstance(input, str):
print("Convert str to bytes\n")
input = bytearray(input.encode('ascii'))
# Break message into 512-bit chunks
for i in range(len):
self.data_b[self.data_len] = input[i]
self.data_len += 1
if self.data_len == 64:
self.sha_process()
self.bit_len += 512
self.data_len = 0
def sha_digest(self):
""" Calculate the digest """
i = self.data_len
# Pad whatever data is left in the buffer
if self.data_len < 56:
self.data_b[i] = 0x80
i += 1
while i < 56:
self.data_b[i] = 0x00
i += 1
else:
self.data_b[i] = 0x80
i += 1
while i < 64:
self.data_b[i] = 0x00
i += 1
self.sha_process()
self.data_b[:56] = [0 for x in self.data_b[:56]]
# Append length of message in bits, as 64-bit big-endian integer
self.bit_len += self.data_len * 8
self.data_b[63] = self.bit_len & 0xFF
self.data_b[62] = (self.bit_len >> 8) & 0xFF
self.data_b[61] = (self.bit_len >> 16) & 0xFF
self.data_b[60] = (self.bit_len >> 24) & 0xFF
self.data_b[59] = (self.bit_len >> 32) & 0xFF
self.data_b[58] = (self.bit_len >> 40) & 0xFF
self.data_b[57] = (self.bit_len >> 48) & 0xFF
self.data_b[56] = (self.bit_len >> 56) & 0xFF
# Final transform to get final digest
self.sha_process()
return self.h[:7]
def uint_test(input):
""" Unit test for any giving inputs """
s = Sha224()
s.sha_update(input, len(input))
return s.sha_digest()
def main():
"""
Run tests on several unit tests
From https://www.di-mgt.com.au/sha_testvectors.html
"""
test = 'abc'
digest = uint_test(test)
print([hex(x) for x in digest])
test = ''
digest = uint_test(test)
print([hex(x) for x in digest])
test = 'abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq'
digest = uint_test(test)
print([hex(x) for x in digest])
test = """abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijkl
mnhijklmnoijklmnopjklmnopqklmnopqrlmnopqrsmnopqrstnopqrstu"""
digest = uint_test(test)
print([hex(x) for x in digest])
if __name__ == "__main__":
main()
|
14,146 | 019fa6a0d5f46f89452fe3c61297e3776f52cf51 | import math
import sys
class Problem():
def __init__(self):
self.mu_values = None
self.lattice_count_cache = { }
self.coprime_count_cache = { 1 : 0 }
def solve(self):
assert(self.get(10**6) == 159139)
for n in [3141592653589793]:
print(n, '=>', self.get(n))
def get(self, n):
# Prepare Möbius function for computing coprime lattice count
self.__init_mu_values(math.floor(math.sqrt(n)))
# P(n) = sum_{k >= 0} (-1)^k Q(n/2^k) where
# P(n) = {(x,y) in A(n) : x, y coprime and odd parity},
# Q(n) = {(x,y) in A(n) : x, y coprime}
total_count = 0
k = 0
while 2**k <= n:
coprime_count = self.__get_coprime_lattice_count(n // 2**k)
total_count += (-1)**k * coprime_count
print('get =>', k, coprime_count, total_count)
k += 1
return total_count
def __init_mu_values(self, n):
prime_sieve = [False for i in range(n + 1)]
m = math.floor(math.sqrt(n))
for i in range(2, m + 1):
if prime_sieve[i] is True:
continue
for j in range(i * i, n + 1, i):
prime_sieve[j] = True
values = [1 for i in range(n + 1)]
for i in range(2, n + 1):
if prime_sieve[i] is True:
continue
for j in range(i, n + 1, i):
values[j] *= -1
for j in range(i**2, n + 1, i**2):
values[j] = 0
self.mu_values = values
def __get_lattice_count(self, n):
# Let A(n) = #{(x,y) : x^2 + y^2 <= n, 0 < x < y}.
if n not in self.lattice_count_cache:
total_count = 0
x = 1
while True:
max_y = math.floor(math.sqrt(n - x**2))
min_y = x + 1
if max_y < min_y:
break
else:
total_count += (max_y - min_y + 1)
x += 1
self.lattice_count_cache[n] = total_count
return self.lattice_count_cache[n]
def __get_coprime_lattice_count(self, n):
# Q(n) = sum_{d <= sqrt[n]} mu(d) A(floor[n/d^2])
if n not in self.coprime_count_cache:
total_count = 0
m = math.floor(math.sqrt(n))
for i in range(1, m + 1):
mu = self.mu_values[i]
if mu != 0:
total_count += mu * self.__get_lattice_count(n // i**2)
self.coprime_count_cache[n] = total_count
return self.coprime_count_cache[n]
def main():
problem = Problem()
problem.solve()
if __name__ == '__main__':
sys.exit(main())
|
14,147 | 5a86c33cf59b82f0e366e216313239582f746c72 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
## 我们现有1,2,5,10面值的硬币.当我们有M元时,解出最小数目的硬币组合.
coin = [1, 2, 5, 10]
def take_coins(m):
    """Return the minimum-coin change for *m* using denominations 1, 2, 5, 10.

    Implements the behaviour described in the module comment ("解出最小数目
    的硬币组合"): greedy from the largest denomination down, which is optimal
    for the 1/2/5/10 coin system. The original body was a stub that always
    returned four zeros and contained a no-op bare ``tc`` expression.

    Args:
        m: non-negative integer amount of money.
    Returns:
        np.array of 4 ints: counts for each of [1, 2, 5, 10], same shape and
        dtype as the original stub's return value.
    """
    denominations = (1, 2, 5, 10)
    tc = np.array([0, 0, 0, 0])
    remaining = m
    for i in range(len(denominations) - 1, -1, -1):
        # divmod: how many of this coin fit, and what is left for smaller ones.
        tc[i], remaining = divmod(remaining, denominations[i])
    return tc
if __name__ == '__main__':
List1 = [1, 2, 3, 4]
List2 = [5, 6, 7, 8]
ll = []
for i,j in zip(List1,List2):
ll.append(i*j)
for k in ll :
print(k)
|
14,148 | 298f64ad7df9488d678a24d5ab1c67dd445a3810 | # -*- coding: utf-8 -*-
"""
Created on Thu Feb 20 12:32:49 2020
@author: combitech
"""
from sklearn.preprocessing import LabelBinarizer
from tensorflow.keras.callbacks import ModelCheckpoint, CSVLogger
from tensorflow.keras.layers import Input, Dense, Conv2D, Flatten, Activation
from tensorflow.keras.optimizers import SGD
from tensorflow.keras import layers, models
from tensorflow.keras.models import Model
from tensorflow import keras
from tensorflow.keras.datasets import cifar100, cifar10
import tensorflow as tf
import matplotlib.pyplot as plt
lb = LabelBinarizer()
#(x_train, y_train), (x_test, y_test) = cifar100.load_data(label_mode='fine')
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
y_train = lb.fit_transform(y_train)
y_test = lb.transform(y_test)
#y_train = keras.utils.to_categorical(y_train, 10)
#y_test = keras.utils.to_categorical(y_test, 10)
def init_shallownet(in_dim, classes):
input_layer = Input(shape = in_dim, dtype='float32', name='in')
X = layers.Conv2D(32, (3,3), padding='same', name='conv_1')(input_layer)
X = Activation('relu')(X)
X = Flatten()(X)
X = Dense(classes, activation='softmax')(X)
d1 = Activation('softmax')(X)
return Model(inputs = input_layer, outputs = d1, name="model")
opt = keras.optimizers.RMSprop(learning_rate=0.0001, decay=1e-6)
#opt = SGD(lr=0.01, decay=0.01/40, momentum = 0.9)
shallowNet = init_shallownet((32,32, 3), 10)
shallowNet.summary()
shallowNet.compile(loss="categorical_crossentropy", optimizer=opt, metrics=["accuracy"])
checkpoint_val_loss = ModelCheckpoint("C:/Users/Tobias/CNN/ShallowNet/" + shallowNet.name + '_{epoch:02d}-{loss:.2f}_best_val_model.hdf5', monitor='val_loss', verbose=1, save_best_only=True, mode='auto', period=1)
early_stopping = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=20, verbose=1)
history = shallowNet.fit(x_train, y_train, batch_size=64, epochs=10000, validation_split=0.15, callbacks=[checkpoint_val_loss, early_stopping])
shallowNet.evaluate(x_test, y_test, batch_size=64, verbose=2)
def show(history):
val_acc = history['val_accuracy']
val_loss = history['val_loss']
acc = history['accuracy']
loss = history['loss']
epochs = range(len(acc))
plt.plot(epochs, acc, 'b', label='Training acc')
plt.plot(epochs, loss, 'c', label='Training loss')
plt.plot(epochs, val_acc, 'r', label='Validation acc')
plt.plot(epochs, val_loss, 'g', label='Validation loss')
plt.ylim(1.5)
plt.xlim(0.0)
plt.xlabel("Epoch")
plt.ylabel("Loss/Accuracy")
plt.legend()
plt.figure()
plt.show()
show(shallowNet.history.history)
|
14,149 | 4ac942b0285eb4ca615375cc51d9be1adcd8e9b4 | from django.shortcuts import render
from partidos.models import Partido
def ver_partidos(request):
    # List every Partido ordered by date and render the overview template.
    partidos = Partido.objects.all().order_by('fecha')
    return render(request,'partidos/ver_partidos.html',{ 'partidos':partidos })
|
14,150 | a64ad93762ffc97af7c5d620afcc5f7223b1bfee | # Generated by Django 2.2 on 2019-05-02 08:58
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('web', '0056_auto_20190418_1437'),
]
operations = [
migrations.AddField(
model_name='article',
name='storage_temperature_range',
field=models.CharField(blank=True, default='', max_length=64, verbose_name='Storage Temperature Range'),
),
migrations.AddField(
model_name='article',
name='storage_type',
field=models.CharField(choices=[('Colonial', 'Colonial'), ('Refrigerated', 'Refrigerated'), ('Frozen', 'Frozen'), ('Unspecified', 'Unspecified')], default='Unspecified', max_length=64, verbose_name='Storage Type'),
),
migrations.AddField(
model_name='historicalarticle',
name='storage_temperature_range',
field=models.CharField(blank=True, default='', max_length=64, verbose_name='Storage Temperature Range'),
),
migrations.AddField(
model_name='historicalarticle',
name='storage_type',
field=models.CharField(choices=[('Colonial', 'Colonial'), ('Refrigerated', 'Refrigerated'), ('Frozen', 'Frozen'), ('Unspecified', 'Unspecified')], default='Unspecified', max_length=64, verbose_name='Storage Type'),
),
]
|
14,151 | 6b3b6bd5ac52bede00bbf02fa672ab8ce019556b | #!/bin/env python
### Submit jobs using NtupleUtils.py, then check jobs' statuses every 30 seconds.
### When jobs are done, automatically proceed to the next step in the sequence
### MC: hadd --> normalize --> hadd-final
### Data: hadd --> skim --> hadd-final --> remove-duplicates
import os, sys, subprocess
import argparse
import time
def check_bjobs(job_name):
# Get the number of running and pending jobs, only proceed if both = 0
finish = False
jstatus = subprocess.Popen(["bjobs","-sum","-noheader","-J",job_name], stdout=subprocess.PIPE).communicate()[0]
jlist = [ x for x in jstatus.split(" ") if x != '' ]
rjobs = int(jlist[0])
pjobs = int(jlist[4])
print "Checking job status: ", job_name
print "- Running jobs: ", rjobs
print "- Pending jobs: ", pjobs
if rjobs == 0 and pjobs == 0: finish = True
return finish
def sub_sequence(tag, isData=False, submit=False, label=''):
basedir = os.environ['CMSSW_BASE']+'/src/RazorAnalyzer'
if not submit:
nosub = '--no-sub'
else:
nosub = ''
if isData:
data = '--data'
else:
data = ''
cmd_submit = list(filter(None,['python', 'python/ntupling/NtupleUtils.py', tag, '--submit', nosub, '--label', label, data]))
print ' '.join(cmd_submit)
subprocess.call(cmd_submit)
if submit:
job_done = False
while not job_done:
time.sleep(30)
job_done = check_bjobs('*'+label+'*')
# Before running hadd, we have to check that there are no zombie files in the output.
# If there are, abort immediately and let the user clean up the zombies before continuing
cmd_zombies = list(filter(None,['python', 'python/ntupling/NtupleUtils.py', tag, '--find-zombies', nosub, '--label', label, data]))
print ' '.join(cmd_zombies)
subprocess.call(cmd_zombies)
zombieFileName = "Zombies_%s_%s.txt"%(tag, label)
if isData:
zombieFileName = zombieFileName.replace(".txt","_Data.txt")
with open(zombieFileName) as zombieFile:
for line in zombieFile:
sys.exit("One or more zombie files were found! See the full list in %s"%zombieFileName)
cmd_hadd = list(filter(None,['python', 'python/ntupling/NtupleUtils.py', tag, '--hadd', nosub, '--label', label, data]))
print ' '.join(cmd_hadd)
subprocess.call(cmd_hadd)
if not isData:
cmd_normalize = list(filter(None,['python', 'python/ntupling/NtupleUtils.py', tag, '--normalize', nosub, '--label', label, data]))
print ' '.join(cmd_normalize)
subprocess.call(cmd_normalize)
else:
cmd_skim = list(filter(None,['python', 'python/ntupling/NtupleUtils.py', tag, '--skim', nosub, '--label', label, data]))
print ' '.join(cmd_skim)
subprocess.call(cmd_skim)
cmd_hadd_final = list(filter(None,['python', 'python/ntupling/NtupleUtils.py', tag, '--hadd-final', nosub, '--label', label, data]))
print ' '.join(cmd_hadd_final)
subprocess.call(cmd_hadd_final)
if isData:
cmd_remove_duplicates = list(filter(None,['python', 'python/ntupling/NtupleUtils.py', '--remove-duplicates', nosub, '--label', label, data, tag]))
print ' '.join(cmd_remove_duplicates)
subprocess.call(cmd_remove_duplicates)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--tag', help = '1L, 2L, ...', required = True)
parser.add_argument('--label', help = 'Label for RazorRun', required = True)
parser.add_argument('--data', action = 'store_true', help = 'Run on data (MC otherwise)')
parser.add_argument('--no-sub', dest = 'noSub', action = 'store_true', help = 'Print commands but do not submit')
args = parser.parse_args()
sub_sequence(args.tag, args.data, (not args.noSub), args.label)
|
14,152 | 77b52cb4c9f9cad6559818dc2a5cb866515bbdca | lines_nmbr = int(input())
longest_intersection = set()
for el in range(lines_nmbr):
first_set, second_set, intersection = set(), set(), set()
first_range, second_range = input().split('-')
first_start, first_end = first_range.split(',')
second_start, second_end = second_range.split(',')
first_start, first_end, second_start, second_end = int(first_start), int(first_end), int(second_start), int(second_end)
for el in range(first_start, first_end+1):
first_set.add(el)
for el in range(second_start, second_end+1):
second_set.add(el)
intersection = first_set & second_set
if len(intersection) > len(longest_intersection):
longest_intersection = first_set & second_set
longest_intersection = [str(el) for el in longest_intersection]
print(f'Longest intersection is [{", ".join(longest_intersection)}] with length {len(longest_intersection)}')
|
14,153 | f8545dcc435f8f9a921d725b92d4b0bb82694efc | #Program to print product of two numbers is odd or even
# Read two integers from one line and report the parity of their product.
first, second = map(int, input().split())
if (first * second) % 2 == 0:
    print("even")
else:
    print("odd")
|
14,154 | 8cedaf0ff401cbf273e5c54f84201c8b54a599de | import numpy as np
from .body import Body
from copy import deepcopy
from collections import defaultdict
from .client import BulletClient
class BodyCache:
    """Cache of URDF bodies loaded into a pybullet client, reused across scenes."""

    def __init__(self, urdf_ds, client_id):
        # urdf_ds: dataset whose ``index`` table maps labels to urdf entries.
        self.urdf_ds = urdf_ds
        self.client = BulletClient(client_id)
        # label -> list of Body instances already loaded for that label.
        self.cache = defaultdict(list)
        # Pose far from the origin used to park freshly loaded bodies.
        self.away_transform = (0, 0, 1000), (0, 0, 0, 1)

    def _load_body(self, label):
        # Load one more instance of *label* from the dataset and cache it,
        # parked at the away pose so it is not visible in the scene yet.
        ds_idx = np.where(self.urdf_ds.index['label'] == label)[0].item()
        object_infos = self.urdf_ds[ds_idx].to_dict()
        body = Body.load(object_infos['urdf_path'],
                         scale=object_infos['scale'],
                         client_id=self.client.client_id)
        body.pose = self.away_transform
        self.cache[object_infos['label']].append(body)
        return body

    def hide_bodies(self):
        # Move every cached body to a distinct far-away pose so none of them
        # appear (or collide with each other) in the rendered scene.
        n = 0
        for body_list in self.cache.values():
            for body in body_list:
                pos = (1000, 1000, 1000 + n * 10)
                orn = (0, 0, 0, 1)
                body.pose = pos, orn
                n += 1

    def get_bodies_by_labels(self, labels):
        # Return one Body per requested label (duplicates allowed), loading
        # additional instances on demand when the cache has too few.
        self.hide_bodies()
        gb_label = defaultdict(lambda: 0)
        for label in labels:
            gb_label[label] += 1
        for label, n_instances in gb_label.items():
            n_missing = gb_label[label] - len(self.cache[label])
            for n in range(n_missing):
                self._load_body(label)
        # NOTE(review): deepcopy here also deep-copies the cached Body
        # wrappers before popping -- presumably intentional so pops don't
        # mutate self.cache; confirm Body supports deepcopy safely.
        remaining = deepcopy(dict(self.cache))
        bodies = [remaining[label].pop(0) for label in labels]
        return bodies

    def get_bodies_by_ids(self, ids):
        # Resolve dataset row ids to labels, then delegate.
        labels = [self.urdf_ds[idx]['label'] for idx in ids]
        return self.get_bodies_by_labels(labels)

    def __len__(self):
        # Total number of cached bodies across all labels.
        return sum([len(bodies) for bodies in self.cache.values()])
class TextureCache:
    """Lazily loads textures from a dataset and memoizes the client handles
    by dataset index, so each texture file is loaded at most once."""
    def __init__(self, texture_ds, client_id):
        self.texture_ds = texture_ds
        self.client = BulletClient(client_id)
        self.cache = {}
    def _load_texture(self, idx):
        """Load texture `idx` through the client and remember its handle."""
        texture_path = str(self.texture_ds[idx]['texture_path'])
        self.cache[idx] = self.client.loadTexture(texture_path)
    def get_texture(self, idx):
        """Return the cached handle for `idx`, loading it on first access."""
        try:
            return self.cache[idx]
        except KeyError:
            self._load_texture(idx)
            return self.cache[idx]
    @property
    def cached_textures(self):
        """All texture handles loaded so far, in insertion order."""
        return list(self.cache.values())
|
14,155 | ea46c812f76b2988b43fcf384aee16babcdce38c | # -*- coding: utf-8 -*-
def compress_coordinate(elements: list) -> dict:
    """Map each distinct value to its rank in sorted order.

    Shrinks the values while preserving their relative order
    (classic coordinate compression).

    Args:
        elements: list of integers.
    Returns:
        Dict mapping each original value to its 0-based rank.
    Landau notation: O(n log n)
    """
    ranks = {}
    for rank, value in enumerate(sorted(set(elements))):
        ranks[value] = rank
    return ranks
def main():
    """Read n intervals [a_i, a_i + b_i) from stdin and print, for each k in
    1..n, the total length covered by exactly k intervals.

    Uses coordinate compression plus a difference array ("imos") sweep.
    """
    import sys
    from collections import defaultdict
    from itertools import accumulate
    # Shadowing the builtin on purpose: sys.stdin.readline is much faster.
    input = sys.stdin.readline
    n = int(input())
    ab = [tuple(map(int, input().split())) for _ in range(n)]
    # Collect every interval endpoint (start and exclusive end).
    c = list()
    for ai, bi in ab:
        c.append(ai)
        c.append(ai + bi)
    c1 = compress_coordinate(c)
    # d: difference array over compressed coordinates (+1 at start, -1 at end).
    d = [0] * (len(c1) + 1)
    # e: compressed index -> original coordinate, to recover segment lengths.
    e = defaultdict(int)
    for ai, bi in ab:
        d[c1[ai]] += 1
        d[c1[ai + bi]] -= 1
        e[c1[ai]] = ai
        e[c1[ai + bi]] = ai + bi
    # Prefix sums turn the difference array into per-segment coverage depth.
    d = list(accumulate(d))
    # count[k]: total length of segments covered by exactly k intervals.
    count = defaultdict(int)
    for i, di in enumerate(d[:-1]):
        count[di] += e[i + 1] - e[i]
    ans = [0] * (n + 1)
    for i in range(1, n + 1):
        ans[i] = count[i]
    print(*ans[1:])
if __name__ == "__main__":
    main()
|
14,156 | a78e65ebd58ceb4a5997895830da386cf6f57c7f | import os
import json
import sys
import sqlite3
import requests
import netweet.collection.utils as utils # fix
class Collector:
    """Class used to collect tweets using the Twitter API.

    Typical usage: call process_tokens() to load API credentials, optionally
    connect_database() for storage, then collect() with a list of queries.
    """
    def __init__(self):
        # Credentials are populated later by process_tokens().
        self.number_apps = None
        self._keys = None
        self._secret_keys = None
        self._bearer_tokens = None
        # Database handles are populated later by connect_database().
        self.db_connector = None
        self.db_cursor = None
        self.db_path = None
    @property
    def keys(self):
        """API keys as a read-only property.
        """
        return self._keys
    @property
    def secret_keys(self):
        """API secret keys as a read-only property.
        """
        return self._secret_keys
    @property
    def bearer_tokens(self):
        """API bearer tokens as a read-only property.
        """
        return self._bearer_tokens
    def process_tokens(self, path_to_file):
        """Read a JSON file with a strict schema and save tokens for API usage.
        Args:
            path_to_file:
                Absolute path for the JSON file containing the app tokens related to
                the Twitter API. The schema for the json file must follow the structure:
                '{
                    "Apps": [{"api_key": "...",
                              "api_secret_key": "...",
                              "bearer_token": "..."}, {...}, ..., {...}]
                }'
        Returns:
            No return. Only set the appropriate keys according to the parsed file.
        """
        with open(path_to_file, "r") as f:
            app = json.load(f)
        # Validate the schema of the API KEYS file.
        if not utils.validate_apikeys_schema(app):
            raise BadSchemaError("""The schema of the file containing the keys does not
            follow the required schema.""")
        app_list = app["Apps"]
        self._keys = [ x["api_key"] for x in app_list ]
        self._secret_keys = [ x["api_secret_key"] for x in app_list ]
        self._bearer_tokens = [ x["bearer_token"] for x in app_list ]
        self.number_apps = len(self._keys)
    def connect_database(self, db_path="/tmp/example.db"):
        """Connect with the database for tweet storage.
        Args:
            db_path:
                Path to existing database. If the database does not exist, then it will
                be created. If the path is not parsed, then a temporary database 'example.db'
                will be created at "/tmp/"
        Returns:
            No returns.
        """
        # NOTE(review): the connection is closed immediately after creation, so
        # this only ensures the file exists -- confirm whether a live connection
        # was intended to be kept in self.db_connector.
        self.db_path = db_path
        self.db_connector = sqlite3.connect(db_path)
        self.db_connector.close()
    def get_bearer_header(self, app_index=0):
        """Return a dictionary formatted with the bearer token authorization.
        Args:
            app_index:
                The list index of the app list containing the authorization keys.
        Returns:
            Header dictionary to be used for 2.0 Authorization requests.
        Raises:
            BadAppIndexError:
                If the index of the app is negative or larger than the number of
                existing apps.
        """
        if app_index > len(self._bearer_tokens)-1 or app_index < 0:
            raise BadAppIndexError("Bad value for the app index")
        return {"Authorization": f"Bearer {self._bearer_tokens[app_index]}"}
    def request_data(self, search_query=None, app_index=0):
        """Make a HTTP request to collect tweets according the search string given.
        Args:
            search_query:
                String storing the search request. It can contains all operators allowed
                by the Twitter API.
            app_index:
                Index of the app whose bearer token authenticates the request.
        Returns:
            response:
                Response object returned from the HTTP request on the Twitter API.
        Raises:
            AttributeError:
                If no query is given as argument for :search_query:
        """
        tweet_obj_fields = utils.tweet_object_fields()
        tweet_fields = ','.join(tweet_obj_fields["twitter_fields"])
        params = {'query': search_query,
                  'tweet.fields': tweet_fields}
        if search_query is None:
            raise AttributeError("No query parsed.")
        # Recent-search endpoint: only tweets from the last 7 days.
        base_url = "https://api.twitter.com/2/tweets/search/recent?"
        headers = self.get_bearer_header(app_index)
        response = requests.get(base_url, headers=headers, params=params)
        return response
    def collect(self, query_list):
        """Pass a list of search queries to make a continuous collection of
        tweets for each one.

        Loops forever, rotating through all apps and queries; returns -1 on
        Ctrl-C or on the first invalid API response.
        """
        try:
            while True:
                # For each app, we iterate through the query list and make the API request.
                for current_app in range(self.number_apps):
                    for q_index, current_query in enumerate(query_list):
                        response = self.request_data(search_query=current_query, app_index=current_app)
                        if utils.validate_response(response):
                            # STORE DATA (persistence not implemented yet; just
                            # inspect the payload for now).
                            data = response.json()
                            print(data.keys())
                            #pass
                        else:
                            raise BadResponseError()
        except KeyboardInterrupt:
            return -1
        except BadResponseError:
            return -1
# Exceptions
class BadAppIndexError(Exception):
    """Raised when an app index is negative or beyond the loaded app list."""
    pass
class BadSchemaError(Exception):
    """Raised when the API-keys JSON file does not match the expected schema."""
    pass
class BadResponseError(Exception):
pass |
14,157 | ccc80c49c27ad3a6b2d9d943cd8688b14d30a18c | # Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
import logging
from gossip.common import NullIdentifier
LOGGER = logging.getLogger(__name__)
class WaitTimer(object):
    """Wait timers represent a random duration incorporated into a wait
    certificate.
    Attributes:
        WaitTimer.minimum_wait_time (float): The minimum wait time in seconds.
        WaitTimer.target_wait_time (float): The target wait time in seconds.
        WaitTimer.initial_wait_time (float): The initial wait time in seconds.
        WaitTimer.certificate_sample_length (int): The number of certificates
            to sample for the population estimate.
        WaitTimer.fixed_duration_blocks (int): If fewer than
            WaitTimer.fixed_duration_blocks exist, then base the local mean
            on a ratio based on InitialWaitTime, rather than the history.
        WaitTimer.poet_enclave (module): The PoetEnclave module to use for
            executing enclave functions.
        previous_certificate_id (str): The id of the previous certificate.
        local_mean (float): The local mean wait time based on the history of
            certs.
        request_time (float): The request time.
        duration (float): The duration of the wait timer.
        validator_address (str): The address of the validator that created the
            wait timer
    """
    minimum_wait_time = 1.0
    target_wait_time = 30.0
    initial_wait_time = 3000.0
    certificate_sample_length = 50
    fixed_duration_blocks = certificate_sample_length
    # Must be set (via module assignment) before create_wait_timer is called.
    poet_enclave = None
    @classmethod
    def _compute_population_estimate(cls, certificates):
        """Estimates the size of the validator population by computing
        the average wait time and the average local mean used by
        the winning validator.
        Since the entire population should be computing from the same
        local mean based on history of certificates and we know that
        the minimum value drawn from a population of size N of
        exponentially distributed variables will be exponential with
        mean being 1 / N, we can estimate the population size from the
        ratio of local mean to global mean. a longer list of
        certificates will provide a better estimator only if the
        population of validators is relatively stable.
        Note:
            See the section entitled "Distribution of the minimum of
            exponential random variables" in the page
            http://en.wikipedia.org/wiki/Exponential_distribution
        Args:
            certificates (list): Previously committed certificates,
                ordered newest to oldest
        """
        assert isinstance(certificates, list)
        assert len(certificates) >= cls.certificate_sample_length
        sum_means = 0
        sum_waits = 0
        # Only the newest `certificate_sample_length` certificates are summed.
        for certificate in certificates[:cls.certificate_sample_length]:
            sum_waits += certificate.duration - cls.minimum_wait_time
            sum_means += certificate.local_mean
        # Both averages divide by the same length, so the divisor cancels in
        # the returned ratio (it is effectively sum_means / sum_waits).
        avg_wait = sum_waits / len(certificates)
        avg_mean = sum_means / len(certificates)
        return avg_mean / avg_wait
    @classmethod
    def create_wait_timer(cls,
                          validator_address,
                          certificates):
        """Creates a wait timer in the enclave and then constructs
        a WaitTimer object.
        Args:
            validator_address (str): A string representing the address of the
                validator creating the wait timer.
            certificates (list or tuple): A historical list of certificates.
        Returns:
            journal.consensus.poet.wait_timer.WaitTimer: A new wait timer.
        """
        local_mean = cls.compute_local_mean(certificates)
        previous_certificate_id = \
            certificates[-1].identifier if certificates else NullIdentifier
        # Create an enclave timer object and then use it to create a
        # WaitTimer object
        enclave_timer = \
            cls.poet_enclave.create_wait_timer(
                validator_address=validator_address,
                previous_certificate_id=previous_certificate_id,
                local_mean=local_mean,
                minimum_wait_time=cls.minimum_wait_time)
        timer = cls(enclave_timer)
        LOGGER.info('wait timer created; %s', timer)
        return timer
    @classmethod
    def compute_local_mean(cls, certificates):
        """Computes the local mean wait time based on the certificate
        history.
        Args:
            certificates (list or tuple): A historical list of certificates.
        Returns:
            float: The local mean wait time.
        Raises:
            TypeError: If certificates is not a list or tuple.
        """
        if not isinstance(certificates, (list, tuple)):
            raise TypeError
        count = len(certificates)
        if count < cls.fixed_duration_blocks:
            # Bootstrap phase: blend target and initial wait times by a
            # quadratic ratio of how much history exists so far.
            ratio = 1.0 * count / cls.fixed_duration_blocks
            local_mean = \
                (cls.target_wait_time * (1 - ratio**2)) + \
                (cls.initial_wait_time * ratio**2)
        else:
            # Steady state: scale the target by the population estimate.
            local_mean = \
                cls.target_wait_time * \
                cls._compute_population_estimate(certificates)
        return local_mean
    @property
    def population_estimate(self):
        # Inverse of compute_local_mean's steady-state scaling.
        return self.local_mean / WaitTimer.target_wait_time
    def __init__(self, enclave_timer):
        """Copy scalar fields out of the enclave timer and keep a handle to it
        for serialization and expiry checks."""
        self.previous_certificate_id =\
            str(enclave_timer.previous_certificate_id)
        self.local_mean = float(enclave_timer.local_mean)
        self.request_time = float(enclave_timer.request_time)
        self.duration = float(enclave_timer.duration)
        self.validator_address = str(enclave_timer.validator_address)
        self._enclave_wait_timer = enclave_timer
        # Small slack (0.1s) before we even bother asking the enclave.
        self._expires = self.request_time + self.duration + 0.1
        self._serialized_timer = None
    def __str__(self):
        return \
            'TIMER, {0:0.2f}, {1:0.2f}, {2}'.format(
                self.local_mean,
                self.duration,
                self.previous_certificate_id)
    def serialize(self):
        """Serializes the underlying enclave wait timer
        """
        # Cached: the enclave is only asked once.
        if self._serialized_timer is None:
            self._serialized_timer = self._enclave_wait_timer.serialize()
        return self._serialized_timer
    def has_expired(self, now):
        """Determines whether the timer has expired.
        Args:
            now (float): The current time.
        Returns:
            bool: True if the timer has expired, false otherwise.
        """
        # Cheap local check first; only consult the enclave once the locally
        # computed expiry time has passed.
        if now < self._expires:
            return False
        return self._enclave_wait_timer.has_expired()
def set_wait_timer_globals(target_wait_time=None,
                           initial_wait_time=None,
                           certificate_sample_length=None,
                           fixed_duration_blocks=None,
                           minimum_wait_time=None):
    """Selectively override WaitTimer's class-level tuning parameters.

    Only arguments that are not None are applied. Setting
    certificate_sample_length also resets fixed_duration_blocks to the same
    value; an explicit fixed_duration_blocks argument then takes precedence
    (the updates are applied in declaration order).
    """
    updates = (
        (target_wait_time, float, ('target_wait_time',)),
        (initial_wait_time, float, ('initial_wait_time',)),
        (certificate_sample_length, int,
         ('certificate_sample_length', 'fixed_duration_blocks')),
        (fixed_duration_blocks, int, ('fixed_duration_blocks',)),
        (minimum_wait_time, float, ('minimum_wait_time',)),
    )
    for value, convert, attr_names in updates:
        if value is None:
            continue
        for attr_name in attr_names:
            setattr(WaitTimer, attr_name, convert(value))
|
14,158 | ba99e6e8f9f2fe2b2ae4777e732d9dc96e18973e | import openpyxl
import os
# To create the directory on Windows if one doesn't exist
#outPath = "C:\\Users\\syede\\Downloads\\SeleniumOutFiles"
#if not os.path.isdir(outPath):
# os.makedirs(outPath)
'''
os.path.dirname(os.path.dirname(__file__))+"\\testCases\\testData\\loginData.xlsx"
inFileName = ".\\testData\\loginData.xlsx"
inSheetName = "Sheet1"
#To merge filename , sheetname with filepath for both input and output files
inPath = "C:\\Users\\syede\\Downloads" # filepath mame
inFileName ="login.xlsx"
inSheetName = "Sheet1"
outPath = "C:\\Users\\syede\\Downloads\\SeleniumOutFiles"
outFileName= "login_output.xlsx"
outSheetName = "output"
inFile = os.path.join(inPath,inFileName)
outFile = os.path.join(outPath, outFileName)
'''
# function to get number of rows from an input file
def getRowCount(inFile,inSheetName):
    """Return the number of used rows in the given sheet of an xlsx file."""
    sheet = openpyxl.load_workbook(inFile)[inSheetName]
    return sheet.max_row
# function to get number of columns from an input file
def getColCount(inFile,inSheetName):
    """Return the number of used columns in the given sheet of an xlsx file."""
    sheet = openpyxl.load_workbook(inFile)[inSheetName]
    return sheet.max_column
# function to get read the input file based on rownumber and colnumber from an input file
def readXL(inFile, inSheetName, rows, cols):
    """Return the value stored at cell (rows, cols) of the given sheet.

    Row and column numbers are 1-based, as in openpyxl.
    """
    sheet = openpyxl.load_workbook(inFile)[inSheetName]
    return sheet.cell(row=rows, column=cols).value
# function to read the input file and write it to an output file based on row and col and then append a value to a specific position
# this is helpful if you are automating a test and want to capture test result for each login or test case result
# see example of Salesforce login for 3 different login credentials and store each test result
def writeXL(inFile, inSheetName, rows, cols, Value):
    """Write `Value` into cell (rows, cols) of the sheet and save the workbook
    back to the same file.

    Handy for recording per-row test results (e.g. pass/fail next to each
    login credential) during automated runs.
    """
    workbook = openpyxl.load_workbook(inFile)
    workbook[inSheetName].cell(row=rows, column=cols).value = Value
    workbook.save(inFile)
#getRowCount(inFile, inSheetName)
#getColCount(inFile, inSheetName)
#readXL(inFile, inSheetName, 2,3)
#writeXL(inFile, inSheetName, 2,15, "Value")
|
14,159 | fcefb788c2f8da6c3d0a4a677a3d93b8967306e1 | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from collections import deque
import pytest
from nvflare.apis.event_type import EventType
from nvflare.apis.fl_context import FLContext, FLContextManager
from nvflare.app_common.resource_managers.list_resource_manager import ListResourceManager
class MockEngine:
    """Minimal stand-in for the NVFlare engine used by these tests.

    Provides just enough surface for ListResourceManager: FL-context creation
    and a no-op event sink.
    """
    def __init__(self, run_name="exp1"):
        self.fl_ctx_mgr = FLContextManager(
            engine=self,
            identity_name="__mock_engine",
            job_id=run_name,
            public_stickers={},
            private_stickers={},
        )
    def new_context(self):
        # Fresh FLContext from the manager (usable as a context manager).
        return self.fl_ctx_mgr.new_context()
    def fire_event(self, event_type: str, fl_ctx: FLContext):
        # Events are deliberately ignored in tests.
        pass
# Each tuple: (available resources, resource requirement,
#              expected check_resources() result, expected reserved resources).
CHECK_TEST_CASES = [
    ({"gpu": [1, 2, 3, 4]}, {"gpu": 1}, True, {"gpu": [1]}),
    ({"gpu": [1, 2, 3, 4]}, {"gpu": 4}, True, {"gpu": [1, 2, 3, 4]}),
    ({"gpu": [1]}, {"gpu": 1}, True, {"gpu": [1]}),
    ({"gpu": [1], "cpu": [1, 2, 3, 4, 5]}, {"gpu": 1, "cpu": 3}, True, {"gpu": [1], "cpu": [1, 2, 3]}),
    ({"gpu": [1]}, {"gpu": 2}, False, {}),
    ({"gpu": [1, 2]}, {"gpu": 5}, False, {}),
    ({"gpu": [1, 2]}, {"cpu": 1}, False, {}),
]
# Each tuple: (available resources, resource requirement,
#              expected reserved resources) -- the check is expected to pass.
TEST_CASES = [
    ({"gpu": [1, 2, 3, 4]}, {"gpu": 1}, {"gpu": [1]}),
    ({"gpu": [1, 2, 3, 4]}, {"gpu": 4}, {"gpu": [1, 2, 3, 4]}),
    ({"gpu": [1]}, {"gpu": 1}, {"gpu": [1]}),
    ({"gpu": [1], "cpu": [1, 2, 3, 4, 5]}, {"gpu": 1, "cpu": 3}, {"gpu": [1], "cpu": [1, 2, 3]}),
]
class TestListResourceManager:
    """Unit tests for ListResourceManager's check / cancel / allocate / free
    lifecycle, FIFO allocation order, and reservation expiration."""
    @pytest.mark.parametrize(
        "resources, resource_requirement, expected_check_result, expected_reserved_resources", CHECK_TEST_CASES
    )
    def test_check_resource(self, resources, resource_requirement, expected_check_result, expected_reserved_resources):
        """check_resources reports availability and records a reservation."""
        engine = MockEngine()
        list_resource_manager = ListResourceManager(resources=resources)
        with engine.new_context() as fl_ctx:
            check_result, token = list_resource_manager.check_resources(
                resource_requirement=resource_requirement, fl_ctx=fl_ctx
            )
        assert expected_check_result == check_result
        # Reserved resources are only recorded on a successful check.
        if expected_check_result:
            assert expected_reserved_resources == list_resource_manager.reserved_resources[token][0]
    @pytest.mark.parametrize("resources, resource_requirement, expected_reserved_resources", TEST_CASES)
    def test_cancel_resource(self, resources, resource_requirement, expected_reserved_resources):
        """Cancelling a reservation clears it from reserved_resources."""
        engine = MockEngine()
        list_resource_manager = ListResourceManager(resources=resources)
        with engine.new_context() as fl_ctx:
            _, token = list_resource_manager.check_resources(resource_requirement=resource_requirement, fl_ctx=fl_ctx)
        assert expected_reserved_resources == list_resource_manager.reserved_resources[token][0]
        with engine.new_context() as fl_ctx:
            list_resource_manager.cancel_resources(
                resource_requirement=resource_requirement, token=token, fl_ctx=fl_ctx
            )
        assert list_resource_manager.reserved_resources == {}
    @pytest.mark.parametrize("resources, resource_requirement, expected_reserved_resources", TEST_CASES)
    def test_allocate_resource(self, resources, resource_requirement, expected_reserved_resources):
        """Allocation returns exactly what the earlier check reserved."""
        engine = MockEngine()
        list_resource_manager = ListResourceManager(resources=resources)
        with engine.new_context() as fl_ctx:
            _, token = list_resource_manager.check_resources(resource_requirement=resource_requirement, fl_ctx=fl_ctx)
        assert expected_reserved_resources == list_resource_manager.reserved_resources[token][0]
        with engine.new_context() as fl_ctx:
            result = list_resource_manager.allocate_resources(
                resource_requirement=resource_requirement, token=token, fl_ctx=fl_ctx
            )
        assert result == expected_reserved_resources
    @pytest.mark.parametrize("resources, resource_requirement, expected_reserved_resources", TEST_CASES)
    def test_free_resource(self, resources, resource_requirement, expected_reserved_resources):
        """Freeing allocated resources clears the reservation table."""
        engine = MockEngine()
        list_resource_manager = ListResourceManager(resources=resources)
        with engine.new_context() as fl_ctx:
            check_result, token = list_resource_manager.check_resources(
                resource_requirement=resource_requirement, fl_ctx=fl_ctx
            )
        assert expected_reserved_resources == list_resource_manager.reserved_resources[token][0]
        with engine.new_context() as fl_ctx:
            result = list_resource_manager.allocate_resources(
                resource_requirement=resource_requirement, token=token, fl_ctx=fl_ctx
            )
        with engine.new_context() as fl_ctx:
            list_resource_manager.free_resources(resources=result, token=token, fl_ctx=fl_ctx)
        assert list_resource_manager.reserved_resources == {}
    def test_check_one_check_two_then_allocate_two_allocate_one(self):
        """Reservations are independent: allocation order need not match
        check order, and each token keeps its own reserved item."""
        engine = MockEngine()
        list_resource_manager = ListResourceManager(resources={"gpu": [f"gpu_{i}" for i in range(4)]})
        resource_requirement = {"gpu": 1}
        with engine.new_context() as fl_ctx:
            check1, token1 = list_resource_manager.check_resources(
                resource_requirement=resource_requirement, fl_ctx=fl_ctx
            )
            check2, token2 = list_resource_manager.check_resources(
                resource_requirement=resource_requirement, fl_ctx=fl_ctx
            )
        with engine.new_context() as fl_ctx:
            result = list_resource_manager.allocate_resources(
                resource_requirement=resource_requirement, token=token2, fl_ctx=fl_ctx
            )
        assert result == {"gpu": ["gpu_1"]}
        with engine.new_context() as fl_ctx:
            result = list_resource_manager.allocate_resources(
                resource_requirement=resource_requirement, token=token1, fl_ctx=fl_ctx
            )
        assert result == {"gpu": ["gpu_0"]}
    def test_check_one_cancel_one_check_four_then_allocate_four(self):
        """Cancelled reservations return their items to the pool (FIFO order)."""
        engine = MockEngine()
        list_resource_manager = ListResourceManager(resources={"gpu": [f"gpu_{i}" for i in range(4)]})
        resource_requirement1 = {"gpu": 1}
        resource_requirement2 = {"gpu": 4}
        with engine.new_context() as fl_ctx:
            check1, token1 = list_resource_manager.check_resources(
                resource_requirement=resource_requirement1, fl_ctx=fl_ctx
            )
        with engine.new_context() as fl_ctx:
            list_resource_manager.cancel_resources(
                resource_requirement=resource_requirement1, token=token1, fl_ctx=fl_ctx
            )
        with engine.new_context() as fl_ctx:
            check2, token2 = list_resource_manager.check_resources(
                resource_requirement=resource_requirement2, fl_ctx=fl_ctx
            )
            result = list_resource_manager.allocate_resources(
                resource_requirement=resource_requirement2, token=token2, fl_ctx=fl_ctx
            )
        assert result == {"gpu": ["gpu_0", "gpu_1", "gpu_2", "gpu_3"]}
    def test_check_and_timeout(self):
        """Unclaimed reservations expire after expiration_period and their
        resources return to the pool."""
        timeout = 5
        engine = MockEngine()
        list_resource_manager = ListResourceManager(
            resources={"gpu": [f"gpu_{i}" for i in range(4)]}, expiration_period=timeout
        )
        resource_requirement = {"gpu": 1}
        with engine.new_context() as fl_ctx:
            list_resource_manager.handle_event(event_type=EventType.SYSTEM_START, fl_ctx=fl_ctx)
            check_result, token = list_resource_manager.check_resources(
                resource_requirement=resource_requirement, fl_ctx=fl_ctx
            )
        assert {"gpu": ["gpu_0"]} == list_resource_manager.reserved_resources[token][0]
        # Wait past the expiration period so the reservation lapses.
        time.sleep(timeout + 1)
        with engine.new_context() as fl_ctx:
            list_resource_manager.handle_event(event_type=EventType.SYSTEM_END, fl_ctx=fl_ctx)
        assert list_resource_manager.reserved_resources == {}
        assert list_resource_manager.resources == {"gpu": deque(["gpu_0", "gpu_1", "gpu_2", "gpu_3"])}
|
14,160 | 59fbdd01e0f7e5c3dd20508b05dfb217a73acb61 | import requests
class Equity(object):
    """Thin wrapper around the Quandl/Sharadar SF1 datatable for one ticker.

    Each ratio property re-reads :pyattr:`company_data`, which performs a
    fresh HTTP request -- there is no caching, so values always reflect the
    latest API response.
    """
    def __init__(self, ticker, api_key=None):
        # Fall back to the module-level QUANDL_API_KEY when no key is given.
        self.api_key = api_key or QUANDL_API_KEY
        self.host = 'https://www.quandl.com/api/v3/datatables/SHARADAR/SF1.json'
        self.params = {'api_key': self.api_key, 'ticker': ticker, 'dimension': 'MRY', 'qopts.latest': 1, 'qopts.columns': 'eps,pe1,currentratio,divyield,dps,de,roa,roe,roic'}
    @property
    def company_data(self):
        """Fetch the latest annual (MRY) row and map column names to values.

        Returns an empty dict when the API reports no data for the ticker.
        """
        response = requests.get(self.host, params=self.params)
        table = response.json()['datatable']
        if not table['data']:
            return {}
        row = table['data'][0]
        return {column['name']: row[position]
                for position, column in enumerate(table['columns'])}
    @property
    def eps(self):
        """Earnings per share, or None when unavailable."""
        return self.company_data.get('eps')
    @property
    def pe(self):
        """Price-to-earnings ratio, or None when unavailable."""
        return self.company_data.get('pe1')
    @property
    def current_ratio(self):
        """Current assets / current liabilities, or None when unavailable."""
        return self.company_data.get('currentratio')
    @property
    def dividend_yield(self):
        """Dividend yield, or None when unavailable."""
        return self.company_data.get('divyield')
    @property
    def dividend_per_share(self):
        """Dividends per share, or None when unavailable."""
        return self.company_data.get('dps')
    @property
    def debt_equity_ratio(self):
        """Debt-to-equity ratio, or None when unavailable."""
        return self.company_data.get('de')
    @property
    def return_on_assets(self):
        """Return on assets, or None when unavailable."""
        return self.company_data.get('roa')
    @property
    def return_on_equity(self):
        """Return on equity, or None when unavailable."""
        return self.company_data.get('roe')
    @property
    def return_on_invest_capital(self):
        """Return on invested capital, or None when unavailable."""
        return self.company_data.get('roic')
|
14,161 | 98b947b7ea5817aa02c52b7a3ae8943a22f4905f | import urllib.request
import urllib.parse
# Douban movie type-ranking page (type 5 = action, rating interval 100:90).
url = 'https://movie.douban.com/typerank?type_name=%E5%8A%A8%E4%BD%9C&type=5&interval_id=100:90&action=&'
headers={
    'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.146 Safari/537.36'
}
# Prompt (Chinese): "enter the page number to view". Each page holds 20 items.
page = int(input('请输入要查看的页码; '))
start = (page-1)*20
limit = 20
data={
    'start':start,
    'limit':limit
}
# Form-encode the pagination parameters as a UTF-8 byte string.
data = urllib.parse.urlencode(data).encode('utf-8')
# Build the URL by appending the query string (GET-style request):
# url += data
# Build the request (POST-style: passing `data` makes urllib send a POST body).
req = urllib.request.Request(url=url, data=data, headers=headers)
# Send the request.
res = urllib.request.urlopen(req)
# print(res.read().decode('utf-8'))
# Save the raw response bytes for offline inspection.
with open('douban.html', 'wb') as fw:
    fw.write(res.read())
|
14,162 | 67d323f53d203ec0eff8fc87325c2caa27587c76 | # -*- coding: utf-8 -*-
from odoo import models, fields, api
class WeightType(models.Model):
    """Weight bracket definition (per shipment method) with kg units."""
    _name = 'weight.type'
    _inherit = ['mail.thread', 'mail.activity.mixin']
    _description = "WeightType"
    # Human-readable label for the bracket (shown as "Size").
    name = fields.Char('Size',required=True)
    note = fields.Char('Note')
    # Standard odoo archiving flag.
    active=fields.Boolean(default=True)
    shipment_method=fields.Selection([
        ('air', 'Air'),
        ('land', 'Land'),
        ('sea', 'Sea'),
        ('express', 'Express')],string="Shipment Method")
    # Inclusive-looking bounds of the bracket; validation is not enforced here.
    weight_from=fields.Float(string='Weight From', default=0.0)
    weight_to=fields.Float(string='Weight To', default=0.0)
    # Both UoM fields default to the 'kg' unit of measure and are read-only.
    uom_id=fields.Many2one('uom.uom',
        default=lambda self: self.env['uom.uom'].search([('name', '=', 'kg')], limit=1),readonly=True)
    uom_tow_id=fields.Many2one('uom.uom',
        default=lambda self: self.env['uom.uom'].search([('name', '=', 'kg')], limit=1),readonly=True)
|
14,163 | b67a80451a68ca4ac17a419124734e86d7f85eb9 | from CommonFunctionality import CommonFunctionality
from CallRecording import CallRecording
class CallRecordingPage(object):
    """Page object aggregating all Call Recording helpers.

    Composes the shared CommonFunctionality utilities and the
    CallRecording-specific actions around a single browser instance.
    """
    def __init__(self, browser):
        # `browser` is presumably a Selenium WebDriver shared by both helpers
        # -- TODO confirm against CommonFunctionality's constructor.
        self.common_func = CommonFunctionality(browser)
        self.call_recording = CallRecording(browser)
|
14,164 | 0679cab0e70559e955782a5ae8251c8ab3a4d2cc | import os
# Clear the console via the Windows 'cls' command (no effect on POSIX shells).
os.system('cls')
print("******** Suma de dos numeros ********")
# Read two numbers from the user and add them (Spanish prompts/output).
numero1 = float(input("Ingrese el primer numero: "))
numero2 = float(input("Ingrese el segundo numero: "))
resultado = numero1 + numero2
print("El resultado de la suma de los numeros es: ", resultado) |
14,165 | 1bf29301fc32b57c8bbf6fe9a0e4460267498a75 | import cv2
from .rules import *
from .loader import *
from .model_renderer import show, colorize, parse_palette, parse_sinks
from .osc import OSCSink, PrintSender
from argparse import ArgumentParser
# Command-line interface: a config file path plus an optional window size.
parser = ArgumentParser(prog="Wireworld Composer", description="""
Compose sequences using wireworld simulations. Check the readme for details.
""")
parser.add_argument("config", help="path to the config file", type=str)
parser.add_argument("--windowsize", help="Resolution of the output animation, defaults to 512 pixels", type=int, default=512)
args = parser.parse_args()
# Load the raw config text and the ASCII world description from one file.
data,world = load_file(args.config)
config = Config.parse_raw(data)
# Normalize palette and sink definitions into their runtime representations.
config.palette = parse_palette(config.palette)
config.sinks = parse_sinks(config.sinks)
# Route events over OSC when an OSC config is present; otherwise print them.
if isinstance(config.osc, OscConfig):
    output = OSCSink(config.osc)
else:
    output = PrintSender(config.osc)
output.start()
field = parse_ascii(config, world)
# Milliseconds per simulation step, derived from BPM and subdivisions.
step = int((60. / (config.tempo.bpm * config.tempo.subdivisions)) * 1000)
# Main loop: render the current field, advance the simulation one tick,
# and pace the animation via cv2's keyboard wait.
frame_size = (args.windowsize, args.windowsize)
while True:
    show(colorize(config, field), frame_size)
    tick(field, output)
    cv2.waitKey(step)
14,166 | 44a6fc9c4fc965e61e0b0a5ee63e4f3a004fb8c7 | """Randomly pick customer and print customer info"""
# Add code starting here
# Hint: remember to import any functions you need from
# other files or libraries
|
14,167 | 2cfc97f4251d3bc70328edd596d9ff05944beecf | from django.core.urlresolvers import reverse
from example.models import Car
from example.tests.utils import filter_models, BaseTestCase
class AggregatesAPITestCase(BaseTestCase):
    """Exercises the aggregate query-string API (Count/Min/Max/Sum) against
    the car list endpoint, comparing API results to local computations over
    the fixture cars."""
    def setUp(self):
        super().setUp()
        # All aggregate queries in this suite target the car list endpoint.
        self.car_api_url = reverse('car-list')
    def test_count(self):
        """Count aggregate, both unfiltered and combined with a filter."""
        results = self.query_agg_api(
            self.car_api_url,
            'aggregate[Count]=id'
        )
        self.assertIn('count_id', results)
        self.assertEqual(results['count_id'], len(self.cars))
        # we test that filtering works in conjuction with our aggregates
        classification = Car.CLASSIFICATION[0][0]
        results = self.query_agg_api(
            self.car_api_url,
            f'classification={classification}',
            'aggregate[Count]=id',
        )
        self.assertIn('count_id', results)
        self.assertEqual(
            results['count_id'],
            len(filter_models(self.cars, classification=classification))
        )
    def test_min(self):
        """Min aggregate over retail price."""
        # Fix: removed an unused `classification` local that was never used to
        # filter (unlike test_count, this test queries all cars).
        results = self.query_agg_api(
            self.car_api_url,
            'aggregate[Min]=retail_price'
        )
        self.assertIn('min_retail_price', results)
        self.assertEqual(
            results['min_retail_price'],
            float(min(x.retail_price for x in self.cars))
        )
    def test_max(self):
        """Max aggregate over retail price."""
        results = self.query_agg_api(
            self.car_api_url,
            'aggregate[Max]=retail_price'
        )
        self.assertIn('max_retail_price', results)
        self.assertEqual(
            results['max_retail_price'],
            float(max(x.retail_price for x in self.cars))
        )
    def test_sum(self):
        """Sum aggregate over retail price."""
        results = self.query_agg_api(
            self.car_api_url,
            'aggregate[Sum]=retail_price'
        )
        self.assertIn('sum_retail_price', results)
        self.assertEqual(
            results['sum_retail_price'],
            float(sum(x.retail_price for x in self.cars))
        )
14,168 | e6afd09f511f7c9193e9e8fdad7c570fabb5cf44 | """
============================
author:Administrator
time:2019/7/24
E-mail:540453724@qq.com
============================
"""
# Read an integer and print True when it is even, False otherwise.
value = int(input("请输入数值:"))
print(value % 2 == 0)
14,169 | 36de82fdedeeafd3053f3dcde0c13ce273c41951 | # coding: utf-8
import zerorpc
import logging
logging.basicConfig(level=logging.INFO)
class CentralRPCClient:
    """zerorpc client for the training server running on the central node."""
    def __init__(self, endpoint):
        # endpoint: zerorpc/zmq address string, e.g. 'tcp://host:port'.
        self.endpoint = endpoint
        self.client = None
    def start(self):
        """Create the underlying zerorpc client and connect to the endpoint."""
        logging.info('starting a rpc client to server: %s' % self.endpoint)
        self.client = zerorpc.Client()
        self.client.connect(self.endpoint)
        # c.connect('tcp://127.0.0.1:4243')
    def stop(self):
        """Close the underlying zerorpc client (requires a prior start())."""
        logging.info('stopping a rpc client to server: %s' % self.endpoint)
        self.client.close()
    def train(self, app_id, train_params):
        """Ask the server to start training `app_id` with the given params."""
        self.client.do_train(app_id, train_params)
    def distribute_code(self, app_id, cluster):
        """Distribute a hard-coded local archive for `app_id` to the workers.

        NOTE: the Windows-specific absolute path below makes this method
        machine-dependent; prefer dis_code() with an explicit file path.
        """
        logging.info("asking server to distribute code of %s to workers:%s ..." % (app_id, cluster))
        file = "F:\\PythonWorkspace\\tf-package-demo\\tf-estimator-cluster-app.zip"
        self.client.do_disCode(file, cluster)
    def dis_code(self, file, cluster):
        """Ask the server to distribute the given code archive to `cluster`."""
        logging.info("asking server to distribute code of %s to workers:%s ..." % (file, cluster))
        self.client.do_disCode(file, cluster)
if __name__ == '__main__':
    # Ad-hoc manual test: connect to a hard-coded server and push one archive.
    endpoint = 'tcp://172.17.171.108:14243'
    # endpoint = 'tcp://0.0.0.0:4243'
    # endpoint = 'tcp://10.244.0.1:4243'
    # endpoint = 'tcp://127.0.0.1:4243'
    client = CentralRPCClient(endpoint)
    client.start()
    file = "tf-estimator-cluster-app.zip"
    client.dis_code(file, ['172.17.171.190'])
    # # client.rain(123, {})
    # client.distribute_code("ac0534fe-fd23-11e8-8ec9-309c23c29f89", ['172.17.171.190'])
    # client.stop()
|
14,170 | 227904d1e59082d65ba972f04636f576057c2eb2 | from demo import CTPN
import numpy as np
import sys,os
import glob
import mahotas
import shutil
import matplotlib.pyplot as plt
from PIL import Image
import tensorflow as tf
import os.path as ops
import numpy as np
import cv2
import argparse
import matplotlib.pyplot as plt
try:
from cv2 import cv2
except ImportError:
pass
from crnn_model import crnn_model
from global_configuration import config
from local_utils import log_utils, data_utils
logger = log_utils.init_logger()
from math import atan2,degrees,fabs,sin,radians,cos
sys.path.append(os.getcwd())
class CRNN(object):
    """Placeholder client for the CRNN text-recognition service."""
    def __init__(self):
        # Model loading is not implemented yet; just announce construction.
        print("CRNN init")
    def call_crnn_rpc(self, image_data):
        """RPC stub: recognition is not wired up yet, so nothing is returned."""
        return
class CTPN_CRNN(object):
#load the model
    def __init__(self):
        """Initialize the CTPN+CRNN pipeline; models are created per call."""
        print("CTPN CRNN Init")
    # Text detection
def text_detection(self,im,image_name):
ctpn = CTPN()
img,text_recs = ctpn.get_text_box(im,image_name)
return img, text_recs
def dumpRotateImage(self,img, degree, pt1, pt2, pt3, pt4):
height, width = img.shape[:2]
heightNew = int(width * fabs(sin(radians(degree))) + height * fabs(cos(radians(degree))))
widthNew = int(height * fabs(sin(radians(degree))) + width * fabs(cos(radians(degree))))
matRotation = cv2.getRotationMatrix2D((width / 2, height / 2), degree, 1)
matRotation[0, 2] += (widthNew - width) / 2
matRotation[1, 2] += (heightNew - height) / 2
imgRotation = cv2.warpAffine(img, matRotation, (widthNew, heightNew), borderValue=(255, 255, 255))
pt1 = list(pt1)
pt3 = list(pt3)
[[pt1[0]], [pt1[1]]] = np.dot(matRotation, np.array([[pt1[0]], [pt1[1]], [1]]))
[[pt3[0]], [pt3[1]]] = np.dot(matRotation, np.array([[pt3[0]], [pt3[1]], [1]]))
imgOut = imgRotation[int(pt1[1]):int(pt3[1]), int(pt1[0]):int(pt3[0])]
height, width = imgOut.shape[:2]
return imgOut
def crnnRec(self, im, text_recs):
index = 0
images = []
for rec in text_recs:
pt1 = (rec[0], rec[1])
pt2 = (rec[2], rec[3])
pt3 = (rec[6], rec[7])
pt4 = (rec[4], rec[5])
partImg = self.dumpRotateImage(im, degrees(atan2(pt2[1] - pt1[1], pt2[0] - pt1[0])), pt1, pt2, pt3, pt4)
if(partImg.shape[0]==0 or partImg.shape[1]==0 or partImg.shape[2]==0):
continue
#mahotas.imsave('data/tmp/%s.jpg'%index, partImg)
# image = Image.open('data/test.jpg').convert('L')
#先转灰度再去做识别
image = Image.fromarray(partImg).convert('L')
#image.save('data/tmp/gray_%s.jpg'%index)
height,width,channel=partImg.shape[:3]
print(height,width,channel)
print(image.size)
#调整为width*32大小的图,CRNN都是基于不定长等高32的样本训练
scale = image.size[1] * 1.0 / 32
w = image.size[0] / scale
w = int(w)
rcnnImgSize = w,32
image = image.resize(rcnnImgSize, Image.ANTIALIAS)
#得到了合适大小的图片,交给CRNN去做识别
im_arr = np.fromstring(image.tobytes(), dtype=np.uint8)
#im_arr = im_arr.reshape((image.size[1], image.size[0], 1))
images.append(im_arr)
index += 1
crnn_model = CRNN()
for i in range(len(images)):
preds.append(crnn_model.call_crnn_rpc(images[i]))
return preds
# 文本识别
def text_recognition(self,img, text_recs):
preds = self.crnnRec(im=img, text_recs=text_recs)
return preds
def do(self,img_name,is_show=True):
print("---------------------------------------------------------------")
print("start to recognize : %s"%img_name)
# 读取图片
im = cv2.imread(img_name)
# 利用CTPN检测文本块
img, text_recs = self.text_detection(im,img_name)
# 使用CRNN识别文本
preds = self.text_recognition(img, text_recs)
# 输出识别结果
for i in range(len(preds)):
print("%s" % (preds[i]))
print("---------------------------------------------------------------")
# Matplotlib pyplot.imshow(): M x N x 3 image, where last dimension is RGB.
# OpenCV cv2.imshow(): M x N x 3 image, where last dimension is BGR
# plt.imshow(cv2.imread(image_path, cv2.IMREAD_COLOR)[:, :, (2, 1, 0)])
if(is_show):
srcBGR = img.copy()
destRGB = cv2.cvtColor(srcBGR,cv2.COLOR_BGR2RGB)
plt.imshow(destRGB)
plt.show()
def ctpn_crnn_do(ctpn_crnn, im_name):
    """Run the full detect+recognise pipeline on one image without displaying it."""
    ctpn_crnn.do(im_name, False)
if __name__ == '__main__':
    # Start with a fresh results directory on every run.
    if os.path.exists("data/results/"):
        shutil.rmtree("data/results/")
    os.makedirs("data/results/")
    ctpn_crnn = CTPN_CRNN()
    # Process every jpg/png/bmp under data/demo.
    img_names = glob.glob(os.path.join("./data/demo", '*.jpg')) + \
        glob.glob(os.path.join("./data/demo", '*.png')) + \
        glob.glob(os.path.join("./data/demo", '*.bmp'))
    for im_name in img_names:
        ctpn_crnn_do(ctpn_crnn, im_name)
|
14,171 | cd2cc102abb7a8aa3a5f33967103d8c5e6d7dbac | #!/usr/bin/env python3
"""
Tutorial Lesson 4
=================
This is the fourth tutorial arrowhead program.
The previous lessons introduced all of the baisc concepts of arrowhead. Now
we'll look at how one can work with flow state.
As established previously, the first argument of each step function refers to a
step object (an instance of a unique Step subclass that is named after the
decorated method name). State objects are instantiated early in flow instance
intialization. You cannot share state between invocations of the same Flow
class as you will be instantiating unique flow objects. That's just for
reference, let's see what we can do instead.
You can assign any attribute of the step object. Typically you will do that so
that other steps can refer to it later. We'll do just that with this greeter
program. The what_is_your_name step assigns the 'name' attribute on the step
object. That attribute is accessed from the greet step, through the flow
argument. The flow argument is a new thing that we haven't seen before, it's
actually the 'self' you would typically expect as it refers to the instance of
the flow itself. Each step becomes a step object that is assigned to the name
of the method. So we can just reach out to each step and see (or alter — yikes!)
anything inside.
There is no error handling in this program but you get the idea. It's very
simple and powerful, when you know which step is responsible for setting the
state you care about. As we'll see later you can also abstract that away.
So there you have it, each step can store any state attribute (except Meta,
that's reserved), then you can reach out to any other step by accepting the flow
argument. Through that argument you get the instance of the flow class you are
executing in and you can access each step as it were an attribute.
"""
from arrowhead import Flow, step, arrow, main
class Greeting(Flow):
    """Three-step flow showing how state set in one step is read from another."""

    @step(initial=True)
    @arrow(to='what_is_your_name')
    def hi(step):
        # Entry point of the flow.
        print("Hi")

    @step
    @arrow(to='greet')
    def what_is_your_name(step):
        # Store the answer on the step object so later steps can read it.
        step.name = input("what is your name? ")

    @step(accepting=True)
    def greet(step, flow):
        # Reach back through the flow instance to the earlier step's state.
        print("Hey {name}".format(name=flow.what_is_your_name.name))
if __name__ == '__main__':
main(Greeting)
|
14,172 | ca7ca4bbe546d5efc33e256eb6bd516a45be0cd9 | from sklearn.datasets import load_iris
from sklearn.model_selection import cross_val_score
from sklearn.neural_network import MLPClassifier
if __name__ == "__main__":
    X, y = load_iris(return_X_y=True)
    # MLP stands for multi-layer perceptron.
    # NOTE(review): with the default max_iter, SGD may not converge on iris
    # (sklearn emits ConvergenceWarning) — confirm that is acceptable here.
    clf = MLPClassifier(solver="sgd", random_state=1)
    # 5-fold cross-validated accuracy scores.
    print(cross_val_score(clf, X, y, cv=5))
|
14,173 | 6d5ea44f7e58ef1f7044c8fc2fcc2e11204406fd |
print(type(5)) |
14,174 | 82575b41353e9d60ff71ed4398c7ecbe69b964f8 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 13 13:56:41 2019
@author: lab
"""
import os
import h5py
import numpy as np
import torch
from pathlib import Path
from tools.loggers import call_logger
#from tools.loggers import call_logger
#logger = utils.get_logger('UNet3DPredictor')
import os.path as osp
from tqdm import tqdm
import pandas as pd
import cv2
fd_h5_truth = '../data/h5_rsa'
fd_h5_pred = '../checkpoints/3dunet_tvfl1_3dseres_c1b2_fd_rs_upad_upsamp/h5_pred'
fd_png_pred = '../checkpoints/3dunet_tvfl1_3dseres_c1b2_fd_rs_upad_upsamp/png_pred'
h5_list = list(Path(fd_h5_truth).glob('*.h5'))
# For each ground-truth volume, paste the cropped per-slice predictions back
# into full-resolution PNG masks (one PNG per z-slice, values 0/255).
for h5_t in tqdm(h5_list):
    fn = h5_t.stem
    fn_pred = Path(fd_h5_pred) / (fn + '_predictions.h5')
    with h5py.File(str(h5_t), 'r') as ft:
        # Crop bookkeeping stored with the volume:
        # (ws, we, hs, he, ds, de, full_height, full_width, depth).
        xyzs = ft['xyz'][...]
    with h5py.File(str(fn_pred), 'r') as fp:
        # First channel of the prediction volume.
        lbs = fp['predictions'][...][0]
    ws, we, hs, he, ds, de, hh, ww, dd = xyzs
    png_fd = Path(fd_png_pred) / fn
    png_fd.mkdir(parents=True, exist_ok=True)
    for zz in range(xyzs[-1]):
        lb_z_crp = lbs[zz]
        # Blank full-size canvas; paste the cropped slice at its original
        # position, scaled from [0, 1] to 8-bit.
        img = np.zeros((hh, ww), dtype='uint8')
        img[hs:he + 1, ws:we + 1] = (lb_z_crp * 255).astype('uint8')
        cv2.imwrite(str(png_fd / (str(zz) + '.png')), img)
#def _get_output_file(dataset,out_path, suffix='_predictions'):
# #return f'{os.path.splitext(dataset.file_path)[0]}{suffix}.h5'
# return str(Path(out_path)/(Path(dataset.file_path).stem + suffix + '.h5'))
#
#
#def _get_dataset_names(config, number_of_datasets):
# dataset_names = config.get('dest_dataset_name')
# if dataset_names is not None:
# if isinstance(dataset_names, str):
# return [dataset_names]
# else:
# return dataset_names
# else:
# default_prefix = 'predictions'
# if number_of_datasets == 1:
# return [default_prefix]
# else:
# return [f'{default_prefix}{i}' for i in range(number_of_datasets)]
#
#
#
#def save_predictions(prediction_maps, output_file, dataset_names):
# """
# Saving probability maps to a given output H5 file. If 'average_channels'
# is set to True average the probability_maps across the the channel axis
# (useful in case where each channel predicts semantically the same thing).
#
# Args:
# prediction_maps (list): list of numpy array containing prediction maps in separate channels
# output_file (string): path to the output H5 file
# dataset_names (list): list of dataset names inside H5 file where the prediction maps will be saved
# """
# assert len(prediction_maps) == len(dataset_names), 'Each prediction map has to have a corresponding dataset name'
# logger.info(f'Saving predictions to: {output_file}...')
#
# with h5py.File(output_file, "w") as output_h5:
# for prediction_map, dataset_name in zip(prediction_maps, dataset_names):
# #logger.info(f"Creating dataset '{dataset_name}'...")
# output_h5.create_dataset(dataset_name, data=prediction_map, compression="gzip")
#
#
#
#
#if __name__ == '__main__':
# # Load configuration
# config = load_config()
#
# # Load model state
# model_path = config['model_path']
#
# model_fd = Path(model_path).parent
#
#
# logger = call_logger(log_file = str(model_fd/'test_log.txt'),log_name = 'UNetPredict')
#
#
#
# if 'output_path' in config.keys():
# out_path = config['output_path']
# else:
# out_path = str(model_fd/'h5_pred')
#
# os.makedirs(out_path,exist_ok = True)
#
#
#
#
# logger.info('Loading HDF5 datasets...')
#
#
# datasets_config = config['datasets']
#
#
# logger.info('Loading HDF5 datasets...')
# test_loaders = get_test_loaders(config)
# l_test_loaders = (list(test_loaders))
#
#
# p_ids = list()
# recpreF1 = list()
# for test_loader in l_test_loaders:
# logger.info(f"Processing '{test_loader.dataset.file_path}'...")
#
#
#
# output_file = _get_output_file(test_loader.dataset,out_path)
#
# # run the model prediction on the entire dataset and save to the 'output_file' H5
#
# dataset_names = _get_dataset_names(config, len(predictions))
#
#
# save_predictions(predictions, output_file, dataset_names)
#
# predictionsoutput_file
# output_file
# dataset_names
# ori_h5 = test_loader.dataset.file_path
#
# with h5py.File(ori_h5, 'r') as f:
# label = f['label'][...]
#
# from sklearn.metrics import confusion_matrix
#
# #%%
# if config['model']['final_sigmoid']:
# mask_t = (predictions[0]>=0.5).astype('int')
# else:
# mask_t = (predictions[0][1]>=0.5).astype('int')
# label_t = label
#
#
#
#
|
14,175 | dd9395876a1f3492e411579eb90f09ae5e6d3c77 | # resides at $HOME/.ipython/profile_default/ipython_config.py
from pathlib import Path
import logging
_logger = logging.getLogger(Path(__file__).stem)
_logger.info('disabling parso logging')
# Silence parso's very chatty DEBUG output in IPython tab-completion.
logging.getLogger('parso').setLevel(level=logging.WARNING)
|
14,176 | 80875f0cf4ea88bc6d3e9a2c9cfbfe4e9b454d0e | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipaySecurityRiskHufuAuthQueryModel(object):
    """Request model with three optional fields: code, policy and serial.

    Supports round-tripping through plain dicts via to_alipay_dict /
    from_alipay_dict; falsy fields are omitted from the dict form.
    """

    def __init__(self):
        self._code = None
        self._policy = None
        self._serial = None

    @property
    def code(self):
        return self._code

    @code.setter
    def code(self, value):
        self._code = value

    @property
    def policy(self):
        return self._policy

    @policy.setter
    def policy(self, value):
        self._policy = value

    @property
    def serial(self):
        return self._serial

    @serial.setter
    def serial(self, value):
        self._serial = value

    def to_alipay_dict(self):
        """Serialise the truthy fields into a plain dict."""
        params = dict()
        for field in ('code', 'policy', 'serial'):
            value = getattr(self, field)
            if not value:
                continue
            # Nested models serialise themselves; plain values pass through.
            if hasattr(value, 'to_alipay_dict'):
                params[field] = value.to_alipay_dict()
            else:
                params[field] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a dict; empty/None input yields None."""
        if not d:
            return None
        instance = AlipaySecurityRiskHufuAuthQueryModel()
        for field in ('code', 'policy', 'serial'):
            if field in d:
                setattr(instance, field, d[field])
        return instance
|
14,177 | d7fafd7c0af34cd9d6c03048774b7b421ece7c5c | #!/usr/bin/env python
# coding: utf-8
# In[2]:
from keras.models import Sequential
from keras.layers import Dense
import numpy
import pandas as pd
# In[5]:
import xgboost as xgb
from sklearn.metrics import mean_squared_error
import pandas as pd
import numpy as np
# In[3]:
# Load the engineered train/test feature files.
train = pd.read_csv("C:/Users/TheUnlikelyMonk/Desktop/Analytics Vidhya/Club Mahindra/newtraindata.csv")
test = pd.read_csv("C:/Users/TheUnlikelyMonk/Desktop/Analytics Vidhya/Club Mahindra/newtestdata.csv")

# Drop the index column written by a previous to_csv().
train = train.drop(['Unnamed: 0'], axis=1)
test = test.drop(['Unnamed: 0'], axis=1)

# The last column of the training frame is the regression target.
X, y = train.iloc[:, :-1], train.iloc[:, -1]

xg_reg = xgb.XGBRegressor(objective='reg:linear', colsample_bytree=0.3, learning_rate=0.1,
                          max_depth=5, alpha=10, n_estimators=10)
xg_reg.fit(X, y)

preds = xg_reg.predict(test)

# BUG FIX: the notebook export contained a bare reference to an undefined
# name `preds1`, which raised NameError when run as a script; the stray
# display-only expressions were removed.
predictions = pd.DataFrame(preds)
predictions.to_csv("xgb.csv")
|
14,178 | b1f4f120fdf58f5f6f84cacec9633ba00fbcfa14 | def f1():
print("mod1 f1")
def f2():
    """Print the module-1 f2 marker line on stdout (text reproduced verbatim)."""
    marker = "mod1 f2ß"
    print(marker)
|
14,179 | a7466e8cb47b35c216abb0edbd8b8566fe6362aa | from django.shortcuts import render
from django.http import HttpResponse
from django.shortcuts import render
from django import template
# Create your views here.
def home(request):
    """Render the landing page template."""
    return render(request, 'templates/index.html')
def hello(request):
    """Return a plain-text hello-world response."""
    return HttpResponse(u'Hellow World! from hello', content_type="text/plain")
def static_handler(request):
    """Render the static-handler demo template."""
    return render(request, 'templates/static_handler.html')
14,180 | 53225f429cd26531f22ada0cd1c6890d27b0e534 | from django.urls import path
from . import views
urlpatterns = [
    # Dashboard landing page.
    path('', views.dashboard, name='test'),
    # path('downloadimage/<str:imageid>/$', views.download_image, name='download_image'),
    # Filter a single record by primary key.
    path('filter/<int:id>/', views.filter, name='filter'),
    path('mapview', views.mapView, name='mapview'),
    # Table view answers both the plain view and the filtered variant.
    path('tablefilter', views.tableView, name='tablefilter'),
    path('tableview', views.tableView, name='tableview'),
    # path('chartview', views.chartView,name='chartview'),
    # Analytics view likewise serves both routes.
    path('analyticfilter', views.analyticView, name='analyticfilter'),
    path('analyticview', views.analyticView, name='analyticview'),
]
14,181 | 20668303321637d3328bff1a625f9a906a603285 |
print "hola mundo"
print 5 + 3.45
|
14,182 | 8e573eb9d016997bff577101f853deeb0557e3ae | """ Class for the score table: Marker.
Attributes:
__level: in str we put the name "Level" and then we attribute the value 0 as integer.
__alive: in str we put the name "Alive" and then we attribute the value 0 as integer.
__saved: in str we put the name "Saved" and then we attribute the value 0 as integer.
__died: in str we put the name "Died" and then we attribute the value 0 as integer.
__ladders: in str we put the name "Ladders" and then we attribute the value 0 as integer.
__umbrellas: in str we put the name "Umbrellas" and then we attribute the value 0 as integer.
__blockers: in str we put the name "Blockers" and then we attribute the value 0 as integer.
"""
class Marker:
    """Score board for the game: display labels plus integer counters.

    Each ``__xxx`` attribute holds the fixed display prefix (e.g. "Level: ")
    and each ``__xxx_value`` attribute holds the current count. Labels are
    exposed through read-only properties; counters are read/write.
    """

    # Display labels (read-only) and class-level default counter values.
    __level: str = "Level: "
    __level_value: int = 0
    __alive: str = "Alive: "
    __alive_value: int = 0
    __saved: str = "Saved: "
    __saved_value: int = 0
    __died: str = "Died: "
    __died_value: int = 0
    __ladders: str = "Ladders: "
    # BUG FIX: was "__ladder_value" — inconsistent with __init__ and the
    # ladders_value property below, so the class default never applied.
    __ladders_value: int = 0
    __umbrellas: str = "Umbrellas: "
    __umbrellas_value: int = 0
    __blockers: str = "Blockers: "
    __blockers_value: int = 0

    def __init__(self, level, alive, saved, died, ladders, umbrellas, blockers):
        """Initialise every counter from the given starting values."""
        self.__level_value = level
        self.__alive_value = alive
        self.__saved_value = saved
        self.__died_value = died
        self.__ladders_value = ladders
        self.__umbrellas_value = umbrellas
        self.__blockers_value = blockers

    # Read-only labels shown on the game board.
    @property
    def level(self):
        return self.__level

    @property
    def alive(self):
        return self.__alive

    @property
    def saved(self):
        return self.__saved

    @property
    def died(self):
        return self.__died

    @property
    def ladders(self):
        return self.__ladders

    @property
    def umbrellas(self):
        return self.__umbrellas

    @property
    def blockers(self):
        return self.__blockers

    # Read/write counters.
    @property
    def level_value(self):
        return self.__level_value

    @level_value.setter
    def level_value(self, value):
        self.__level_value = value

    @property
    def alive_value(self):
        return self.__alive_value

    @alive_value.setter
    def alive_value(self, value):
        self.__alive_value = value

    @property
    def saved_value(self):
        return self.__saved_value

    @saved_value.setter
    def saved_value(self, value):
        self.__saved_value = value

    @property
    def died_value(self):
        return self.__died_value

    @died_value.setter
    def died_value(self, value):
        self.__died_value = value

    @property
    def ladders_value(self):
        return self.__ladders_value

    @ladders_value.setter
    def ladders_value(self, value):
        self.__ladders_value = value

    @property
    def umbrellas_value(self):
        return self.__umbrellas_value

    @umbrellas_value.setter
    def umbrellas_value(self, value):
        self.__umbrellas_value = value

    @property
    def blockers_value(self):
        return self.__blockers_value

    @blockers_value.setter
    def blockers_value(self, value):
        self.__blockers_value = value
14,183 | 86a6769a9470126f0a633b1719b2bc751641941c | DEBUG = True
DEVELOPER_KEY = "AI39si5pbWC1StZw1ughtM2KuK4XORJVds7wl_QiYZnW-wsiyLr8oeX3Let3oCvQdHBGf8zee_FUudLzeEvorouufUZbuWhWtg" |
14,184 | b5a12618cb7da12b43204336cbd0ab900eb82976 | from pkg import t_1
t_1.prt('') |
14,185 | eefe616c1f8169348369d8950d93894cfd1ffb4e | import turtle
t = turtle

iterations = input("Enter the number of generations: ")  # type in the string
iterations = int(iterations)
startingLen = 500  # length of gen0 line

# pick up the pen and move the turtle to the left
t.up()
t.setpos(-startingLen * 1 / 2, 0)
t.speed(0)

# L-system axiom: a single line segment.
dragon = 'F'
# dragon = 'FRFRF' # dragon snowflake axiom

# make the final L system based on number of iterations
for i in range(iterations):
    # replace any line in the system with a line that has a triangle
    dragon = dragon.replace('F', 'FLFRFRFFLFLFRF')

t.down()
# draw the line in red, fill in enclosed spaces in black
t.color('red', 'black')
# t.begin_fill()
for move in dragon:
    if move == 'F':
        # NOTE(review): each generation replaces one segment with 8 segments;
        # a quadratic-Koch scaling would divide by 4 ** iterations, so the
        # extra "- 1" here looks suspicious — confirm intended.
        t.forward(startingLen / (4 ** iterations - 1))
    if move == 'R':
        t.right(90)
    if move == 'L':
        t.left(90)
# t.end_fill()
t.mainloop()
|
14,186 | b8d704d33c8430706319aff1590eb560e48c0eea | #!/usr/bin/env python
# coding: utf-8
# In[1]:
from sqlalchemy import create_engine
import psycopg2
import pandas as pd
import datetime
# datos (connection settings read from the environment)
# NOTE(review): `os` is used here but never imported in this file — this
# raises NameError at import time; add `import os`.
hostname = os.getenv("pc_vl_ip")
username = os.getenv("pc_vl_username")
password = os.getenv("pc_vl_pass")
tabla = '"ADJU"'
# SQL templates: {0} is the (already quoted) table name, {1} the version string.
update = "UPDATE public.""{0}"" SET vers = '{1}'"
select = "SELECT vers from public.""{0}"" "
# NOTE(review): `hostname_vero` is undefined — the variable above is named
# `hostname` — so this line raises NameError; confirm which name was intended.
con = psycopg2.connect(host=hostname_vero, user=username, password=password)
lista_db = 'SELECT datname FROM pg_database WHERE datistemplate = false'
bases = pd.read_sql(lista_db,con)
################### calcula la version anterior de base de datos
def bdanterior(bd):
    """Return the database name that precedes *bd* in the sorted family.

    Databases sharing the prefix of *bd* up to its first underscore are
    collected from the module-level `bases` DataFrame, sorted, and the entry
    immediately before *bd* is returned.

    NOTE(review): relies on the global `bases`; if *bd* is the first of its
    family this returns the last element (listado[-1]) — confirm intended.
    """
    # Family prefix: everything up to and including the first underscore.
    caracter = bd.find("_")
    t = bd[: caracter + 1]
    listado = []
    for i, fila in bases.iterrows():
        if str(t) in fila.datname:
            listado.append(fila.datname)
    listado.sort()
    # Walk the sorted names to find bd's position (j ends 1 past it).
    j = 0
    for i in listado:
        j = j + 1
        if str(bd) in i:
            bd = i
            break
    bd2 = listado[j - 2]
    return (bd2)
###################
print('Ingresa la base de datos a actualizar')
bd = input()
for i, fila in bases.iterrows():
if str(bd) in fila.datname:
print("Iniciando base de datos {0}\n".format(fila.datname))
con_b = psycopg2.connect(host=hostname_vero, user=username, password=password, dbname = fila.datname)
cursor = con_b.cursor()
cursor.execute(select.format(tabla))
a = cursor.fetchall()
version = a[0][0]
print('La version actual es '+ version)
con_b.commit()
con_b.close()
print('Ingresa la base de datos de la cual tomar la version o presiona Enter para cambiar por una version anterior')
bd2 = input()
if bd2 == '':
bd2 = bdanterior(bd)
else:
for i, fila in bases.iterrows():
if str(bd) in fila.datname:
print(fila.datname + ' esta en la lista de bases de datos')
for i, fila in bases.iterrows():
if str(bd2) in fila.datname:
print("Iniciando base de datos {0}\n".format(fila.datname))
con_c = psycopg2.connect(host=hostname_vero, user=username, password=password, dbname = fila.datname)
cursor = con_c.cursor()
cursor.execute(select.format(tabla))
result1 = cursor.fetchall()
print('La version de '+ fila.datname + ' es ' + result1[0][0])
con_c.commit()
con_c.close()
vers = result1[0][0]
for i, fila in bases.iterrows():
if str(bd) in fila.datname:
print("Actualizando base de datos {0}\n".format(fila.datname))
con_b = psycopg2.connect(host=hostname_vero, user=username, password=password, dbname = fila.datname)
cursor = con_b.cursor()
cursor.execute(update.format(tabla,vers))
con_b.commit()
con_b.close()
# In[ ]:
|
14,187 | 34d1ebfd2d1dce2db3bae1bcba300fd7c20f2425 | from email import encoders
from email.mime.base import MIMEBase
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
import smtplib
import Config
import ExceptionManager
import datetime
exceptionFileName = "Email.py"
class Email:
    """Thin Gmail SMTP wrapper used to mail deploy results with attachments."""

    def __init__(self, toaddr):
        """Read sender credentials from Config; fall back to the configured
        recipient when *toaddr* is None."""
        self.fromaddr = Config.getMailFrom()
        if toaddr == None:
            self.toaddr = Config.getMailTo()
        else:
            self.toaddr = toaddr
        self.password = Config.getMailPassword()
        self.msg = MIMEMultipart()

    def attach(self, file):
        """Attach *file* (an object exposing .name and .spoolPath) to the mail.

        Failures are logged through ExceptionManager and otherwise swallowed.
        """
        try:
            part = MIMEBase('application', 'octet-stream')
            filename = file.name
            attachment = open(file.spoolPath, "rb")
            part.set_payload((attachment).read())
            encoders.encode_base64(part)
            part.add_header('Content-Disposition',
                            "attachment; filename= %s" % filename)
            self.msg.attach(part)
        except Exception as error:
            ExceptionManager.WriteException(
                str(error), "attach", exceptionFileName)

    def sendmail(self, branch):
        """Send the deploy-finished mail for *branch* via Gmail.

        Returns True on success, False when SMTP fails (error is logged).
        Subject/body text is Turkish by design.
        """
        dateTime = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
        self.msg['From'] = self.fromaddr
        self.msg['To'] = self.toaddr
        self.msg['Subject'] = "Ziraat " + branch + " Deploy - " + dateTime
        self.body = "Ziraat " + branch + " Deployu Tamamlanmıştır. \n Çalışan scriptlerin sonuçları ek olarak eklendi."
        self.msg.attach(MIMEText(self.body, 'plain'))
        try:
            server = smtplib.SMTP('smtp.gmail.com', 587)
            server.starttls()
            server.login(self.fromaddr, self.password)
            text = self.msg.as_string()
            server.sendmail(self.fromaddr, self.toaddr, text)
            server.quit()
        except Exception as error:
            ExceptionManager.WriteException(
                str(error), "sendmail", exceptionFileName)
            return False
        return True
14,188 | 234441a58357716f8660cff04545054fe0d9ff5d | #!/usr/bin/env python
# Simple script to create ebs snapshots for volume list and cleanup of snapshots after n of days
import boto
from boto import ec2
from datetime import datetime
from datetime import timedelta
now = datetime.now()
today = now.date()

# Definitions
num_of_days_to_keep = 13
volume_list = {'portaldata': 'vol-111111', 'bankdata': 'vol-2222222'}
# SECURITY(review): hard-coded AWS access keys committed to source — use an
# IAM role or environment credentials and rotate these keys.
conn = boto.ec2.connect_to_region("ap-southeast-2", aws_access_key_id='AAAAAAAAAAAAAAAAAAAAAAAAA', aws_secret_access_key='BBBBBBBBBBBBBBBBBBBBBBBBBBBBBB')

# Snapshot every configured volume, tagged so cleanup can find them later.
for name, id in volume_list.items():
    print "%s id is %s" % (name, id)
    desc = "%s-%s" % (name, today)
    snap_obj = conn.create_snapshot(id, description=desc)
    if (snap_obj):
        snap_obj.add_tags({'user': 'portalsnapshot'})
        print "%s snapshot id %s" % (name, snap_obj.id)

# Delete the snapshots taken exactly num_of_days_to_keep days ago
# (matched by their date-stamped description).
two_weeks_ago = (today - timedelta(days=num_of_days_to_keep))
for name, id in volume_list.items():
    OldDesc = "%s-%s" % (name, two_weeks_ago)
    snapshots = conn.get_all_snapshots(filters={"description": OldDesc})
    for i in snapshots:
        print i.id
        try:
            print "delete snapshot %s" % i.id
            conn.delete_snapshot(i.id)
        except Exception, err:
            print Exception, err
|
14,189 | eeb5e862d8cf48ef7d0bc265a2d6182f30e7d0b1 | from rest_framework import pagination
class CustomPaginator(pagination.PageNumberPagination):
    """DRF page-number pagination returning 3 items per page."""
    page_size = 3
|
14,190 | e8e765147fd52782597416f303bfa095a7c080bd | import os
from cs50 import SQL
from flask import Flask, flash, jsonify, redirect, render_template, request, session
from flask_session import Session
from tempfile import mkdtemp
from werkzeug.exceptions import default_exceptions, HTTPException, InternalServerError
from werkzeug.security import check_password_hash, generate_password_hash
from helpers import apology, login_required, lookup, usd
# Configure application
app = Flask(__name__)
# Ensure templates are auto-reloaded
app.config["TEMPLATES_AUTO_RELOAD"] = True
# Ensure responses aren't cached
@app.after_request
def after_request(response):
    """Disable client-side caching on every outgoing response."""
    headers = response.headers
    headers["Cache-Control"] = "no-cache, no-store, must-revalidate"
    headers["Expires"] = 0
    headers["Pragma"] = "no-cache"
    return response
# Custom filter
app.jinja_env.filters["usd"] = usd
# Configure session to use filesystem (instead of signed cookies)
app.config["SESSION_FILE_DIR"] = mkdtemp()
app.config["SESSION_PERMANENT"] = False
app.config["SESSION_TYPE"] = "filesystem"
Session(app)
# Configure CS50 Library to use SQLite database
db = SQL("sqlite:///finance.db")
@app.route("/")
@login_required
def index():
    """Show portfolio of stocks"""
    # if request.method == "GET":
    # Select the symbol and share count of every stock owned by this user.
    # stocks_shares = db.execute("SELECT symbol, shares FROM total WHERE id=:id ORDER BY symbol",
    # id=session["user_id"])
    # return render_template("index.html")
    # return redirect(url_for("index.html"))
    return apology("TODO")
@app.route("/buy", methods=["GET", "POST"])
@login_required
def buy():
    """Buy shares of stock"""
    if request.method == "POST":
        # Validate the form: a symbol and a positive whole number of shares.
        if not request.form.get("symbol"):
            return apology("Missing symbol")
        elif not request.form.get("shares"):
            return apology("Missing shares")
        elif not request.form.get("shares").isdigit():
            return apology("Please chose integer")
        elif int(request.form.get("shares")) < 1:
            return apology("number of stocks is less zero", 400)

        # Look up the current quote for the requested symbol.
        symbol = request.form.get("symbol")
        shares = int(request.form.get("shares"))
        quote = lookup(symbol)
        if quote == None:
            return apology("The stock does not exist", 400)
        price = quote["price"]

        # Make sure the user can afford the purchase.
        cash = db.execute("SELECT cash FROM users WHERE id=:id", id=session["user_id"])
        if float(price) * shares > float(cash[0]["cash"]):
            return apology("You don't have enough cash", 400)
        else:
            # Deduct the cost from the user's cash balance.
            db.execute("UPDATE users SET cash =:update_cash WHERE id=:id",
                       update_cash=float(cash[0]["cash"]) - (float(price) * shares),
                       id=session["user_id"])
            # BUG FIX: the table name was misspelled "partfolio" in the
            # INSERT/UPDATE (vs "portfolio" in the SELECT), the INSERT never
            # bound :symbol/:shares, and the UPDATE had no WHERE clause and
            # referenced an undefined local `shares`.
            rows2 = db.execute("SELECT * FROM portfolio WHERE id=:id AND symbol=:symbol",
                               id=session["user_id"], symbol=symbol)
            if len(rows2) == 0:
                db.execute("INSERT INTO portfolio (id, symbol, shares) VALUES (:id, :symbol, :shares)",
                           id=session["user_id"], symbol=symbol, shares=shares)
            else:
                # The user already holds this stock: add to the position.
                db.execute("UPDATE portfolio SET shares = shares + :shares WHERE id=:id AND symbol=:symbol",
                           shares=shares, id=session["user_id"], symbol=symbol)
            # BUG FIX: the successful POST branch previously fell off the end
            # (implicitly returning None, a 500 in Flask).
            return redirect("/")
    else:
        return render_template("buy.html")
@app.route("/check", methods=["GET"])
def check():
"""Return true if username available, else false, in JSON format"""
return jsonify("TODO")
@app.route("/history")
@login_required
def history():
"""Show history of transactions"""
return apology("TODO")
@app.route("/login", methods=["GET", "POST"])
def login():
"""Log user in"""
# Forget any user_id
session.clear()
# User reached route via POST (as by submitting a form via POST)
if request.method == "POST":
# Ensure username was submitted
if not request.form.get("username"):
return apology("must provide username", 403)
# Ensure password was submitted
elif not request.form.get("password"):
return apology("must provide password", 403)
# Query database for username
rows = db.execute("SELECT * FROM users WHERE username = :username",
username=request.form.get("username"))
# Ensure username exists and password is correct
if len(rows) != 1 or not check_password_hash(rows[0]["hash"], request.form.get("password")):
return apology("invalid username and/or password", 403)
# Remember which user has logged in
session["user_id"] = rows[0]["id"]
# Redirect user to home page
return redirect("/")
# User reached route via GET (as by clicking a link or via redirect)
else:
return render_template("login.html")
@app.route("/logout")
def logout():
"""Log user out"""
# Forget any user_id
session.clear()
# Redirect user to login form
return redirect("/")
@app.route("/quote", methods=["GET", "POST"])
@login_required
def quote():
    """Get stock quote."""
    if request.method == "POST":
        if not request.form.get("symbol"):
            return apology("please provide symbol", 400)
        symbol = request.form.get("symbol").upper()
        quote = lookup(symbol)
        if quote == None:
            return apology("Invalid symbol")
        return render_template("quoted.html", name=quote["name"], symbol=symbol, price=quote["price"])
    else:
        # GET: show the symbol-lookup form.
        return render_template("quote.html")
@app.route("/register", methods=["GET", "POST"])
def register():
    """Register user"""
    session.clear()
    # POST: process the submitted registration form.
    if request.method == "POST":
        # Username is required.
        username = request.form.get("username")
        if not username:
            return apology("please provide username", 400)
        # Password is required.
        password = request.form.get("password")
        if not password:
            return apology("must provide password", 400)
        # Confirmation must match the password.
        confirmation = request.form.get("confirmation")
        if password != confirmation:
            return apology("Password doesn't match")
        # Never store the plain password — hash it first.
        hashpass = generate_password_hash(password)
        # Username must not already exist.
        result = db.execute("SELECT username FROM users WHERE username=:username",
                            username=request.form.get("username"))
        if result:
            return apology("Username already exists")
        # Insert the new user with the hashed password.
        rows = db.execute("INSERT INTO users (username, hash) VALUES (:username, :hash)", username=request.form.get("username"), hash=hashpass)
        # Query database for username
        rows = db.execute("SELECT * FROM users WHERE username = :username",
                          username=request.form.get("username"))
        # (Commented-out remnants of the lecture template kept for reference.)
        # if len(rows) != 1 or not pwd_context.verify(request.form.get("password"), rows[0]["hash"]):
        # return apology("invalid username and/or password", 403)
        # Fetch the user's cash from the database, keyed by id.
        # cash = db.execute("SELECT cash FROM users WHERE id=:id", id=session["user_id"])
        # Log the new user in right away.
        session["user_id"] = rows[0]["id"]
        # Redirect user to home page
        # return render_template("index.html", cash=cash[0]["cash"], budget=cash[0]["cash"])
        return redirect("/")
    else:
        return render_template("register.html")
@app.route("/sell", methods=["GET", "POST"])
@login_required
def sell():
"""Sell shares of stock"""
return apology("TODO")
def errorhandler(e):
"""Handle error"""
if not isinstance(e, HTTPException):
e = InternalServerError()
return apology(e.name, e.code)
# Listen for errors
for code in default_exceptions:
app.errorhandler(code)(errorhandler)
|
14,191 | 62407594e47a41365fcb5add072a0d7bb1c3cf59 | class A:
name="a"
def __init__(self,name=None):
self.name=name
def Question25():
    """Demonstrate that an instance attribute shadows the class attribute."""
    a = A()
    # Assigning on the instance hides A.name for `a` only.
    a.name = "hhh"
    print(a.name, A.name)
    b = A("bbb")
    print(b.name, A.name)


Question25()
14,192 | f4cb071c64782c8d405747f0e5a6615a4b14c83f | import numpy as np
# Parameters
trials = 100000
n_bits = 120
n_patterns = [12, 24, 48, 70, 100, 120]
zero_diagonal = False
def generate_patterns(n_bits, n_pattern):
    """Draw a random (n_bits, n_pattern) matrix with entries in {-1, +1}."""
    raw = np.random.randint(2, size=(n_bits, n_pattern))
    # Map {0, 1} -> {-1, +1}.
    return 2 * raw - 1
def weight(x, i, j):
    """Hebbian coupling w_ij = (1/N) * sum_mu x[i, mu] * x[j, mu].

    x has shape (n_bits, n_patterns); N is n_bits. When the module-level
    flag zero_diagonal is set, the self-coupling w_ii is forced to 0.
    """
    if i == j and zero_diagonal:
        return 0
    rows = np.shape(x)[0]
    # Vectorised replacement for the original per-pattern Python loop.
    return np.dot(x[i, :], x[j, :]) / rows
def sgn(x):
    """Sign function with sgn(0) == 1, matching the Hopfield update rule."""
    return -1 if x < 0 else 1
def asynchronous_update(x, i_pattern, i_bit):
    """One asynchronous Hopfield update of bit *i_bit* of pattern *i_pattern*.

    Computes the local field from every bit of the stored pattern via the
    Hebbian weights, then thresholds it with sgn. Uses the module-level
    n_bits for the pattern length.
    """
    field = 0
    for j in range(n_bits):
        field += x[j, i_pattern] * weight(x, i_bit, j)
    return sgn(field)
def main():
    """Estimate the one-step bit-error probability for each pattern count."""
    for n_pattern in n_patterns:
        errors = 0
        for _ in range(trials):
            patterns = generate_patterns(n_bits, n_pattern)
            pat = np.random.randint(n_pattern)
            bit = np.random.randint(n_bits)
            updated = asynchronous_update(patterns, pat, bit)
            if updated != patterns[bit, pat]:
                errors += 1
        print(f"{errors / trials:.4f}")


if __name__ == '__main__':
    main()
|
14,193 | 1eef3da125d8564ea13f53a1a8183249280cde61 | n,m=map(int,input().split())
# Read the n x n image A and the m x m template B as character matrices.
a=[list(input()) for _ in range(n)]
b=[list(input()) for _ in range(m)]
# Number of candidate top-left placements of B inside A along each axis.
r=n-m+1
print('Yes' if any([all([a[x//r+y//m][x%r+y%m]==b[y//m][y%m] for y in range(m**2)]) for x in range(r**2)]) else 'No') |
14,194 | 53a8d362db3f69122f1d6f2cc654835efa803f46 | from api.util.utils import Logger
from api import dao, const
from api.web import config
import numpy as np
class KGraphData:
    """Assemble a knowledge-graph payload (nodes + edges) from paper rows.

    Each row contributes a title node plus author / organisation / journal /
    keyword / first-author / fund / year nodes, with labelled links between
    them. Nodes are deduplicated by label and links by (label, from, to).
    """

    def __init__(self, all_rows):
        # Monotonic id counter shared by nodes and links.
        self.id_gen = 0
        self.all_rows = all_rows
        self.nodes = []
        self.links = []
        # Display names for node categories (user-facing Chinese labels).
        self.category = {"title": "论文", "author": "作者", "organ": "机构", "source": "期刊", "keyword": "关键词",
                         "firstduty": "第一作者", "fund": "基金", "year": "发表年份"}

    def build(self):
        """Walk every row, populate self.nodes / self.links, return payload."""
        for row in self.all_rows:
            summary = row['summary']
            title = row['title']
            authorField = row['author']
            organField = row['organ']
            source = row['source']
            keywordField = row['keyword']
            firstduty = row['firstduty']
            fundField = row['fund']
            year = row['year']
            # ---------- Title ----------
            # Node: the paper itself; its info text is the abstract.
            titleNode = self.createNode(title, 0, "title", summary)
            # ---------- Authors ----------
            if authorField:
                coAuthorList = []
                for author in str(authorField).split(";"):
                    if author and str(author).strip() != '':
                        # Node: author
                        authorNode = self.createNode(author, 0, "author", '作者:' + author)
                        coAuthorList.append(authorNode)
                        # Link: paper <- author
                        self.createLink('作者', authorNode['id'], titleNode['id'])
                for author1 in coAuthorList:
                    for author2 in coAuthorList:
                        if author1['label'] != author2['label']:
                            # Link: co-authors (NOTE: both directions are added)
                            self.createLink('共现作者', author1['id'], author2['id'])
            # ---------- Organisations ----------
            if organField:
                coOrganList = []
                for organ in organField.split(';'):
                    if organ and str(organ).strip() != '':
                        # Node: organisation
                        organNode = self.createNode(organ, 0, "organ", '机构:' + organ)
                        coOrganList.append(organNode)
                        # Link: organisation -> paper
                        self.createLink("发文机构", organNode['id'], titleNode['id'])
                for organ1 in coOrganList:
                    for organ2 in coOrganList:
                        if organ1['label'] != organ2['label']:
                            # Link: co-occurring organisations
                            self.createLink('共现机构', organ1['id'], organ2['id'])
            # ---------- Journal ----------
            # Node: publishing journal
            sourceNode = self.createNode(source, 0, 'source', '期刊:' + source)
            # Link: journal -> paper
            self.createLink('发文期刊', sourceNode['id'], titleNode['id'])
            # ---------- Keywords ----------
            if keywordField:
                coKeywordList = []
                for keyword in keywordField.split(';'):
                    if keyword and str(keyword).strip() != '':
                        # Node: keyword
                        keywordNode = self.createNode(keyword, 0, 'keyword', '关键词:' + keyword)
                        coKeywordList.append(keywordNode)
                        # Link: paper -> keyword
                        self.createLink('关键词', titleNode['id'], keywordNode['id'])
                for keyword1 in coKeywordList:
                    for keyword2 in coKeywordList:
                        if keyword1['label'] != keyword2['label']:
                            # Link: co-occurring keywords
                            self.createLink('共现关键词', keyword1['id'], keyword2['id'])
            # ---------- First author ----------
            if firstduty:
                firstduty = str(firstduty).replace(";", "")
                # Node: first author
                firstdutyNode = self.createNode(firstduty, 0, 'firstduty', '第一作者:' + firstduty)
                # Link: first author -> paper
                self.createLink('第一作者', firstdutyNode['id'], titleNode['id'])
            # ---------- Funds ----------
            if fundField:
                cofundList = []
                for fund in fundField.split(';'):
                    # Node: fund
                    fundNode = self.createNode(fund, 0, 'fund', '基金:' + fund)
                    cofundList.append(fundNode)
                    # Link: fund -> paper
                    self.createLink('基金支持', fundNode['id'], titleNode['id'])
                    # Link: fund -> first author
                    # NOTE(review): firstdutyNode is unbound when this row has an
                    # empty 'firstduty' field (or stale from a previous row) --
                    # potential NameError / wrong link target; confirm intent.
                    self.createLink('作者基金', fundNode['id'], firstdutyNode['id'])
                for fund1 in cofundList:
                    for fund2 in cofundList:
                        if fund1['label'] != fund2['label']:
                            # Link: co-occurring funds
                            self.createLink('共现基金', fund1['id'], fund2['id'])
            # ---------- Year ----------
            if year and str(year).strip() != '':
                yearNode = self.createNode(str(year) + '年', 0, 'year', '发表年:' + str(year))
                self.createLink('发表年', yearNode['id'], titleNode['id'])
        return {'categories': self.category, 'data': {'nodes': self.nodes, 'edges': self.links}}

    def createNode(self, label, value, category, info):
        """Return the existing node with this label, or append a new one.

        NOTE(review): dedup is by label only (category is ignored) and uses a
        linear scan, so build() is quadratic in the number of nodes.
        """
        for node in self.nodes:
            if label == node['label']:
                return node
        self.id_gen += 1
        node = {"id": self.id_gen, "label": label, "value": value, "categories": [category], "info": info}
        self.nodes.append(node)
        return node

    def createLink(self, label, source, target):
        """Return the existing (label, from, to) link, or append a new one.

        NOTE(review): linear scan over all links on every call -- quadratic.
        """
        for link in self.links:
            if (link['label'] == label and link['from'] == source and link['to'] == target):
                return link
        self.id_gen += 1
        link = {"id": self.id_gen, "label": label, "from": source, "to": target}
        self.links.append(link)
        return link
class StatManager:
    """Statistics queries over the sci_dataset ClickHouse-style table.

    NOTE(review): every SQL statement is built with str.format interpolation;
    this is only safe if dsid/userId/count are trusted, internally generated
    values -- confirm they are never user-controlled.
    """

    def __init__(self):
        self.log = Logger(__name__).get_log
        self.dao = dao

    # Paper counts grouped by publication year.
    def statArticlesByYear(self, dsid):
        sql = "SELECT pubyear, COUNT(1) AS count FROM {} WHERE dsid ='{}' AND pubyear!='' GROUP BY pubyear ORDER BY pubyear".format(
            const.tbl_sci_dataset, dsid)
        all = self.dao.query_sci_dataset(sql)
        return all

    # Paper counts grouped by country.
    def statArticlesByCountry(self, dsid):
        sql = "SELECT country, COUNT(1) AS count FROM (SELECT arrayJoin(country) AS country FROM {} WHERE dsid ='{}' AND LENGTH(country)>0) GROUP BY country ORDER BY COUNT(1)".format(
            const.tbl_sci_dataset, dsid)
        all = self.dao.query_sci_dataset(sql)
        return all

    # Paper counts grouped by province / region.
    def statArticlesByProvince(self, dsid):
        sql = "SELECT province, COUNT(1) AS count FROM (SELECT arrayJoin(province) AS province FROM {} WHERE dsid ='{}' AND LENGTH(province)>0) GROUP BY province ORDER BY COUNT(1) DESC".format(
            const.tbl_sci_dataset, dsid)
        all = self.dao.query_sci_dataset(sql)
        return all

    # Paper counts grouped by organisation (only organs with more than 3 papers).
    def statArticlesByOrg2(self, dsid):
        sql = "SELECT org, COUNT(1) AS count FROM (SELECT arrayJoin(orgs2) AS org FROM {} WHERE dsid ='{}') GROUP BY org HAVING COUNT(1)>3 ORDER BY COUNT(1) DESC".format(
            const.tbl_sci_dataset, dsid)
        all = self.dao.query_sci_dataset(sql)
        return all

    # Paper counts grouped by publication / journal (more than 3 papers).
    def statArticlesByJournal(self, dsid):
        sql = "SELECT publication, COUNT(1) AS count FROM {} WHERE dsid ='{}' AND publication!='' GROUP BY publication HAVING COUNT(1)>3 ORDER BY COUNT(1) DESC".format(
            const.tbl_sci_dataset, dsid)
        all = self.dao.query_sci_dataset(sql)
        return all

    # Paper counts grouped by first author (more than 3 papers).
    def statArticlesByFirstDuty(self, dsid):
        sql = "SELECT firstduty, COUNT(1) AS count FROM {} WHERE dsid ='{}' AND firstduty!='' GROUP BY firstduty HAVING COUNT(1)>3 ORDER BY COUNT(1) DESC".format(
            const.tbl_sci_dataset, dsid)
        all = self.dao.query_sci_dataset(sql)
        return all

    # Paper counts grouped by author (more than 3 papers).
    def statArticlesByAuthor(self, dsid):
        sql = "SELECT author, COUNT(1) AS count FROM (SELECT arrayJoin(authors) AS author FROM {} WHERE dsid ='{}') GROUP BY author HAVING COUNT(1)>3 ORDER BY COUNT(1) DESC".format(
            const.tbl_sci_dataset, dsid)
        all = self.dao.query_sci_dataset(sql)
        return all

    # Fund-supported paper counts over the years.
    def statArticlesByFund(self, dsid):
        sql = "SELECT fund, COUNT(1) AS count FROM (SELECT arrayJoin(funds2) AS fund FROM {} WHERE dsid ='{}') GROUP BY fund ORDER BY COUNT(1) DESC".format(
            const.tbl_sci_dataset, dsid)
        all = self.dao.query_sci_dataset(sql)
        return all

    # Subject distribution statistics.
    # NOTE(review): the loop iterates over an empty list, so this always
    # returns two empty lists -- looks like an unimplemented stub.
    def statArticlesBySubject(self, dsid):
        xList = []
        yList = []
        for row in []:
            xList.append(row['persons'])
            yList.append(row['count'])
        return xList, yList

    # Co-author head-count statistics, shaped for a stacked bar chart
    # (one series per number of co-authors, category axis = publication year).
    def statPersonsByCoAuthor(self, dsid):
        sql = "SELECT LENGTH (authors) as persons, toInt32(pubyear) as pubyear , COUNT(1) AS count FROM {} WHERE dsid ='{}' AND LENGTH(authors)>0 GROUP BY persons,pubyear ORDER BY count ,pubyear".format(
            const.tbl_sci_dataset, dsid)
        all = self.dao.query_sci_dataset(sql)

        # Count for a given (pubyear, persons) pair, 0 when absent.
        def __find(pubyear, persons, datas):
            for row in datas:
                if row['pubyear'] == pubyear and row['persons'] == persons:
                    return row['count']
            return 0

        # Densify both axes to contiguous ranges so every cell exists.
        allpubyears = np.array([row['pubyear'] for row in all])
        allpubyears = [n for n in range(allpubyears.min(), allpubyears.max() + 1)]
        allpersons = np.array([row['persons'] for row in all])
        allpersons = [n for n in range(allpersons.min(), allpersons.max() + 1)]
        series = []
        for persons in allpersons:  # number of co-authors
            count_list = [__find(pubyear, persons, all) for pubyear in allpubyears]  # publication years
            series.append({'name': str(persons)+'人', 'type':'bar', 'stack':'总量', 'label':{'show':False, 'position':'insideRight'}, 'data':count_list})
        legend = {'data': [str(persons)+'人' for persons in allpersons]}
        yAxis = {'type':'category', 'data':allpubyears}
        return legend, yAxis, series

    # Keyword frequency statistics (keywords appearing more than sub_min times).
    def statKwsByCount(self, dsid, sub_min):
        sql = "SELECT kw, COUNT(1) AS count FROM (SELECT arrayJoin(kws) AS kw FROM {} WHERE dsid ='{}') GROUP BY kw HAVING COUNT(1)>{} ORDER BY count DESC".format(
            const.tbl_sci_dataset, dsid, sub_min)
        all = self.dao.query_sci_dataset(sql)  # returns rows shaped like [ {'count': 1, 'pubyear': '2009', 'persons': 1} ...]
        return all

    # Author co-occurrence matrix source rows.
    def coocmatrix_author(self, dsid):
        sql = "SELECT authors FROM {} WHERE dsid ='{}' AND LENGTH(authors)>0".format(const.tbl_sci_dataset, dsid)
        all = self.dao.query_sci_dataset(sql)
        return all

    # Fund co-occurrence matrix source rows.
    def coocmatrix_fund(self, dsid):
        sql = "SELECT funds2 FROM {} WHERE dsid ='{}' AND LENGTH(funds2)>0".format(const.tbl_sci_dataset, dsid)
        all = self.dao.query_sci_dataset(sql)
        return all

    # Country co-occurrence matrix source rows.
    def coocmatrix_country(self, dsid):
        sql = "SELECT country FROM {} WHERE dsid ='{}' AND LENGTH(country)>0".format(const.tbl_sci_dataset, dsid)
        all = self.dao.query_sci_dataset(sql)
        return all

    # Organisation co-occurrence matrix source rows.
    def coocmatrix_orgs2(self, dsid):
        sql = "SELECT orgs2 FROM {} WHERE dsid ='{}' AND LENGTH(orgs2)>0".format(const.tbl_sci_dataset, dsid)
        all = self.dao.query_sci_dataset(sql)
        return all

    # Keyword co-occurrence matrix source rows.
    def coocmatrix_keyword(self, dsid):
        sql = "SELECT kws FROM {} WHERE dsid ='{}' AND LENGTH(kws)>0".format(const.tbl_sci_dataset, dsid)
        all = self.dao.query_sci_dataset(sql)
        return all

    # Topic-word frequency statistics.
    def statTwsByCount(self, dsid):
        sql = "SELECT kw, COUNT(1) AS count FROM (SELECT arrayJoin(kws) AS kw FROM {} WHERE dsid ='{}') GROUP BY kw ORDER BY count DESC".format(
            const.tbl_sci_dataset, dsid)
        print(sql)
        all = self.dao.query_sci_dataset(sql)
        xList = []
        yList = []
        for row in all:
            xList.append(row['kw'])
            yList.append(row['count'])
        return xList, yList

    # Build knowledge-graph data from up to `count` CNKI rows for a user.
    def kg(self, userId, dsid, count):
        sql = "SELECT title,author,organ,source,keyword,summary,firstduty,fund,year FROM sci_cnki WHERE usercode='{}' AND dsid={} limit {}".format(
            userId, dsid, count)
        print(sql)
        # NOTE(review): self.db is never set on this class (only self.dao in
        # __init__), so this raises AttributeError at runtime -- confirm
        # whether self.dao.query_sci_dataset was intended here.
        all = self.db.fetch_all(sql)
        id_gen = 0  # NOTE(review): unused local.
        return KGraphData(all).build()
statManager = StatManager() |
14,195 | 0003bf8cf2e21f6164c1b7d4270a39830041a744 | import maya.cmds as cmds
import glTools.utils.stringUtils
import glTools.utils.surface
def loadPlugin():
    """
    Load glCurveUtils plugin. No-op if it is already loaded.
    Returns 1 once the plugin is available; raises Exception on failure.
    """
    # Check if plugin is loaded
    if not cmds.pluginInfo('glCurveUtils', q=True, l=True):
        # Load Plugin
        try:
            cmds.loadPlugin('glCurveUtils')
        except Exception:
            # Narrowed from a bare except so SystemExit / KeyboardInterrupt
            # are not swallowed; re-raise with a readable message.
            raise Exception('Unable to load glCurveUtils plugin!')
    # Return Result
    return 1
def create(surface, spansU=0, spansV=0, prefix=None):
    """
    Build a "lockLength" setup for a nurbs surface: the surface is rebuilt
    first in U and then in V from duplicated isoparm curves whose arc lengths
    run through setCurveLength nodes, so the result can blend between its
    live length (lockLength=0) and a stored target length (lockLength=1).

    @param surface: Nurbs surface to build the setup from
    @param spansU: Number of U spans to sample (0 = use the surface's spansU)
    @param spansV: Number of V spans to sample (0 = use the surface's spansV)
    @param prefix: Naming prefix (defaults to the surface name minus suffix)
    @return: Name of the top "lockLength" group containing the setup
    """
    # Load Plugins
    loadPlugin()

    # ==========
    # - Checks -
    # ==========

    # Check Surface
    if not glTools.utils.surface.isSurface(surface):
        raise Exception('Object "' + surface + '" is not a valid nurbs surface!')

    # Check Prefix
    if not prefix: prefix = glTools.utils.stringUtils.stripSuffix(surface)

    # Get Surface Details
    if not spansU: spansU = cmds.getAttr(surface + '.spansU')
    if not spansV: spansV = cmds.getAttr(surface + '.spansV')
    minU = cmds.getAttr(surface + '.minValueU')
    maxU = cmds.getAttr(surface + '.maxValueU')
    minV = cmds.getAttr(surface + '.minValueV')
    maxV = cmds.getAttr(surface + '.maxValueV')
    # Parameter step between sampled isoparms in each direction.
    incU = (maxU - minU) / spansU
    incV = (maxV - minV) / spansV

    # =============
    # - Rebuild U -
    # =============

    crvU = []
    for i in range(spansU + 1):

        # Duplicate Surface Curve (isoparm at u = incU * i, with history)
        dupCurve = cmds.duplicateCurve(surface + '.u[' + str(incU * i) + ']', ch=True, rn=False, local=False)
        dupCurveNode = cmds.rename(dupCurve[1], prefix + '_spanU' + str(i) + '_duplicateCurve')
        dupCurve = cmds.rename(dupCurve[0], prefix + '_spanU' + str(i) + '_crv')
        crvU.append(dupCurve)

        # Set Curve Length
        # blendTwoAttr blends live arcLength (input[0]) with the stored
        # targetLength (input[1]); attributesBlender is driven by lockLength.
        arcLen = cmds.arclen(dupCurve)
        setLen = cmds.createNode('setCurveLength', n=prefix + '_spanU' + str(i) + '_setCurveLength')
        crvInfo = cmds.createNode('curveInfo', n=prefix + '_spanU' + str(i) + '_curveInfo')
        blendLen = cmds.createNode('blendTwoAttr', n=prefix + '_spanU' + str(i) + 'length_blendTwoAttr')
        cmds.addAttr(dupCurve, ln='targetLength', dv=arcLen, k=True)
        cmds.connectAttr(dupCurveNode + '.outputCurve', crvInfo + '.inputCurve', f=True)
        cmds.connectAttr(dupCurveNode + '.outputCurve', setLen + '.inputCurve', f=True)
        cmds.connectAttr(crvInfo + '.arcLength', blendLen + '.input[0]', f=True)
        cmds.connectAttr(dupCurve + '.targetLength', blendLen + '.input[1]', f=True)
        cmds.connectAttr(blendLen + '.output', setLen + '.length', f=True)
        cmds.connectAttr(setLen + '.outputCurve', dupCurve + '.create', f=True)

        # Add Control Attributes
        cmds.addAttr(dupCurve, ln='lockLength', min=0, max=1, dv=1, k=True)
        cmds.addAttr(dupCurve, ln='lengthBias', min=0, max=1, dv=0, k=True)
        cmds.connectAttr(dupCurve + '.lockLength', blendLen + '.attributesBlender', f=True)
        cmds.connectAttr(dupCurve + '.lengthBias', setLen + '.bias', f=True)

    # Loft New Surface
    srfU = cmds.loft(crvU, ch=True, uniform=True, close=False, autoReverse=False, degree=3)
    srfUloft = cmds.rename(srfU[1], prefix + '_rebuildU_loft')
    srfU = cmds.rename(srfU[0], prefix + '_rebuildU_srf')

    # Rebuild 0-1 (normalize the parameter range of the lofted surface)
    rebuildSrf = cmds.rebuildSurface(srfU, ch=True, rpo=True, rt=0, end=1, kr=0, kcp=1, su=0, du=3, sv=0, dv=3, tol=0)
    rebuildSrfNode = cmds.rename(rebuildSrf[1], prefix + '_rebuildU_rebuildSurface')

    # Add Control Attributes (surface-level attrs drive every span curve)
    cmds.addAttr(srfU, ln='lockLength', min=0, max=1, dv=1, k=True)
    cmds.addAttr(srfU, ln='lengthBias', min=0, max=1, dv=0, k=True)
    for crv in crvU:
        cmds.connectAttr(srfU + '.lockLength', crv + '.lockLength', f=True)
        cmds.connectAttr(srfU + '.lengthBias', crv + '.lengthBias', f=True)

    # =============
    # - Rebuild V -
    # =============
    # Same construction as above, but sampling isoparms of the U-rebuilt
    # surface in the V direction.

    crvV = []
    for i in range(spansV + 1):

        # Duplicate Surface Curve
        dupCurve = cmds.duplicateCurve(srfU + '.v[' + str(incV * i) + ']', ch=True, rn=False, local=False)
        dupCurveNode = cmds.rename(dupCurve[1], prefix + '_spanV' + str(i) + '_duplicateCurve')
        dupCurve = cmds.rename(dupCurve[0], prefix + '_spanV' + str(i) + '_crv')
        crvV.append(dupCurve)

        # Set Curve Length
        arcLen = cmds.arclen(dupCurve)
        setLen = cmds.createNode('setCurveLength', n=prefix + '_spanV' + str(i) + '_setCurveLength')
        crvInfo = cmds.createNode('curveInfo', n=prefix + '_spanV' + str(i) + '_curveInfo')
        blendLen = cmds.createNode('blendTwoAttr', n=prefix + '_spanV' + str(i) + 'length_blendTwoAttr')
        cmds.addAttr(dupCurve, ln='targetLength', dv=arcLen, k=True)
        cmds.connectAttr(dupCurveNode + '.outputCurve', crvInfo + '.inputCurve', f=True)
        cmds.connectAttr(dupCurveNode + '.outputCurve', setLen + '.inputCurve', f=True)
        cmds.connectAttr(crvInfo + '.arcLength', blendLen + '.input[0]', f=True)
        cmds.connectAttr(dupCurve + '.targetLength', blendLen + '.input[1]', f=True)
        cmds.connectAttr(blendLen + '.output', setLen + '.length', f=True)
        cmds.connectAttr(setLen + '.outputCurve', dupCurve + '.create', f=True)

        # Add Control Attribute
        cmds.addAttr(dupCurve, ln='lockLength', min=0, max=1, dv=1, k=True)
        cmds.addAttr(dupCurve, ln='lengthBias', min=0, max=1, dv=0, k=True)
        cmds.connectAttr(dupCurve + '.lockLength', blendLen + '.attributesBlender', f=True)
        cmds.connectAttr(dupCurve + '.lengthBias', setLen + '.bias', f=True)

    # Loft New Surface
    srfV = cmds.loft(crvV, ch=True, uniform=True, close=False, autoReverse=False, degree=3)
    srfVloft = cmds.rename(srfV[1], prefix + '_rebuildV_loft')
    srfV = cmds.rename(srfV[0], prefix + '_rebuildV_srf')

    # Rebuild 0-1
    rebuildSrf = cmds.rebuildSurface(srfV, ch=True, rpo=True, rt=0, end=1, kr=0, kcp=1, su=0, du=3, sv=0, dv=3, tol=0)
    rebuildSrfNode = cmds.rename(rebuildSrf[1], prefix + '_rebuildV_rebuildSurface')

    # Add Control Attribute
    cmds.addAttr(srfV, ln='lockLength', min=0, max=1, dv=1, k=True)
    cmds.addAttr(srfV, ln='lengthBias', min=0, max=1, dv=0, k=True)
    for crv in crvV:
        cmds.connectAttr(srfV + '.lockLength', crv + '.lockLength', f=True)
        cmds.connectAttr(srfV + '.lengthBias', crv + '.lengthBias', f=True)

    # ===================
    # - Build Hierarchy -
    # ===================

    rebuildUGrp = cmds.group(em=True, n=prefix + '_rebuildU_grp')
    cmds.parent(crvU, rebuildUGrp)
    cmds.parent(srfU, rebuildUGrp)

    rebuildVGrp = cmds.group(em=True, n=prefix + '_rebuildV_grp')
    cmds.parent(crvV, rebuildVGrp)
    cmds.parent(srfV, rebuildVGrp)

    rebuildGrp = cmds.group(em=True, n=prefix + '_lockLength_grp')
    cmds.parent(rebuildUGrp, rebuildGrp)
    cmds.parent(rebuildVGrp, rebuildGrp)

    # =================
    # - Return Result -
    # =================

    return rebuildGrp
|
14,196 | 8e3e772353bc72691c50744cd865617e6d2eaa33 | n = int(input())
a = [int(input()) for _ in range(n)]
# left[i] = max(a[0..i]) -- running prefix maxima.
# right (before the reversal below) accumulates suffix maxima from the end.
left = [a[0]]
right = [a[-1]]
for i in range(1, n):
    left.append(max(left[i-1], a[i]))
    right.append(max(right[i-1], a[n-i-1]))
right = right[::-1]  # now right[i] = max(a[i..n-1])
# For each index i, print the maximum over all elements except a[i].
print(right[1])  # i = 0: maximum of a[1..n-1]
for i in range(1, n-1):
    print(max(left[i-1], right[i+1]))
print(left[-2]) |
14,197 | c207f1008c18e147f9aa9e9131501fffb540b272 | from utils.utils import capital
import spacy
import pandas as pd
class NeExtractor():
    """Extract named entities (organisations, people, locations) from
    clustered articles using the spaCy English pipeline."""

    def __init__(self):
        # Entity labels that are kept; everything else is discarded.
        self.ne_type = ['ORG', 'PERSON', 'LOC']
        # NOTE(review): requires the 'en_core_web_sm' model to be installed.
        self.sp = spacy.load('en_core_web_sm')
        pass

    def extract(self, cluster: pd.DataFrame):
        """Return deduplicated (capitalised lemma, label) pairs found in the
        cluster's 'description' column."""
        rawtext = " ".join(cluster.description)
        ne = [self.sp(rawtext)]
        ne = [(e.text, e.lemma_, e.label_)
              for entities in ne for e in entities.ents]
        # Dedup on (lemma, label); keep only the wanted entity types.
        ne = set((capital(n[1]), n[2]) for n in ne if n[2] in self.ne_type)
        return list(ne)

    def extract_top_3(self, cluster: pd.DataFrame):
        """Return the three most frequent PERSON, ORG and LOC lemmas found in
        the cluster's 'body' column (note: extract() uses 'description').

        Returns:
            (top_persons, top_orgs, top_locations) -- three lists of up to
            three capitalised lemmas each, most frequent first.
        """
        rawtext = " ".join(cluster.body)
        ne = [self.sp(rawtext)]
        ne = [(e.text, e.lemma_, e.label_)
              for entities in ne for e in entities.ents]
        # Per-type frequency counters keyed by entity lemma.
        count_o = {}
        count_p = {}
        count_l = {}
        for n in ne:
            if n[2] not in self.ne_type:
                pass
            else:
                # Select the counter matching this entity's label.
                if n[2] == self.ne_type[0]:
                    count = count_o
                elif n[2] == self.ne_type[1]:
                    count = count_p
                else:
                    count = count_l
                if n[1] in count:
                    count[n[1]] += 1
                else:
                    count[n[1]] = 1
        # Keep the three most frequent lemmas of each type.
        count_o = [capital(n[0]) for n in sorted(count_o.items(),key=(lambda x:x[1]), reverse=True)[:3]]
        count_l = [capital(n[0]) for n in sorted(count_l.items(),key=(lambda x:x[1]), reverse=True)[:3]]
        count_p = [capital(n[0]) for n in sorted(count_p.items(),key=(lambda x:x[1]), reverse=True)[:3]]
        return (count_p, count_o, count_l)
|
14,198 | 28d74701a8e292e162dff711f8f705120fe32a26 | """
You are at a birthday party and are asked to distribute cake to your
guests. Each guest is only satisfied if the size of the piece of cake
they’re given matches their appetite (i.e. is greater than or equal to
it). Given two arrays, appetite and cake, where the
ith element of appetite represents the ith guest’s appetite, and the
elements of cake represent the sizes of the cake pieces you have to
distribute, return the maximum number of guests that you can satisfy.
Ex: Given the following arrays appetite and cake...
appetite = [1, 2, 3], cake = [1, 2, 3], return 3.
Ex: Given the following arrays appetite and cake...
appetite = [3, 4, 5], cake = [2], return 0.
"""
def max_guests(appetite: list[int], cake: list[int]) -> int:
    """Return the maximum number of guests that can be satisfied.

    A guest is satisfied by a piece of cake at least as large as their
    appetite. Greedy strategy: repeatedly match the largest remaining
    appetite against the largest remaining piece.

    The original implementation silently assumed both arrays arrive sorted
    ascending; sorting local copies here generalizes the function to any
    input order while leaving results for sorted inputs unchanged.
    """
    appetites = sorted(appetite)
    cakes = sorted(cake)
    satisfied = 0
    a = len(appetites) - 1
    c = len(cakes) - 1
    while a >= 0 and c >= 0:
        if cakes[c] >= appetites[a]:
            # Largest piece can cover the hungriest guest -- serve it.
            c -= 1
            satisfied += 1
        # Otherwise no remaining piece can satisfy this guest; skip them.
        a -= 1
    return satisfied
def main() -> None:
    """Demo: run max_guests on a sample input and print the result."""
    sample_appetite = [1, 2, 3]
    sample_cake = [1, 2, 3]
    # Alternative sample: appetite = [3, 4, 5], cake = [2]
    print(max_guests(sample_appetite, sample_cake))


if __name__ == "__main__":
    main()
|
14,199 | 5db59231af3b2240f873956fd28aace748b90e5d | import data.data_fetcher
import device
import time
import Queue
import threading
# Collect router records for pods 1-9 (routers 1-4) plus switch records for
# pods 1-9 (switch 1), then wrap each record in a Device object.
my_data_list = data.data_fetcher.get_pod_routers([1,2,3,4,5,6,7,8,9],[1,2,3,4])
my_data_list = my_data_list + data.data_fetcher.get_pod_switches([1,2,3,4,5,6,7,8,9],[1])
my_device_list = []
for i in my_data_list:
    my_device_list.append(device.Device(i))
# Work queue shared by the reset worker threads (Python 2 'Queue' module).
queue = Queue.Queue()
class ThreadDevice(threading.Thread):
    """Worker thread that drains devices from a shared queue and resets them.

    Fixes over the original: queue.get() is outside the try block so
    task_done() is only called for items actually taken from the queue;
    the bare `except:` is narrowed to Exception so SystemExit and
    KeyboardInterrupt are no longer swallowed; the local no longer shadows
    the imported `device` module.
    """

    def __init__(self, queue):
        threading.Thread.__init__(self)
        self.queue = queue

    def run(self):
        while True:
            dev = self.queue.get()
            try:
                dev.pre_process()
                dev.login("username", "password")
                dev.enable()
                dev.reset()
                dev.disconnect()
                dev.post_process()
            except Exception:
                # Keep the worker alive when a single device fails.
                continue
            finally:
                # Pairs with queue.put() for this item; required by queue.join().
                self.queue.task_done()
start = time.time()
# Spin up 10 daemon workers that drain the shared queue concurrently.
for i in range(10):
    t = ThreadDevice(queue)
    t.setDaemon(True)  # NOTE(review): setDaemon() is deprecated in Python 3; this file is Python 2.
    t.start()
for device in my_device_list:  # NOTE(review): loop variable shadows the imported 'device' module.
    queue.put(device)
# Block until every queued device has been processed (task_done per item).
queue.join()
print "Elapsed Time : %s" %(time.time() - start)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.