content stringlengths 7 1.05M |
|---|
"""
Bucket by height, similar to last problem
I need a lookup for i, j to know how much "volume" is interior and occupied between "i, j"
Make a lookup table of value -> [min index of at least value, max index of at least value]
Lookup (i, j) -> volume of walls between i, j
"""
"""
i, j = 0, 1
total = 0
walVolume = 0
startInd = 0
for i in range(0, len(height)):
"""
class Solution:
    def trap(self, height):
        """Compute how much rain water the elevation map `height` can trap.

        Two sweeps: the water level above bar i is min(max height to the
        left of i, max height to the right of i), both including i itself.

        Args:
            height: list of non-negative bar heights.

        Returns:
            Total units of trapped water (int).
        """
        if not height:  # guard: original crashed on empty input (height[0])
            return 0
        n = len(height)
        water_levels = [0] * n
        # Left-to-right sweep: running max of heights seen so far (incl. i).
        highest = height[0]
        for i in range(n):
            highest = max(highest, height[i])
            water_levels[i] = highest
        # Right-to-left sweep: cap each level by the running max from the right.
        highest = 0
        for i in range(n - 1, -1, -1):
            highest = max(highest, height[i])
            water_levels[i] = min(water_levels[i], highest)
        # Water above each bar is its level minus the bar height.
        return sum(max(level - h, 0) for level, h in zip(water_levels, height))
def test(heights):
    """Run the trap solver on `heights` and print the trapped-water total."""
    print(Solution().trap(heights))


test([0, 1, 0, 2, 1, 0, 1, 3, 2, 1, 2, 1])
test([4, 2, 0, 3, 2, 5])
class TweetParser(object):
    """Tool for filtering tweets (dicts) and extracting entities/fields from them."""

    def __init__(self):
        pass

    def get_entity(self, entity_type, tweet):
        """Return the list of entities of `entity_type` from `tweet`, or [] if absent."""
        if self.contains_entity(entity_type, tweet):
            return [entity for entity in tweet["entities"][entity_type]]
        return []

    @staticmethod
    def contains_entity(entity_type, tweet):
        """Return True if `tweet` contains one or more entities of `entity_type`
        (hashtag, url, or media)."""
        if "entities" not in tweet:
            return False
        elif entity_type in tweet["entities"] and len(tweet["entities"][entity_type]) > 0:
            return True
        return False

    @staticmethod
    def get_entity_field(field, entity):
        """Return `field` from the first entity object carrying it, else None.

        `entity` is a *list* of entity objects (because all entities are
        actually lists of entity objects).
        """
        for entity_object in entity:
            if field in entity_object:
                # BUGFIX: the original returned `entity[field]`, indexing the
                # list itself (TypeError); index the matching object instead.
                return entity_object[field]
        return None

    @staticmethod
    def tweet_passes_custom_filter(function, tweet):
        """Apply a custom filter callable to `tweet`; returns the callable's value."""
        return function(tweet)

    @staticmethod
    def strip_tweet(keep_fields, tweet):
        """Return a copy of `tweet` reduced to the dotted paths in `keep_fields`.

        Each element of `keep_fields` is a dotted path like "user.screen_name".
        NOTE(review): two paths sharing a top-level key (e.g. "user.name" and
        "user.id") overwrite each other in the final update() — the merge is
        shallow. Confirm whether multi-path-per-key input is expected.
        """
        stripped_tweet = {}
        expanded_fields = [field_path.split('.') for field_path in keep_fields]
        for expanded_field in expanded_fields:
            prev = {}
            prev_tweet = {}
            temp_iteration_dict = {}
            for count, field in enumerate(expanded_field):
                # if it's a top level field
                if field in tweet:
                    if count+1 == len(expanded_field):
                        temp_iteration_dict[field] = tweet[field]
                    else:
                        temp_iteration_dict[field] = {}
                        prev_tweet = tweet[field]
                        prev = temp_iteration_dict[field]
                # if it's a mid level field
                elif field in prev_tweet:
                    if count+1 == len(expanded_field):
                        prev[field] = prev_tweet[field]
                    else:
                        prev[field] = {}
                        prev_tweet = prev_tweet[field]
                        prev = prev[field]
            # merge this path's partial dict into the main dict
            stripped_tweet.update(temp_iteration_dict.copy())
        return stripped_tweet

    def tweet_passes_custom_filter_list(self, function_list, tweet):
        """Return True only if `tweet` passes every filter in `function_list`.

        See tweet_passes_custom_filter.
        """
        for function in function_list:
            if not self.tweet_passes_custom_filter(function, tweet):
                return False
        return True

    def tweet_passes_filter(self, filter_obj, tweet):
        """Return True if `tweet` matches `filter_obj` (a mongo-style query dict).

        Both dicts are flattened to (path, value) tuples; every filter tuple
        must appear among the tweet's tuples.
        """
        if filter_obj == {}:
            return True
        # lists of tuples that come from our dicts
        flat_tweet_list = list(self.flatten_dict(tweet))
        for filter_tuple in self.flatten_dict(filter_obj):
            if filter_tuple not in flat_tweet_list:
                return False
        return True

    def flatten_dict(self, dict_obj, path=None):
        """Yield (['path','to','value'], value_at_path) pairs for nested `dict_obj`."""
        if path is None:
            path = []
        if isinstance(dict_obj, dict):
            for key in dict_obj.keys():
                local_path = path[:]
                local_path.append(key)
                for val in self.flatten_dict(dict_obj[key], local_path):
                    yield val
        else:
            yield path, dict_obj

    def parse_columns_from_tweet(self, tweet, columns):
        """Extract dotted-path `columns` from `tweet` as (column, value) pairs.

        Used to project tweets onto a fixed schema (csv dump, sqlite db
        generation). Missing paths yield None; newlines in string values are
        flattened to spaces so rows stay single-line.
        """
        def return_val_for_column(tweet, columns):
            temp_tweet = {}
            for sub_field in columns:
                if temp_tweet == {}:
                    temp_tweet = tweet
                try:
                    # numeric path components index into lists
                    if sub_field.isdigit():
                        sub_field = int(sub_field)
                    val = temp_tweet[sub_field]
                    if isinstance(val, dict) or isinstance(val, list):
                        temp_tweet = val
                        continue
                    else:
                        if isinstance(val, str):
                            val = val.replace('\n', ' ').replace('\r', ' ')
                        return val
                except (KeyError, IndexError):
                    return None
        ret_columns = []
        for field in columns:
            split_field = field.split('.')
            ret_columns.append((field, return_val_for_column(tweet, split_field)))
        return ret_columns
'''
author @yvan
tweet parser is a tool for making tweet filters to apply to streams of tweets
''' |
""" Standard exceptions
:Author: Jonathan Karr <karr@mssm.edu>
:Date: 2021-04-29
:Copyright: 2021, SED-ML Editors
:License: Apache 2.0
"""
__all__ = [
'AlgorithmCannotBeSubstitutedException',
]
class AlgorithmCannotBeSubstitutedException(Exception):
    """ Raised when the algorithm substitution policy does not permit substituting an algorithm """
    pass  # pragma: no cover
|
class Headers(object):
    """Holder for request header values (app key and Authorization token)."""
    def __init__(self):
        # Both headers start unset; callers populate them via the setters.
        self.app_key = None
        self.Authorization = None
    def getApp_key(self):
        """Return the stored app key (or None if unset)."""
        return self.app_key
    def setApp_key(self, app_key):
        """Store the app key."""
        self.app_key = app_key
    def getAuthorization(self):
        """Return the stored Authorization value (or None if unset)."""
        return self.Authorization
    def setAuthorization(self, Authorization):
        """Store the Authorization value."""
        self.Authorization = Authorization
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# Date: Tue 01 June 2021 14:04:28 CET
# Author: Margaux Fouque
# Description:
# SOLARIUS est un outil technique permettant de suivre la course du soleil
# tout au long de la journée.
# Python Version 3.9
# ECAM - AAM 1
''' Contrôle d'un servomoteur avec un Raspberry Pi
Le programme demande à l'utilisateur d'entrer le rapport cyclique
(duty cycle) désiré, et le servomoteur se met à la position correspondante. ''' |
# Finding the sum of the elements of a list.
lst = [10, 20, 30]
total = 0  # renamed from `sum` so the builtin sum() is not shadowed
for i in lst:
    total = total + i
print(total)
print('\n')
# Printing index values along with a name.
for i in range(1, 11):
    print(i, 'CHENNAI')
print('\n')
# Displaying the elements of a list using for.
lst = [10, 20, 30]
for i in lst:
    print(i)
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    def lowestCommonAncestor(self, root: 'TreeNode', p: 'TreeNode', q: 'TreeNode') -> 'TreeNode':
        """Bottom-up DFS. Each subtree reports a code (1 = contains p,
        2 = contains q, 0 = neither); the first node whose left + right +
        own codes total 3 has seen both targets and is the LCA."""
        self.ans = None

        def search(node, p, q):
            # Returns 0 for an empty subtree or once the answer is fixed,
            # otherwise the strongest code seen below `node`.
            if not node:
                return 0
            left_code = search(node.left, p, q)
            right_code = search(node.right, p, q)
            own_code = 1 if node == p else 2 if node == q else 0
            if left_code + right_code + own_code == 3:
                self.ans = node
                return 0
            return max(left_code, right_code, own_code)

        search(root, p, q)
        return self.ans
|
def laceStringsRecur(s1, s2):
    """
    s1 and s2 are strings.
    Returns a new str with elements of s1 and s2 interlaced,
    beginning with s1. If strings are not of same length,
    then the extra elements should appear at the end.
    """
    def _weave(a, b, acc):
        # Thread the accumulated prefix through the recursion; once either
        # string is exhausted, the remainder of the other is appended whole.
        if not a:
            return acc + b
        if not b:
            return acc + a
        return _weave(a[1:], b[1:], acc + a[0] + b[0])
    return _weave(s1, s2, '')
class Employee:
    """Base employee: a name, an age and a salary, plus a work() action."""

    def __init__(self, name, age, salary):
        self.name = name
        self.age = age
        self.salary = salary

    def work(self):
        # Announce that this employee is busy.
        print(f'{self.name} is working ...')
class SoftwareEngineer(Employee):
    """Employee specialisation with a seniority level and a debug() action."""

    def __init__(self, name, age, level, salary):
        super().__init__(name, age, salary)
        self.level = level

    def debug(self):
        # BUGFIX: corrected the user-facing typo "debuging" -> "debugging".
        print(f'{self.name} is debugging ... and he gets {self.salary} $$$')
class Designer(Employee):
    # Inherits everything from Employee unchanged; exists as a distinct role marker.
    pass
# Demo: build one engineer and one designer and show their data.
se = SoftwareEngineer('Mihai', 22, 'Junior', 2500)
d = Designer('John', 24, 4000)
print(se.salary, se.level)
# BUGFIX: debug() returns None, so the old `print(se.debug())` printed an
# extra "None" line after the message; call it directly instead.
se.debug()
|
# Crie um programa que leia vários números inteiros pelo
# teclado. No final da execução, mostre a média entre
# todos os valores e qual foi o maior e o menor valores
# lidos. O programa deve perguntar ao usuário se ele quer
# ou não continuar a digitar valores.
# Running total, count of values read, and current min/max trackers.
soma = count = 0
menor = 99999  # sentinel: any realistic input should be smaller
maior = -1  # sentinel: assumes inputs are non-negative -- TODO confirm
#n = int(input('Valor {}: '.format(count+1)))
#op = input('Digitar outro valor? (S/N): ')
op = 'S'  # primed so the loop body runs at least once
# NOTE(review): an empty reply also ends the loop, because '' is "in" any
# string ('' in 'Nn' is True) -- confirm that is acceptable.
while op not in 'Nn':
    n = int(input('Valor {}: '.format(count + 1)))
    op = input('Digitar outro valor? (S/N): ')
    # The value typed just before answering 'N' is still counted below.
    if n < menor:
        menor = n
    if n > maior:
        maior = n
    count += 1
    soma += n
# Safe division: the loop always executes at least once, so count >= 1 here.
media = soma / count
print('\nNúmeros digitados: {}.\nSoma total: {}'
      '\nMédia: {}\nMenor: {}\nMaior: {}'.format(count, soma, media, menor, maior))
# Starts with, ends with, or contains?
# Input: two whitespace-separated strings. Report whether the first string
# starts with, ends with, or merely contains the second.
# Possible outputs (first and second string substituted for {}):
#   "{}" začína a končí "{}"   (starts and ends with)
#   "{}" začína "{}"           (starts with)
#   "{}" končí "{}"            (ends with)
#   "{}" obsahuje "{}"         (contains)
#   "{}" neobsahuje "{}"       (does not contain)
# Sample: input "abcabc abc" -> "abcabc" začína a končí "abc"
kopka_slamy, ihla = input().split()  # haystack, needle
if ihla in kopka_slamy:
    # Order matters: the combined case first, then prefix, then suffix;
    # anything that contains the needle but is neither falls through to else.
    if kopka_slamy.startswith(ihla) and kopka_slamy.endswith(ihla):
        print(f'"{kopka_slamy}" začína a končí "{ihla}"')
    elif kopka_slamy.startswith(ihla):
        print(f'"{kopka_slamy}" začína "{ihla}"')
    elif kopka_slamy.endswith(ihla):
        print(f'"{kopka_slamy}" končí "{ihla}"')
    else:
        print(f'"{kopka_slamy}" obsahuje "{ihla}"')
else:
    print(f'"{kopka_slamy}" neobsahuje "{ihla}"')
|
print('Welcome to the YES/NO POLLiNG APP.')

# Poll setup: the issue text, how many voters to accept, and an admin password.
issue = input('\nWhat is the yes/no issue you will be voting on today? : ')
vote_number = int(input('What is the number of voters you will allow on the issue? : '))
password = input('Enter a password for the polling results: ')

yes = 0
no = 0
results = {}  # voter name -> recorded choice

for i in range(vote_number):
    name = input('\nEnter your full name: ').title().strip()
    if name in results.keys():
        # One vote per (normalised) name.
        print('\nSorry, it seems that someone with that name has already voted.')
    else:
        print('\nHere is our issue: ' + issue)
        choice = input('What do you think? YES/NO: ').lower().strip()
        if choice == 'yes' or choice == 'y':
            choice = 'yes'
            yes += 1
        elif choice == 'no' or choice == 'n':
            choice = 'no'
            no += 1
        else:
            # Invalid answers are still recorded verbatim but not tallied.
            print('That is not a YES/NO answer, but okay...')
        # add vote to dictionary results
        results[name] = choice
        print('\nThank you ' + name + '. Your vote of ' + results[name] + ' has been recorded.')

# show who actually voted
total_votes = len(results.keys())
print('\nThe following ' + str(total_votes) + ' people voted: ')
for key in results.keys():
    print(key)

# summarize the voting results
print('\nOn the following issue: ' + issue)
if yes > no:
    print('YES wins! ' + str(yes) + ' votes to ' + str(no) + '.')
elif yes < no:  # BUGFIX: was a second `if`, so a YES win also printed the tie line
    print('NO wins! ' + str(no) + ' votes to ' + str(yes) + '.')
else:
    print('It was a tie. ' + str(yes) + ' votes to ' + str(no) + '.')

# admin access
guess = input('\nTo see the voting results, enter the admin password: ')
if guess == password:
    for key, value in results.items():
        print('Voter: ' + key + '\tVote: ' + value)
else:
    print('Sorry, that is not the correct password.')
class Solution(object):
    def findMedianSortedArrays(self, nums1, nums2):
        """
        Merge the two sorted lists just past the midpoint, then read the
        median off the merged prefix. O(n1 + n2) time.

        Cleanups vs. original: removed a leftover debug print, unused
        variables, and locals that shadowed the builtins `list`/`sum`.

        :type nums1: List[int]
        :type nums2: List[int]
        :rtype: float (the element itself for an odd-length union)
        """
        n1 = len(nums1)
        n2 = len(nums2)
        total = n1 + n2
        # Single-element union: no merging needed.
        if total == 1:
            return nums1[0] if n1 == 1 else nums2[0]
        is_even = total % 2 == 0
        i1 = 0
        i2 = 0
        merged = []  # first total // 2 + 1 elements of the merged order
        for _ in range(total // 2 + 1):
            # Take from nums1 when nums2 is exhausted, or when nums1's head
            # is strictly smaller (ties go to nums2, as in the original).
            if i2 == n2 or (i1 < n1 and nums1[i1] < nums2[i2]):
                merged.append(nums1[i1])
                i1 += 1
            else:
                merged.append(nums2[i2])
                i2 += 1
        if is_even:
            return float(merged[-1] + merged[-2]) / 2
        return merged[-1]
def onlyFlag():
    # If the player has placed a flag, jump to it (when the jump skill is
    # ready) and pick it up. `hero` is the CodeCombat game API object.
    flag = hero.findFlag()
    if flag:
        if hero.isReady("jump"):
            hero.jumpTo(flag.pos)
            hero.pickUpFlag(flag)
def summonIt():
    """Spend spare mana on summons: a burl, then undead, then raise corpses."""
    for spell in ("summon-burl", "summon-undead"):
        if hero.canCast(spell):
            hero.cast(spell)
    if hero.canCast("raise-dead") and len(hero.findCorpses()):
        hero.cast("raise-dead")
def attackIt(enemy):
    """Pick the strongest available spell for the current range, else melee."""
    gap = hero.distanceTo(enemy)
    if gap < 25 and hero.canCast("fear"):
        hero.cast("fear", enemy)
    elif gap < 30 and hero.canCast("chain-lightning"):
        hero.cast("chain-lightning", enemy)
    elif gap < 30 and hero.canCast("poison-cloud"):
        hero.cast("poison-cloud", enemy)
    elif gap < 15 and hero.health < hero.maxHealth / 1.5:
        # Close and hurt: leech health from the enemy instead.
        hero.cast("drain-life", enemy)
    else:
        hero.attack(enemy)
def drainFriend():
    """Top up health by draining the nearest non-burl ally within 15 units."""
    ally = hero.findNearest(hero.findFriends())
    if ally and ally.type != "burl" and hero.distanceTo(ally) <= 15:
        hero.cast("drain-life", ally)
def battleTactics():
    """One round of decisions: flags, summons, attacking, emergency healing."""
    onlyFlag()
    summonIt()
    enemy = hero.findNearestEnemy()
    # Sand yaks are neutral; don't provoke them.
    if enemy and enemy.type != "sand-yak":
        attackIt(enemy)
    if hero.health < hero.maxHealth / 2:
        drainFriend()


while True:
    battleTactics()
|
# Sum the integers 0..end inclusive and print the result.
end = 6
total = sum(range(end + 1))  # idiom: builtin sum() replaces the manual loop
print(total)
|
class BaseButton:
    """Abstract base for buttons; subclasses override onclick() with real behaviour."""
    def __init__(self):
        pass
    def onclick(self):
        # Intentionally a no-op in the base class.
        pass
def getSumOf3Numbers(array, target):
    """
    Write a function that takes in a non-empty array of distinct integers
    and an integer representing a target sum. The function should find all
    triplets in the array that sum up to the target sum and return a
    two-dimensional array of all these triplets. The numbers in each
    triplet should be ordered in ascending order, and the triplets
    themselves should be ordered in ascending order with respect to the
    numbers they hold. If no three numbers sum up to the target sum, the
    function should return an empty array.

    Sort + two-pointer scan, O(n^2) time.
    """
    # Robustness fix: sort a copy so the caller's list is not mutated
    # (the original called array.sort() in place).
    nums = sorted(array)
    triplets = []
    for i in range(len(nums) - 2):
        left = i + 1
        right = len(nums) - 1
        while left < right:
            curr_sum = nums[i] + nums[left] + nums[right]
            if curr_sum == target:
                triplets.append([nums[i], nums[left], nums[right]])
                left += 1
                right -= 1
            elif curr_sum < target:
                left += 1
            else:
                # curr_sum > target (the original's final `else: print` branch
                # was unreachable: the three comparisons are exhaustive).
                right -= 1
    return triplets
def GetSumByIteration(array, target):
    """
    Brute-force O(n^3) reference version: try every index triple i < j < k
    and collect the value triples (as tuples, in scan order) whose sum
    equals `target`.
    """
    matches = []
    n = len(array)
    for i in range(n - 2):
        for j in range(i + 1, n - 1):
            for k in range(j + 1, n):
                if array[i] + array[j] + array[k] == target:
                    matches.append((array[i], array[j], array[k]))
    return matches
# Exercise both implementations on the same sample input.
res1 = GetSumByIteration([12, 3, 1, 2, -6, 5, -8, 6], 0)
print(res1)
res = getSumOf3Numbers([12, 3, 1, 2, -6, 5, -8, 6], 0)
print(res)
|
# Interactive bit "rotation" demo driven by user input.
n = int(input("Enter the number to be rotated:"))
a = int(input("Enter the bits to be rotated:"))
print("1.Left Rotation 2.Right Rotation")
b = int(input("Enter your choice:"))
if b == 1:
    # NOTE(review): << is a plain left SHIFT, not a rotation -- high bits are
    # not wrapped around. A true rotation needs a fixed bit width; confirm intent.
    print("The number",n,"after left rotation by",a,"bits is", n << a)
elif b == 2:
    # NOTE(review): >> likewise is an arithmetic right shift, not a rotation.
    print("The number",n,"after right rotation by",a,"bits is", n >> a)
else:
    print("Invalid choice")
|
"""
Binary to Decimal and Back Converter - Develop a converter to convert a decimal number to binary or a binary number to its decimal equivalent.
"""
print("Welcome to Binary to Decimal & Back converter!")
while True:
action = input("Choose Your action:\n[1] - Convert binary to decimal\n[2] - Convert decimal to binary\n[3] - End program\n")
if action == "1":
number = input("Input binary number to convert:")
print("Binary: {}, decimal: {}".format(number, int(number, 2)))
elif action == "2":
number = int(input("Input decimal number to convert:"))
print("Decimal: {}, binary: {}".format(number, bin(number).replace("0b", "")))
elif action == "3":
print("You have chosen to end.")
break
else:
print("Please type in correct action.")
|
def ws_message(message):
    # ASGI WebSocket packet-received and send-packet message types
    # both have a "text" key for their textual data.
    # Echoes the incoming frame's text straight back on the reply channel.
    # NOTE(review): `message.reply_channel` suggests a Django Channels 1.x
    # style consumer -- confirm against the surrounding project.
    message.reply_channel.send({
        "text": message.content['text'],
    })
|
class Solution:
    def closeStrings(self, word1: str, word2: str) -> bool:
        """Return True if the words are "close": same length, the same set of
        characters, and the same multiset of per-character frequencies
        (so one can be transformed into the other by swapping characters
        and permuting occurrences).
        """
        # BUGFIX: Counter was referenced but never imported in this snippet;
        # a function-scope import keeps the fix self-contained.
        from collections import Counter
        if len(word1) != len(word2):
            return False
        count1 = Counter(word1)
        count2 = Counter(word2)
        if count1.keys() != count2.keys():
            return False
        return sorted(count1.values()) == sorted(count2.values())
|
# The Western Suburbs Croquet Club has two categories of membership, Senior and Open. They would like your help with an application form that will tell prospective members which category they will be placed.
#
# To be a senior, a member must be at least 55 years old and have a handicap greater than 7. In this croquet club, handicaps range from -2 to +26; the better the player the lower the handicap.
#
# Input
# Input will consist of a list of pairs. Each pair contains information for a single potential member. Information consists of an integer for the person's age and an integer for the person's handicap.
#
# Output
# Output will consist of a list of string values (in Haskell: Open or Senior) stating whether the respective member is to be placed in the senior or open category.
#
# Example
# input = [(18, 20), (45, 2), (61, 12), (37, 6), (21, 21), (78, 9)]
# output = ["Open", "Open", "Senior", "Open", "Open", "Senior"]
def open_or_senior(data):
    """Classify each (age, handicap) pair as "Senior" or "Open".

    Per the problem statement above: a Senior is at least 55 years old AND
    has a handicap *greater than* 7 (the original used >= 7, which would
    wrongly classify a handicap of exactly 7).
    """
    # BUGFIX: the result list was module-level, so repeated calls kept
    # appending to the same list; it is now local to the function.
    new = []
    for member in data:
        if member[0] >= 55 and member[1] > 7:
            new.append("Senior")
        else:
            new.append("Open")
    return new


output = [(16, 23), (73, 1), (56, 20), (1, -1)]
print(open_or_senior(output))
# For Pros >>
def openOrSenior(data):
    """Comprehension version: "Senior" requires age >= 55 and handicap
    strictly greater than 7 (fixed from >= 7 to match the stated spec)."""
    return ["Senior" if age >= 55 and handicap > 7 else "Open" for (age, handicap) in data]
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'targets': [
{
'target_name': 'woff2_dec',
'type': 'static_library',
'include_dirs': [
'src',
'<(DEPTH)/third_party/brotli/dec',
],
'dependencies': [
'<(DEPTH)/third_party/brotli/brotli.gyp:brotli',
],
'sources': [
'src/buffer.h',
'src/round.h',
'src/store_bytes.h',
'src/table_tags.cc',
'src/table_tags.h',
'src/variable_length.cc',
'src/variable_length.h',
'src/woff2_common.cc',
'src/woff2_common.h',
'src/woff2_dec.cc',
'src/woff2_dec.h',
'src/woff2_out.cc',
'src/woff2_out.h',
],
# TODO(ksakamoto): http://crbug.com/167187
'msvs_disabled_warnings': [
4267,
],
},
],
}
|
"""
Given a non-negative integer represented as non-empty a singly linked list of digits, plus one to the integer.
You may assume the integer do not contain any leading zero, except the number 0 itself.
The digits are stored such that the most significant digit is at the head of the list.
Example :
Input: [1,2,3]
Output: [1,2,4]
"""
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
    def plusOne(self, head: ListNode) -> ListNode:
        # Single sweep: `slow` remembers the rightmost node whose value is
        # not 9. That digit absorbs the +1 and every 9 after it rolls over
        # to 0 (e.g. 1->9->9 becomes 2->0->0).
        dummy = ListNode(0)  # sentinel in front of head; catches a full carry
        dummy.next = head
        slow = fast = dummy
        while fast:
            if fast.val != 9:
                slow = fast  # rightmost non-9 seen so far
            fast = fast.next
        slow.val += 1  # this digit takes the increment
        slow = slow.next
        # Every digit to its right was a 9; they all wrap to 0.
        while slow:
            slow.val = 0
            slow = slow.next
        # Drop the sentinel unless the carry reached it (all digits were 9).
        return dummy.next if dummy.val == 0 else dummy
|
# encoding: utf-8
# convert dictionary to object
class dict2obj(object):
    """Wrap a dict so its keys are reachable as attributes (obj.key).

    Note: the instance __dict__ *is* the given dict, so later mutations of
    `dictionary` are visible through the object (and vice versa).
    """
    def __init__(self, dictionary):
        self.__dict__ = dictionary
a = "test"
b = 0
def fun1(val):
global b
b = b + 1
#a = "hello"
def fun2():
global b
nonlocal val
b += 5
val += 1000
print(val)
fun2()
print(a)
fun1(100)
print(b) |
class Solution:
    def isSubsequence(self, s: str, t: str) -> bool:
        """Return True if `s` is a subsequence of `t` (characters of `s`
        appear in `t` in order, not necessarily contiguously).

        Single pass over `t`: `in` on a shared iterator consumes it, so each
        character of `s` is searched only in the remainder of `t`. This is
        O(len(t)) instead of the original's O(len(s) * len(t)) rescans.
        """
        remainder = iter(t)
        return all(ch in remainder for ch in s)
__author__ = 'Aaron Yang'
__email__ = 'byang971@usc.edu'
__date__ = '8/24/2020 11:04 PM'
class Solution:
    def findSubsequences(self, nums):
        """Return all non-decreasing subsequences of `nums` with length >= 2.

        Backtracking over suffixes. A set of tuples replaces the original
        O(len(res)) `temp not in self.res` list scan for deduplication while
        keeping the same first-discovery output order.
        """
        self.res = []
        seen = set()

        def backtrace(rest, chosen):
            if len(chosen) >= 2:
                key = tuple(chosen)
                if key not in seen:
                    seen.add(key)
                    self.res.append(chosen)
            for i in range(len(rest)):
                # Only extend with values that keep the sequence non-decreasing.
                if not chosen or rest[i] >= chosen[-1]:
                    backtrace(rest[i + 1:], chosen + [rest[i]])

        backtrace(nums, [])
        return self.res
if __name__ == '__main__':
    # Smoke test: the eight non-decreasing subsequences of [4, 6, 7, 7].
    arr = [4, 6, 7, 7]
    result = Solution().findSubsequences(arr)
    print(result)
|
'''
Class that manages history data (past request results).
'''
class History:
    """Holds past request results: a summary message plus a list of
    request/response objects."""

    def __init__(self, message, arrReqRes):
        self.__message = message  # summary message for the batch of requests
        self.__arrReqRes = arrReqRes  # request/response result objects

    def getHistory(self):
        """Full payload: the message plus every serialized request result."""
        serialized = [req_res.get_RequestResult() for req_res in self.__arrReqRes]
        return {"requests": self.__message, "arrRequest": serialized}

    def getHistoryNull(self):
        """Payload carrying the message only (no request results)."""
        return {"requests": self.__message}

    def getMessage(self):
        return self.__message

    def setMessage(self, message):
        self.__message = message

    def getArrReqRes(self):
        return self.__arrReqRes

    def setArrReqRes(self, arrReqRes):
        self.__arrReqRes = arrReqRes
class Loss(object):
    """Abstract likelihood term; concrete subclasses implement log p(Y | F)."""

    def __init__(self, dout):
        # Output dimensionality of the model this loss scores.
        self.dout = dout

    def eval(self, _ytrue, _ypred):
        """
        Subclass should implement log p(Y | F)
        :param output: (batch_size x Dout) matrix containing true outputs
        :param latent_val: (MC x batch_size x Q) matrix
            of latent function values, usually Q=F
        :return:
        """
        raise NotImplementedError("Subclass should implement this.")

    def get_name(self):
        """Human-readable name of the loss; must be provided by subclasses."""
        raise NotImplementedError("Subclass should implement this.")
__all__ = [
    "WriteError",
    "InvalidQueryError",
    "DoesNotExistError",
    "MultipleObjectsReturnedError",
    "FieldDoesNotExistError",
    # Consistency fix: these two were defined below but missing from __all__.
    "IndexCreationError",
    "DuplicateKeyError",
]


class WriteError(Exception):
    """A write to the store failed."""
    pass


class InvalidQueryError(Exception):
    """The query was malformed or unsupported."""
    pass


class DoesNotExistError(Exception):
    """No object matched the query."""
    pass


class MultipleObjectsReturnedError(Exception):
    """A unique lookup matched more than one object."""
    pass


class FieldDoesNotExistError(Exception):
    """A referenced field is not declared on the document/model."""
    pass


class IndexCreationError(Exception):
    """Creating an index failed."""
    pass


class DuplicateKeyError(Exception):
    """A unique-key constraint was violated."""
    pass
|
#! /usr/bin/python3
# -*- coding: utf-8 -*-
# @Time : 2019/3/9 4:19 PM
# @Author : xiaoliji
# @Email : yutian9527@gmail.com
"""
矩阵中的路径
>>> hasPath('ABEESFCSADME', rows=3, cols=4, path='SEE')
True
>>> hasPath('abtgcfcsjdeh', rows=3, cols=4, path='bfce')
True
>>> hasPath('abtgcfcsjdeh', rows=3, cols=4, path='abfb')
False
>>> hasPath('ABCESFCSADEE', rows=3, cols=4, path='SEE')
True
>>> hasPath('ABCESFCSADEE', rows=3, cols=4, path='ABCESEEEFS')
True
"""
def hasPath(matrix: str, rows: int, cols: int, path: str) -> 'bool':
    """
    Backtracking. (Original note, translated from Chinese:) the Newcoder
    test cases for this problem are incomplete, so the leaderboard answers
    are not necessarily correct -- e.g. the first doctest example should be
    True, yet returning False would still pass Newcoder's tests.
    """
    # Try every cell matching the first character of `path` as a start point;
    # `spread` explores the rest from there. Each attempt gets its own fresh
    # list(matrix) copy, so separate attempts cannot interfere.
    for i in range(rows):
        for j in range(cols):
            if matrix[i * cols + j] == path[0]:
                if spread(list(matrix), rows, cols, path[1:], i, j):
                    return True
    return False
def spread(matrix: str, rows: int, cols: int, path: str, i: int, j: int) -> 'bool':
    # DFS one step: cell (i, j) has already matched; look for path[0] among
    # its four neighbours. `matrix` is a mutable list copy owned by this search.
    if not path:
        return True
    matrix[i * cols + j] = '-'  # mark visited so the path can't reuse this cell
    spreaded = False
    # NOTE(review): the mark above is never undone, so a failed branch keeps
    # its cells blocked for later branches of the same search. This can cause
    # false negatives on some boards -- confirm whether that is intended.
    for x, y in ((i - 1, j), (i + 1, j), (i, j - 1), (i, j + 1)):
        if 0 <= x < rows and 0 <= y < cols and matrix[x * cols + y] == path[0]:
            if spread(matrix, rows, cols, path[1:], x, y):
                spreaded = True
    return spreaded
'''
Install mysql connector for python
Use the command in CMD: "pip install mysql-connector-python"
Also, install "MySQL Workbench"
Create a Database in workbench within 'root' user, here "myDB"
create a table in the database, here 'emp'
Now, go to any IDE or text editor to connect python with Database
''' |
def decode_orientation(net, n_classes, loss_fn,
                       train_data, train_labels, test_data, test_labels,
                       n_iter=1000, L2_penalty=0, L1_penalty=0):
  """ Initialize, train, and test deep network to decode binned orientation from neural responses
  Args:
    net (nn.Module): deep network to run
    n_classes (scalar): number of classes in which to bin orientation
    loss_fn (function): loss function to run
    train_data (torch.Tensor): n_train x n_neurons tensor with neural
      responses to train on
    train_labels (torch.Tensor): n_train x 1 tensor with orientations of the
      stimuli corresponding to each row of train_data, in radians
    test_data (torch.Tensor): n_test x n_neurons tensor with neural
      responses to train on
    test_labels (torch.Tensor): n_test x 1 tensor with orientations of the
      stimuli corresponding to each row of train_data, in radians
    n_iter (int, optional): number of iterations to run optimization
    L2_penalty (float, optional): l2 penalty regularizer coefficient
    L1_penalty (float, optional): l1 penalty regularizer coefficient
  Returns:
    (list, list, torch.Tensor): training loss and test loss over iterations,
      and the predicted orientation bins for the test stimuli. (Doc fix: the
      original docstring advertised two return values; the code returns three.)
  """
  # Bin stimulus orientations in training set
  train_binned_labels = stimulus_class(train_labels, n_classes)
  test_binned_labels = stimulus_class(test_labels, n_classes)
  # Run GD on training set data, using learning rate of 0.1
  # (add optional arguments test_data and test_binned_labels!)
  train_loss, test_loss = train(net, loss_fn, train_data, train_binned_labels,
                                learning_rate=0.1, test_data=test_data,
                                test_labels=test_binned_labels, n_iter=n_iter,
                                L2_penalty=L2_penalty, L1_penalty=L1_penalty)
  # Decode neural responses in testing set data
  out = net(test_data)
  out_labels = np.argmax(out.detach(), axis=1)  # predicted classes
  frac_correct = (out_labels==test_binned_labels).sum() / len(test_binned_labels)
  print(f'>>> fraction correct = {frac_correct:.3f}')
  return train_loss, test_loss, out_labels
# Set random seeds for reproducibility
np.random.seed(1)
torch.manual_seed(1)
n_classes = 20  # number of orientation bins to decode into
# Initialize network
net = DeepNetSoftmax(n_neurons, 20, n_classes)  # use M=20 hidden units
# Initialize built-in PyTorch negative log likelihood loss function
loss_fn = nn.NLLLoss()
# Uncomment below to train network and run it on test images
# this function uses the train function you wrote before
train_loss, test_loss, predicted_test_labels = decode_orientation(net, n_classes, loss_fn,
                                                                  resp_train, stimuli_train, resp_test, stimuli_test)
# Plot results (xkcd styling is cosmetic only; the data are unchanged)
with plt.xkcd():
  plot_decoded_results(train_loss, test_loss, stimuli_test, predicted_test_labels)
'''
Utilitários Python para auxiliar na programação.
dir -> Apresenta todos os atributos/propriedades e funções/métodos disponíveis para
determinado tipo de dado ou variável.
dir(tipo de dado/variável)
dir(help)
help -> Apresenta a documentação/como utilizar os atributos/propriedades e funções/métodos
disponíveis para determinado tipo de dado ou variável.
help(tipo de dado/variável.propriedade)
ex: help("Geek".lower)
'''
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created: April 2019
Author: A. P. Naik
Description: Physical constants and unit conversions, for general use. All SI
units.
"""
# constants
pi = 3.141592653589793  # circle constant, double precision
G = 6.67408e-11  # gravitational constant, m^3 kg^-1 s^-2 (doc fix: was "m^3 kg s^-2")
# units
pc = 3.0857e+16  # parsec, metres
kpc = 3.0857e+19  # kiloparsec, metres
Mpc = 3.0857e+22  # megaparsec, metres
M_sun = 1.9885e+30  # solar mass, kg
year = 31536000.0  # 365-day year, seconds
|
"""
Connect and manage field sensor in the area or centralized end points.
Also collects local temperature, humidity, air pressure, movement (gyroscope)
Comm method: wifi and mesh. Sat Comm, LoRa and GSM are still in development
"""
|
"""Npm integration testing
"""
load("@build_bazel_rules_nodejs//:index.bzl", "nodejs_test")
# Returns the manifest path of a file: `workspace/path/to/file`
def _to_manifest_path(ctx, file):
    """Return the runfiles-manifest path of `file`: `workspace/path/to/file`."""
    if file.short_path.startswith("../"):
        # Strip the ../ from short_path to external repository
        return file.short_path[3:]
    else:
        # Add the repository name for short_path to local repository
        return ctx.workspace_name + "/" + file.short_path
def _npm_integration_test_config_impl(ctx):
    """Writes the JS config module consumed by the npm_integration_test runner."""
    if len(ctx.files.test_files) == 0:
        fail("No files were found to run under integration testing.")
    if ctx.attr.debug:
        # Debug mode edits package.json in place, which cannot work for
        # directory artifacts.
        for f in ctx.files.test_files:
            if f.is_directory:
                fail("In debug mode, directory test_files labels not supported.")
    # Expand $(location ...) references in the commands against `data`.
    commands = []
    for c in ctx.attr.commands:
        commands.append(ctx.expand_location(c, targets = ctx.attr.data))
    # pass --define vars to test; these are added to the environment using process.env().
    env_vars = {}
    for k in ctx.attr.configuration_env_vars:
        if k in ctx.var.keys():
            env_vars[k] = ctx.var[k]
    # Serialize configuration file for test runner
    ctx.actions.write(
        output = ctx.outputs.config,
        content = """// npm_integration_test runner config generated by npm_integration_test rule
module.exports = {{
testFiles: [ {TMPL_test_files} ],
commands: [ {TMPL_commands} ],
npmPackages: {{ {TMPL_npm_packages} }},
checkNpmPackages: [ {TMPL_check_npm_packages} ],
envVars: {{ {TMPL_env_vars} }},
debug: {TMPL_debug},
}};
""".format(
            TMPL_test_files = ", ".join(["'%s'" % f.short_path for f in ctx.files.test_files]),
            TMPL_commands = ", ".join(["'%s'" % s for s in commands]),
            TMPL_npm_packages = ", ".join(["'%s': '%s'" % (ctx.attr.npm_packages[n], n.files.to_list()[0].short_path) for n in ctx.attr.npm_packages]),
            TMPL_check_npm_packages = ", ".join(["'%s'" % s for s in ctx.attr.check_npm_packages]),
            TMPL_env_vars = ", ".join(["'%s': '%s'" % (k, env_vars[k]) for k in env_vars]),
            TMPL_debug = "true" if ctx.attr.debug else "false",
        ),
    )
    # The test needs the config plus every file it may read at runtime.
    runfiles = [ctx.outputs.config] + ctx.files.test_files + ctx.files.npm_packages
    return [DefaultInfo(runfiles = ctx.runfiles(files = runfiles))]
# Attribute schema shared by the main and .debug config targets generated below.
_NPM_INTEGRATION_TEST_CONFIG_ATTRS = {
    "commands": attr.string_list(
        default = [],
        mandatory = True,
        doc = """The list of test commands to run. Defaults to `[]`.""",
    ),
    "configuration_env_vars": attr.string_list(
        doc = """Pass these configuration environment variables to the resulting test.
Chooses a subset of the configuration environment variables (taken from `ctx.var`), which also
includes anything specified via the --define flag.
Note, this can lead to different results for the test.""",
        default = [],
    ),
    "check_npm_packages": attr.string_list(
        doc = """A list of npm packages that should be replaced in this test.
This attribute checks that none of the npm packages lists is found in the workspace-under-test's
package.json file unlinked to a generated npm package.
This can be used to verify that all npm package artifacts that need to be tested against are indeed
replaced in all integration tests. For example,
```
check_npm_packages = [
    "@angular/common",
    "@angular/compiler",
    "@angular/compiler-cli",
    "@angular/core",
],
```
If an `npm_packages` replacement on any package listed is missed then the test will fail. Since listing all
npm packages in `npm_packages` is expensive as any change will result in all integration tests re-running,
this attribute allows a fine grained `npm_packages` per integration test with the added safety that none
are missed for any one test.
""",
    ),
    "data": attr.label_list(
        doc = """Data dependencies for test.""",
        allow_files = True,
    ),
    "debug": attr.bool(
        doc = """Setup the test for debugging.
If set to true then the package.json replacement are done in-place instead of a tmp folder
and the test is not run. This is used to configure the test folder for local testing and debugging.
""",
        default = False,
    ),
    "npm_packages": attr.label_keyed_string_dict(
        doc = """A label keyed string dictionary of npm package replacements to make in the workspace-under-test's
package.json with npm package targets. The targets should be pkg_tar tar.gz archives.
For example,
```
npm_packages = {
    "//packages/common:npm_package_archive": "@angular/common",
    "//packages/compiler:npm_package_archive": "@angular/compiler",
    "//packages/compiler-cli:npm_package_archive": "@angular/compiler-cli",
    "//packages/core:npm_package_archive": "@angular/core",
}
```""",
        allow_files = True,
    ),
    "test_files": attr.label(
        doc = """A filegroup of all files necessary to run the test.""",
        allow_files = True,
    ),
}

# Private rule that materialises the runner config as `<name>.js`.
_npm_integration_test_config = rule(
    implementation = _npm_integration_test_config_impl,
    doc = """Generates an npm_integration_test config.""",
    attrs = _NPM_INTEGRATION_TEST_CONFIG_ATTRS,
    outputs = {
        "config": "%{name}.js",
    },
)
def npm_integration_test(name, **kwargs):
    """Runs an npm integration test.

    Declares two config targets and two nodejs_test targets: `name` (the
    regular test) and `name + ".debug"` (an in-place debug variant that must
    be run with `bazel run`).

    See _NPM_INTEGRATION_TEST_CONFIG_ATTRS above for configuration arguments.
    """

    # Split the config-only arguments out of kwargs; whatever remains is
    # forwarded verbatim to the underlying nodejs_test targets.
    commands = kwargs.pop("commands", [])
    configuration_env_vars = kwargs.pop("configuration_env_vars", [])
    check_npm_packages = kwargs.pop("check_npm_packages", [])
    npm_packages = kwargs.pop("npm_packages", {})
    test_files = kwargs.pop("test_files", [])
    data = kwargs.pop("data", [])

    # Config for the regular test target.
    _npm_integration_test_config(
        name = name + ".config",
        commands = commands,
        configuration_env_vars = configuration_env_vars,
        check_npm_packages = check_npm_packages,
        data = data,
        npm_packages = npm_packages,
        test_files = test_files,
        visibility = ["//visibility:private"],
        tags = ["manual"],
        testonly = True,
    )

    # Config for debug target below; identical except debug = True, which
    # makes the runner patch package.json in place and skip the test run.
    _npm_integration_test_config(
        name = name + ".debug.config",
        commands = commands,
        configuration_env_vars = configuration_env_vars,
        check_npm_packages = check_npm_packages,
        data = data,
        npm_packages = npm_packages,
        test_files = test_files,
        debug = True,
        visibility = ["//visibility:private"],
        tags = ["manual"],
        testonly = True,
    )

    tags = kwargs.pop("tags", [])
    npm_deps = ["@npm//tmp"]

    nodejs_test(
        name = name,
        data = data + npm_deps + [":%s.config" % name, ":%s.config.js" % name],
        tags = tags,
        templated_args = ["$(location :%s.config.js)" % name],
        entry_point = "//tools/npm_integration_test:test_runner.js",
        **kwargs
    )

    # Setup a .debug target that sets the debug attribute to True.
    # This target must be run with `bazel run` so it is tagged manual.
    nodejs_test(
        name = name + ".debug",
        data = data + npm_deps + [":%s.debug.config" % name, ":%s.debug.config.js" % name],
        tags = tags + ["manual", "local"],
        templated_args = ["$(location :%s.debug.config.js)" % name],
        entry_point = "//tools/npm_integration_test:test_runner.js",
        **kwargs
    )
|
## Multiples of 3 or 5
## 6 kyu
## https://www.codewars.com/kata/514b92a657cdc65150000006
def solution(n):
    """Return the sum of all natural numbers below n that are multiples of 3 or 5."""
    return sum(i for i in range(3, n) if i % 3 == 0 or i % 5 == 0)
#******************************************************************************
# (C) 2018, Stefan Korner, Austria *
# *
# The Space Python Library is free software; you can redistribute it and/or *
# modify it under under the terms of the MIT License as published by the *
# Massachusetts Institute of Technology. *
# *
# The Space Python Library is distributed in the hope that it will be useful, *
# but WITHOUT ANY WARRANTY; without even the implied warranty of *
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the MIT License *
# for more details. *
#******************************************************************************
# Link Simulation *
#******************************************************************************
__all__ = ["IF", "TMTC"]
|
class Compliance:
    """Plain data holder for the compliance status of a finding."""
    def __init__(self, Status=None):
        self.Status = Status
class Malware:
    """Plain data holder for a malware entry attached to a finding."""
    def __init__(self, Name=None, Path=None, State=None, Type=None):
        self.Name = Name
        self.Path = Path
        self.State = State
        self.Type = Type
class Network:
    """Plain data holder for network-related fields of a finding."""
    def __init__(self, DestinationDomain=None, DestinationIpV4=None, DestinationIpV6=None, DestinationPort=None,
                 Direction=None, Protocol=None, SourceDomain=None, SourceIpV4=None, SourceIpV6=None, SourceMac=None,
                 SourcePort=None):
        self.DestinationDomain = DestinationDomain
        self.DestinationIpV4 = DestinationIpV4
        self.DestinationIpV6 = DestinationIpV6
        self.DestinationPort = DestinationPort
        self.Direction = Direction
        self.Protocol = Protocol
        self.SourceDomain = SourceDomain
        self.SourceIpV4 = SourceIpV4
        self.SourceIpV6 = SourceIpV6
        self.SourceMac = SourceMac
        self.SourcePort = SourcePort
class Note:
    """Plain data holder for a user note on a finding."""
    def __init__(self, Text=None, UpdatedAt=None, UpdatedBy=None):
        self.Text = Text
        self.UpdatedAt = UpdatedAt
        self.UpdatedBy = UpdatedBy
class Process:
    """Plain data holder for process details related to a finding."""
    def __init__(self, LaunchedAt=None, Name=None, ParentPid=None, Path=None, Pid=None, TerminatedAt=None):
        self.LaunchedAt = LaunchedAt
        self.Name = Name
        self.ParentPid = ParentPid
        self.Path = Path
        self.Pid = Pid
        self.TerminatedAt = TerminatedAt
class ProductFields:
    """Plain data holder for product-specific free-form fields."""
    def __init__(self, string=None):
        self.string = string
class RelatedFinding:
    """Plain data holder referencing another finding by id and product ARN."""
    def __init__(self, Id=None, ProductArn=None):
        self.Id = Id
        self.ProductArn = ProductArn
class Recommendation:
    """Plain data holder for a remediation recommendation (text plus URL)."""
    def __init__(self, Text=None, Url=None):
        self.Text = Text
        self.Url = Url
class Remediation:
    """Plain data holder wrapping a Recommendation."""
    def __init__(self, Recommendation=None):
        self.Recommendation = Recommendation
class AwsEc2Instance:
    """Plain data holder describing an EC2 instance resource."""
    def __init__(self, IamInstanceProfileArn=None, ImageId=None, IpV4Addresses=None, IpV6Addresses=None, KeyName=None,
                 LaunchedAt=None, SubnetId=None, Type=None, VpcId=None):
        self.IamInstanceProfileArn = IamInstanceProfileArn
        self.ImageId = ImageId
        self.IpV4Addresses = IpV4Addresses
        self.IpV6Addresses = IpV6Addresses
        self.KeyName = KeyName
        self.LaunchedAt = LaunchedAt
        self.SubnetId = SubnetId
        self.Type = Type
        self.VpcId = VpcId
class AwsIamAccessKey:
    """Plain data holder describing an IAM access key resource."""
    def __init__(self, CreatedAt=None, Status=None, UserName=None):
        self.CreatedAt = CreatedAt
        self.Status = Status
        self.UserName = UserName
class AwsS3Bucket:
    """Plain data holder describing an S3 bucket resource."""
    def __init__(self, OwnerId=None, OwnerName=None):
        self.OwnerId = OwnerId
        self.OwnerName = OwnerName
class Container:
    """Plain data holder describing a container resource."""
    def __init__(self, ImageId=None, ImageName=None, LaunchedAt=None, Name=None):
        self.ImageId = ImageId
        self.ImageName = ImageName
        self.LaunchedAt = LaunchedAt
        self.Name = Name
class Details:
    """Plain data holder grouping the type-specific detail objects of a Resource."""
    def __init__(self, AwsEc2Instance=None, AwsIamAccessKey=None, AwsS3Bucket=None, Container=None, Other=None):
        self.AwsEc2Instance = AwsEc2Instance
        self.AwsIamAccessKey = AwsIamAccessKey
        self.AwsS3Bucket = AwsS3Bucket
        self.Container = Container
        self.Other = Other
class Resource:
    """Plain data holder for a resource a finding refers to."""
    def __init__(self, Details=None, Id=None, Partition=None, Region=None, Tags=None, Type=None):
        self.Details = Details
        self.Id = Id
        self.Partition = Partition
        self.Region = Region
        self.Tags = Tags
        self.Type = Type
class Severity:
    """Plain data holder for a finding's severity (normalized/product scores and label)."""
    def __init__(self, Normalized=None, Product=None, Label=None):
        self.Normalized = Normalized
        self.Product = Product
        self.Label = Label
class ThreatIntelIndicator:
    """Plain data holder for one threat-intelligence indicator on a finding."""
    def __init__(self, Category=None, LastObservedAt=None, Source=None, SourceUrl=None, Type=None, Value=None):
        self.Category = Category
        self.LastObservedAt = LastObservedAt
        self.Source = Source
        self.SourceUrl = SourceUrl
        self.Type = Type
        self.Value = Value
class UserDefinedFields:
    """Plain data holder for custom source-* fields attached to a finding."""
    def __init__(self, SourceRuleName=None, SourceEmail=None, SourceUsername=None, SourceFullName=None,
                 SourceLoginName=None, SourceExtraData=None,
                 SourceHostname=None, SourceDestinations=None):
        self.SourceRuleName = SourceRuleName
        self.SourceEmail = SourceEmail
        self.SourceUsername = SourceUsername
        self.SourceFullName = SourceFullName
        self.SourceLoginName = SourceLoginName
        self.SourceExtraData = SourceExtraData
        self.SourceHostname = SourceHostname
        self.SourceDestinations = SourceDestinations
class Finding:
    """Top-level finding record aggregating the section objects defined above.

    NOTE(review): the CamelCase attribute names appear to mirror an upstream
    security-finding JSON schema (e.g. AWS Security Hub's finding format) —
    confirm before renaming anything.
    """
    def __init__(self, AwsAccountId=None, Compliance=None, Confidence=None, CreatedAt=None, Criticality=None,
                 Description=None, FirstObservedAt=None, GeneratorId=None, Id=None, LastObservedAt=None, Malware=None,
                 Network=None, Note=None, Process=None, ProductArn=None, ProductFields=None, RecordState=None,
                 RelatedFindings=None, Remediation=None, Resources=None, SchemaVersion=None, Severity=None,
                 SourceUrl=None, ThreatIntelIndicators=None, Title=None, Types=None, UpdatedAt=None,
                 UserDefinedFields=None, VerificationState=None, WorkflowState=None):
        self.AwsAccountId = AwsAccountId
        self.Compliance = Compliance
        self.Confidence = Confidence
        self.CreatedAt = CreatedAt
        self.Criticality = Criticality
        self.Description = Description
        self.FirstObservedAt = FirstObservedAt
        self.GeneratorId = GeneratorId
        self.Id = Id
        self.LastObservedAt = LastObservedAt
        self.Malware = Malware
        self.Network = Network
        self.Note = Note
        self.Process = Process
        self.ProductArn = ProductArn
        self.ProductFields = ProductFields
        self.RecordState = RecordState
        self.RelatedFindings = RelatedFindings
        self.Remediation = Remediation
        self.Resources = Resources
        self.SchemaVersion = SchemaVersion
        self.Severity = Severity
        self.SourceUrl = SourceUrl
        self.ThreatIntelIndicators = ThreatIntelIndicators
        self.Title = Title
        self.Types = Types
        self.UpdatedAt = UpdatedAt
        self.UserDefinedFields = UserDefinedFields
        self.VerificationState = VerificationState
        self.WorkflowState = WorkflowState
class JsonFormatClass:
    """Root wrapper object holding a single Finding."""
    def __init__(self, Finding=None):
        self.Finding = Finding
|
"""
Maintain version for CropMl.
"""
MAJOR = 0
"""(int) Version major component."""
MINOR = 0
"""(int) Version minor component."""
POST = 2
"""(int) Version post or bugfix component."""
__version__ = ".".join([str(s) for s in (MAJOR, MINOR, POST)])
|
# Training / inference configuration for an AdvancedEAST-style text detector.
# The trailing digits of train_task_id encode the training image size.
train_task_id = '2T736'
initial_epoch = 0
epoch_num = 24
lr = 1e-3
decay = 5e-4
# clipvalue = 0.5  # default 0.5, 0 means no clip
patience = 2
load_weights = True
# Loss-term weights.
lambda_inside_score_loss = 4.0
lambda_side_vertex_code_loss = 1.0
lambda_side_vertex_coord_loss = 1.0
total_img = 222199
validation_split_ratio = 0.1
# Image sizes are derived from the last three characters of train_task_id.
max_train_img_size = int(train_task_id[-3:])
max_predict_img_size = int(train_task_id[-3:])  # 2400
assert max_train_img_size in [256, 384, 512, 640, 736], \
    'max_train_img_size must in [256, 384, 512, 640, 736]'
# Larger training images need a smaller batch to fit in memory.
if max_train_img_size == 256:
    batch_size = 8
elif max_train_img_size == 384:
    batch_size = 4
elif max_train_img_size == 512:
    batch_size = 2
else:
    batch_size = 1
steps_per_epoch = total_img * (1 - validation_split_ratio) // batch_size
validation_steps = total_img * validation_split_ratio // batch_size
# data_dir = '/media/haoxin/A1/data/AdvancedEAST'
data_dir = '/data/kuaidi01/dataset_detect/AdvancedEast_data'
origin_image_dir_name = 'image_all/'
origin_txt_dir_name = 'txt_all/'
train_image_dir_name = 'images_%s/' % train_task_id
train_label_dir_name = 'labels_%s/' % train_task_id
show_gt_image_dir_name = 'show_gt_images_%s/' % train_task_id
show_act_image_dir_name = 'show_act_images_%s/' % train_task_id
gen_origin_img = True
draw_gt_quad = True
draw_act_quad = True
val_fname = 'val_%s.txt' % train_task_id
train_fname = 'train_%s.txt' % train_task_id
# in paper it's 0.3, maybe to large to this problem
shrink_ratio = 0.2
# pixels between 0.1 and 0.3 are side pixels
shrink_side_ratio = 0.6
epsilon = 1e-4
num_channels = 3
feature_layers_range = range(5, 1, -1)
# feature_layers_range = range(3, 0, -1)
feature_layers_num = len(feature_layers_range)
# pixel_size = 4
pixel_size = 2 ** feature_layers_range[-1]
locked_layers = False
# Checkpoint path template; epoch and val_loss are filled in by the trainer.
model_weights_path = 'model/weights_%s.{epoch:03d}-{val_loss:.3f}.h5' \
    % train_task_id
saved_model_file_path = 'saved_model/east_model_%s.h5' % train_task_id
saved_model_weights_file_path = 'saved_model/east_model_weights_%s.h5'\
    % train_task_id
# Post-processing thresholds for prediction.
pixel_threshold = 0.9
side_vertex_pixel_threshold = 0.9
trunc_threshold = 0.1
predict_cut_text_line = False
predict_write2txt = True
|
# Read the weight of five people and report the heaviest and lightest reading.
peso_maior = 0
peso_menor = 0
for i in range(1, 6):
    peso_pessoa = float(input(f'Peso da {i}ª pessoa em Kg: '))
    if i == 1:
        # First reading initialises both extremes.
        peso_maior = peso_pessoa
        peso_menor = peso_pessoa
    else:
        if peso_pessoa > peso_maior:
            peso_maior = peso_pessoa
        if peso_pessoa < peso_menor:
            peso_menor = peso_pessoa
print(f'O peso maior lido foi de {peso_maior}Kg!')
print(f'O peso menor lido foi de {peso_menor}Kg!')
|
# Section07-2
# Python classes in depth
# Inheritance and multiple inheritance
# Example 1
# Inheritance basics
# A super (parent) class exposes all of its attributes and methods to its subclasses,
# minimising duplicated code and improving productivity.
# e.g. ramen -> attributes (kind, company, flavour, noodle type, name) in a parent class
# Instantiating the car class
class car:
    """Parent class holding the attributes shared by all cars (type, color)."""
    def __init__(self, tp, color):
        self.type = tp
        self.color = color
    def show(self):
        # Simple marker string so subclasses can demonstrate calling the parent.
        return 'Car Class "show method!"'
# By inheriting from car, the subclass below can use every method defined on it.
class BmwCar(car):
    """Sub class: inherits type/color handling from car and adds a model name."""
    def __init__(self, car_name, tp, color):
        super().__init__(tp, color)  # delegate the shared attributes to the parent class
        self.car_name = car_name  # attribute specific to this subclass
    def show_model(self) -> str:
        return "Your Car Name : %s" % self.car_name
# Super class: car; subclasses: BenzCar, BmwCar
class BenzCar(car):
    """Sub class: like BmwCar, but also overrides show() to extend the parent's version."""
    def __init__(self, car_name, tp, color):
        super().__init__(tp, color)  # delegate the shared attributes to the parent class
        self.car_name = car_name  # attribute specific to this subclass
    def show_model(self) -> str:
        return "Your Car Name : %s" % self.car_name
    def show(self):
        # Method overriding: call the parent implementation first, then extend it.
        print(super().show())
        return 'Car Info : %s %s %s' % (self.car_name, self.type, self.color)
# Typical usage: a subclass instance can access both its own and its parent's members.
model1 = BmwCar('520d', 'sedan', 'red')
print(model1.color)  # from the super class
print(model1.type)  # from the super class
print(model1.car_name)  # from the subclass
print(model1.show())  # implemented on the super class
print(model1.show_model())  # from the subclass
print(model1.__dict__)  # shows the inherited attributes together with the subclass's own
print()
# Method overriding
model2 = BenzCar("220d", "suv", "black")
print(model2.show())  # resolves to the subclass's overriding method
# Parent method call
model3 = BenzCar("350s", "sedan", "silver")
print(model3.show())  # the override also invokes the parent implementation
# Inheritance info (method resolution order)
print(BmwCar.mro())  # view the inheritance chain
print(BenzCar.mro())
# Example 2
# Multiple inheritance
class X():
    pass
class Y():
    pass
class Z():
    pass
class A(X, Y):
    pass
class B(Y, Z):
    pass
class M(B, A, Z):  # M can use members of every listed class (multiple inheritance)
    pass
print()
# mro() shows the order Python searches bases (C3 linearization).
print(M.mro())
print(A.mro())
print(A.mro()) |
#encoding:utf-8
# Subreddit mirrored by this channel module and its Telegram channel handle.
subreddit = 'tifu'
t_channel = '@r_channels_tifu'


def send_post(submission, r2t):
    """Forward a reddit submission to the Telegram channel via the r2t sender."""
    return r2t.send_simple(submission)
|
#
# @lc app=leetcode id=217 lang=python3
#
# [217] Contains Duplicate
#
class Solution:
    def containsDuplicate(self, nums: List[int]) -> bool:
        """Return True when nums contains at least one repeated value."""
        # Deduplicating through a set shrinks it below len(nums) iff a duplicate exists.
        return len(set(nums)) < len(nums)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
class LookupCache:
    """
    Prime a cache of all valid knowledge base phrases to prevent costly DB lookups for phrases that do not exist.
    """

    def __init__(self,
                 resource_path='../kb-classifier/data-wiki/resources.txt',
                 redirect_path='../kb-classifier/data-wiki/redirect_resources.txt',
                 anchor_path='../kb-classifier/data-wiki/anchors.txt',
                 use_anchors_only=False):
        """
        :param resource_path: the path to the file containing valid resources in DBpedia.
        :param redirect_path: the path to the file containing redirects in DBpedia.
        :param anchor_path: the path to the file containing valid anchor text in DBpedia.
        :param use_anchors_only: set to True if only anchors should be considered and not direct resource or redirect
                                 matches.
        """
        # Resource and redirect caches are only needed (and only loaded) when
        # direct matches are enabled; anchors are always loaded.
        if not use_anchors_only:
            self.resource_cache = self.load_phrase_cache(resource_path)
            self.redirect_cache = self.load_phrase_cache(redirect_path)
        self.anchor_cache = self.load_phrase_cache(anchor_path)
        self.use_anchors_only = use_anchors_only
        print('Cache Initialised')

    def load_phrase_cache(self, phrase_path):
        """
        Given a file of phrases, returns a set of those phrases so it can be used as a cache.

        :param phrase_path: the path to the file containing phrases.
        :returns: a set of phrases contained in the file.
        """
        with open(phrase_path, 'r') as phrases:
            return {phrase.strip() for phrase in phrases if phrase.strip() != ''}

    @staticmethod
    def _for_resource_search(phrase):
        """
        Normalise a phrase for resource/redirect lookup: resources always have
        their first letter as a capital.  Single-character phrases are left
        unchanged (preserves the original lookup behaviour).
        """
        if len(phrase) > 1:
            return phrase[0].upper() + phrase[1:]
        return phrase

    def contains_exact(self, phrase):
        """
        Does an exact resource match for the phrase exist.

        :param phrase: the phrase to match.
        :returns: True if a resource exists for the phrase. Will always return False if use_anchors_only=True.
        """
        return not self.use_anchors_only and self._for_resource_search(phrase) in self.resource_cache

    def contains_redirect(self, phrase):
        """
        Does a redirect match for the phrase exist.

        :param phrase: the phrase to match.
        :returns: True if a redirect exists for the phrase. Will always return False if use_anchors_only=True.
        """
        return not self.use_anchors_only and self._for_resource_search(phrase) in self.redirect_cache

    def contains_anchor(self, phrase):
        """
        Does an anchor match for the phrase exist.

        :param phrase: the phrase to match.
        :returns: True if an anchor exists for the phrase.
        """
        return phrase in self.anchor_cache

    def translate(self, phrase):
        """
        No translation necessary for DBpedia.
        """
        return phrase
|
def dictionary_masher(dict_a, dict_b):
    """Merge into dict_a (in place) every entry of dict_b whose key it lacks; return dict_a."""
    for key, value in dict_b.items():
        # setdefault only inserts when the key is absent, so existing values win.
        dict_a.setdefault(key, value)
    return dict_a
class Accessor(property):
    """Accessor is a property factory for structure fields.

    The Accessor is used by the Meta metaclass to generate accessors for the
    structure fields inside a new structure class.
    """
    def __init__(self, item, key):
        # Build a property whose getter/setter delegate to the field `item`,
        # reading/writing the slot at `key` in the owning structure's backing
        # store (s.__instance__.data).
        # NOTE(review): assumes item.getter/item.setter convert between raw
        # slot values and user-facing values — confirm against the Meta
        # metaclass and the field item types, which are defined elsewhere.
        super().__init__(
            lambda s: item.getter(s.__instance__.data[key]),
            lambda s, v: item.setter(s.__instance__.data[key], v),
        )
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.append(os.path.join(os.path.dirname(__name__), '..'))
# sys.path.insert(0, os.path.abspath('../src/dvg_devices'))
# -- Project information -----------------------------------------------------
project = "DvG_Devices"
copyright = "2021, Dennis van Gils"
author = "Dennis van Gils"
# The full version, including alpha/beta/rc tags
release = "1.0.0"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.napoleon",
"sphinx.ext.viewcode",
"sphinx.ext.intersphinx",
"sphinx_qt_documentation",
]
intersphinx_mapping = {
"PyQt5": ("https://www.riverbankcomputing.com/static/Docs/PyQt5/", None),
"NumPy": ("https://numpy.org/doc/stable/", None),
"python": ("https://docs.python.org/3", None),
"serial": ("https://pyserial.readthedocs.io/en/latest/", None),
"dvg_qdeviceio": (
"https://python-dvg-qdeviceio.readthedocs.io/en/latest/",
None,
),
}
qt_documentation = "Qt5"
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
master_doc = "index"
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# 'bizstyle', 'classic', 'sphinx_rtd_theme'
html_theme = "sphinx_rtd_theme"
html_theme_path = [
"_themes",
]
# html_theme_options = {
# 'canonical_url': '',
# 'analytics_id': 'UA-XXXXXXX-1', # Provided by Google in your dashboard
# 'logo_only': False,
# 'display_version': True,
# 'prev_next_buttons_location': 'bottom',
# 'style_external_links': False,
# 'style_nav_header_background': '#2980B9',
# Toc options
# 'collapse_navigation': True,
# 'sticky_navigation': True,
# 'navigation_depth': 4,
# 'includehidden': True,
# 'titles_only': False
# }
html_last_updated_fmt = "%d-%m-%Y"
html4_writer = True
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# Napoleon settings
napoleon_google_docstring = True
napoleon_numpy_docstring = False
napoleon_include_init_with_doc = False
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False # True to create block. Downside is that we lose hyperlinks to class variables
napoleon_use_param = False # False
napoleon_use_rtype = True
|
class Solution:
    def thirdMax(self, nums: List[int]) -> int:
        """
        Return the third-largest distinct value in nums, or the maximum when
        fewer than three distinct values exist.

        Fix vs. previous version: no longer mutates the caller's list via
        repeated nums.remove() (an O(n^2) loop with a shadowed index variable).
        """
        distinct = sorted(set(nums))
        if len(distinct) < 3:
            # Fewer than three distinct values: fall back to the maximum.
            return distinct[-1]
        return distinct[-3]
|
# Copyright 2019 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"Rule to run the terser binary under bazel"
load("@build_bazel_rules_nodejs//:providers.bzl", "run_node")
_DOC = """Run the terser minifier.
Typical example:
```python
load("//packages/terser:index.bzl", "terser_minified")
terser_minified(
name = "out.min",
src = "input.js",
config_file = "terser_config.json",
)
```
Note that the `name` attribute determines what the resulting files will be called.
So the example above will output `out.min.js` and `out.min.js.map` (since `sourcemap` defaults to `true`).
If the input is a directory, then the output will also be a directory, named after the `name` attribute.
"""
# Attribute schema for terser_minified; the `doc` strings double as the
# rule's generated API documentation.
_TERSER_ATTRS = {
    "args": attr.string_list(
        doc = """Additional command line arguments to pass to terser.
Terser only parses minify() args from the config file so additional arguments such as `--comments` may
be passed to the rule using this attribute. See https://github.com/terser/terser#command-line-usage for the
full list of terser CLI options.""",
    ),
    "config_file": attr.label(
        doc = """A JSON file containing Terser minify() options.
This is the file you would pass to the --config-file argument in terser's CLI.
https://github.com/terser-js/terser#minify-options documents the content of the file.
Bazel will make a copy of your config file, treating it as a template.
> Run bazel with `--subcommands` to see the path to the copied file.
If you use the magic strings `"bazel_debug"` or `"bazel_no_debug"`, these will be
replaced with `true` and `false` respecting the value of the `debug` attribute
or the `--compilation_mode=dbg` bazel flag.
For example,
```
{
    "compress": {
        "arrows": "bazel_no_debug"
    }
}
```
Will disable the `arrows` compression setting when debugging.
If `config_file` isn't supplied, Bazel will use a default config file.
""",
        allow_single_file = True,
        # These defaults match how terser was run in the legacy built-in rollup_bundle rule.
        # We keep them the same so it's easier for users to migrate.
        default = Label("//packages/terser:terser_config.default.json"),
    ),
    "debug": attr.bool(
        doc = """Configure terser to produce more readable output.
Instead of setting this attribute, consider using debugging compilation mode instead
bazel build --compilation_mode=dbg //my/terser:target
so that it only affects the current build.
""",
    ),
    "sourcemap": attr.bool(
        doc = "Whether to produce a .js.map output",
        default = True,
    ),
    "src": attr.label(
        doc = """File(s) to minify.
Can be a .js file, a rule producing .js files as its default output, or a rule producing a directory of .js files.
Note that you can pass multiple files to terser, which it will bundle together.
If you want to do this, you can pass a filegroup here.""",
        allow_files = [".js", ".map", ".mjs"],
        mandatory = True,
    ),
    "terser_bin": attr.label(
        doc = "An executable target that runs Terser",
        default = Label("//packages/terser/bin:terser"),
        executable = True,
        cfg = "host",
    ),
}
def _filter_js(files):
    # Keep directories (their contents may be .js) plus .js/.mjs files; drops e.g. .map files.
    return [f for f in files if f.is_directory or f.extension == "js" or f.extension == "mjs"]
def _terser(ctx):
    "Generate actions to create terser config run terser"

    # CLI arguments; see https://www.npmjs.com/package/terser#command-line-usage
    args = ctx.actions.args()
    inputs = ctx.files.src[:]
    outputs = []

    # Split inputs into minifiable sources and their accompanying sourcemaps.
    sources = _filter_js(inputs)
    sourcemaps = [f for f in inputs if f.extension == "map"]
    directory_srcs = [s for s in sources if s.is_directory]
    if len(directory_srcs) > 0:
        # Directory input: terser produces a directory output named after the target.
        if len(sources) > 1:
            fail("When directories are passed to terser_minified, there should be only one input")
        outputs.append(ctx.actions.declare_directory(ctx.label.name))
    else:
        # File input(s): declare <name>.js and, when enabled, <name>.js.map.
        outputs.append(ctx.actions.declare_file("%s.js" % ctx.label.name))
        if ctx.attr.sourcemap:
            outputs.append(ctx.actions.declare_file("%s.js.map" % ctx.label.name))
    args.add_all([s.path for s in sources])
    args.add_all(["--output", outputs[0].path])

    # Debug output when requested via attribute or --compilation_mode=dbg.
    debug = ctx.attr.debug or ctx.var["COMPILATION_MODE"] == "dbg"
    if debug:
        args.add("--debug")
        args.add("--beautify")
    if ctx.attr.sourcemap:
        # Source mapping options are comma-packed into one argv
        # see https://github.com/terser-js/terser#command-line-usage
        source_map_opts = ["includeSources", "base=" + ctx.bin_dir.path]
        if len(sourcemaps) == 0:
            source_map_opts.append("content=inline")
        elif len(sourcemaps) == 1:
            source_map_opts.append("content='%s'" % sourcemaps[0].path)
        else:
            fail("When sourcemap is True, there should only be one or none input sourcemaps")

        # Add a comment at the end of the js output so DevTools knows where to find the sourcemap
        source_map_opts.append("url='%s.js.map'" % ctx.label.name)

        # This option doesn't work in the config file, only on the CLI
        args.add_all(["--source-map", ",".join(source_map_opts)])

    # Expand the (templated) config file, substituting the debug magic strings.
    opts = ctx.actions.declare_file("_%s.minify_options.json" % ctx.label.name)
    inputs.append(opts)
    ctx.actions.expand_template(
        template = ctx.file.config_file,
        output = opts,
        substitutions = {
            "\"bazel_debug\"": str(debug).lower(),
            "\"bazel_no_debug\"": str(not debug).lower(),
        },
    )
    args.add_all(["--config-file", opts.path])
    args.add_all(ctx.attr.args)
    run_node(
        ctx,
        inputs = inputs,
        outputs = outputs,
        executable = "terser_bin",
        arguments = [args],
        env = {"COMPILATION_MODE": ctx.var["COMPILATION_MODE"]},
        progress_message = "Minifying JavaScript %s [terser]" % (outputs[0].short_path),
    )
    return [
        DefaultInfo(files = depset(outputs)),
    ]
# Public rule: minify JavaScript with terser (see _DOC for usage).
terser_minified = rule(
    doc = _DOC,
    implementation = _terser,
    attrs = _TERSER_ATTRS,
)
|
#
# PySNMP MIB module GSM7312-QOS-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/GSM7312-QOS-MIB
# Produced by pysmi-0.3.4 at Wed May 1 13:20:03 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
# Machine-generated by pysmi from the GSM7312-QOS-MIB ASN.1 source (see header
# comment); regenerate from the MIB rather than editing by hand.
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, ValueSizeConstraint, ConstraintsUnion, ConstraintsIntersection, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "ValueSizeConstraint", "ConstraintsUnion", "ConstraintsIntersection", "SingleValueConstraint")
gsm7312, = mibBuilder.importSymbols("GSM7312-REF-MIB", "gsm7312")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
TimeTicks, Unsigned32, Counter32, ModuleIdentity, Gauge32, Bits, MibScalar, MibTable, MibTableRow, MibTableColumn, ObjectIdentity, NotificationType, IpAddress, MibIdentifier, Integer32, iso, Counter64 = mibBuilder.importSymbols("SNMPv2-SMI", "TimeTicks", "Unsigned32", "Counter32", "ModuleIdentity", "Gauge32", "Bits", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ObjectIdentity", "NotificationType", "IpAddress", "MibIdentifier", "Integer32", "iso", "Counter64")
DisplayString, TextualConvention, RowStatus = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention", "RowStatus")
gsm7312QOS = ModuleIdentity((1, 3, 6, 1, 4, 1, 4526, 1, 6, 3))
gsm7312QOS.setRevisions(('2003-05-06 12:00',))
# setRevisionsDescriptions is only called when the mibBuilder reports a new-enough version.
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    if mibBuilder.loadTexts: gsm7312QOS.setRevisionsDescriptions(('Initial revision.',))
if mibBuilder.loadTexts: gsm7312QOS.setLastUpdated('200305061200Z')
if mibBuilder.loadTexts: gsm7312QOS.setOrganization('Netgear')
if mibBuilder.loadTexts: gsm7312QOS.setContactInfo('')
if mibBuilder.loadTexts: gsm7312QOS.setDescription('')
mibBuilder.exportSymbols("GSM7312-QOS-MIB", PYSNMP_MODULE_ID=gsm7312QOS, gsm7312QOS=gsm7312QOS)
|
'''
You are given an array points where points[i] = [xi, yi] is the coordinates of the ith point on a 2D plane. Multiple points can have the same coordinates.
You are also given an array queries where queries[j] = [xj, yj, rj] describes a circle centered at (xj, yj) with a radius of rj.
For each query queries[j], compute the number of points inside the jth circle. Points on the border of the circle are considered inside.
Return an array answer, where answer[j] is the answer to the jth query.
'''
class Solution:
    def countPoints(self, points: List[List[int]], queries: List[List[int]]) -> List[int]:
        """
        For each query circle (x, y, r), count how many points lie inside or
        on the circle; returns one count per query.

        Improvement: compare squared distances instead of taking a square
        root — exact for integer coordinates (no float rounding at the
        boundary) and cheaper per point.
        """
        answer = []
        for qx, qy, r in queries:
            r_sq = r * r
            count = 0
            for px, py in points:
                if (px - qx) ** 2 + (py - qy) ** 2 <= r_sq:
                    count += 1
            answer.append(count)
        return answer
|
class Solution(object):
    def tribonacci(self, n):
        """
        :type n: int
        :rtype: int

        Return the n-th tribonacci number (T0=0, T1=1, T2=1,
        Tn = Tn-1 + Tn-2 + Tn-3).
        """
        if n <= 1:
            return n
        # Roll three running values instead of keeping a full table.
        a, b, c = 0, 1, 1
        for _ in range(n - 2):
            a, b, c = b, c, a + b + c
        return c
def extractWwwWhimsicalstargazersNet(item):
    '''
    Parser for 'www.whimsicalstargazers.net'

    Maps release-post tags to series names; returns a release message,
    None for previews/untagged chapters, or False when no tag matches.
    '''
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if not (chp or vol) or "preview" in item['title'].lower():
        return None
    # Fix: the previous version contained a syntactically invalid duplicate of
    # the "i was reincarnated and now i'm a maid" entry with an unescaped
    # apostrophe; the correctly escaped entry below is kept.
    tagmap = [
        ('The Reborn Little Girl Won\'t Give Up', 'The Reborn Little Girl Won\'t Give Up', 'translated'),
        ('Drop!! ~A Tale of the Fragrance Princess~', 'Drop!! ~A Tale of the Fragrance Princess~', 'translated'),
        ('The Saint\'s Magic Power is Omnipotent', 'The Saint\'s Magic Power is Omnipotent', 'translated'),
        ('princess bibliophile', 'princess bibliophile', 'translated'),
        ('a rose dedicated to you', 'a rose dedicated to you', 'translated'),
        ('slimes can dream too', 'slimes can dream too', 'translated'),
        ('i was reincarnated and now i\'m a maid', 'i was reincarnated and now i\'m a maid', 'translated'),
        ('blue monster\'s shell', 'The Blue Monster\'s Shell', 'translated'),
        ('Eliza', 'I Reincarnated as a Noble Villainess But Why Did It Turn Out Like This?', 'translated'),
        ('Just the Two of Us in this Vast World', 'Just the Two of Us in this Vast World', 'translated'),
        ('Kill The Dragon', 'Kill The Dragon', 'translated'),
        ('east road quest', 'east road quest', 'translated'),
        ('PRC', 'PRC', 'translated'),
        ('Loiterous', 'Loiterous', 'oel'),
    ]
    for tagname, name, tl_type in tagmap:
        if tagname in item['tags']:
            return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
#!/usr/bin/env python3
"""
This document is created by magic at 2018/8/16
"""
class BaseStack:
    """
    A minimal stack implemented on top of a Python list.
    """
    def __init__(self):
        self.items = []

    def push(self, value):
        """Push value onto the top of the stack."""
        self.items.append(value)

    def pop(self):
        """Remove and return the top item (IndexError when empty).

        Fix: uses list.pop(), which is O(1); the previous version rebuilt
        the whole list with a slice copy on every pop (O(n)).
        """
        return self.items.pop()

    @property
    def size(self):
        """Number of items currently on the stack."""
        return len(self.items)

    @property
    def empty(self):
        """True when the stack holds no items."""
        return len(self.items) == 0
if __name__ == '__main__':
    # Quick manual demonstration of the stack operations (final pop empties it).
    s = BaseStack()
    s.push('123')
    s.push("magic")
    s.push(5)
    print(s.items)
    print(s.pop())
    print(s.pop())
    print(s.size)
    print(s.pop())
    print(s.empty)
|
def somaAll(x, y):
    """Return the sum of the integers from x through y (always including x)."""
    # The original collected x, x+1, ..., y in a list one element at a time;
    # range() produces the same sequence in one step.
    nums = [x]
    nums.extend(range(x + 1, y + 1))
    return sum(nums)
def somaintervalo(ini, fim):
    """Return the sum of the integers in the closed interval [ini, fim].

    Yields 0 when ini > fim, matching the original while loop.
    """
    return sum(range(ini, fim + 1))
# Demo: sum of 1..3 -> 6.
print(somaAll(1,3))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
class Fibonacci:
    """Iterator over the first max_n Fibonacci numbers (1, 1, 2, 3, 5, ...)."""

    def __init__(self, max_n):
        self.MaxN = max_n
        self.N = 0
        self.A = 0
        self.B = 0

    def __iter__(self):
        # Reset the state so the same instance can be iterated repeatedly.
        self.N = 0
        self.A = 0
        self.B = 1
        return self

    def __next__(self):
        # Stop once max_n values have been produced.
        if self.N >= self.MaxN:
            raise StopIteration
        self.N += 1
        self.A, self.B = self.B, self.A + self.B
        return self.A
# Demo: the first 14 Fibonacci numbers on one line, then the first 16 as a list.
print(*Fibonacci(14), end=" \n")
print(list(Fibonacci(16)))
|
# 5. Создать (программно) текстовый файл, записать в него программно набор чисел, разделенных пробелами. Программа
# должна подсчитывать сумму чисел в файле и выводить ее на экран.
# Open (and truncate) the file in read/write mode so the numbers can be
# written and then read back for the sum without reopening.
with open('numbers.txt', 'w+') as f:
    is_writing = True
    while is_writing:
        new_string = input('> ')
        new_string = new_string.split()
        for num in new_string:
            try:
                float(num)
            except ValueError:
                # First token that is not a number ends the input loop.
                is_writing = False
                break
            else:
                # try/else: token parsed cleanly -> append it to the file.
                f.write(num + ' ')
    # Rewind to the start and total everything that was written.
    f.seek(0)
    total = 0
    for number in f.read().split():
        total += float(number)
    print(total)
|
def addr(regs, a, b, c):
    """addr: store regs[a] + regs[b] into slot c of a fresh register list."""
    out = list(regs)
    out[c] = regs[a] + regs[b]
    return out
def addi(regs, a, b, c):
    """addi: store regs[a] + immediate b into slot c of a fresh register list."""
    out = list(regs)
    out[c] = regs[a] + b
    return out
def mulr(regs, a, b, c):
    """mulr: store regs[a] * regs[b] into slot c of a fresh register list."""
    out = list(regs)
    out[c] = regs[a] * regs[b]
    return out
def muli(regs, a, b, c):
    """muli: store regs[a] * immediate b into slot c of a fresh register list."""
    out = list(regs)
    out[c] = regs[a] * b
    return out
def banr(regs, a, b, c):
    """banr: store regs[a] & regs[b] into slot c of a fresh register list."""
    out = list(regs)
    out[c] = regs[a] & regs[b]
    return out
def bani(regs, a, b, c):
    """bani: store regs[a] & immediate b into slot c of a fresh register list."""
    out = list(regs)
    out[c] = regs[a] & b
    return out
def borr(regs, a, b, c):
    """borr: store regs[a] | regs[b] into slot c of a fresh register list."""
    out = list(regs)
    out[c] = regs[a] | regs[b]
    return out
def bori(regs, a, b, c):
    """bori: store regs[a] | immediate b into slot c of a fresh register list."""
    out = list(regs)
    out[c] = regs[a] | b
    return out
def setr(regs, a, b, c):
    """setr: copy regs[a] into slot c of a fresh register list (b is ignored)."""
    out = list(regs)
    out[c] = regs[a]
    return out
def seti(regs, a, b, c):
    """seti: store immediate a into slot c of a fresh register list (b is ignored)."""
    out = list(regs)
    out[c] = a
    return out
def gtir(regs, a, b, c):
    """gtir: out[c] = 1 if immediate a > regs[b], else 0."""
    out = list(regs)
    out[c] = 1 if a > regs[b] else 0
    return out
def gtri(regs, a, b, c):
    """gtri: out[c] = 1 if regs[a] > immediate b, else 0."""
    out = list(regs)
    out[c] = 1 if regs[a] > b else 0
    return out
def gtrr(regs, a, b, c):
    """gtrr: out[c] = 1 if regs[a] > regs[b], else 0."""
    out = list(regs)
    out[c] = 1 if regs[a] > regs[b] else 0
    return out
def eqir(regs, a, b, c):
    """eqir: out[c] = 1 if immediate a == regs[b], else 0."""
    out = list(regs)
    out[c] = 1 if a == regs[b] else 0
    return out
def eqri(regs, a, b, c):
    """eqri: out[c] = 1 if regs[a] == immediate b, else 0."""
    out = list(regs)
    out[c] = 1 if regs[a] == b else 0
    return out
def eqrr(regs, a, b, c):
    """eqrr: out[c] = 1 if regs[a] == regs[b], else 0."""
    out = list(regs)
    out[c] = 1 if regs[a] == regs[b] else 0
    return out
# Dispatch table: elf-assembly mnemonic -> implementing function.
ops = {
    "addr": addr,
    "addi": addi,
    "mulr": mulr,
    "muli": muli,
    "banr": banr,
    "bani": bani,
    "borr": borr,
    "bori": bori,
    "setr": setr,
    "seti": seti,
    "gtir": gtir,
    "gtri": gtri,
    "gtrr": gtrr,
    "eqir": eqir,
    "eqri": eqri,
    "eqrr": eqrr,
}
def solve(input):
    """Execute the elf-code program in *input* and print register 0.

    input[0] is the "#ip N" directive binding the instruction pointer to
    register N; the remaining lines are "<op> a b c" instructions. Runs
    until the instruction pointer leaves the program.

    (Note: the parameter name shadows the builtin input(); kept for
    caller compatibility.)
    """
    parts = input[0].split()
    ip_reg = int(parts[1])

    program = []
    for line in input[1:]:
        tok = line.split()
        program.append((tok[0], int(tok[1]), int(tok[2]), int(tok[3])))

    registers = [0, 0, 0, 0, 0, 0]
    ip = 0
    iterations = 0
    while True:
        name = program[ip][0]
        # Mirror the ip into its bound register, execute, then read it back
        # (the instruction may have modified it) and advance.
        registers[ip_reg] = ip
        registers = ops[name](registers, *program[ip][1:])
        ip = registers[ip_reg] + 1
        if not (0 <= ip < len(program)):
            break
        iterations += 1
    print(registers[0])
# with open('test.txt', 'r') as f:
# input = f.read().splitlines()
# solve(input)
# Run the program from the puzzle input file.
with open('input.txt', 'r') as f:
    input = f.read().splitlines()  # NOTE: shadows the builtin input()
    solve(input)
|
# Read two integers from stdin and print their difference n - m.
n = int(input())
m = int(input())
dif = n - m
print(dif)
|
def alphabetically_first(s1, s2):
    """Return whichever of the two strings sorts first lexicographically.

    On a tie, s1 is returned (min keeps the first argument, just as the
    original stable sort kept s1 at index 0).
    """
    return min(s1, s2)
|
class Solution:
    def slowestKey(self, releaseTimes: List[int], keysPressed: str) -> str:
        """Return the key held down longest; ties go to the larger letter."""
        longest = [0] * 26          # best press duration seen per letter
        prev_release = 0            # release time of the previous key press
        for release, key in zip(releaseTimes, keysPressed):
            slot = ord(key) - ord('a')
            duration = release - prev_release
            if duration > longest[slot]:
                longest[slot] = duration
            prev_release = release
        best = max(longest)
        # Scan from 'z' downwards so a tie resolves to the largest letter.
        for slot in range(25, -1, -1):
            if longest[slot] == best:
                return chr(slot + ord('a'))
# Copyright (c) 2014-2015, NVIDIA CORPORATION. All rights reserved.
class DigitsError(Exception):
    """Base class for DIGITS-specific exceptions."""
class DeleteError(DigitsError):
    """Raised for errors that occur when deleting a job."""
class LoadImageError(DigitsError):
    """Raised for errors that occur while loading an image."""
|
# 打印输出可变参数的信息
def print_args(*args):
    """Print information about the received varargs: its type, length and contents."""
    print('type(args) = ', type(args))
    print('len(args) = ', len(args))
    print('args = ', args)
# Demo: first with no arguments, then with three.
print_args()
print()
print_args(1, 2, 3)
|
class TestApi:
    """HTTP-level tests for the /shortlinks endpoint (driven by the `post` fixture)."""

    def test_api_with_valid_url_bitly(self, post):
        # given a valid url shortened through the bitly provider
        response = post(
            '/shortlinks',
            data={'url': 'https://www.withnottplum.com', 'provider': 'bitly'}
        )
        # then the original url is echoed back with a bit.ly short link
        assert response.status_code == 200
        payload = response.get_json()['data']
        assert payload['url'] == 'https://www.withnottplum.com'
        assert payload['short_link'].startswith('https://bit.ly')

    def test_api_with_valid_url_tinyurl(self, post):
        # given a valid url shortened through the tinyurl provider
        response = post(
            '/shortlinks',
            data={'url': 'https://www.pekosestate.com', 'provider': 'tinyurl'}
        )
        assert response.status_code == 200
        payload = response.get_json()['data']
        assert payload['url'] == 'https://www.pekosestate.com'
        assert payload['short_link'].startswith('https://tinyurl.com')

    def test_api_with_invalid_url_bitly(self, post):
        # a url containing whitespace is rejected as malformed
        response = post('/shortlinks', data={'url': 'https://www.pekos estate'})
        assert response.status_code == 422
        assert response.get_json()['error']['message'] == 'Malformed URL'

    def test_api_default_provider(self, post):
        # omitting the provider falls back to bitly
        response = post('/shortlinks', data={'url': 'https://www.withplum.com'})
        assert response.status_code == 200
        assert response.get_json()['data']['short_link'].startswith('https://bit.ly')
|
def classify_numbers(numbers):
    """Split *numbers* into (non-negative, negative, even, odd) lists.

    Zero counts as both non-negative and even; order is preserved.
    """
    pos, neg, even, odd = [], [], [], []
    for value in numbers:
        (pos if value >= 0 else neg).append(value)
        (even if value % 2 == 0 else odd).append(value)
    return pos, neg, even, odd
# Read a ", "-separated list of integers and report each category.
pos, neg, even, odd = classify_numbers([int(x) for x in input().split(', ')])
print(f'Positive: {", ".join([str(x) for x in pos])}')
print(f'Negative: {", ".join([str(x) for x in neg])}')
print(f'Even: {", ".join([str(x) for x in even])}')
print(f'Odd: {", ".join([str(x) for x in odd])}')
|
sexo = ''
# Keep prompting until a valid answer. The original condition
# (sexo != 'M' or sexo != 'F') was always true and relied on an inner
# break to ever exit the loop.
while sexo not in ('M', 'F'):
    sexo = str(input('Digite seu sexo (M/F): ')).upper().strip()[0]
    if sexo not in ('M', 'F'):
        print('Digite um dado válido!')
print('Sexo registrado com sucesso!')
#!/usr/bin/python3
# Read k and t; print t % k when t // k is even, otherwise k - t % k.
k, t = list(map(int, input().split()))
a = t // k
result = 0
if (a % 2 == 0):
    result = t % k
else:
    # print("k - t%k + 1", k, t)
    result = k - t % k
print(result)
# 题意:反转数组
# 解法1:递归,终止条件为head为None返回[],其余返回‘递归+head.val’
# 解法2:辅助栈,Python在遍历一个链表的时候,将值依次放入到一个list中,遍历结束后,翻转list输出即可。
# 注意这里的head就是listNode结构,head其实就相当于一个头节点,调用字节点需要通过.next函数。
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
    """Return a singly linked list's values in reverse order, recursively."""

    def reversePrint(self, head):
        # Base case: an empty list reverses to an empty value list.
        if head is None:
            return []
        # Reverse the tail first, then append this node's value.
        return self.reversePrint(head.next) + [head.val]
class Solution_2:
    """Return a singly linked list's values in reverse order via an explicit stack."""

    def reversePrint(self, head):
        values = []
        node = head
        # Walk the list front to back, collecting values...
        while node:
            values.append(node.val)
            node = node.next
        # ...then flip them.
        values.reverse()
        return values
|
class Solution:
    def sortArray(self, nums: List[int]) -> List[int]:
        """Sort *nums* in place with first-element-pivot quicksort and return it."""

        def _qsort(lo, hi):
            if lo >= hi:
                return
            # Partition everything after the pivot (nums[lo]):
            # elements smaller than the pivot are swapped into [lo+1, border).
            border = lo + 1
            for scan in range(lo + 1, hi + 1):
                if nums[scan] < nums[lo]:
                    nums[border], nums[scan] = nums[scan], nums[border]
                    border += 1
            # Drop the pivot just before the border and recurse on both sides.
            nums[lo], nums[border - 1] = nums[border - 1], nums[lo]
            _qsort(lo, border - 1)
            _qsort(border, hi)

        _qsort(0, len(nums) - 1)
        return nums
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 09.04.2018 14:18
:Licence MIT
Part of grammpy
"""
|
""" Encoding Aliases Support
This module is used by the encodings package search function to
map encodings names to module names.
Note that the search function normalizes the encoding names before
doing the lookup, so the mapping will have to map normalized
encoding names to module names.
Contents:
The following aliases dictionary contains mappings of all IANA
character set names for which the Python core library provides
codecs. In addition to these, a few Python specific codec
aliases have also been added.
"""
# Mapping of normalized encoding alias -> codec module name. Keys must already
# be in the normalized form the package search function produces before the
# lookup (see the module docstring above).
aliases = {
    # Please keep this list sorted alphabetically by value !
    # ascii codec
    '646' : 'ascii',
    'ansi_x3.4_1968' : 'ascii',
    'ansi_x3_4_1968' : 'ascii', # some email headers use this non-standard name
    'ansi_x3.4_1986' : 'ascii',
    'cp367' : 'ascii',
    'csascii' : 'ascii',
    'ibm367' : 'ascii',
    'iso646_us' : 'ascii',
    'iso_646.irv_1991' : 'ascii',
    'iso_ir_6' : 'ascii',
    'us' : 'ascii',
    'us_ascii' : 'ascii',
    # base64_codec codec
    'base64' : 'base64_codec',
    'base_64' : 'base64_codec',
    # big5 codec
    'big5_tw' : 'big5',
    'csbig5' : 'big5',
    # big5hkscs codec
    'big5_hkscs' : 'big5hkscs',
    'hkscs' : 'big5hkscs',
    # bz2_codec codec
    'bz2' : 'bz2_codec',
    # cp037 codec
    '037' : 'cp037',
    'csibm037' : 'cp037',
    'ebcdic_cp_ca' : 'cp037',
    'ebcdic_cp_nl' : 'cp037',
    'ebcdic_cp_us' : 'cp037',
    'ebcdic_cp_wt' : 'cp037',
    'ibm037' : 'cp037',
    'ibm039' : 'cp037',
    # cp1026 codec
    '1026' : 'cp1026',
    'csibm1026' : 'cp1026',
    'ibm1026' : 'cp1026',
    # cp1125 codec
    '1125' : 'cp1125',
    'ibm1125' : 'cp1125',
    'cp866u' : 'cp1125',
    'ruscii' : 'cp1125',
    # cp1140 codec
    '1140' : 'cp1140',
    'ibm1140' : 'cp1140',
    # cp1250 codec
    '1250' : 'cp1250',
    'windows_1250' : 'cp1250',
    # cp1251 codec
    '1251' : 'cp1251',
    'windows_1251' : 'cp1251',
    # cp1252 codec
    '1252' : 'cp1252',
    'windows_1252' : 'cp1252',
    # cp1253 codec
    '1253' : 'cp1253',
    'windows_1253' : 'cp1253',
    # cp1254 codec
    '1254' : 'cp1254',
    'windows_1254' : 'cp1254',
    # cp1255 codec
    '1255' : 'cp1255',
    'windows_1255' : 'cp1255',
    # cp1256 codec
    '1256' : 'cp1256',
    'windows_1256' : 'cp1256',
    # cp1257 codec
    '1257' : 'cp1257',
    'windows_1257' : 'cp1257',
    # cp1258 codec
    '1258' : 'cp1258',
    'windows_1258' : 'cp1258',
    # cp273 codec
    '273' : 'cp273',
    'ibm273' : 'cp273',
    'csibm273' : 'cp273',
    # cp424 codec
    '424' : 'cp424',
    'csibm424' : 'cp424',
    'ebcdic_cp_he' : 'cp424',
    'ibm424' : 'cp424',
    # cp437 codec
    '437' : 'cp437',
    'cspc8codepage437' : 'cp437',
    'ibm437' : 'cp437',
    # cp500 codec
    '500' : 'cp500',
    'csibm500' : 'cp500',
    'ebcdic_cp_be' : 'cp500',
    'ebcdic_cp_ch' : 'cp500',
    'ibm500' : 'cp500',
    # cp775 codec
    '775' : 'cp775',
    'cspc775baltic' : 'cp775',
    'ibm775' : 'cp775',
    # cp850 codec
    '850' : 'cp850',
    'cspc850multilingual' : 'cp850',
    'ibm850' : 'cp850',
    # cp852 codec
    '852' : 'cp852',
    'cspcp852' : 'cp852',
    'ibm852' : 'cp852',
    # cp855 codec
    '855' : 'cp855',
    'csibm855' : 'cp855',
    'ibm855' : 'cp855',
    # cp857 codec
    '857' : 'cp857',
    'csibm857' : 'cp857',
    'ibm857' : 'cp857',
    # cp858 codec
    '858' : 'cp858',
    'csibm858' : 'cp858',
    'ibm858' : 'cp858',
    # cp860 codec
    '860' : 'cp860',
    'csibm860' : 'cp860',
    'ibm860' : 'cp860',
    # cp861 codec
    '861' : 'cp861',
    'cp_is' : 'cp861',
    'csibm861' : 'cp861',
    'ibm861' : 'cp861',
    # cp862 codec
    '862' : 'cp862',
    'cspc862latinhebrew' : 'cp862',
    'ibm862' : 'cp862',
    # cp863 codec
    '863' : 'cp863',
    'csibm863' : 'cp863',
    'ibm863' : 'cp863',
    # cp864 codec
    '864' : 'cp864',
    'csibm864' : 'cp864',
    'ibm864' : 'cp864',
    # cp865 codec
    '865' : 'cp865',
    'csibm865' : 'cp865',
    'ibm865' : 'cp865',
    # cp866 codec
    '866' : 'cp866',
    'csibm866' : 'cp866',
    'ibm866' : 'cp866',
    # cp869 codec
    '869' : 'cp869',
    'cp_gr' : 'cp869',
    'csibm869' : 'cp869',
    'ibm869' : 'cp869',
    # cp932 codec
    '932' : 'cp932',
    'ms932' : 'cp932',
    'mskanji' : 'cp932',
    'ms_kanji' : 'cp932',
    # cp949 codec
    '949' : 'cp949',
    'ms949' : 'cp949',
    'uhc' : 'cp949',
    # cp950 codec
    '950' : 'cp950',
    'ms950' : 'cp950',
    # euc_jis_2004 codec
    'jisx0213' : 'euc_jis_2004',
    'eucjis2004' : 'euc_jis_2004',
    'euc_jis2004' : 'euc_jis_2004',
    # euc_jisx0213 codec
    'eucjisx0213' : 'euc_jisx0213',
    # euc_jp codec
    'eucjp' : 'euc_jp',
    'ujis' : 'euc_jp',
    'u_jis' : 'euc_jp',
    # euc_kr codec
    'euckr' : 'euc_kr',
    'korean' : 'euc_kr',
    'ksc5601' : 'euc_kr',
    'ks_c_5601' : 'euc_kr',
    'ks_c_5601_1987' : 'euc_kr',
    'ksx1001' : 'euc_kr',
    'ks_x_1001' : 'euc_kr',
    # gb18030 codec
    'gb18030_2000' : 'gb18030',
    # gb2312 codec
    'chinese' : 'gb2312',
    'csiso58gb231280' : 'gb2312',
    'euc_cn' : 'gb2312',
    'euccn' : 'gb2312',
    'eucgb2312_cn' : 'gb2312',
    'gb2312_1980' : 'gb2312',
    'gb2312_80' : 'gb2312',
    'iso_ir_58' : 'gb2312',
    # gbk codec
    '936' : 'gbk',
    'cp936' : 'gbk',
    'ms936' : 'gbk',
    # hex_codec codec
    'hex' : 'hex_codec',
    # hp_roman8 codec
    'roman8' : 'hp_roman8',
    'r8' : 'hp_roman8',
    'csHPRoman8' : 'hp_roman8',
    # hz codec
    'hzgb' : 'hz',
    'hz_gb' : 'hz',
    'hz_gb_2312' : 'hz',
    # iso2022_jp codec
    'csiso2022jp' : 'iso2022_jp',
    'iso2022jp' : 'iso2022_jp',
    'iso_2022_jp' : 'iso2022_jp',
    # iso2022_jp_1 codec
    'iso2022jp_1' : 'iso2022_jp_1',
    'iso_2022_jp_1' : 'iso2022_jp_1',
    # iso2022_jp_2 codec
    'iso2022jp_2' : 'iso2022_jp_2',
    'iso_2022_jp_2' : 'iso2022_jp_2',
    # iso2022_jp_2004 codec
    'iso_2022_jp_2004' : 'iso2022_jp_2004',
    'iso2022jp_2004' : 'iso2022_jp_2004',
    # iso2022_jp_3 codec
    'iso2022jp_3' : 'iso2022_jp_3',
    'iso_2022_jp_3' : 'iso2022_jp_3',
    # iso2022_jp_ext codec
    'iso2022jp_ext' : 'iso2022_jp_ext',
    'iso_2022_jp_ext' : 'iso2022_jp_ext',
    # iso2022_kr codec
    'csiso2022kr' : 'iso2022_kr',
    'iso2022kr' : 'iso2022_kr',
    'iso_2022_kr' : 'iso2022_kr',
    # iso8859_10 codec
    'csisolatin6' : 'iso8859_10',
    'iso_8859_10' : 'iso8859_10',
    'iso_8859_10_1992' : 'iso8859_10',
    'iso_ir_157' : 'iso8859_10',
    'l6' : 'iso8859_10',
    'latin6' : 'iso8859_10',
    # iso8859_11 codec
    'thai' : 'iso8859_11',
    'iso_8859_11' : 'iso8859_11',
    'iso_8859_11_2001' : 'iso8859_11',
    # iso8859_13 codec
    'iso_8859_13' : 'iso8859_13',
    'l7' : 'iso8859_13',
    'latin7' : 'iso8859_13',
    # iso8859_14 codec
    'iso_8859_14' : 'iso8859_14',
    'iso_8859_14_1998' : 'iso8859_14',
    'iso_celtic' : 'iso8859_14',
    'iso_ir_199' : 'iso8859_14',
    'l8' : 'iso8859_14',
    'latin8' : 'iso8859_14',
    # iso8859_15 codec
    'iso_8859_15' : 'iso8859_15',
    'l9' : 'iso8859_15',
    'latin9' : 'iso8859_15',
    # iso8859_16 codec
    'iso_8859_16' : 'iso8859_16',
    'iso_8859_16_2001' : 'iso8859_16',
    'iso_ir_226' : 'iso8859_16',
    'l10' : 'iso8859_16',
    'latin10' : 'iso8859_16',
    # iso8859_2 codec
    'csisolatin2' : 'iso8859_2',
    'iso_8859_2' : 'iso8859_2',
    'iso_8859_2_1987' : 'iso8859_2',
    'iso_ir_101' : 'iso8859_2',
    'l2' : 'iso8859_2',
    'latin2' : 'iso8859_2',
    # iso8859_3 codec
    'csisolatin3' : 'iso8859_3',
    'iso_8859_3' : 'iso8859_3',
    'iso_8859_3_1988' : 'iso8859_3',
    'iso_ir_109' : 'iso8859_3',
    'l3' : 'iso8859_3',
    'latin3' : 'iso8859_3',
    # iso8859_4 codec
    'csisolatin4' : 'iso8859_4',
    'iso_8859_4' : 'iso8859_4',
    'iso_8859_4_1988' : 'iso8859_4',
    'iso_ir_110' : 'iso8859_4',
    'l4' : 'iso8859_4',
    'latin4' : 'iso8859_4',
    # iso8859_5 codec
    'csisolatincyrillic' : 'iso8859_5',
    'cyrillic' : 'iso8859_5',
    'iso_8859_5' : 'iso8859_5',
    'iso_8859_5_1988' : 'iso8859_5',
    'iso_ir_144' : 'iso8859_5',
    # iso8859_6 codec
    'arabic' : 'iso8859_6',
    'asmo_708' : 'iso8859_6',
    'csisolatinarabic' : 'iso8859_6',
    'ecma_114' : 'iso8859_6',
    'iso_8859_6' : 'iso8859_6',
    'iso_8859_6_1987' : 'iso8859_6',
    'iso_ir_127' : 'iso8859_6',
    # iso8859_7 codec
    'csisolatingreek' : 'iso8859_7',
    'ecma_118' : 'iso8859_7',
    'elot_928' : 'iso8859_7',
    'greek' : 'iso8859_7',
    'greek8' : 'iso8859_7',
    'iso_8859_7' : 'iso8859_7',
    'iso_8859_7_1987' : 'iso8859_7',
    'iso_ir_126' : 'iso8859_7',
    # iso8859_8 codec
    'csisolatinhebrew' : 'iso8859_8',
    'hebrew' : 'iso8859_8',
    'iso_8859_8' : 'iso8859_8',
    'iso_8859_8_1988' : 'iso8859_8',
    'iso_ir_138' : 'iso8859_8',
    # iso8859_9 codec
    'csisolatin5' : 'iso8859_9',
    'iso_8859_9' : 'iso8859_9',
    'iso_8859_9_1989' : 'iso8859_9',
    'iso_ir_148' : 'iso8859_9',
    'l5' : 'iso8859_9',
    'latin5' : 'iso8859_9',
    # johab codec
    'cp1361' : 'johab',
    'ms1361' : 'johab',
    # koi8_r codec
    'cskoi8r' : 'koi8_r',
    # kz1048 codec
    'kz_1048' : 'kz1048',
    'rk1048' : 'kz1048',
    'strk1048_2002' : 'kz1048',
    # latin_1 codec
    #
    # Note that the latin_1 codec is implemented internally in C and a
    # lot faster than the charmap codec iso8859_1 which uses the same
    # encoding. This is why we discourage the use of the iso8859_1
    # codec and alias it to latin_1 instead.
    #
    '8859' : 'latin_1',
    'cp819' : 'latin_1',
    'csisolatin1' : 'latin_1',
    'ibm819' : 'latin_1',
    'iso8859' : 'latin_1',
    'iso8859_1' : 'latin_1',
    'iso_8859_1' : 'latin_1',
    'iso_8859_1_1987' : 'latin_1',
    'iso_ir_100' : 'latin_1',
    'l1' : 'latin_1',
    'latin' : 'latin_1',
    'latin1' : 'latin_1',
    # mac_cyrillic codec
    'maccyrillic' : 'mac_cyrillic',
    # mac_greek codec
    'macgreek' : 'mac_greek',
    # mac_iceland codec
    'maciceland' : 'mac_iceland',
    # mac_latin2 codec
    'maccentraleurope' : 'mac_latin2',
    'maclatin2' : 'mac_latin2',
    # mac_roman codec
    'macintosh' : 'mac_roman',
    'macroman' : 'mac_roman',
    # mac_turkish codec
    'macturkish' : 'mac_turkish',
    # mbcs codec
    'dbcs' : 'mbcs',
    # ptcp154 codec
    'csptcp154' : 'ptcp154',
    'pt154' : 'ptcp154',
    'cp154' : 'ptcp154',
    'cyrillic_asian' : 'ptcp154',
    # quopri_codec codec
    'quopri' : 'quopri_codec',
    'quoted_printable' : 'quopri_codec',
    'quotedprintable' : 'quopri_codec',
    # rot_13 codec
    'rot13' : 'rot_13',
    # shift_jis codec
    'csshiftjis' : 'shift_jis',
    'shiftjis' : 'shift_jis',
    'sjis' : 'shift_jis',
    's_jis' : 'shift_jis',
    # shift_jis_2004 codec
    'shiftjis2004' : 'shift_jis_2004',
    'sjis_2004' : 'shift_jis_2004',
    's_jis_2004' : 'shift_jis_2004',
    # shift_jisx0213 codec
    'shiftjisx0213' : 'shift_jisx0213',
    'sjisx0213' : 'shift_jisx0213',
    's_jisx0213' : 'shift_jisx0213',
    # tactis codec
    'tis260' : 'tactis',
    # tis_620 codec
    'tis620' : 'tis_620',
    'tis_620_0' : 'tis_620',
    'tis_620_2529_0' : 'tis_620',
    'tis_620_2529_1' : 'tis_620',
    'iso_ir_166' : 'tis_620',
    # utf_16 codec
    'u16' : 'utf_16',
    'utf16' : 'utf_16',
    # utf_16_be codec
    'unicodebigunmarked' : 'utf_16_be',
    'utf_16be' : 'utf_16_be',
    # utf_16_le codec
    'unicodelittleunmarked' : 'utf_16_le',
    'utf_16le' : 'utf_16_le',
    # utf_32 codec
    'u32' : 'utf_32',
    'utf32' : 'utf_32',
    # utf_32_be codec
    'utf_32be' : 'utf_32_be',
    # utf_32_le codec
    'utf_32le' : 'utf_32_le',
    # utf_7 codec
    'u7' : 'utf_7',
    'utf7' : 'utf_7',
    'unicode_1_1_utf_7' : 'utf_7',
    # utf_8 codec
    'u8' : 'utf_8',
    'utf' : 'utf_8',
    'utf8' : 'utf_8',
    'utf8_ucs2' : 'utf_8',
    'utf8_ucs4' : 'utf_8',
    # uu_codec codec
    'uu' : 'uu_codec',
    # zlib_codec codec
    'zip' : 'zlib_codec',
    'zlib' : 'zlib_codec',
    # temporary mac CJK aliases, will be replaced by proper codecs in 3.1
    'x_mac_japanese' : 'shift_jis',
    'x_mac_korean' : 'euc_kr',
    'x_mac_simp_chinese' : 'gb2312',
    'x_mac_trad_chinese' : 'big5',
}
|
# NOTE(review): this appears to be an auto-generated IronPython stub for the
# .NET enum System.Windows.Controls.GridResizeDirection; the base names
# (Enum, IComparable, ...) are expected to be provided by the CLR import
# machinery, which is not visible in this file — confirm before editing.
class GridResizeDirection(Enum, IComparable, IFormattable, IConvertible):
    """
    Specifies whether a System.Windows.Controls.GridSplitter control redistributes space between rows or between columns.
    enum GridResizeDirection,values: Auto (0),Columns (1),Rows (2)
    """
    def __eq__(self, *args):
        """ x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
        pass
    def __format__(self, *args):
        """ __format__(formattable: IFormattable,format: str) -> str """
        pass
    def __ge__(self, *args):
        pass
    def __gt__(self, *args):
        pass
    def __init__(self, *args):
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
    def __le__(self, *args):
        pass
    def __lt__(self, *args):
        pass
    def __ne__(self, *args):
        pass
    def __reduce_ex__(self, *args):
        pass
    def __str__(self, *args):
        pass
    # Enum member placeholders; the real values come from the CLR at runtime.
    Auto = None
    Columns = None
    Rows = None
    value__ = None
|
"""
简单工厂模式
实现一个肯德基类
可以生产汉堡
"""
class Foodstuff:
    """Ingredient names shared by the hamburger recipes."""
    vegetables = 'vegetables'  # vegetables
    meat = 'meat'  # meat
    fruits = 'fruits'  # fruit
    eggs = 'eggs'  # egg
class Hamburger:
    """A hamburger assembled from the given ingredient names."""

    def __init__(self, *args):
        # Each ingredient is followed by a space (matching the original join).
        recipe = "".join(ingredient + ' ' for ingredient in args)
        self.name = "this is a {} hamburger".format(recipe)
        print("Make a {} burger".format(recipe))

    def __str__(self):
        return self.name
class VegetablesMeat(Foodstuff):
    """Vegetable-and-fruit hamburger (despite the class name, no meat is used)."""
    def get_hamburger(self):
        return Hamburger(self.vegetables, self.fruits)
class FruitsEggs(Foodstuff):
    """Fruit-and-egg hamburger."""
    def get_hamburger(self):
        return Hamburger(self.fruits, self.eggs)
class KFCFactory:
    """Simple factory mapping an order name to a hamburger maker."""

    @staticmethod
    def buy_hamburger(hamburger_name):
        if hamburger_name == 'fruits eggs':
            return FruitsEggs()
        # 'vegetables fruits' and any unknown order fall back to the default.
        return VegetablesMeat()
if __name__ == '__main__':
    # Order one of each burger; each Hamburger constructor prints what it makes.
    vegetables_fruits = KFCFactory.buy_hamburger('vegetables fruits').get_hamburger()
    fruits_eggs = KFCFactory.buy_hamburger('fruits eggs').get_hamburger()
|
#! /usr/bin/env python3
def ins_sort_indices(A):
    """Return a permutation F so that A[F[0]], A[F[1]], ... is A in sorted order.

    Stable insertion sort over indices; A itself is not modified.
    """
    F = list(range(len(A)))
    for j in range(len(A)):
        key = A[F[j]]
        pos = j
        # Shift indices of larger elements one slot right, then place j.
        while pos > 0 and A[F[pos - 1]] > key:
            F[pos] = F[pos - 1]
            pos -= 1
        F[pos] = j
    return F
def T(F):
    """Return the inverse of permutation F (so that result[F[i]] == i)."""
    inverse = [0] * len(F)
    for position, target in enumerate(F):
        inverse[target] = position
    return inverse
if __name__ == '__main__':
    As = [
        [1, 2, 3],
        [3, 5, 4],
        [2, 3, 1],
    ]
    for a in As:
        print('A=', a)
        indices = ins_sort_indices(a)
        t = T(indices)
        # Join with spaces: the original printed the values with end='' and no
        # separator, which made multi-digit output ambiguous (e.g. "354").
        print('sorted(A)=', ' '.join(str(a[i]) for i in indices))
        print('F=', indices)
        print('T=', t)
        print('')
|
"""
Django accounts management made easy.
"""
VERSION = (1, 0, 2)
__version__ = '.'.join((str(each) for each in VERSION[:4]))
def get_version():
"""
Returns string with digit parts only as version.
"""
return '.'.join((str(each) for each in VERSION[:3]))
|
#addListElements.py
# addListElements.py
list1 = [5, 4, 9, 10, 3, 5]
list2 = [6, 3, 2, 1, 5, 3]
# Print every element; the original hard-coded indices 0-4 and silently
# dropped the sixth element of each list.
print("list1 elements:", *list1)
print("list2 elements:", *list2)
list3 = []
# Element-wise sums; insert(0, ...) keeps the original reversed ordering.
for a, b in zip(list1, list2):
    list3.insert(0, a + b)
print("list3:", list3)
"""TEST that will not run, because the filename does not start with 'test_'"""
def test_what_will_not_run():
    """Deliberately failing assertion; per the module docstring, the file
    name keeps the test framework from collecting it."""
    assert True is False
|
class Account:
    """A bank account with an owner name, a balance and a minimum balance."""

    def __init__(self, name, balance, min_balance):
        self.name = name
        self.balance = balance
        self.min_balance = min_balance

    def deposit(self, amount):
        """Add *amount* to the balance."""
        self.balance += amount

    def withdraw(self, amount):
        """Subtract *amount*, refusing if it would drop below min_balance."""
        if self.balance - amount < self.min_balance:
            print("Sorry, not enough funds!")
        else:
            self.balance -= amount

    def statements(self):
        """Print the current balance."""
        print('Account Balance:Rs{}'.format(self.balance))
class Current(Account):
    """Current account: allows an overdraft down to Rs-1000."""

    def __init__(self, name, balance):
        super().__init__(name, balance, min_balance=-1000)

    def __str__(self):
        return f"{self.name}'s Current Account : Balance is Rs{self.balance}"
class Savings(Account):
    """Savings account: balance may not go below zero."""

    def __init__(self, name, balance):
        super().__init__(name, balance, min_balance=0)

    def __str__(self):
        return f"{self.name}'s Savings Account : Balance is Rs{self.balance}"
|
# Beispielprogramm für das Buch "Python Challenge"
#
# Copyright 2020 by Michael Inden
def quick_sort(values):
    """Return a new ascending-sorted list (out-of-place recursive quicksort)."""
    # Base case: zero or one element is already sorted.
    if len(values) <= 1:
        return values
    # First element is the pivot; split the rest around it.
    pivot, *rest = values
    smaller = [v for v in rest if v <= pivot]
    larger = [v for v in rest if v > pivot]
    # Sort each partition and stitch them back around the pivot.
    return quick_sort(smaller) + [pivot] + quick_sort(larger)
def main():
    """Demonstrate quick_sort on two sample lists."""
    for sample in ([4, 2, 7, 9, 1, 6, 5, 8, 3], [1, 2, 6, 9, 4, 7, 8, 3]):
        print(quick_sort(sample))


if __name__ == "__main__":
    main()
|
# 定义了失败类型的异常信息
""" failure type exceptions
these exceptions will mark test as failure
"""
class MyBaseFailure(Exception):
    """Root of the failure-type exceptions; these mark a test as *failure*."""
    pass
class ParseTestsFailure(MyBaseFailure):
    """Failure while parsing the test definitions."""
    pass
class ValidationFailure(MyBaseFailure):
    """Failure while validating a result."""
    pass
class ExtractFailure(MyBaseFailure):
    """Failure while extracting data."""
    pass
class SetupHooksFailure(MyBaseFailure):
    """Failure raised from a setup hook."""
    pass
class TeardownHooksFailure(MyBaseFailure):
    """Failure raised from a teardown hook."""
    pass
""" error type exceptions
these exceptions will mark test as error
"""
class MyBaseError(Exception):
    """Root of the error-type exceptions; these mark a test as *error*."""
    pass
class FileFormatError(MyBaseError):
    """An input file has an unsupported or malformed format."""
    pass
class TestCaseFormatError(FileFormatError):
    """A test case definition is malformed."""
    pass
class TestSuiteFormatError(FileFormatError):
    """A test suite definition is malformed."""
    pass
class ParamsError(MyBaseError):
    """Invalid parameters were supplied."""
    pass
class NotFoundError(MyBaseError):
    """Base class for the *not found* error family."""
    pass
class FileNotFound(FileNotFoundError, NotFoundError):
    """A referenced file was not found (also a builtin FileNotFoundError)."""
    pass
class FunctionNotFound(NotFoundError):
    """A referenced function was not found."""
    pass
class VariableNotFound(NotFoundError):
    """A referenced variable was not found."""
    pass
class EnvNotFound(NotFoundError):
    """A referenced environment variable was not found."""
    pass
class CSVNotFound(NotFoundError):
    """A referenced CSV file was not found."""
    pass
class ApiNotFound(NotFoundError):
    """A referenced API definition was not found."""
    pass
class TestcaseNotFound(NotFoundError):
    """A referenced test case was not found."""
    pass
class SummaryEmpty(MyBaseError):
    """Test result summary data is empty."""
|
#
# PySNMP MIB module JUNIPER-LSYSSP-SCHEDULER-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/JUNIPER-LSYSSP-SCHEDULER-MIB
# Produced by pysmi-0.3.4 at Wed May 1 14:00:09 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
# NOTE(review): pysmi-generated compiled MIB. `mibBuilder` is injected into
# the module namespace by the pysnmp MIB loader before this file executes —
# it is not imported here; this is normal for compiled MIB modules. Confirm
# against the pysnmp loader before restructuring.
ObjectIdentifier, Integer, OctetString = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "Integer", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, ConstraintsUnion, SingleValueConstraint, ValueSizeConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "ConstraintsUnion", "SingleValueConstraint", "ValueSizeConstraint", "ConstraintsIntersection")
jnxLsysSpScheduler, = mibBuilder.importSymbols("JUNIPER-LSYS-SECURITYPROFILE-MIB", "jnxLsysSpScheduler")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
Gauge32, TimeTicks, ModuleIdentity, Unsigned32, Integer32, ObjectIdentity, NotificationType, iso, Counter32, IpAddress, MibScalar, MibTable, MibTableRow, MibTableColumn, Bits, MibIdentifier, Counter64 = mibBuilder.importSymbols("SNMPv2-SMI", "Gauge32", "TimeTicks", "ModuleIdentity", "Unsigned32", "Integer32", "ObjectIdentity", "NotificationType", "iso", "Counter32", "IpAddress", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Bits", "MibIdentifier", "Counter64")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
# Module identity and metadata.
jnxLsysSpSchedulerMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 17, 2, 1))
if mibBuilder.loadTexts: jnxLsysSpSchedulerMIB.setLastUpdated('201005191644Z')
if mibBuilder.loadTexts: jnxLsysSpSchedulerMIB.setOrganization('Juniper Networks, Inc.')
if mibBuilder.loadTexts: jnxLsysSpSchedulerMIB.setContactInfo('Juniper Technical Assistance Center Juniper Networks, Inc. 1194 N. Mathilda Avenue Sunnyvale, CA 94089 E-mail: support@juniper.net HTTP://www.juniper.net')
if mibBuilder.loadTexts: jnxLsysSpSchedulerMIB.setDescription('This module defines the scheduler-specific MIB for Juniper Enterprise Logical-System (LSYS) security profiles. Juniper documentation is recommended as the reference. The LSYS security profile provides various static and dynamic resource management by observing resource quota limits. Security scheduler resource is the focus in this MIB. ')
jnxLsysSpSchedulerObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 17, 2, 1, 1))
jnxLsysSpSchedulerSummary = MibIdentifier((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 17, 2, 1, 2))
# Per-LSYS scheduler resource table (indexed by LSYS name).
jnxLsysSpSchedulerTable = MibTable((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 17, 2, 1, 1, 1), )
if mibBuilder.loadTexts: jnxLsysSpSchedulerTable.setStatus('current')
if mibBuilder.loadTexts: jnxLsysSpSchedulerTable.setDescription('LSYSPROFILE scheduler objects for scheduler resource consumption per LSYS.')
jnxLsysSpSchedulerEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 17, 2, 1, 1, 1, 1), ).setIndexNames((1, "JUNIPER-LSYSSP-SCHEDULER-MIB", "jnxLsysSpSchedulerLsysName"))
if mibBuilder.loadTexts: jnxLsysSpSchedulerEntry.setStatus('current')
if mibBuilder.loadTexts: jnxLsysSpSchedulerEntry.setDescription('An entry in scheduler resource table.')
jnxLsysSpSchedulerLsysName = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 17, 2, 1, 1, 1, 1, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 64)))
if mibBuilder.loadTexts: jnxLsysSpSchedulerLsysName.setStatus('current')
if mibBuilder.loadTexts: jnxLsysSpSchedulerLsysName.setDescription('The name of the logical system for which scheduler resource information is retrieved. ')
jnxLsysSpSchedulerProfileName = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 17, 2, 1, 1, 1, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 32))).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxLsysSpSchedulerProfileName.setStatus('current')
if mibBuilder.loadTexts: jnxLsysSpSchedulerProfileName.setDescription('The security profile name string for the LSYS.')
jnxLsysSpSchedulerUsage = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 17, 2, 1, 1, 1, 1, 3), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxLsysSpSchedulerUsage.setStatus('current')
if mibBuilder.loadTexts: jnxLsysSpSchedulerUsage.setDescription('The current resource usage count for the LSYS.')
jnxLsysSpSchedulerReserved = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 17, 2, 1, 1, 1, 1, 4), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxLsysSpSchedulerReserved.setStatus('current')
if mibBuilder.loadTexts: jnxLsysSpSchedulerReserved.setDescription('The reserved resource count for the LSYS.')
jnxLsysSpSchedulerMaximum = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 17, 2, 1, 1, 1, 1, 5), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxLsysSpSchedulerMaximum.setStatus('current')
if mibBuilder.loadTexts: jnxLsysSpSchedulerMaximum.setDescription('The maximum allowed resource usage count for the LSYS.')
# Device-wide summary scalars.
jnxLsysSpSchedulerUsedAmount = MibScalar((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 17, 2, 1, 2, 1), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxLsysSpSchedulerUsedAmount.setStatus('current')
if mibBuilder.loadTexts: jnxLsysSpSchedulerUsedAmount.setDescription('The scheduler resource consumption over all LSYS.')
jnxLsysSpSchedulerMaxQuota = MibScalar((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 17, 2, 1, 2, 2), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxLsysSpSchedulerMaxQuota.setStatus('current')
if mibBuilder.loadTexts: jnxLsysSpSchedulerMaxQuota.setDescription('The scheduler resource maximum quota for the whole device for all LSYS.')
jnxLsysSpSchedulerAvailableAmount = MibScalar((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 17, 2, 1, 2, 3), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxLsysSpSchedulerAvailableAmount.setStatus('current')
if mibBuilder.loadTexts: jnxLsysSpSchedulerAvailableAmount.setDescription('The scheduler resource available in the whole device.')
jnxLsysSpSchedulerHeaviestUsage = MibScalar((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 17, 2, 1, 2, 4), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxLsysSpSchedulerHeaviestUsage.setStatus('current')
if mibBuilder.loadTexts: jnxLsysSpSchedulerHeaviestUsage.setDescription('The most amount of scheduler resource consumed of a LSYS.')
jnxLsysSpSchedulerHeaviestUser = MibScalar((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 17, 2, 1, 2, 5), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 64))).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxLsysSpSchedulerHeaviestUser.setStatus('current')
if mibBuilder.loadTexts: jnxLsysSpSchedulerHeaviestUser.setDescription('The LSYS name that consume the most scheduler resource.')
jnxLsysSpSchedulerLightestUsage = MibScalar((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 17, 2, 1, 2, 6), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxLsysSpSchedulerLightestUsage.setStatus('current')
if mibBuilder.loadTexts: jnxLsysSpSchedulerLightestUsage.setDescription('The least amount of scheduler resource consumed of a LSYS.')
jnxLsysSpSchedulerLightestUser = MibScalar((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 17, 2, 1, 2, 7), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 64))).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxLsysSpSchedulerLightestUser.setStatus('current')
if mibBuilder.loadTexts: jnxLsysSpSchedulerLightestUser.setDescription('The LSYS name that consume the least scheduler resource.')
mibBuilder.exportSymbols("JUNIPER-LSYSSP-SCHEDULER-MIB", jnxLsysSpSchedulerTable=jnxLsysSpSchedulerTable, jnxLsysSpSchedulerUsedAmount=jnxLsysSpSchedulerUsedAmount, jnxLsysSpSchedulerMIB=jnxLsysSpSchedulerMIB, jnxLsysSpSchedulerProfileName=jnxLsysSpSchedulerProfileName, jnxLsysSpSchedulerMaximum=jnxLsysSpSchedulerMaximum, jnxLsysSpSchedulerLsysName=jnxLsysSpSchedulerLsysName, jnxLsysSpSchedulerReserved=jnxLsysSpSchedulerReserved, PYSNMP_MODULE_ID=jnxLsysSpSchedulerMIB, jnxLsysSpSchedulerLightestUser=jnxLsysSpSchedulerLightestUser, jnxLsysSpSchedulerHeaviestUser=jnxLsysSpSchedulerHeaviestUser, jnxLsysSpSchedulerObjects=jnxLsysSpSchedulerObjects, jnxLsysSpSchedulerEntry=jnxLsysSpSchedulerEntry, jnxLsysSpSchedulerUsage=jnxLsysSpSchedulerUsage, jnxLsysSpSchedulerHeaviestUsage=jnxLsysSpSchedulerHeaviestUsage, jnxLsysSpSchedulerAvailableAmount=jnxLsysSpSchedulerAvailableAmount, jnxLsysSpSchedulerLightestUsage=jnxLsysSpSchedulerLightestUsage, jnxLsysSpSchedulerSummary=jnxLsysSpSchedulerSummary, jnxLsysSpSchedulerMaxQuota=jnxLsysSpSchedulerMaxQuota)
|
class solution:
    """Plain record holding the results and configuration of one optimizer run.

    All fields start at an empty/zero default and are filled in by the
    optimizer driver.
    """

    def __init__(self):
        # Per-run list fields get fresh, independent list objects.
        for list_field in ("bestIndividual", "solutions", "population_fitness"):
            setattr(self, list_field, [])
        # Run metadata (optimizer name, objective-function name).
        for text_field in ("optimizer", "objfname"):
            setattr(self, text_field, "")
        # Numeric settings/results: best value, bounds, dimensionality,
        # population size, iteration budget, objective handle.
        for numeric_field in ("best", "lb", "ub", "dim", "popnum", "maxiers", "objf"):
            setattr(self, numeric_field, 0)
|
name = "Waldo"
text = "Can you find where Wally is?"
if text.find(name):
print("Found Waldo")
else:
print("Cannot find Waldo")
|
## Directed-cycle probe: `visited` records every vertex ever explored,
## while `dfsvisited` records the vertices on the current DFS stack.
def checkcycle(node, adj: list, visited: list, dfsvisited: list):
    """Depth-first search from *node*; return True iff a back edge
    (i.e. a directed cycle) is reachable from it."""
    visited[node] = 1
    dfsvisited[node] = 1
    for neighbour in adj[node]:
        if visited[neighbour] == 0:
            # Unexplored vertex: a cycle found below us is a cycle here too.
            if checkcycle(neighbour, adj, visited, dfsvisited):
                return True
        elif dfsvisited[neighbour] == 1:
            # Back edge to a vertex still on the DFS stack -> cycle.
            return True
    # Finished expanding this vertex: pop it off the conceptual DFS stack.
    dfsvisited[node] = 0
    return False
def iscyclic(adj: list, N):
    """Return True iff the directed graph on vertices 0..N-1 given as
    adjacency lists `adj` contains a cycle.

    Runs one DFS from every still-unvisited vertex so that disconnected
    components are also covered.
    """
    visited = [0] * N
    dfsvisited = [0] * N
    return any(
        visited[start] == 0 and checkcycle(start, adj, visited, dfsvisited)
        for start in range(N)
    )
## Driver code..!!!
if __name__ == "__main__":
    # Reads the vertex count, then one line per vertex which is parsed as
    # that vertex's adjacency list (space-separated neighbour indices).
    # NOTE(review): the sample input in the string below looks like an
    # EDGE list ("u v" pairs), which this loop would misread as adjacency
    # rows — confirm the intended input format.
    V = int(input())
    adj = []
    for i in range(V):
        u = list(map(int,input().split()))
        adj.append(u)
    ## function call...!!!
    if(iscyclic(adj, V)):
        print("YES, cycle is present in graph")
    else:
        print("No cycle detect")
"""
'''
sample input...
7
0 1
1 2
2 3
4 3
4 5
4 6
1 6
'''
""" |
# -*- coding: utf-8 -*-
"""
This file is part of the open source project py-dynasynthetic
(see https://github.com/micha-k/py-dynasynthetic).
Author: Michael Kessel
Contact: I have an email account 'dev' on a host called 'michaelkessel' listed
in the toplevel domain 'de'.
"""
__version__ = '0.5'
|
def insertion_sort(l):
    """Sort list *l* in place, ascending, using insertion sort.

    Stable, O(n^2) worst case; returns None (mutates the argument).
    """
    for right in range(1, len(l)):
        key = l[right]
        j = right
        # Shift the larger elements of the sorted prefix one slot to the
        # right, then drop `key` into the gap that opens up.
        while j > 0 and l[j - 1] > key:
            l[j] = l[j - 1]
            j -= 1
        l[j] = key
# Demo: sort a sample list in place and show the result.
list1 = [9, 2, 6, 5, 1, 7]
insertion_sort(list1)
print(list1)  # expected: [1, 2, 5, 6, 7, 9]
|
"""
B. Fair Division
time limit per test2 seconds
memory limit per test256 megabytes
inputstandard input
outputstandard output
Alice and Bob received n candies from their parents.
Each candy weighs either 1 gram or 2 grams. Now they want to divide all candies
among themselves fairly so that the total weight of Alice's candies is equal
to the total weight of Bob's candies.
Check if they can do that.
Note that candies are not allowed to be cut in half.
Input
The first line contains one integer t (1≤t≤104) — the number of test cases. Then t test cases follow.
The first line of each test case contains an integer n (1≤n≤100) — the number of
candies that Alice and Bob received.
The next line contains n integers a1,a2,…,an — the weights of the candies.
The weight of each candy is either 1 or 2.
It is guaranteed that the sum of n over all test cases does not exceed 105.
Output
For each test case, output on a separate line:
"YES", if all candies can be divided into two sets with the same weight;
"NO" otherwise.
You can output "YES" and "NO" in any case (for example, the strings yEs, yes,
Yes and YES will be recognized as positive).
"""
def fair_divisible(weights):
    """Decide whether candies weighing 1 or 2 grams can be split into two
    halves of equal total weight.

    Prints "YES" or "NO" (as before) and additionally returns the verdict
    string, which is backward-compatible and makes the function testable.

    Correctness: the split exists iff the total weight is even AND, when
    the number of 2-gram candies is odd, there are at least two 1-gram
    candies to balance the leftover 2-gram candy (when the total is even,
    the count of 1-gram candies is automatically even). This replaces the
    previous sort-and-greedy loop, which popped two items at a time
    without a length check and needed an O(n log n) sort; the counting
    rule is O(n) and provably correct.
    """
    ones = weights.count(1)
    twos = weights.count(2)
    total = ones + 2 * twos
    if total % 2 == 1:
        answer = "NO"          # odd total can never be halved
    elif twos % 2 == 0 or ones >= 2:
        answer = "YES"
    else:
        answer = "NO"          # one leftover 2-gram candy, no 1s to offset it
    print(answer)
    return answer
if __name__ == '__main__':
    # Read all test cases first, then evaluate them in a second pass.
    num_tests = int(input())
    all_weights = []
    for _ in range(num_tests):
        # Candy count for this case; unused because split() below sizes
        # the list. (Variable name is a typo for "candies".)
        candles = int(input())
        weights_ = input().split(" ")
        weights_ = [int(w) for w in weights_]
        all_weights.append(weights_)
    for w in all_weights:
        fair_divisible(w)
"""fair_divisible([1, 1])
fair_divisible([1, 2])
fair_divisible([1, 2, 1, 2])
fair_divisible([2, 2, 2])
fair_divisible([2, 1, 2])
fair_divisible([2, 1, 1, 1, 1])"""
|
# encoding: utf-8
# module GH_IO.Types calls itself Types
# from GH_IO,Version=1.0.0.0,Culture=neutral,PublicKeyToken=6a29997d2e6b4f97
# by generator 1.145
""" NamespaceTracker represent a CLS namespace. """
# no imports
# no functions
# classes
class GH_BoundingBox(object):
"""
Represents a 3D bounding box,denoted by two points.
GH_BoundingBox(nMin: GH_Point3D,nMax: GH_Point3D)
GH_BoundingBox(Minx: float,Miny: float,Minz: float,Maxx: float,Maxy: float,Maxz: float)
"""
def ToString(self):
"""
ToString(self: GH_BoundingBox) -> str
Converts this structure to a human-readable string.
Returns: A string representation of the box structure.
"""
pass
@staticmethod
def __new__(self,*__args):
"""
__new__[GH_BoundingBox]() -> GH_BoundingBox
__new__(cls: type,nMin: GH_Point3D,nMax: GH_Point3D)
__new__(cls: type,Minx: float,Miny: float,Minz: float,Maxx: float,Maxy: float,Maxz: float)
"""
pass
Max=None
Min=None
class GH_Interval1D(object):
"""
Represents two double precision floating point values.
GH_Interval1D(na: float,nb: float)
"""
def ToString(self):
"""
ToString(self: GH_Interval1D) -> str
Converts this structure to a human-readable string.
Returns: A string representation of the Interval structure.
"""
pass
@staticmethod
def __new__(self,na,nb):
"""
__new__[GH_Interval1D]() -> GH_Interval1D
__new__(cls: type,na: float,nb: float)
"""
pass
a=None
b=None
class GH_Interval2D(object):
"""
Represents two double precision domains.
GH_Interval2D(nu: GH_Interval1D,nv: GH_Interval1D)
GH_Interval2D(nu0: float,nu1: float,nv0: float,nv1: float)
"""
def ToString(self):
"""
ToString(self: GH_Interval2D) -> str
Converts this structure to a human-readable string.
Returns: A string representation of the two-dimensional Interval structure.
"""
pass
@staticmethod
def __new__(self,*__args):
"""
__new__[GH_Interval2D]() -> GH_Interval2D
__new__(cls: type,nu: GH_Interval1D,nv: GH_Interval1D)
__new__(cls: type,nu0: float,nu1: float,nv0: float,nv1: float)
"""
pass
u=None
v=None
class GH_Item(object,GH_IBinarySupport,GH_IXmlSupport):
"""
Represents a single data item in a chunk.
GH_Item(item_name: str,item_data: bool)
GH_Item(item_name: str,item_index: int,item_data: bool)
GH_Item(item_name: str,item_data: Byte)
GH_Item(item_name: str,item_index: int,item_data: Byte)
GH_Item(item_name: str,item_data: int)
GH_Item(item_name: str,item_index: int,item_data: int)
GH_Item(item_name: str,item_data: Int64)
GH_Item(item_name: str,item_index: int,item_data: Int64)
GH_Item(item_name: str,item_data: Single)
GH_Item(item_name: str,item_index: int,item_data: Single)
GH_Item(item_name: str,item_data: float)
GH_Item(item_name: str,item_index: int,item_data: float)
GH_Item(item_name: str,item_data: Decimal)
GH_Item(item_name: str,item_index: int,item_data: Decimal)
GH_Item(item_name: str,item_data: DateTime)
GH_Item(item_name: str,item_index: int,item_data: DateTime)
GH_Item(item_name: str,item_data: Guid)
GH_Item(item_name: str,item_index: int,item_data: Guid)
GH_Item(item_name: str,item_data: str)
GH_Item(item_name: str,item_index: int,item_data: str)
GH_Item(item_name: str,item_data: Array[Byte])
GH_Item(item_name: str,item_index: int,item_data: Array[Byte])
GH_Item(item_name: str,item_data: Array[float])
GH_Item(item_name: str,item_index: int,item_data: Array[float])
GH_Item(item_name: str,item_data: Point)
GH_Item(item_name: str,item_index: int,item_data: Point)
GH_Item(item_name: str,item_data: PointF)
GH_Item(item_name: str,item_index: int,item_data: PointF)
GH_Item(item_name: str,item_data: Size)
GH_Item(item_name: str,item_index: int,item_data: Size)
GH_Item(item_name: str,item_data: SizeF)
GH_Item(item_name: str,item_index: int,item_data: SizeF)
GH_Item(item_name: str,item_data: Rectangle)
GH_Item(item_name: str,item_index: int,item_data: Rectangle)
GH_Item(item_name: str,item_data: RectangleF)
GH_Item(item_name: str,item_index: int,item_data: RectangleF)
GH_Item(item_name: str,item_data: Color)
GH_Item(item_name: str,item_index: int,item_data: Color)
GH_Item(item_name: str,item_data: Bitmap)
GH_Item(item_name: str,item_index: int,item_data: Bitmap)
GH_Item(item_name: str,item_data: GH_Point2D)
GH_Item(item_name: str,item_index: int,item_data: GH_Point2D)
GH_Item(item_name: str,item_data: GH_Point3D)
GH_Item(item_name: str,item_index: int,item_data: GH_Point3D)
GH_Item(item_name: str,item_data: GH_Point4D)
GH_Item(item_name: str,item_index: int,item_data: GH_Point4D)
GH_Item(item_name: str,item_data: GH_Interval1D)
GH_Item(item_name: str,item_index: int,item_data: GH_Interval1D)
GH_Item(item_name: str,item_data: GH_Interval2D)
GH_Item(item_name: str,item_index: int,item_data: GH_Interval2D)
GH_Item(item_name: str,item_data: GH_Line)
GH_Item(item_name: str,item_index: int,item_data: GH_Line)
GH_Item(item_name: str,item_data: GH_BoundingBox)
GH_Item(item_name: str,item_index: int,item_data: GH_BoundingBox)
GH_Item(item_name: str,item_data: GH_Plane)
GH_Item(item_name: str,item_index: int,item_data: GH_Plane)
GH_Item(item_name: str,item_data: GH_Version)
GH_Item(item_name: str,item_index: int,item_data: GH_Version)
"""
@staticmethod
def CreateFrom(*__args):
"""
CreateFrom(node: XmlNode) -> GH_Item
Creates a new instance of GH_Item and sets the fields from an Xml node object.
node: Xml node object that defines the field data.
Returns: The constructed and read item.
CreateFrom(reader: BinaryReader) -> GH_Item
Creates a new instance of GH_Item and sets the fields from a reader object.
reader: Reader object that defines the field data.
Returns: The constructed and read item.
"""
pass
def Read(self,*__args):
"""
Read(self: GH_Item,node: XmlNode)
Deserialize this item from an Xml node.
node: Xml node to serialize from.
Read(self: GH_Item,reader: BinaryReader)
Deserialize this item from a binary stream.
reader: Reader to deserialize with.
"""
pass
def ToString(self):
"""
ToString(self: GH_Item) -> str
Converts the struct into a human readable format.
"""
pass
def Write(self,writer):
"""
Write(self: GH_Item,writer: XmlWriter)
Serialize this item into an Xml stream.
writer: Writer to serialize with.
Write(self: GH_Item,writer: BinaryWriter)
Serialize this item into a binary stream.
writer: Writer to serialize with.
"""
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,item_name,*__args):
"""
__new__(cls: type,item_name: str,item_data: bool)
__new__(cls: type,item_name: str,item_index: int,item_data: bool)
__new__(cls: type,item_name: str,item_data: Byte)
__new__(cls: type,item_name: str,item_index: int,item_data: Byte)
__new__(cls: type,item_name: str,item_data: int)
__new__(cls: type,item_name: str,item_index: int,item_data: int)
__new__(cls: type,item_name: str,item_data: Int64)
__new__(cls: type,item_name: str,item_index: int,item_data: Int64)
__new__(cls: type,item_name: str,item_data: Single)
__new__(cls: type,item_name: str,item_index: int,item_data: Single)
__new__(cls: type,item_name: str,item_data: float)
__new__(cls: type,item_name: str,item_index: int,item_data: float)
__new__(cls: type,item_name: str,item_data: Decimal)
__new__(cls: type,item_name: str,item_index: int,item_data: Decimal)
__new__(cls: type,item_name: str,item_data: DateTime)
__new__(cls: type,item_name: str,item_index: int,item_data: DateTime)
__new__(cls: type,item_name: str,item_data: Guid)
__new__(cls: type,item_name: str,item_index: int,item_data: Guid)
__new__(cls: type,item_name: str,item_data: str)
__new__(cls: type,item_name: str,item_index: int,item_data: str)
__new__(cls: type,item_name: str,item_data: Array[Byte])
__new__(cls: type,item_name: str,item_index: int,item_data: Array[Byte])
__new__(cls: type,item_name: str,item_data: Array[float])
__new__(cls: type,item_name: str,item_index: int,item_data: Array[float])
__new__(cls: type,item_name: str,item_data: Point)
__new__(cls: type,item_name: str,item_index: int,item_data: Point)
__new__(cls: type,item_name: str,item_data: PointF)
__new__(cls: type,item_name: str,item_index: int,item_data: PointF)
__new__(cls: type,item_name: str,item_data: Size)
__new__(cls: type,item_name: str,item_index: int,item_data: Size)
__new__(cls: type,item_name: str,item_data: SizeF)
__new__(cls: type,item_name: str,item_index: int,item_data: SizeF)
__new__(cls: type,item_name: str,item_data: Rectangle)
__new__(cls: type,item_name: str,item_index: int,item_data: Rectangle)
__new__(cls: type,item_name: str,item_data: RectangleF)
__new__(cls: type,item_name: str,item_index: int,item_data: RectangleF)
__new__(cls: type,item_name: str,item_data: Color)
__new__(cls: type,item_name: str,item_index: int,item_data: Color)
__new__(cls: type,item_name: str,item_data: Bitmap)
__new__(cls: type,item_name: str,item_index: int,item_data: Bitmap)
__new__(cls: type,item_name: str,item_data: GH_Point2D)
__new__(cls: type,item_name: str,item_index: int,item_data: GH_Point2D)
__new__(cls: type,item_name: str,item_data: GH_Point3D)
__new__(cls: type,item_name: str,item_index: int,item_data: GH_Point3D)
__new__(cls: type,item_name: str,item_data: GH_Point4D)
__new__(cls: type,item_name: str,item_index: int,item_data: GH_Point4D)
__new__(cls: type,item_name: str,item_data: GH_Interval1D)
__new__(cls: type,item_name: str,item_index: int,item_data: GH_Interval1D)
__new__(cls: type,item_name: str,item_data: GH_Interval2D)
__new__(cls: type,item_name: str,item_index: int,item_data: GH_Interval2D)
__new__(cls: type,item_name: str,item_data: GH_Line)
__new__(cls: type,item_name: str,item_index: int,item_data: GH_Line)
__new__(cls: type,item_name: str,item_data: GH_BoundingBox)
__new__(cls: type,item_name: str,item_index: int,item_data: GH_BoundingBox)
__new__(cls: type,item_name: str,item_data: GH_Plane)
__new__(cls: type,item_name: str,item_index: int,item_data: GH_Plane)
__new__(cls: type,item_name: str,item_data: GH_Version)
__new__(cls: type,item_name: str,item_index: int,item_data: GH_Version)
"""
pass
def __repr__(self,*args):
""" __repr__(self: object) -> str """
pass
def __str__(self,*args):
pass
DebuggerDisplay=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Readonly property used during Debugging.
Get: DebuggerDisplay(self: GH_Item) -> str
"""
HasIndex=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the index existence implication. The item is considered to have an index qualifier
if the index value is larger than or equal to zero.
Get: HasIndex(self: GH_Item) -> bool
"""
HasName=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the name validity of this item.
The item is considered to have an invalid name if string.IsNullOrEmpty(name)
Get: HasName(self: GH_Item) -> bool
"""
HasType=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the type set validity of this item.
The item is considered to have a type if type != GH_Types.unset
Get: HasType(self: GH_Item) -> bool
"""
Index=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the index of an item.
Typically,indices are set at construction and do not change.
If you change indices after construction,you could corrupt an archive.
Get: Index(self: GH_Item) -> int
Set: Index(self: GH_Item)=value
"""
InternalData=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Retrieves the internal data of this item.
No type casting is performed.
Get: InternalData(self: GH_Item) -> object
"""
Name=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the name of this item.
Typically,names are set at construction and do not change.
If you change names after construction,you could corrupt an archive.
Get: Name(self: GH_Item) -> str
Set: Name(self: GH_Item)=value
"""
Type=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the type of this item.
Type flags are set during construction and cannot be altered.
Get: Type(self: GH_Item) -> GH_Types
"""
_bool=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Returns the internal data of this item cast to a Boolean.
If the data is not stored as a Boolean,a conversion exception might be thrown.
Get: _bool(self: GH_Item) -> bool
"""
_boundingbox=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Returns the internal data of this item cast to a BoundingBox.
If the data is not stored as a BoundingBox,a conversion exception might be thrown.
Get: _boundingbox(self: GH_Item) -> GH_BoundingBox
"""
_byte=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Returns the internal data of this item cast to a Byte.
If the data is not stored as a Byte,a conversion exception might be thrown.
Get: _byte(self: GH_Item) -> Byte
"""
_bytearray=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Returns the internal data of this item cast to a Byte array.
If the data is not stored as a Byte array,a conversion exception might be thrown.
Get: _bytearray(self: GH_Item) -> Array[Byte]
"""
_date=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Returns the internal data of this item cast to a DateTime.
If the data is not stored as a DateTime,a conversion exception might be thrown.
Get: _date(self: GH_Item) -> DateTime
"""
_decimal=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Returns the internal data of this item cast to a Decimal.
If the data is not stored as a Decimal,a conversion exception might be thrown.
Get: _decimal(self: GH_Item) -> Decimal
"""
_double=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Returns the internal data of this item cast to a Double.
If the data is not stored as a Double,a conversion exception might be thrown.
Get: _double(self: GH_Item) -> float
"""
_doublearray=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Returns the internal data of this item cast to a Byte array.
If the data is not stored as a Byte array,a conversion exception might be thrown.
Get: _doublearray(self: GH_Item) -> Array[float]
"""
_drawing_bitmap=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Returns the internal data of this item cast to a Bitmap.
If the data is not stored as a Bitmap,a conversion exception might be thrown.
Get: _drawing_bitmap(self: GH_Item) -> Bitmap
"""
_drawing_color=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Returns the internal data of this item cast to a Color.
If the data is not stored as a Color,a conversion exception might be thrown.
Get: _drawing_color(self: GH_Item) -> Color
"""
_drawing_point=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Returns the internal data of this item cast to a Point.
If the data is not stored as a Point,a conversion exception might be thrown.
Get: _drawing_point(self: GH_Item) -> Point
"""
_drawing_pointf=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Returns the internal data of this item cast to a PointF.
If the data is not stored as a PointF,a conversion exception might be thrown.
Get: _drawing_pointf(self: GH_Item) -> PointF
"""
_drawing_rectangle=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Returns the internal data of this item cast to a Rectangle.
If the data is not stored as a Rectangle,a conversion exception might be thrown.
Get: _drawing_rectangle(self: GH_Item) -> Rectangle
"""
_drawing_rectanglef=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Returns the internal data of this item cast to a RectangleF.
If the data is not stored as a RectangleF,a conversion exception might be thrown.
Get: _drawing_rectanglef(self: GH_Item) -> RectangleF
"""
_drawing_size=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Returns the internal data of this item cast to a Size.
If the data is not stored as a Size,a conversion exception might be thrown.
Get: _drawing_size(self: GH_Item) -> Size
"""
_drawing_sizef=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Returns the internal data of this item cast to a SizeF.
If the data is not stored as a SizeF,a conversion exception might be thrown.
Get: _drawing_sizef(self: GH_Item) -> SizeF
"""
_guid=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Returns the internal data of this item cast to a Guid.
If the data is not stored as a Guid,a conversion exception might be thrown.
Get: _guid(self: GH_Item) -> Guid
"""
_int32=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Returns the internal data of this item cast to an Int32.
If the data is not stored as an Int32,a conversion exception might be thrown.
Get: _int32(self: GH_Item) -> int
"""
_int64=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Returns the internal data of this item cast to an Int64.
If the data is not stored as an Int64,a conversion exception might be thrown.
Get: _int64(self: GH_Item) -> Int64
"""
_interval1d=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Returns the internal data of this item cast to an Interval1D.
If the data is not stored as an Interval1D,a conversion exception might be thrown.
Get: _interval1d(self: GH_Item) -> GH_Interval1D
"""
_interval2d=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Returns the internal data of this item cast to an Interval2D.
If the data is not stored as an Interval2D,a conversion exception might be thrown.
Get: _interval2d(self: GH_Item) -> GH_Interval2D
"""
_line=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Returns the internal data of this item cast to a Line.
If the data is not stored as a Line,a conversion exception might be thrown.
Get: _line(self: GH_Item) -> GH_Line
"""
_plane=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Returns the internal data of this item cast to a Plane.
If the data is not stored as a Plane,a conversion exception might be thrown.
Get: _plane(self: GH_Item) -> GH_Plane
"""
_point2d=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Returns the internal data of this item cast to a Point2D.
If the data is not stored as a Point2D,a conversion exception might be thrown.
Get: _point2d(self: GH_Item) -> GH_Point2D
"""
_point3d=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Returns the internal data of this item cast to a Point3D.
If the data is not stored as a Point3D,a conversion exception might be thrown.
Get: _point3d(self: GH_Item) -> GH_Point3D
"""
_point4d=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Returns the internal data of this item cast to a Point4D.
If the data is not stored as a Point4D,a conversion exception might be thrown.
Get: _point4d(self: GH_Item) -> GH_Point4D
"""
_single=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Returns the internal data of this item cast to a Single.
If the data is not stored as a Single,a conversion exception might be thrown.
Get: _single(self: GH_Item) -> Single
"""
_string=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Returns the internal data of this item cast to a String.
If the data is not stored as a String,a conversion exception might be thrown.
Get: _string(self: GH_Item) -> str
"""
_version=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Returns the internal data of this item cast to a Version.
If the data is not stored as a Version,a conversion exception might be thrown.
Get: _version(self: GH_Item) -> GH_Version
"""
class GH_Line(object):
"""
Represents a 3D line segment,denoted by start and endpoints.
GH_Line(nA: GH_Point3D,nB: GH_Point3D)
GH_Line(Ax: float,Ay: float,Az: float,Bx: float,By: float,Bz: float)
"""
def ToString(self):
"""
ToString(self: GH_Line) -> str
Converts this structure to a human-readable string.
Returns: A string representation of the line structure.
"""
pass
@staticmethod
def __new__(self,*__args):
"""
__new__[GH_Line]() -> GH_Line
__new__(cls: type,nA: GH_Point3D,nB: GH_Point3D)
__new__(cls: type,Ax: float,Ay: float,Az: float,Bx: float,By: float,Bz: float)
"""
pass
A=None
B=None
class GH_Plane(object):
"""
Represents a 3D plane system,defined by origin point and {X,Y} axis directions.
GH_Plane(nOrigin: GH_Point3D,nXAxis: GH_Point3D,nYAxis: GH_Point3D)
GH_Plane(Ox: float,Oy: float,Oz: float,Xx: float,Xy: float,Xz: float,Yx: float,Yy: float,Yz: float)
"""
def ToString(self):
"""
ToString(self: GH_Plane) -> str
Converts this structure to a human-readable string.
Returns: A string representation of the plane structure.
"""
pass
@staticmethod
def __new__(self,*__args):
"""
__new__[GH_Plane]() -> GH_Plane
__new__(cls: type,nOrigin: GH_Point3D,nXAxis: GH_Point3D,nYAxis: GH_Point3D)
__new__(cls: type,Ox: float,Oy: float,Oz: float,Xx: float,Xy: float,Xz: float,Yx: float,Yy: float,Yz: float)
"""
pass
Origin=None
XAxis=None
YAxis=None
class GH_Point2D(object):
"""
Represents a 2D point coordinate with double precision floating point components.
GH_Point2D(nx: float,ny: float)
"""
def ToString(self):
"""
ToString(self: GH_Point2D) -> str
Converts this structure to a human-readable string.
Returns: A string representation of the two-dimenionsional point structure.
"""
pass
@staticmethod
def __new__(self,nx,ny):
"""
__new__[GH_Point2D]() -> GH_Point2D
__new__(cls: type,nx: float,ny: float)
"""
pass
x=None
y=None
class GH_Point3D(object):
"""
Represents a 3D point coordinate with double precision floating point components.
GH_Point3D(nx: float,ny: float,nz: float)
"""
def ToString(self):
"""
ToString(self: GH_Point3D) -> str
Converts this structure to a human-readable string.
Returns: A string representation of the three-dimenionsional point structure.
"""
pass
@staticmethod
def __new__(self,nx,ny,nz):
"""
__new__[GH_Point3D]() -> GH_Point3D
__new__(cls: type,nx: float,ny: float,nz: float)
"""
pass
x=None
y=None
z=None
class GH_Point4D(object):
"""
Represents a 4D point coordinate with double precision floating point components.
GH_Point4D(nx: float,ny: float,nz: float,nw: float)
"""
def ToString(self):
"""
ToString(self: GH_Point4D) -> str
Converts this structure to a human-readable string.
Returns: A string representation of the four-dimenionsional point structure.
"""
pass
@staticmethod
def __new__(self,nx,ny,nz,nw):
"""
__new__[GH_Point4D]() -> GH_Point4D
__new__(cls: type,nx: float,ny: float,nz: float,nw: float)
"""
pass
w=None
x=None
y=None
z=None
class GH_Types(Enum,IComparable,IFormattable,IConvertible):
"""
Contains flags for all data types currently supported by GH_IO.dll
enum GH_Types,values: gh_bool (1),gh_boundingbox (71),gh_byte (2),gh_bytearray (20),gh_date (8),gh_decimal (7),gh_double (6),gh_doublearray (21),gh_drawing_bitmap (37),gh_drawing_color (36),gh_drawing_point (30),gh_drawing_pointf (31),gh_drawing_rectangle (34),gh_drawing_rectanglef (35),gh_drawing_size (32),gh_drawing_sizef (33),gh_guid (9),gh_int32 (3),gh_int64 (4),gh_interval1d (60),gh_interval2d (61),gh_line (70),gh_plane (72),gh_point2d (50),gh_point3d (51),gh_point4d (52),gh_single (5),gh_string (10),gh_version (80),unset (0)
"""
def __eq__(self,*args):
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self,*args):
""" __format__(formattable: IFormattable,format: str) -> str """
pass
def __ge__(self,*args):
pass
def __gt__(self,*args):
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self,*args):
pass
def __lt__(self,*args):
pass
def __ne__(self,*args):
pass
def __reduce_ex__(self,*args):
pass
def __str__(self,*args):
pass
gh_bool=None
gh_boundingbox=None
gh_byte=None
gh_bytearray=None
gh_date=None
gh_decimal=None
gh_double=None
gh_doublearray=None
gh_drawing_bitmap=None
gh_drawing_color=None
gh_drawing_point=None
gh_drawing_pointf=None
gh_drawing_rectangle=None
gh_drawing_rectanglef=None
gh_drawing_size=None
gh_drawing_sizef=None
gh_guid=None
gh_int32=None
gh_int64=None
gh_interval1d=None
gh_interval2d=None
gh_line=None
gh_plane=None
gh_point2d=None
gh_point3d=None
gh_point4d=None
gh_single=None
gh_string=None
gh_version=None
unset=None
value__=None
class GH_Version(object):
"""
Basic version type. Contains Major,Minor and Revision fields.
GH_Version(v_major: int,v_minor: int,v_revision: int)
GH_Version(other: GH_Version)
"""
def Equals(self,obj):
"""
Equals(self: GH_Version,obj: object) -> bool
Performs value equality comparison.
obj: Object to compare with.
If obj is a null reference or not a GH_Version instance,
false is returned.
Returns: True if obj is a GH_Version instance which is equal to this one.
"""
pass
def GetHashCode(self):
"""
GetHashCode(self: GH_Version) -> int
Returns the hash code for this instance.
Returns: A hash code for the current version object.
"""
pass
def ToString(self):
"""
ToString(self: GH_Version) -> str
Default formatter for Version data: M.m.RRRR
Revision section is padded with
zeroes until it is at least 4 digits long.
Returns: A string represtation of the Version structure.
"""
pass
def __eq__(self,*args):
""" x.__eq__(y) <==> x==y """
pass
def __ge__(self,*args):
pass
def __gt__(self,*args):
pass
def __le__(self,*args):
pass
def __lt__(self,*args):
pass
@staticmethod
def __new__(self,*__args):
"""
__new__[GH_Version]() -> GH_Version
__new__(cls: type,v_major: int,v_minor: int,v_revision: int)
__new__(cls: type,other: GH_Version)
"""
pass
def __ne__(self,*args):
pass
major=None
minor=None
revision=None
|
def _parse_categories(lines):
"""
Read (category_id, category_name) pairs from the categories section.
Each line consists of an integer followed a tab and then the category name.
This section is separated from the lexicon by a line consisting of a single "%".
"""
for line in lines:
line = line.strip()
if line == "%":
return
# ignore non-matching groups of categories
if "\t" in line:
category_id, category_name = line.split("\t", 1)
yield category_id, category_name
def _parse_lexicon(lines, category_mapping):
"""
Read (match_expression, category_names) pairs from the lexicon section.
Each line consists of a match expression followed by a tab and then one or more
tab-separated integers, which are mapped to category names using `category_mapping`.
"""
for line in lines:
line = line.strip()
parts = line.split("\t")
yield parts[0], [category_mapping[category_id] for category_id in parts[1:]]
def read_dic(filepath):
    """
    Reads a LIWC lexicon from a file in the .dic format, returning a tuple of
    (lexicon, category_names), where:
    * `lexicon` is a dict mapping string patterns to lists of category names
    * `category_names` is a list of category names (as strings)
    """
    # Explicit encoding: bare open() uses the platform-dependent default,
    # which can mis-decode non-ASCII patterns on some systems. LIWC .dic
    # files are normally UTF-8 — TODO confirm for the dictionaries in use.
    with open(filepath, encoding="utf-8") as lines:
        # read up to first "%" (should be very first line of file)
        for line in lines:
            if line.strip() == "%":
                break
        # read categories (a mapping from integer string to category name)
        category_mapping = dict(_parse_categories(lines))
        # read lexicon (a mapping from matching string to a list of category names)
        lexicon = dict(_parse_lexicon(lines, category_mapping))
        return lexicon, list(category_mapping.values())
|
def test_example() -> None:
    """Sanity checks for basic arithmetic and string concatenation."""
    assert True, "not True"
    checks = [
        (1 + 1, 2),
        (4 / 2, 2),
        (2 * 2, 4),
        ("ab" + "bc", "abbc"),
    ]
    for actual, expected in checks:
        assert actual == expected
|
# Read integers from the user until the sentinel value 999 is entered, then
# report how many numbers were typed and their sum (999 itself not counted).
soma = 0
contador = 0
# iter(callable, sentinel) keeps calling the lambda and stops when it
# returns a value equal to 999.
for numero in iter(lambda: int(input('Digite um número: ')), 999):
    soma += numero
    contador += 1
print(f'Você digitou {contador} e a soma deles vale {soma}.')
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/config/hp_defaults.ipynb (unless otherwise specified).
__all__ = ['allow_base_class', 'name_epoch', 'name_last_epoch', 'name_model_history', 'model_file_name',
           'path_experiments', 'defaults', 'root', 'metric', 'op', 'result_file', 'min_iterations', 'use_previous_best',
           'name_logger', 'verbose']
# Cell
# Default settings for hpsearch experiment management.
# NOTE(review): the meanings below are inferred from the names — confirm
# against the source notebook nbs/config/hp_defaults.ipynb.
allow_base_class=False  # presumably controls whether the base class may be used directly
name_epoch='epochs'  # key under which the epoch count is stored
name_last_epoch='last_epoch'  # key for the last completed epoch
name_model_history='model_history.pk'  # pickle file holding the model's training history
model_file_name='model.h5'  # file name used when saving the model
path_experiments='results/hpsearch'  # root folder where experiment results are written
defaults={}  # default hyperparameter values (empty by default)
root=''  # base path prefix
metric='accuracy'  # metric tracked when comparing runs
op='max'  # optimization direction applied to `metric`
result_file='dict_results.pk'  # pickle file holding the results dictionary
min_iterations=50  # minimum number of iterations — TODO confirm exact semantics
use_previous_best=True  # presumably resume from the previous best configuration
name_logger='experiment_manager'  # logger name used by the experiment manager
verbose=0  # verbosity level (0 = silent)
# Given the head of a singly linked list, group all thenodes
# # with odd indices together followed by the nodes with even indices, and return the reordered list.
# #
# # The first node is considered odd, and the secondnode is even, and so on.
# #
# # Note that the relative order inside both the even and odd groups should remain as it
# # was in the input.
# #
# # You must solve the problem in O(1) extra space complexity and O(n)
# # time complexity.
# #
# # Example 1:
# #
# # Input: head = [1, 2, 3, 4, 5]
# # Output: [1, 3, 5, 2, 4]
# #
# # Example 2:
# #
# # Input: head = [2, 1, 3, 5, 6, 4, 7]
# # Output: [2, 3, 6, 7, 1, 5, 4]
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
    def oddEvenList(self, head: "Optional[ListNode]") -> "Optional[ListNode]":
        """
        Regroup a singly linked list so that all odd-indexed nodes (1st, 3rd,
        ...) come first, followed by the even-indexed nodes, preserving the
        relative order within each group. Runs in O(n) time, O(1) extra space.

        The annotations are quoted (PEP 484 forward references) because
        neither `Optional` nor `ListNode` is defined at import time in this
        file; the unquoted originals raised NameError when the `def` was
        evaluated.

        Args:
            head: Head of the list (or None).

        Returns:
            Head of the reordered list (same first node as the input).
        """
        # Lists with fewer than three nodes are already in odd/even order.
        if head is None or head.next is None or head.next.next is None:
            return head
        odd = head              # tail of the odd-indexed chain
        even = head.next        # tail of the even-indexed chain
        even_head = head.next   # first even node; stitched on after the odds
        while even and even.next:
            # Append the next odd node to the odd chain.
            odd.next = even.next
            odd = odd.next
            # Append the next even node to the even chain.
            even.next = odd.next
            even = even.next
        # Attach the even chain after the last odd node.
        odd.next = even_head
        return head
|
# Mapping from three-letter English weekday abbreviations to the Russian
# weekday names (Monday through Sunday).
days = {
    'mon': 'Понедельник',
    'tue': 'Вторник',
    'wed': 'Среда',
    'thu': 'Четверг',
    'fri': 'Пятница',
    'sat': 'Суббота',
    'sun': 'Воскресенье'
}
|
# -*- coding: utf-8 -*-
"""
NewEraAI exceptions
"""
class NewEraAIException(Exception):
    """Base class for all custom NewEraAI exceptions"""
    pass
class LanguagesSRError(NewEraAIException):
    """An unsupported language was specified for speech recognition"""
    pass
class DictSRError(NewEraAIException):
    """An unsupported dictionary size was specified for speech recognition"""
    pass
class SRModelNotActivatedError(NewEraAIException):
    """The speech recognition model has not been activated"""
    pass
class TypeEncodeVideoError(NewEraAIException):
    """An unsupported video encoding type was specified"""
    pass
class PresetCFREncodeVideoError(NewEraAIException):
    """An unsupported preset controlling video encoding/compression speed was specified"""
    pass
class SRInputTypeError(NewEraAIException):
    """An unsupported file type was specified for speech recognition"""
    pass
class IsADirectoryOriginalError(NewEraAIException):
    """The directory with the original videos was not found"""
    pass
class IsADirectorySplittedError(NewEraAIException):
    """The directory with the split video fragments was not found"""
    pass
class Atom:
    """A single atom record; any attribute not supplied defaults to None."""

    __slots__ = ["name", "type", "resname", "resid", "x", "y", "z",
                 "diameter", "rotmass", "charge", "mass",
                 "sigma", "epsilon", "dipole"]

    def __init__(self, **kwargs):
        # Initialise every slot, falling back to None when the caller did
        # not provide a value for it.
        for attr in self.__slots__:
            setattr(self, attr, kwargs.get(attr))

    def __repr__(self):
        return "<Atom name={}, resname={}, resnum={}, type={}>".format(
            self.name, self.resname, self.resid, self.type)

    @staticmethod
    def compare(val1, val2):
        """
        Compare two values.

        Return the second value if both values are the same or the first
        value is None; return the first value if the second value is None.

        Args:
            val1: First value
            val2: Second value

        Returns: One of the values

        Raises:
            ValueError: If the values differ and neither is None.
        """
        if val1 == val2 or val1 is None:
            return val2
        if val2 is None:
            return val1
        raise ValueError("Values for comparison are different and not None.")

    def populate(self, other):
        """
        Fill in this Atom's missing (None) attributes using values from
        Atom `other`.

        Args:
            other: Another Atom instance

        Returns: Nothing
        """
        for attr in self.__slots__:
            merged = Atom.compare(getattr(self, attr), getattr(other, attr))
            setattr(self, attr, merged)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.