content stringlengths 7 1.05M | fixed_cases stringlengths 1 1.28M |
|---|---|
# Maximum Erasure Value
'''
You are given an array of positive integers nums and want to erase a subarray containing unique elements. The score you get by erasing the subarray is equal to the sum of its elements.
Return the maximum score you can get by erasing exactly one subarray.
An array b is called to be a subarray of a if it forms a contiguous subsequence of a, that is, if it is equal to a[l],a[l+1],...,a[r] for some (l,r).
Example 1:
Input: nums = [4,2,4,5,6]
Output: 17
Explanation: The optimal subarray here is [2,4,5,6].
Example 2:
Input: nums = [5,2,1,2,5,2,1,2,5]
Output: 8
Explanation: The optimal subarray here is [5,2,1] or [1,2,5].
Constraints:
1 <= nums.length <= 105
1 <= nums[i] <= 104
Hide Hint #1
The main point here is for the subarray to contain unique elements for each index. Only the first subarrays starting from that index have unique elements.
Hide Hint #2
This can be solved using the two pointers technique
'''
class Solution:
def maximumUniqueSubarray(self, nums: List[int]) -> int:
res,s,e = 0,0,0
add = 0
return 1
for i in range(len(nums)):
if nums[i] not in nums[s:i]:
add = add+nums[i]
print('if')
else:
s = s+nums[s:i].index(nums[i])+1
add = sum(nums[s:i+1])
print('else')
e = i
res = max(res,add)
print(s,e,add, res)
return res
| """
You are given an array of positive integers nums and want to erase a subarray containing unique elements. The score you get by erasing the subarray is equal to the sum of its elements.
Return the maximum score you can get by erasing exactly one subarray.
An array b is called to be a subarray of a if it forms a contiguous subsequence of a, that is, if it is equal to a[l],a[l+1],...,a[r] for some (l,r).
Example 1:
Input: nums = [4,2,4,5,6]
Output: 17
Explanation: The optimal subarray here is [2,4,5,6].
Example 2:
Input: nums = [5,2,1,2,5,2,1,2,5]
Output: 8
Explanation: The optimal subarray here is [5,2,1] or [1,2,5].
Constraints:
1 <= nums.length <= 105
1 <= nums[i] <= 104
Hide Hint #1
The main point here is for the subarray to contain unique elements for each index. Only the first subarrays starting from that index have unique elements.
Hide Hint #2
This can be solved using the two pointers technique
"""
class Solution:
def maximum_unique_subarray(self, nums: List[int]) -> int:
(res, s, e) = (0, 0, 0)
add = 0
return 1
for i in range(len(nums)):
if nums[i] not in nums[s:i]:
add = add + nums[i]
print('if')
else:
s = s + nums[s:i].index(nums[i]) + 1
add = sum(nums[s:i + 1])
print('else')
e = i
res = max(res, add)
print(s, e, add, res)
return res |
# --------------------------------------------------------------
class ModelSimilarity:
'''
Uses a model (e.g. Word2Vec model) to calculate the similarity between two terms.
'''
def __init__( self, model ):
self.model = model
def similarity( self, ranking_i, ranking_j ):
sim = 0.0
pairs = 0
for term_i in ranking_i:
for term_j in ranking_j:
try:
sim += self.model.similarity(term_i, term_j)
pairs += 1
except:
#print "Failed pair (%s,%s)" % (term_i,term_j)
pass
if pairs == 0:
return 0.0
return sim/pairs
# --------------------------------------------------------------
class WithinTopicMeasure:
'''
Measures within-topic coherence for a topic model, based on a set of term rankings.
'''
def __init__( self, metric ):
self.metric = metric
def evaluate_ranking( self, term_ranking ):
return self.metric.similarity( term_ranking, term_ranking )
def evaluate_rankings( self, term_rankings ):
scores = []
overall = 0.0
for topic_index in range(len(term_rankings)):
score = self.evaluate_ranking( term_rankings[topic_index] )
scores.append( score )
overall += score
overall /= len(term_rankings)
return overall
| class Modelsimilarity:
"""
Uses a model (e.g. Word2Vec model) to calculate the similarity between two terms.
"""
def __init__(self, model):
self.model = model
def similarity(self, ranking_i, ranking_j):
sim = 0.0
pairs = 0
for term_i in ranking_i:
for term_j in ranking_j:
try:
sim += self.model.similarity(term_i, term_j)
pairs += 1
except:
pass
if pairs == 0:
return 0.0
return sim / pairs
class Withintopicmeasure:
"""
Measures within-topic coherence for a topic model, based on a set of term rankings.
"""
def __init__(self, metric):
self.metric = metric
def evaluate_ranking(self, term_ranking):
return self.metric.similarity(term_ranking, term_ranking)
def evaluate_rankings(self, term_rankings):
scores = []
overall = 0.0
for topic_index in range(len(term_rankings)):
score = self.evaluate_ranking(term_rankings[topic_index])
scores.append(score)
overall += score
overall /= len(term_rankings)
return overall |
xs1 = ys[42:
5:
-1]
xs2 = ys[:
2:
3]
xs3 = ys[::
3]
| xs1 = ys[42:5:-1]
xs2 = ys[:2:3]
xs3 = ys[::3] |
class Stack:
topNode = None
class Node:
def __init__(self, value):
self.value = value
self.nextNode = None
def __repr__(self):
return "[{}]".format(self.value)
def __init__(self, iterable):
if len(iterable) != 0:
for k in iterable:
new_node = self.Node(k)
new_node.nextNode = self.topNode
self.topNode = new_node
def __repr__(self):
lines = []
working_node = self.topNode
if working_node is None:
return "[EmptyStack]"
while working_node is not None:
lines.append(str(working_node))
working_node = working_node.nextNode
return "\n |\n".join(lines)
def peek(self):
return self.topNode.value
def push(self, value):
new_node = self.Node(value)
new_node.nextNode = self.topNode
self.topNode = new_node
def pop(self):
self.topNode = self.topNode.nextNode
def is_empty(self):
if self.topNode is None:
return True
return False
if __name__ == '__main__':
stack = Stack([3, 5])
stack.push(6)
stack.pop()
print(stack)
stack.pop()
stack.pop()
print(stack, stack.is_empty())
| class Stack:
top_node = None
class Node:
def __init__(self, value):
self.value = value
self.nextNode = None
def __repr__(self):
return '[{}]'.format(self.value)
def __init__(self, iterable):
if len(iterable) != 0:
for k in iterable:
new_node = self.Node(k)
new_node.nextNode = self.topNode
self.topNode = new_node
def __repr__(self):
lines = []
working_node = self.topNode
if working_node is None:
return '[EmptyStack]'
while working_node is not None:
lines.append(str(working_node))
working_node = working_node.nextNode
return '\n |\n'.join(lines)
def peek(self):
return self.topNode.value
def push(self, value):
new_node = self.Node(value)
new_node.nextNode = self.topNode
self.topNode = new_node
def pop(self):
self.topNode = self.topNode.nextNode
def is_empty(self):
if self.topNode is None:
return True
return False
if __name__ == '__main__':
stack = stack([3, 5])
stack.push(6)
stack.pop()
print(stack)
stack.pop()
stack.pop()
print(stack, stack.is_empty()) |
'''
'''
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
Test.Summary = 'Testing ATS active timeout'
Test.SkipUnless(
Condition.HasCurlFeature('http2')
)
ts = Test.MakeATSProcess("ts", select_ports=True, enable_tls=True)
server = Test.MakeOriginServer("server", delay=8)
request_header = {"headers": "GET /file HTTP/1.1\r\nHost: *\r\n\r\n", "timestamp": "5678", "body": ""}
response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "5678", "body": ""}
server.addResponse("sessionfile.log", request_header, response_header)
ts.addSSLfile("../tls/ssl/server.pem")
ts.addSSLfile("../tls/ssl/server.key")
ts.Disk.records_config.update({
'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir),
'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir),
'proxy.config.url_remap.remap_required': 1,
'proxy.config.http.transaction_active_timeout_out': 2,
})
ts.Disk.remap_config.AddLine(
'map / http://127.0.0.1:{0}/'.format(server.Variables.Port))
ts.Disk.ssl_multicert_config.AddLine(
'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key'
)
tr = Test.AddTestRun("tr")
tr.Processes.Default.StartBefore(server)
tr.Processes.Default.StartBefore(ts)
tr.Processes.Default.Command = 'curl -i http://127.0.0.1:{0}/file'.format(ts.Variables.port)
tr.Processes.Default.Streams.stdout = Testers.ContainsExpression("Activity Timeout", "Request should fail with active timeout")
tr2 = Test.AddTestRun("tr")
tr2.Processes.Default.Command = 'curl -k -i --http1.1 https://127.0.0.1:{0}/file'.format(ts.Variables.ssl_port)
tr2.Processes.Default.Streams.stdout = Testers.ContainsExpression("Activity Timeout", "Request should fail with active timeout")
tr3 = Test.AddTestRun("tr")
tr3.Processes.Default.Command = 'curl -k -i --http2 https://127.0.0.1:{0}/file'.format(ts.Variables.ssl_port)
tr3.Processes.Default.Streams.stdout = Testers.ContainsExpression("Activity Timeout", "Request should fail with active timeout")
| """
"""
Test.Summary = 'Testing ATS active timeout'
Test.SkipUnless(Condition.HasCurlFeature('http2'))
ts = Test.MakeATSProcess('ts', select_ports=True, enable_tls=True)
server = Test.MakeOriginServer('server', delay=8)
request_header = {'headers': 'GET /file HTTP/1.1\r\nHost: *\r\n\r\n', 'timestamp': '5678', 'body': ''}
response_header = {'headers': 'HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n', 'timestamp': '5678', 'body': ''}
server.addResponse('sessionfile.log', request_header, response_header)
ts.addSSLfile('../tls/ssl/server.pem')
ts.addSSLfile('../tls/ssl/server.key')
ts.Disk.records_config.update({'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), 'proxy.config.url_remap.remap_required': 1, 'proxy.config.http.transaction_active_timeout_out': 2})
ts.Disk.remap_config.AddLine('map / http://127.0.0.1:{0}/'.format(server.Variables.Port))
ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key')
tr = Test.AddTestRun('tr')
tr.Processes.Default.StartBefore(server)
tr.Processes.Default.StartBefore(ts)
tr.Processes.Default.Command = 'curl -i http://127.0.0.1:{0}/file'.format(ts.Variables.port)
tr.Processes.Default.Streams.stdout = Testers.ContainsExpression('Activity Timeout', 'Request should fail with active timeout')
tr2 = Test.AddTestRun('tr')
tr2.Processes.Default.Command = 'curl -k -i --http1.1 https://127.0.0.1:{0}/file'.format(ts.Variables.ssl_port)
tr2.Processes.Default.Streams.stdout = Testers.ContainsExpression('Activity Timeout', 'Request should fail with active timeout')
tr3 = Test.AddTestRun('tr')
tr3.Processes.Default.Command = 'curl -k -i --http2 https://127.0.0.1:{0}/file'.format(ts.Variables.ssl_port)
tr3.Processes.Default.Streams.stdout = Testers.ContainsExpression('Activity Timeout', 'Request should fail with active timeout') |
class take_skip:
def __init__(self, step, count):
self.step = step
self.count = count
self.start = 0
self.end = step * count
def __iter__(self):
return self
def __next__(self):
index = self.start
if index >= self.end:
raise StopIteration
self.start += self.step
return index
numbers = take_skip(10, 5)
for number in numbers:
print(number) | class Take_Skip:
def __init__(self, step, count):
self.step = step
self.count = count
self.start = 0
self.end = step * count
def __iter__(self):
return self
def __next__(self):
index = self.start
if index >= self.end:
raise StopIteration
self.start += self.step
return index
numbers = take_skip(10, 5)
for number in numbers:
print(number) |
class Solution:
def longestCommonPrefix(self, strs: List[str]) -> str:
if len(strs) == 0:
return ''
def getCommonPrefix(s1, s2):
result = []
for i in range(min(len(s1), len(s2))):
if s1[i] == s2[i]:
result.append(s1[i])
else:
break
return ''.join(result)
commonPrefix = strs[0]
for i in range(1, len(strs)):
commonPrefix = getCommonPrefix(commonPrefix, strs[i])
return commonPrefix | class Solution:
def longest_common_prefix(self, strs: List[str]) -> str:
if len(strs) == 0:
return ''
def get_common_prefix(s1, s2):
result = []
for i in range(min(len(s1), len(s2))):
if s1[i] == s2[i]:
result.append(s1[i])
else:
break
return ''.join(result)
common_prefix = strs[0]
for i in range(1, len(strs)):
common_prefix = get_common_prefix(commonPrefix, strs[i])
return commonPrefix |
n = int(input())
a = list(map(int, input().split()))
xor = a[0]
for x in a[1:]:
xor ^= x
ans = print(*[xor ^ x for x in a]) | n = int(input())
a = list(map(int, input().split()))
xor = a[0]
for x in a[1:]:
xor ^= x
ans = print(*[xor ^ x for x in a]) |
class DictTrafo(object):
def __init__(self, trafo_dict=None, prefix=None):
if trafo_dict is None:
trafo_dict = {}
self.trafo_dict = trafo_dict
if type(prefix) is str:
self.prefix = (prefix,)
elif type(prefix) is tuple:
self.prefix = prefix
else:
self.prefix = None
def transform(self, in_dict, trafo_dict=None, keep_none=False):
if trafo_dict is None:
trafo_dict = self.trafo_dict
res = {}
for key in trafo_dict:
val = trafo_dict[key]
tval = type(val)
# sub dict
if tval is dict:
vres = self.transform(in_dict, val)
# (callable, rel_path)
elif tval is tuple and len(val) == 2 and callable(val[0]):
rel_path = self.read_rel_path(val[1], in_dict)
vres = val[0](key, rel_path)
# a rel_path in in_dict
elif tval in (str, tuple, list):
vres = self.read_rel_path(val, in_dict)
# invalid
else:
raise ValueError("invalid type in trafo_dict: %s" + val)
if vres is not None or keep_none:
res[key] = vres
return res
def read_rel_path(self, path, in_dict):
abs_path = []
if self.prefix:
abs_path += self.prefix
if type(path) is str:
abs_path.append(path)
else:
abs_path += path
return self.read_path(abs_path, in_dict)
def read_path(self, path, in_dict):
if len(path) == 0:
return in_dict
if type(in_dict) is not dict:
return None
key = path[0]
if key in in_dict:
val = in_dict[key]
path = path[1:]
return self.read_path(path, val)
| class Dicttrafo(object):
def __init__(self, trafo_dict=None, prefix=None):
if trafo_dict is None:
trafo_dict = {}
self.trafo_dict = trafo_dict
if type(prefix) is str:
self.prefix = (prefix,)
elif type(prefix) is tuple:
self.prefix = prefix
else:
self.prefix = None
def transform(self, in_dict, trafo_dict=None, keep_none=False):
if trafo_dict is None:
trafo_dict = self.trafo_dict
res = {}
for key in trafo_dict:
val = trafo_dict[key]
tval = type(val)
if tval is dict:
vres = self.transform(in_dict, val)
elif tval is tuple and len(val) == 2 and callable(val[0]):
rel_path = self.read_rel_path(val[1], in_dict)
vres = val[0](key, rel_path)
elif tval in (str, tuple, list):
vres = self.read_rel_path(val, in_dict)
else:
raise value_error('invalid type in trafo_dict: %s' + val)
if vres is not None or keep_none:
res[key] = vres
return res
def read_rel_path(self, path, in_dict):
abs_path = []
if self.prefix:
abs_path += self.prefix
if type(path) is str:
abs_path.append(path)
else:
abs_path += path
return self.read_path(abs_path, in_dict)
def read_path(self, path, in_dict):
if len(path) == 0:
return in_dict
if type(in_dict) is not dict:
return None
key = path[0]
if key in in_dict:
val = in_dict[key]
path = path[1:]
return self.read_path(path, val) |
filename = 'full_text_small.txt'
def file_write(filename):
with open(filename, 'r') as f:
n = 0
for line in f:
n += 1
if n <= 5:
print(line)
return(line)
file_write(filename)
| filename = 'full_text_small.txt'
def file_write(filename):
with open(filename, 'r') as f:
n = 0
for line in f:
n += 1
if n <= 5:
print(line)
return line
file_write(filename) |
a = 1
b = 0
c = a & b
d = a | b
e = a ^ b
print(c+d+e)
my_list = [[1,2,3,4] for i in range(2)]
print(my_list[1][0])
x =2
x = x==x
print(x)
my_list = [1,2,3]
for v in range(len(my_list)):
my_list.insert(1, my_list[v])
print(my_list) | a = 1
b = 0
c = a & b
d = a | b
e = a ^ b
print(c + d + e)
my_list = [[1, 2, 3, 4] for i in range(2)]
print(my_list[1][0])
x = 2
x = x == x
print(x)
my_list = [1, 2, 3]
for v in range(len(my_list)):
my_list.insert(1, my_list[v])
print(my_list) |
n = int(input())
ans = 0
for i in range(n):
l, c = map(int, input().split())
if l > c:
ans += c
else:
continue
print(ans) | n = int(input())
ans = 0
for i in range(n):
(l, c) = map(int, input().split())
if l > c:
ans += c
else:
continue
print(ans) |
_base_ = [
'../_base_/models/faster_rcnn_r50_fpn_moco.py',
'../_base_/datasets/vocdataset_voc0712.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
optimizer = dict(type='SGD', lr=0.02/16, momentum=0.9, weight_decay=0.0001) | _base_ = ['../_base_/models/faster_rcnn_r50_fpn_moco.py', '../_base_/datasets/vocdataset_voc0712.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py']
optimizer = dict(type='SGD', lr=0.02 / 16, momentum=0.9, weight_decay=0.0001) |
_base_ = ['./mswin_par_small_patch4_512x512_160k_ade20k_pretrain_224x224_1K.py']
model = dict(
decode_head=dict(
mode='seq',
))
data = dict(samples_per_gpu=10)
| _base_ = ['./mswin_par_small_patch4_512x512_160k_ade20k_pretrain_224x224_1K.py']
model = dict(decode_head=dict(mode='seq'))
data = dict(samples_per_gpu=10) |
def merge_sort(arr):
n = len(arr)
if (n >= 2):
A = merge_sort(arr[:int(n/2)])
B = merge_sort(arr[int(n/2):])
i = 0
j = 0
for k in range(0, n):
if i < int(n/2) and (j == len(B) or A[i] <= B[j]):
arr[k] = A[i]
i = i + 1
else:
arr[k] = B[j]
j = j + 1
return arr
arr = input()
arr = [(int(num)) for num in arr.split()]
print(merge_sort(arr)) | def merge_sort(arr):
n = len(arr)
if n >= 2:
a = merge_sort(arr[:int(n / 2)])
b = merge_sort(arr[int(n / 2):])
i = 0
j = 0
for k in range(0, n):
if i < int(n / 2) and (j == len(B) or A[i] <= B[j]):
arr[k] = A[i]
i = i + 1
else:
arr[k] = B[j]
j = j + 1
return arr
arr = input()
arr = [int(num) for num in arr.split()]
print(merge_sort(arr)) |
# --------------
# Code starts here
class_1 = ['Geoffrey Hinton','Andrew Ng','Sebastian Raschka','Yoshua Bengio']
class_2 = ['Hilary Mason','Carla Gentry','Corinna Cortes']
new_class = class_1 + class_2
print(new_class)
new_class.append('Peter Warden')
print(new_class)
new_class.remove('Carla Gentry')
print(new_class)
# Code ends here
# --------------
# Code starts here
courses = {
'Math' : 65,
'English' : 70,
'History' : 80,
'French' : 70,
'Science' : 60
}
print(courses.get('Math'))
print(courses.get('English'))
print(courses.get('History'))
print(courses.get('French'))
print(courses.get('Science'))
total = (courses.get('Math')+courses.get('English')+courses.get('History')+courses.get('French')+courses.get('Science'))
print(total)
percentage = (total/500)*100
print(percentage)
# Code ends here
# --------------
# Code starts here
mathematics = {
'Geoffery Hinton' : 78,
'Andrew Ng' : 95,
'Sebastian Raschka' :65,
'Yoshua Benjio' : 50,
'Hilary Mason' : 70,
'Corinna Cortes' : 66,
'Peter Warden' : 75
}
topper = max(mathematics,key = mathematics.get)
print(topper)
# Code ends here
# --------------
# Given string
topper = 'andrew ng'
first_name = topper.split()[0]
last_name = topper.split()[1]
full_name = last_name+' '+first_name
certificate_name = full_name.upper()
print(certificate_name)
# Code starts here
# Code ends here
| class_1 = ['Geoffrey Hinton', 'Andrew Ng', 'Sebastian Raschka', 'Yoshua Bengio']
class_2 = ['Hilary Mason', 'Carla Gentry', 'Corinna Cortes']
new_class = class_1 + class_2
print(new_class)
new_class.append('Peter Warden')
print(new_class)
new_class.remove('Carla Gentry')
print(new_class)
courses = {'Math': 65, 'English': 70, 'History': 80, 'French': 70, 'Science': 60}
print(courses.get('Math'))
print(courses.get('English'))
print(courses.get('History'))
print(courses.get('French'))
print(courses.get('Science'))
total = courses.get('Math') + courses.get('English') + courses.get('History') + courses.get('French') + courses.get('Science')
print(total)
percentage = total / 500 * 100
print(percentage)
mathematics = {'Geoffery Hinton': 78, 'Andrew Ng': 95, 'Sebastian Raschka': 65, 'Yoshua Benjio': 50, 'Hilary Mason': 70, 'Corinna Cortes': 66, 'Peter Warden': 75}
topper = max(mathematics, key=mathematics.get)
print(topper)
topper = 'andrew ng'
first_name = topper.split()[0]
last_name = topper.split()[1]
full_name = last_name + ' ' + first_name
certificate_name = full_name.upper()
print(certificate_name) |
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
def f_gold ( x , y , z ) :
if ( not ( y / x ) ) :
return y if ( not ( y / z ) ) else z
return x if ( not ( x / z ) ) else z
#TOFILL
if __name__ == '__main__':
param = [
(48,63,56,),
(11,55,84,),
(50,89,96,),
(21,71,74,),
(94,39,42,),
(22,44,86,),
(3,41,68,),
(67,62,94,),
(59,2,83,),
(50,11,1,)
]
n_success = 0
for i, parameters_set in enumerate(param):
if f_filled(*parameters_set) == f_gold(*parameters_set):
n_success+=1
print("#Results: %i, %i" % (n_success, len(param))) | def f_gold(x, y, z):
if not y / x:
return y if not y / z else z
return x if not x / z else z
if __name__ == '__main__':
param = [(48, 63, 56), (11, 55, 84), (50, 89, 96), (21, 71, 74), (94, 39, 42), (22, 44, 86), (3, 41, 68), (67, 62, 94), (59, 2, 83), (50, 11, 1)]
n_success = 0
for (i, parameters_set) in enumerate(param):
if f_filled(*parameters_set) == f_gold(*parameters_set):
n_success += 1
print('#Results: %i, %i' % (n_success, len(param))) |
config = {
'lr': (1.5395901937079718e-05, 4.252664987376195e-05, 9.011700881717918e-05, 0.00026653695086486183),
'target_stepsize': 0.07688144983085089,
'feedback_wd': 5.751527315358352e-07,
'beta1': 0.9,
'beta2': 0.999,
'epsilon': (7.952762675272583e-06, 3.573159556208438e-06, 1.0425400798717413e-08, 2.0232644009531115e-08),
'lr_fb': 4.142073343374983e-05,
'sigma': 0.18197929046014408,
'beta1_fb': 0.9,
'beta2_fb': 0.999,
'epsilon_fb': 8.070760899188774e-06,
'out_dir': 'logs/cifar/DDTPConvCIFAR',
'network_type': 'DDTPConvCIFAR',
'initialization': 'xavier_normal',
'fb_activation': 'linear',
'dataset': 'cifar10',
# ### Training options ###
'optimizer': 'Adam',
'optimizer_fb': 'Adam',
'momentum': 0.,
'parallel': True,
'normalize_lr': True,
'batch_size': 128,
'epochs_fb': 10,
'not_randomized': True,
'not_randomized_fb': True,
'extra_fb_minibatches': 0,
'extra_fb_epochs': 1,
'epochs': 300,
'double_precision': True,
'no_val_set': True,
'forward_wd': 0.,
### Network options ###
# 'num_hidden': 3,
# 'size_hidden': 1024,
# 'size_input': 3072,
# 'size_output': 10,
'hidden_activation': 'tanh',
'output_activation': 'softmax',
'no_bias': False,
### Miscellaneous options ###
'no_cuda': False,
'random_seed': 42,
'cuda_deterministic': False,
'freeze_BPlayers': False,
'multiple_hpsearch': False,
### Logging options ###
'save_logs': False,
'save_BP_angle': False,
'save_GN_angle': False,
'save_GN_activations_angle': False,
'save_BP_activations_angle': False,
'gn_damping': 0.
} | config = {'lr': (1.5395901937079718e-05, 4.252664987376195e-05, 9.011700881717918e-05, 0.00026653695086486183), 'target_stepsize': 0.07688144983085089, 'feedback_wd': 5.751527315358352e-07, 'beta1': 0.9, 'beta2': 0.999, 'epsilon': (7.952762675272583e-06, 3.573159556208438e-06, 1.0425400798717413e-08, 2.0232644009531115e-08), 'lr_fb': 4.142073343374983e-05, 'sigma': 0.18197929046014408, 'beta1_fb': 0.9, 'beta2_fb': 0.999, 'epsilon_fb': 8.070760899188774e-06, 'out_dir': 'logs/cifar/DDTPConvCIFAR', 'network_type': 'DDTPConvCIFAR', 'initialization': 'xavier_normal', 'fb_activation': 'linear', 'dataset': 'cifar10', 'optimizer': 'Adam', 'optimizer_fb': 'Adam', 'momentum': 0.0, 'parallel': True, 'normalize_lr': True, 'batch_size': 128, 'epochs_fb': 10, 'not_randomized': True, 'not_randomized_fb': True, 'extra_fb_minibatches': 0, 'extra_fb_epochs': 1, 'epochs': 300, 'double_precision': True, 'no_val_set': True, 'forward_wd': 0.0, 'hidden_activation': 'tanh', 'output_activation': 'softmax', 'no_bias': False, 'no_cuda': False, 'random_seed': 42, 'cuda_deterministic': False, 'freeze_BPlayers': False, 'multiple_hpsearch': False, 'save_logs': False, 'save_BP_angle': False, 'save_GN_angle': False, 'save_GN_activations_angle': False, 'save_BP_activations_angle': False, 'gn_damping': 0.0} |
# constants related to the matchers
# all the types of matches
MATCH_TYPE_NONE = 0
MATCH_TYPE_RESET = 1
MATCH_TYPE_NMI = 2
MATCH_TYPE_WAIT_START = 3
MATCH_TYPE_WAIT_END = 4
MATCH_TYPE_BITS = 6 # number of bits required to represent the above (max 8)
NUM_MATCHERS = 32 # how many match engines are there?
MATCHER_BITS = 5 # number of bits required to represent the above (max 8)
| match_type_none = 0
match_type_reset = 1
match_type_nmi = 2
match_type_wait_start = 3
match_type_wait_end = 4
match_type_bits = 6
num_matchers = 32
matcher_bits = 5 |
def get_initial(name, force_uppercase=True):
if force_uppercase:
initial = name[0:1].upper()
else:
initial = name[0:1].lower()
return initial
first_name = input('Enter your first name: ')
# initial = get_initial(first_name)
initial = get_initial(force_uppercase=False, name=first_name)
print('Your initial is: ' + initial) | def get_initial(name, force_uppercase=True):
if force_uppercase:
initial = name[0:1].upper()
else:
initial = name[0:1].lower()
return initial
first_name = input('Enter your first name: ')
initial = get_initial(force_uppercase=False, name=first_name)
print('Your initial is: ' + initial) |
INPUT_PATH = "./input.txt"
input_file = open(INPUT_PATH, "r")
lines = input_file.readlines()
input_file.close()
divided_input = [[[set(x) for x in x.split()] for x in line.split(" | ")] for line in lines]
# Part 1
print("Part 1: ", sum([len([x for x in entry[1] if len(x) in [2, 3, 4, 7]]) for entry in divided_input]))
# Cursed part 1 1-liner assuming INPUT_PATH is defined with path to the input file
#print("Part 1: ", sum([len([x for x in entry[1] if len(x) in [2, 3, 4, 7]]) for entry in [[[set(x) for x in x.split()] for x in line.split(" | ")] for line in open(INPUT_PATH, "r").readlines()]]))
# Part 2
total_sum = 0
for elem in divided_input:
left = elem[0]
right = elem[1]
numbers = {
"0": None,
"1": [x for x in left if len(x) == 2][0],
"2": None,
"3": None,
"4": [x for x in left if len(x) == 4][0],
"5": None,
"6": None,
"7": [x for x in left if len(x) == 3][0],
"8": [x for x in left if len(x) == 7][0],
"9": None
}
segments = {
"a": None,
"b": None,
"c": None,
"d": None,
"e": None,
"f": None,
"g": None
}
(segments["a"],) = numbers["7"].difference(numbers["1"])
numbers["6"] = [x for x in left if len(x) == 6 and len(x.intersection(numbers["1"])) == 1][0]
zero_and_nine = [x for x in left if len(x) == 6 and len(x.difference(numbers["6"])) != 0]
numbers["9"] = [x for x in zero_and_nine if len(x.intersection(numbers["4"])) == 4][0]
numbers["0"] = [x for x in zero_and_nine if len(x.intersection(numbers["4"])) == 3][0]
(segments["f"],) = numbers["6"].intersection(numbers["1"])
(segments["c"],) = numbers["1"].difference(set(segments["f"]))
(segments["e"],) = numbers["6"].difference(numbers["9"])
(segments["d"],) = numbers["8"].difference(numbers["0"])
(segments["b"],) = numbers["4"].difference(set([x for x in segments.values() if x is not None]))
(segments["g"],) = numbers["8"].difference(set([x for x in segments.values() if x is not None]))
numbers["2"] = set([segments["a"], segments["c"], segments["d"], segments["e"], segments["g"]])
numbers["5"] = set([segments["a"], segments["b"], segments["d"], segments["f"], segments["g"]])
numbers["3"] = set([segments["a"], segments["c"], segments["d"], segments["f"], segments["g"]])
value = 0
for digit_set in right:
value *= 10
digit = int([k for k, v in numbers.items() if v == digit_set][0])
value += digit
total_sum += value
print("Part 2: ", total_sum) | input_path = './input.txt'
input_file = open(INPUT_PATH, 'r')
lines = input_file.readlines()
input_file.close()
divided_input = [[[set(x) for x in x.split()] for x in line.split(' | ')] for line in lines]
print('Part 1: ', sum([len([x for x in entry[1] if len(x) in [2, 3, 4, 7]]) for entry in divided_input]))
total_sum = 0
for elem in divided_input:
left = elem[0]
right = elem[1]
numbers = {'0': None, '1': [x for x in left if len(x) == 2][0], '2': None, '3': None, '4': [x for x in left if len(x) == 4][0], '5': None, '6': None, '7': [x for x in left if len(x) == 3][0], '8': [x for x in left if len(x) == 7][0], '9': None}
segments = {'a': None, 'b': None, 'c': None, 'd': None, 'e': None, 'f': None, 'g': None}
(segments['a'],) = numbers['7'].difference(numbers['1'])
numbers['6'] = [x for x in left if len(x) == 6 and len(x.intersection(numbers['1'])) == 1][0]
zero_and_nine = [x for x in left if len(x) == 6 and len(x.difference(numbers['6'])) != 0]
numbers['9'] = [x for x in zero_and_nine if len(x.intersection(numbers['4'])) == 4][0]
numbers['0'] = [x for x in zero_and_nine if len(x.intersection(numbers['4'])) == 3][0]
(segments['f'],) = numbers['6'].intersection(numbers['1'])
(segments['c'],) = numbers['1'].difference(set(segments['f']))
(segments['e'],) = numbers['6'].difference(numbers['9'])
(segments['d'],) = numbers['8'].difference(numbers['0'])
(segments['b'],) = numbers['4'].difference(set([x for x in segments.values() if x is not None]))
(segments['g'],) = numbers['8'].difference(set([x for x in segments.values() if x is not None]))
numbers['2'] = set([segments['a'], segments['c'], segments['d'], segments['e'], segments['g']])
numbers['5'] = set([segments['a'], segments['b'], segments['d'], segments['f'], segments['g']])
numbers['3'] = set([segments['a'], segments['c'], segments['d'], segments['f'], segments['g']])
value = 0
for digit_set in right:
value *= 10
digit = int([k for (k, v) in numbers.items() if v == digit_set][0])
value += digit
total_sum += value
print('Part 2: ', total_sum) |
#Python Lists
mylist = [ "banana", "abacate", "manga"]
print(mylist) | mylist = ['banana', 'abacate', 'manga']
print(mylist) |
# working on final project to combine all the learnt concepts into 1
# problem statement.
#The CTO wants to monitor all the computer usage by all engineers. Using Python ,
# write an automation script that will produce a report when each user logged in and out,
# and how long each user used the computers.
# writing real script:
# first sort all the processes by date using a function:
def get_event_date(event):
return event.date
#get current user and pass the sorted date function
# first sort all the processes by date using a function:
def get_event_date(event):
return event.date
#get current user and pass the sorted date function
# first sort all the processes by date using a function:
def get_event_date(event):
return event.date
#get current user and pass the sorted date function
def current_users(events):
events.sort(key=get_event_date)
#create a dictionary to store the values
machines = {}
for event in events:
#check if a mchine exist in dictionary else add
if event.machine not in machines:
machines[event.machine] = set()
if event.type == "logout":
machines[event.machine].add(event.user)
elif event.type == "logout":
machines[event.machine].remove(event.user)
return machines
#create a different function to print the report
def generate_report(machines):
for machines,users in machines.items():
#print only thoses who logged in and not those who loged in and out:
if len(users)>0:
users_list = ", ".join(users)
print("{} : {}".format(machines,users_list))
class Event:
def __init__(self, event_date, event_type, machine_name,user):
self.date = event_date
self.type = event_type
self.machine =machine_name
self.user = user
# Sample audit log: (date, type, machine, user) tuples turned into Events.
events = [
    Event(*fields)
    for fields in (
        ("2020-05-12 12:50PM", "login", "mail-server local", "owen"),
        ("2021-04-12 4:50PM", "logout", "mail-server local", "james"),
        ("2020-05-14 2:50PM", "login", "workstation local", "shem"),
        ("2020-05-1 16:50PM", "login", "mail-server local", "Timz"),
        ("2020-06-19 18:50PM", "logout", "admin server local", "brian"),
        ("2020-02-12 17:50PM", "login", "mail-server local", "chris"),
    )
]
# Compute who is on each machine, show the raw mapping, then the report.
users = current_users(events)
print(users)
generate_report(users)
# CONGRATULATIONS!
# Up next: the final project.
| def get_event_date(event):
return event.date
def get_event_date(event):
return event.date
def get_event_date(event):
return event.date
def current_users(events):
    """Map machine name -> set of users currently logged in, replayed in date order."""
    events.sort(key=get_event_date)
    machines = {}
    for event in events:
        if event.machine not in machines:
            machines[event.machine] = set()
        # BUG FIX: both branches tested 'logout', so logins were never recorded.
        if event.type == 'login':
            machines[event.machine].add(event.user)
        elif event.type == 'logout':
            # discard() tolerates a logout with no matching login.
            machines[event.machine].discard(event.user)
    return machines
def generate_report(machines):
for (machines, users) in machines.items():
if len(users) > 0:
users_list = ', '.join(users)
print('{} : {}'.format(machines, users_list))
class Event:
def __init__(self, event_date, event_type, machine_name, user):
self.date = event_date
self.type = event_type
self.machine = machine_name
self.user = user
events = [event('2020-05-12 12:50PM', 'login', 'mail-server local', 'owen'), event('2021-04-12 4:50PM', 'logout', 'mail-server local', 'james'), event('2020-05-14 2:50PM', 'login', 'workstation local', 'shem'), event('2020-05-1 16:50PM', 'login', 'mail-server local', 'Timz'), event('2020-06-19 18:50PM', 'logout', 'admin server local', 'brian'), event('2020-02-12 17:50PM', 'login', 'mail-server local', 'chris')]
users = current_users(events)
print(users)
generate_report(users) |
class RockartExamplesException(Exception):
pass
class RockartExamplesIndexError(RockartExamplesException, IndexError):
pass
class RockartExamplesValueError(RockartExamplesException, ValueError):
pass
| class Rockartexamplesexception(Exception):
pass
class Rockartexamplesindexerror(RockartExamplesException, IndexError):
pass
class Rockartexamplesvalueerror(RockartExamplesException, ValueError):
pass |
print("Kinjal Raykarmakar\nSec: CSE2H\tRoll: 29\n")
row = int(input("Enter the number of rows: "))
# Upper half: 1, 2, ..., row stars per line ("* " repeated, same trailing space).
for count in range(1, row + 1):
    print("* " * count)
# Lower half: row+1, row, ..., 1 stars per line.
for count in range(row + 1, 0, -1):
    print("* " * count)
row = int(input('Enter the number of rows: '))
for i in range(1, row + 1):
for j in range(i):
print('*', end=' ')
print()
for i in range(row + 1, 0, -1):
for j in range(i):
print('*', end=' ')
print() |
#!/usr/bin/env python3
# Split data/data.txt into per-bucket train/test CSVs, one pair of files per
# 6-hour day-boundary offset (b00, b06, b12, b18).
for hour_offset in range(0, 24, 6):
    # FIX: use context managers so all three files are closed even if a
    # parse error is raised mid-loop (originally they leaked on exception).
    with open('data/train_b{:02}.csv'.format(hour_offset), 'w', newline='') as train, \
         open('data/test_b{:02}.csv'.format(hour_offset), 'w', newline='') as test, \
         open('data/data.txt') as data:
        t = int(next(data))  # first header value (unused below)
        n, m = tuple(map(int, next(data).split()))  # grid dimensions; m unused here
        for line_num, line in enumerate(data):
            hse = line_num // n # hours since epoch
            hod = hse % 24 # hour of day
            dse = (hse + hour_offset) // 24 # days since epoch, day boundary shifted by the bucket offset
            dow = dse % 7 # day of week
            row = line_num % n
            for col, dem in enumerate(map(int, line.split())):
                # Unknown demand (-1) goes to the test file; the [:lim] slice
                # with lim=-1 drops the label column, lim=None keeps it.
                out, lim = (test, -1) if dem == -1 else (train, None)
                out.write(','.join(map(str, (hse, row, col, hod, dow, dem)[:lim])) + '\n')
| for hour_offset in range(0, 24, 6):
train = open('data/train_b{:02}.csv'.format(hour_offset), 'w', newline='')
test = open('data/test_b{:02}.csv'.format(hour_offset), 'w', newline='')
data = open('data/data.txt')
t = int(next(data))
(n, m) = tuple(map(int, next(data).split()))
for (line_num, line) in enumerate(data):
hse = line_num // n
hod = hse % 24
dse = (hse + hour_offset) // 24
dow = dse % 7
row = line_num % n
for (col, dem) in enumerate(map(int, line.split())):
(out, lim) = (test, -1) if dem == -1 else (train, None)
out.write(','.join(map(str, (hse, row, col, hod, dow, dem)[:lim])) + '\n')
data.close()
train.close()
test.close() |
# Demonstrate the list-mutation API on two sample lists.
p = list(range(1, 10))
del p[1:3]  # drop the 2nd and 3rd items
print(p[:])
p.remove(8)  # delete by value
print(p[:])
print(p.pop())  # pop and show the last element
p.clear()
print(p[:])
l = [1, 3, 4, 5, 6, 7]
l.remove(3)
print(l[:])
l.sort()
print(l[:])
l.reverse()
print(l[:])
l.clear()
print(l[:])
| p = [1, 2, 3, 4, 5, 6, 7, 8, 9]
del p[1:3]
print(p[:])
p.remove(8)
print(p[:])
print(p.pop())
p.clear()
print(p[:])
l = [1, 3, 4, 5, 6, 7]
l.remove(3)
print(l[:])
l.sort()
print(l[:])
l.reverse()
print(l[:])
l.clear()
print(l[:]) |
# -*- coding: utf-8 -*-
# Application configuration constants (Flask-style upper-case settings).
CSRF_ENABLED = True
# NOTE(review): secret key committed in source — should come from an env var
# or secrets store in a real deployment.
SECRET_KEY = "208h3oiushefo9823liukhso8dyfhsdklihf"
# NOTE(review): lowercase `debug` is likely ignored by config loaders that
# only read upper-case attributes — confirm intent.
debug = False
| csrf_enabled = True
secret_key = '208h3oiushefo9823liukhso8dyfhsdklihf'
debug = False |
def getLate():
    """Build and return a fresh Late instance."""
    return Late(**{})

class Late():
    # Class-level marker value read by consumers of getLate().
    value = 'late'
| def get_late():
v = late(**{})
return v
class Late:
value = 'late' |
formatter = "{} {} {} {}"
# Exercise positional formatting with ints, strings, booleans, the template
# itself, and a four-line rhyme — one print per argument tuple.
for args in (
    (1, 2, 3, 4),
    ("one", "two", "three", "four"),
    (True, False, False, True),
    (formatter, formatter, formatter, formatter),
    ("I had this thing.", "That you could type up right.",
     "But it didn't sing.", "So I said goodnight."),
):
    print(formatter.format(*args))
| formatter = '{} {} {} {}'
print(formatter.format(1, 2, 3, 4))
print(formatter.format('one', 'two', 'three', 'four'))
print(formatter.format(True, False, False, True))
print(formatter.format(formatter, formatter, formatter, formatter))
print(formatter.format('I had this thing.', 'That you could type up right.', "But it didn't sing.", 'So I said goodnight.')) |
# Copyright 2014 PDFium authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# Original code from V8, original license was:
# Copyright 2014 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# This file is used only by the standalone PDFium build. Under a chromium
# checkout, the src/testing/gtest.gyp file is used instead.
{
'targets': [
{
'target_name': 'gtest',
'toolsets': ['host', 'target'],
'type': 'static_library',
'sources': [
'gtest/include/gtest/gtest-death-test.h',
'gtest/include/gtest/gtest-message.h',
'gtest/include/gtest/gtest-param-test.h',
'gtest/include/gtest/gtest-printers.h',
'gtest/include/gtest/gtest-spi.h',
'gtest/include/gtest/gtest-test-part.h',
'gtest/include/gtest/gtest-typed-test.h',
'gtest/include/gtest/gtest.h',
'gtest/include/gtest/gtest_pred_impl.h',
'gtest/include/gtest/internal/gtest-death-test-internal.h',
'gtest/include/gtest/internal/gtest-filepath.h',
'gtest/include/gtest/internal/gtest-internal.h',
'gtest/include/gtest/internal/gtest-linked_ptr.h',
'gtest/include/gtest/internal/gtest-param-util-generated.h',
'gtest/include/gtest/internal/gtest-param-util.h',
'gtest/include/gtest/internal/gtest-port.h',
'gtest/include/gtest/internal/gtest-string.h',
'gtest/include/gtest/internal/gtest-tuple.h',
'gtest/include/gtest/internal/gtest-type-util.h',
'gtest/src/gtest-all.cc',
'gtest/src/gtest-death-test.cc',
'gtest/src/gtest-filepath.cc',
'gtest/src/gtest-internal-inl.h',
'gtest/src/gtest-port.cc',
'gtest/src/gtest-printers.cc',
'gtest/src/gtest-test-part.cc',
'gtest/src/gtest-typed-test.cc',
'gtest/src/gtest.cc',
'gtest-support.h',
],
'sources!': [
'gtest/src/gtest-all.cc', # Not needed by our build.
],
'include_dirs': [
'gtest',
'gtest/include',
],
'dependencies': [
'gtest_prod',
],
'defines': [
# In order to allow regex matches in gtest to be shared between Windows
# and other systems, we tell gtest to always use it's internal engine.
'GTEST_HAS_POSIX_RE=0',
# Unit tests don't require C++11, yet.
'GTEST_LANG_CXX11=0',
],
'all_dependent_settings': {
'defines': [
'GTEST_HAS_POSIX_RE=0',
'GTEST_LANG_CXX11=0',
],
},
'conditions': [
['os_posix == 1', {
'defines': [
# gtest isn't able to figure out when RTTI is disabled for gcc
# versions older than 4.3.2, and assumes it's enabled. Our Mac
# and Linux builds disable RTTI, and cannot guarantee that the
# compiler will be 4.3.2. or newer. The Mac, for example, uses
# 4.2.1 as that is the latest available on that platform. gtest
# must be instructed that RTTI is disabled here, and for any
# direct dependents that might include gtest headers.
'GTEST_HAS_RTTI=0',
],
'direct_dependent_settings': {
'defines': [
'GTEST_HAS_RTTI=0',
],
},
}],
['OS=="android"', {
'defines': [
'GTEST_HAS_CLONE=0',
],
'direct_dependent_settings': {
'defines': [
'GTEST_HAS_CLONE=0',
],
},
}],
['OS=="android"', {
# We want gtest features that use tr1::tuple, but we currently
# don't support the variadic templates used by libstdc++'s
# implementation. gtest supports this scenario by providing its
# own implementation but we must opt in to it.
'defines': [
'GTEST_USE_OWN_TR1_TUPLE=1',
# GTEST_USE_OWN_TR1_TUPLE only works if GTEST_HAS_TR1_TUPLE is set.
# gtest r625 made it so that GTEST_HAS_TR1_TUPLE is set to 0
# automatically on android, so it has to be set explicitly here.
'GTEST_HAS_TR1_TUPLE=1',
],
'direct_dependent_settings': {
'defines': [
'GTEST_USE_OWN_TR1_TUPLE=1',
'GTEST_HAS_TR1_TUPLE=1',
],
},
}],
],
'direct_dependent_settings': {
'defines': [
'UNIT_TEST',
],
'include_dirs': [
'gtest/include', # So that gtest headers can find themselves.
],
'target_conditions': [
['_type=="executable"', {
'test': 1,
'conditions': [
['OS=="mac"', {
'run_as': {
'action????': ['${BUILT_PRODUCTS_DIR}/${PRODUCT_NAME}'],
},
}],
['OS=="win"', {
'run_as': {
'action????': ['$(TargetPath)', '--gtest_print_time'],
},
}],
],
}],
],
'msvs_disabled_warnings': [4800],
},
},
{
'target_name': 'gtest_main',
'type': 'static_library',
'dependencies': [
'gtest',
],
'sources': [
'gtest/src/gtest_main.cc',
],
},
{
'target_name': 'gtest_prod',
'toolsets': ['host', 'target'],
'type': 'none',
'sources': [
'gtest/include/gtest/gtest_prod.h',
],
},
],
}
| {'targets': [{'target_name': 'gtest', 'toolsets': ['host', 'target'], 'type': 'static_library', 'sources': ['gtest/include/gtest/gtest-death-test.h', 'gtest/include/gtest/gtest-message.h', 'gtest/include/gtest/gtest-param-test.h', 'gtest/include/gtest/gtest-printers.h', 'gtest/include/gtest/gtest-spi.h', 'gtest/include/gtest/gtest-test-part.h', 'gtest/include/gtest/gtest-typed-test.h', 'gtest/include/gtest/gtest.h', 'gtest/include/gtest/gtest_pred_impl.h', 'gtest/include/gtest/internal/gtest-death-test-internal.h', 'gtest/include/gtest/internal/gtest-filepath.h', 'gtest/include/gtest/internal/gtest-internal.h', 'gtest/include/gtest/internal/gtest-linked_ptr.h', 'gtest/include/gtest/internal/gtest-param-util-generated.h', 'gtest/include/gtest/internal/gtest-param-util.h', 'gtest/include/gtest/internal/gtest-port.h', 'gtest/include/gtest/internal/gtest-string.h', 'gtest/include/gtest/internal/gtest-tuple.h', 'gtest/include/gtest/internal/gtest-type-util.h', 'gtest/src/gtest-all.cc', 'gtest/src/gtest-death-test.cc', 'gtest/src/gtest-filepath.cc', 'gtest/src/gtest-internal-inl.h', 'gtest/src/gtest-port.cc', 'gtest/src/gtest-printers.cc', 'gtest/src/gtest-test-part.cc', 'gtest/src/gtest-typed-test.cc', 'gtest/src/gtest.cc', 'gtest-support.h'], 'sources!': ['gtest/src/gtest-all.cc'], 'include_dirs': ['gtest', 'gtest/include'], 'dependencies': ['gtest_prod'], 'defines': ['GTEST_HAS_POSIX_RE=0', 'GTEST_LANG_CXX11=0'], 'all_dependent_settings': {'defines': ['GTEST_HAS_POSIX_RE=0', 'GTEST_LANG_CXX11=0']}, 'conditions': [['os_posix == 1', {'defines': ['GTEST_HAS_RTTI=0'], 'direct_dependent_settings': {'defines': ['GTEST_HAS_RTTI=0']}}], ['OS=="android"', {'defines': ['GTEST_HAS_CLONE=0'], 'direct_dependent_settings': {'defines': ['GTEST_HAS_CLONE=0']}}], ['OS=="android"', {'defines': ['GTEST_USE_OWN_TR1_TUPLE=1', 'GTEST_HAS_TR1_TUPLE=1'], 'direct_dependent_settings': {'defines': ['GTEST_USE_OWN_TR1_TUPLE=1', 'GTEST_HAS_TR1_TUPLE=1']}}]], 'direct_dependent_settings': 
{'defines': ['UNIT_TEST'], 'include_dirs': ['gtest/include'], 'target_conditions': [['_type=="executable"', {'test': 1, 'conditions': [['OS=="mac"', {'run_as': {'action????': ['${BUILT_PRODUCTS_DIR}/${PRODUCT_NAME}']}}], ['OS=="win"', {'run_as': {'action????': ['$(TargetPath)', '--gtest_print_time']}}]]}]], 'msvs_disabled_warnings': [4800]}}, {'target_name': 'gtest_main', 'type': 'static_library', 'dependencies': ['gtest'], 'sources': ['gtest/src/gtest_main.cc']}, {'target_name': 'gtest_prod', 'toolsets': ['host', 'target'], 'type': 'none', 'sources': ['gtest/include/gtest/gtest_prod.h']}]} |
activate_mse = 1
activate_adaptation_imp = 1
activate_adaptation_d1 = 1
weight_d2 = 1.0
weight_mse = 1.0
refinement = 1
n_epochs_refinement = 10
lambda_regul = [0.01]
lambda_regul_s = [0.01]
threshold_value = [0.95]
compute_variance = False
random_seed = [1985] if not compute_variance else [1985, 2184, 51, 12, 465]
class DannMNISTUSPS(object):
MAX_NB_PROCESSES = 3
DEBUG = False
BINARY = "experiments/launcher/digits_binary.py"
GRID = {
"-mode": ["dann"],
"-upper_bound": [1],
"-adaptive_lr": [1],
"-is_balanced": [1],
"-source": ["MNIST"],
"-target": ["USPS"],
"-epoch_to_start_align": [11],
"-n_epochs": [100],
"-batch_size": [128],
"-initialize_model": [1],
"-init_batch_size": [32],
"-refinement": [refinement],
"-n_epochs_refinement": [n_epochs_refinement],
"-lambda_regul": lambda_regul,
"-lambda_regul_s": lambda_regul_s,
"-threshold_value": threshold_value,
"-random_seed": random_seed
}
class DannIgnoreMNISTUSPS(object):
MAX_NB_PROCESSES = 3
DEBUG = False
BINARY = "experiments/launcher/digits_binary.py"
GRID = {
"-mode": ["dann"],
"-upper_bound": [1],
"-adaptive_lr": [1],
"-is_balanced": [1],
"-source": ["MNIST"],
"-target": ["USPS"],
"-epoch_to_start_align": [11],
"-n_epochs": [100],
"-batch_size": [128],
"-initialize_model": [1],
"-init_batch_size": [32],
"-adapt_only_first": [1],
"-refinement": [refinement],
"-n_epochs_refinement": [n_epochs_refinement],
"-lambda_regul": lambda_regul,
"-threshold_value": threshold_value,
"-random_seed": random_seed
}
class DannZeroImputMNISTUSPS(object):
MAX_NB_PROCESSES = 3
DEBUG = False
BINARY = "experiments/launcher/digits_binary.py"
GRID = {
"-mode": ["dann"],
"-upper_bound": [0],
"-adaptive_lr": [1],
"-is_balanced": [1],
"-source": ["MNIST"],
"-target": ["USPS"],
"-epoch_to_start_align": [11],
"-n_epochs": [100],
"-batch_size": [128],
"-initialize_model": [1],
"-init_batch_size": [32],
"-init_lr": [10 ** -2.5],
"-refinement": [refinement],
"-n_epochs_refinement": [n_epochs_refinement],
"-lambda_regul": lambda_regul,
"-threshold_value": threshold_value,
"-random_seed": random_seed
}
class DannImputMNISTUSPS(object):
MAX_NB_PROCESSES = 2
DEBUG = False
BINARY = "experiments/launcher/digits_binary.py"
GRID = {
"-mode": ["dann_imput"],
"-adaptive_lr": [1],
"-is_balanced": [1],
"-source": ["MNIST"],
"-target": ["USPS"],
"-epoch_to_start_align": [11],
"-stop_grad": [0],
"-n_epochs": [100],
"-batch_size": [128],
"-initialize_model": [1],
"-init_batch_size": [32],
"-weight_d2": [weight_d2],
"-weight_mse": [weight_mse],
"-activate_mse": [activate_mse],
"-activate_adaptation_imp": [activate_adaptation_imp],
"-activate_adaptation_d1": [activate_adaptation_d1],
"-init_lr": [10 ** -2],
"-refinement": [refinement],
"-n_epochs_refinement": [n_epochs_refinement],
"-lambda_regul": lambda_regul,
"-lambda_regul_s": lambda_regul_s,
"-threshold_value": threshold_value,
"-random_seed": random_seed
}
class DjdotMNISTUSPS(object):
MAX_NB_PROCESSES = 2
DEBUG = False
BINARY = "experiments/launcher/digits_binary.py"
GRID = {
"-mode": ["djdot"],
"-upper_bound": [1],
"-is_balanced": [1],
"-djdot_alpha": [0.1],
"-adaptive_lr": [1],
"-source": ["MNIST"],
"-target": ["USPS"],
"-epoch_to_start_align": [11],
"-n_epochs": [100],
"-batch_size": [500],
"-initialize_model": [1],
"-init_batch_size": [32],
"-init_lr": [10 ** -2],
"-random_seed": random_seed
}
class DjdotIgnoreMNISTUSPS(object):
MAX_NB_PROCESSES = 2
DEBUG = False
BINARY = "experiments/launcher/digits_binary.py"
GRID = {
"-mode": ["djdot"],
"-upper_bound": [1],
"-is_balanced": [1],
"-djdot_alpha": [0.1],
"-adaptive_lr": [1],
"-source": ["MNIST"],
"-target": ["USPS"],
"-epoch_to_start_align": [11],
"-n_epochs": [100],
"-batch_size": [500],
"-initialize_model": [1],
"-output_fig": [1],
"-init_batch_size": [32],
"-init_lr": [10 ** -2],
"-adapt_only_first": [1],
"-random_seed": random_seed
}
class DjdotZeroImputMNISTUSPS(object):
MAX_NB_PROCESSES = 2
DEBUG = False
BINARY = "experiments/launcher/digits_binary.py"
GRID = {
"-mode": ["djdot"],
"-upper_bound": [0],
"-adaptive_lr": [1],
"-is_balanced": [1],
"-djdot_alpha": [0.1],
"-source": ["MNIST"],
"-target": ["USPS"],
"-epoch_to_start_align": [11],
"-n_epochs": [100],
"-batch_size": [500],
"-initialize_model": [1],
"-init_batch_size": [32],
"-init_lr": [10 ** -2],
"-random_seed": random_seed
}
class DjdotImputMNISTUSPS(object):
MAX_NB_PROCESSES = 2
DEBUG = False
BINARY = "experiments/launcher/digits_binary.py"
GRID = {
"-mode": ["djdot_imput"],
"-adaptive_lr": [1],
"-source": ["MNIST"],
"-target": ["USPS"],
"-is_balanced": [1],
"-epoch_to_start_align": [11],
"-stop_grad": [1],
"-djdot_alpha": [0.1],
"-bigger_reconstructor": [1],
"-n_epochs": [100],
"-batch_size": [500],
"-initialize_model": [1],
"-init_batch_size": [32],
"-init_lr": [10 ** -2],
"-activate_mse": [activate_mse],
"-activate_adaptation_imp": [activate_adaptation_imp],
"-activate_adaptation_d1": [activate_adaptation_d1],
"-random_seed": random_seed
}
| activate_mse = 1
activate_adaptation_imp = 1
activate_adaptation_d1 = 1
weight_d2 = 1.0
weight_mse = 1.0
refinement = 1
n_epochs_refinement = 10
lambda_regul = [0.01]
lambda_regul_s = [0.01]
threshold_value = [0.95]
compute_variance = False
random_seed = [1985] if not compute_variance else [1985, 2184, 51, 12, 465]
class Dannmnistusps(object):
max_nb_processes = 3
debug = False
binary = 'experiments/launcher/digits_binary.py'
grid = {'-mode': ['dann'], '-upper_bound': [1], '-adaptive_lr': [1], '-is_balanced': [1], '-source': ['MNIST'], '-target': ['USPS'], '-epoch_to_start_align': [11], '-n_epochs': [100], '-batch_size': [128], '-initialize_model': [1], '-init_batch_size': [32], '-refinement': [refinement], '-n_epochs_refinement': [n_epochs_refinement], '-lambda_regul': lambda_regul, '-lambda_regul_s': lambda_regul_s, '-threshold_value': threshold_value, '-random_seed': random_seed}
class Dannignoremnistusps(object):
max_nb_processes = 3
debug = False
binary = 'experiments/launcher/digits_binary.py'
grid = {'-mode': ['dann'], '-upper_bound': [1], '-adaptive_lr': [1], '-is_balanced': [1], '-source': ['MNIST'], '-target': ['USPS'], '-epoch_to_start_align': [11], '-n_epochs': [100], '-batch_size': [128], '-initialize_model': [1], '-init_batch_size': [32], '-adapt_only_first': [1], '-refinement': [refinement], '-n_epochs_refinement': [n_epochs_refinement], '-lambda_regul': lambda_regul, '-threshold_value': threshold_value, '-random_seed': random_seed}
class Dannzeroimputmnistusps(object):
max_nb_processes = 3
debug = False
binary = 'experiments/launcher/digits_binary.py'
grid = {'-mode': ['dann'], '-upper_bound': [0], '-adaptive_lr': [1], '-is_balanced': [1], '-source': ['MNIST'], '-target': ['USPS'], '-epoch_to_start_align': [11], '-n_epochs': [100], '-batch_size': [128], '-initialize_model': [1], '-init_batch_size': [32], '-init_lr': [10 ** (-2.5)], '-refinement': [refinement], '-n_epochs_refinement': [n_epochs_refinement], '-lambda_regul': lambda_regul, '-threshold_value': threshold_value, '-random_seed': random_seed}
class Dannimputmnistusps(object):
max_nb_processes = 2
debug = False
binary = 'experiments/launcher/digits_binary.py'
grid = {'-mode': ['dann_imput'], '-adaptive_lr': [1], '-is_balanced': [1], '-source': ['MNIST'], '-target': ['USPS'], '-epoch_to_start_align': [11], '-stop_grad': [0], '-n_epochs': [100], '-batch_size': [128], '-initialize_model': [1], '-init_batch_size': [32], '-weight_d2': [weight_d2], '-weight_mse': [weight_mse], '-activate_mse': [activate_mse], '-activate_adaptation_imp': [activate_adaptation_imp], '-activate_adaptation_d1': [activate_adaptation_d1], '-init_lr': [10 ** (-2)], '-refinement': [refinement], '-n_epochs_refinement': [n_epochs_refinement], '-lambda_regul': lambda_regul, '-lambda_regul_s': lambda_regul_s, '-threshold_value': threshold_value, '-random_seed': random_seed}
class Djdotmnistusps(object):
max_nb_processes = 2
debug = False
binary = 'experiments/launcher/digits_binary.py'
grid = {'-mode': ['djdot'], '-upper_bound': [1], '-is_balanced': [1], '-djdot_alpha': [0.1], '-adaptive_lr': [1], '-source': ['MNIST'], '-target': ['USPS'], '-epoch_to_start_align': [11], '-n_epochs': [100], '-batch_size': [500], '-initialize_model': [1], '-init_batch_size': [32], '-init_lr': [10 ** (-2)], '-random_seed': random_seed}
class Djdotignoremnistusps(object):
max_nb_processes = 2
debug = False
binary = 'experiments/launcher/digits_binary.py'
grid = {'-mode': ['djdot'], '-upper_bound': [1], '-is_balanced': [1], '-djdot_alpha': [0.1], '-adaptive_lr': [1], '-source': ['MNIST'], '-target': ['USPS'], '-epoch_to_start_align': [11], '-n_epochs': [100], '-batch_size': [500], '-initialize_model': [1], '-output_fig': [1], '-init_batch_size': [32], '-init_lr': [10 ** (-2)], '-adapt_only_first': [1], '-random_seed': random_seed}
class Djdotzeroimputmnistusps(object):
max_nb_processes = 2
debug = False
binary = 'experiments/launcher/digits_binary.py'
grid = {'-mode': ['djdot'], '-upper_bound': [0], '-adaptive_lr': [1], '-is_balanced': [1], '-djdot_alpha': [0.1], '-source': ['MNIST'], '-target': ['USPS'], '-epoch_to_start_align': [11], '-n_epochs': [100], '-batch_size': [500], '-initialize_model': [1], '-init_batch_size': [32], '-init_lr': [10 ** (-2)], '-random_seed': random_seed}
class Djdotimputmnistusps(object):
max_nb_processes = 2
debug = False
binary = 'experiments/launcher/digits_binary.py'
grid = {'-mode': ['djdot_imput'], '-adaptive_lr': [1], '-source': ['MNIST'], '-target': ['USPS'], '-is_balanced': [1], '-epoch_to_start_align': [11], '-stop_grad': [1], '-djdot_alpha': [0.1], '-bigger_reconstructor': [1], '-n_epochs': [100], '-batch_size': [500], '-initialize_model': [1], '-init_batch_size': [32], '-init_lr': [10 ** (-2)], '-activate_mse': [activate_mse], '-activate_adaptation_imp': [activate_adaptation_imp], '-activate_adaptation_d1': [activate_adaptation_d1], '-random_seed': random_seed} |
# flopy version file automatically created using...pre-commit.py
# created on...March 20, 2018 17:03:11
# Version components; assembled into the dotted strings below.
major = 3
minor = 2
micro = 9
# Sequential build number appended to __build__.
build = 60
# Presumably the repository commit count at generation time — confirm
# against pre-commit.py.
commit = 2731
__version__ = '{:d}.{:d}.{:d}'.format(major, minor, micro)
__build__ = '{:d}.{:d}.{:d}.{:d}'.format(major, minor, micro, build)
__git_commit__ = '{:d}'.format(commit)
| major = 3
minor = 2
micro = 9
build = 60
commit = 2731
__version__ = '{:d}.{:d}.{:d}'.format(major, minor, micro)
__build__ = '{:d}.{:d}.{:d}.{:d}'.format(major, minor, micro, build)
__git_commit__ = '{:d}'.format(commit) |
#!/usr/bin/env python3
def main():
    """Split server names from dnsservers.txt into per-TLD output files.

    Names ending in "org" are appended to org-domain.txt, names ending in
    "com" to com-domain.txt; anything else is ignored.
    """
    # FIX: open each output once in append mode instead of reopening it for
    # every matching input line; `with` also guarantees the handles close.
    with open("dnsservers.txt", "r") as dnsfile, \
         open("org-domain.txt", "a") as orgfile, \
         open("com-domain.txt", "a") as comfile:
        for svr in dnsfile:
            svr = svr.rstrip('\n')  # strip trailing newline (absent on last line)
            if svr.endswith('org'):
                orgfile.write(svr + "\n")
            elif svr.endswith('com'):
                comfile.write(svr + "\n")
main()
| def main():
with open('dnsservers.txt', 'r') as dnsfile:
for svr in dnsfile:
svr = svr.rstrip('\n')
if svr.endswith('org'):
with open('org-domain.txt', 'a') as srvfile:
srvfile.write(svr + '\n')
elif svr.endswith('com'):
with open('com-domain.txt', 'a') as srvfile:
srvfile.write(svr + '\n')
main() |
a, b = map(int, input('').split(' '))
n = int(input(''))
ans = 0
# Presumably counts lines where item `a` has more +a than -a entries and
# likewise for `b` — intent inferred from the counting below.
for _ in range(n):
    # BUG FIX: `abs(int(i)) == a or b` parsed as `(abs(int(i)) == a) or b`,
    # which is always truthy for nonzero b, so nothing was filtered; the
    # comprehension variable also shadowed the outer loop index `i`.
    # The final counts are unaffected (only ±a/±b are counted), so this is
    # behavior-compatible.
    shop = [int(tok) for tok in input('').split(' ') if abs(int(tok)) in (a, b)]
    if shop.count(a) > shop.count(-a) and shop.count(b) > shop.count(-b):
        ans += 1
print(ans) | (a, b) = map(int, input('').split(' '))
n = int(input(''))
ans = 0
for i in range(n):
shop = [int(i) for i in input('').split(' ') if abs(int(i)) == a or b]
if shop.count(a) > shop.count(-a) and shop.count(b) > shop.count(-b):
ans += 1
print(ans) |
# break demo: stop printing greetings once "World" has been shown.
greetings = ["Hello", "World", "!!!"]
for word in greetings:
    print(word)
    if word == "World":
        break
print()
# continue demo: print the even numbers 0..20, skipping 10.
for number in range(0, 22, 2):
    if number == 10:
        continue
    print(number)
input("Press Enter key to exit ")
| greetings = ['Hello', 'World', '!!!']
for x in greetings:
print(x)
if x == 'World':
break
print()
for x in range(0, 22, 2):
if x == 10:
continue
print(x)
input('Press Enter key to exit ') |
# AMQP broker connection settings — presumably RabbitMQ defaults (ports
# 5672/15672, guest credentials); confirm against the deployment.
HOST = '127.0.0.1'
USERNAME = 'guest'
PASSWORD = 'guest'
# %2F is the URL-encoded default vhost "/".
URI = 'amqp://guest:guest@127.0.0.1:5672/%2F'
# Management/HTTP API endpoint.
HTTP_URL = 'http://127.0.0.1:15672'
| host = '127.0.0.1'
username = 'guest'
password = 'guest'
uri = 'amqp://guest:guest@127.0.0.1:5672/%2F'
http_url = 'http://127.0.0.1:15672' |
# Bazel rule: wrap an executable so extra data files ride along in its runfiles.
load("@bazel_skylib//lib:paths.bzl", "paths")
# Implementation for add_data: symlink the wrapped executable under this
# target's name and merge the extra data files into its runfiles.
def _add_data_impl(ctx):
    # Preserve the wrapped binary's extension (e.g. ".exe" on Windows).
    (_, extension) = paths.split_extension(ctx.executable.executable.path)
    executable = ctx.actions.declare_file(
        ctx.label.name + extension,
    )
    # Symlink rather than copy so the wrapper stays in sync with the original.
    ctx.actions.symlink(
        output = executable,
        target_file = ctx.executable.executable,
        is_executable = True,
    )
    # Seed runfiles with the symlink, the real binary, and the declared data...
    runfiles = ctx.runfiles(files = [executable, ctx.executable.executable] + ctx.files.data)
    # ...then merge the runfiles the executable and each data dep already carry.
    runfiles = runfiles.merge(ctx.attr.executable[DefaultInfo].default_runfiles)
    for data_dep in ctx.attr.data:
        runfiles = runfiles.merge(data_dep[DefaultInfo].default_runfiles)
    return [DefaultInfo(
        executable = executable,
        files = depset(direct = [executable]),
        runfiles = runfiles,
    )]
add_data = rule(
    _add_data_impl,
    attrs = {
        "executable": attr.label(
            executable = True,
            cfg = "target",
            doc = "Create a symlink to this executable",
        ),
        "data": attr.label_list(
            allow_files = True,
            doc = "Add these data files to the executable's runfiles",
        ),
    },
    executable = True,
    doc = "Creates a new target for the given executable with additional runfiles.",
)
load('@bazel_skylib//lib:paths.bzl', 'paths')
def _add_data_impl(ctx):
    (_, extension) = paths.split_extension(ctx.executable.executable.path)
    executable = ctx.actions.declare_file(ctx.label.name + extension)
    ctx.actions.symlink(output=executable, target_file=ctx.executable.executable, is_executable=True)
    runfiles = ctx.runfiles(files=[executable, ctx.executable.executable] + ctx.files.data)
    runfiles = runfiles.merge(ctx.attr.executable[DefaultInfo].default_runfiles)
    for data_dep in ctx.attr.data:
        runfiles = runfiles.merge(data_dep[DefaultInfo].default_runfiles)
    # BUG FIX: the provider is the Starlark builtin `DefaultInfo`;
    # lowercase `default_info` is undefined in Starlark.
    return [DefaultInfo(executable=executable, files=depset(direct=[executable]), runfiles=runfiles)]
add_data = rule(_add_data_impl, attrs={'executable': attr.label(executable=True, cfg='target', doc='Create a symlink to this executable'), 'data': attr.label_list(allow_files=True, doc="Add these data files to the executable's runfiles")}, executable=True, doc='Creates a new target for the given executable with additional runfiles.') |
# Quiz: what does this script print?  Each assignment rebinds `a`,
# so only the last value survives.  Answer: 3.
a = 1
a = 2
a = 3
print(a)
| a = 1
a = 2
a = 3
print(a) |
#addintersert3.py
def addInterest(balances, rate):
    """Grow every balance in place by the given interest rate."""
    for idx, balance in enumerate(balances):
        balances[idx] = balance * (1 + rate)

def main():
    # addInterest mutates the list, so the printed amounts include interest.
    amounts = [1000, 105, 3500, 739]
    rate = 0.05
    addInterest(amounts, rate)
    print(amounts)
main() | def add_interest(balances, rate):
for i in range(len(balances)):
balances[i] = balances[i] * (1 + rate)
def main():
amounts = [1000, 105, 3500, 739]
rate = 0.05
add_interest(amounts, rate)
print(amounts)
main() |
# INTERNAL_ONLY_PROPERTIES defines the properties in the config that, while settable, should
# not be documented for external users. These will generally be used for internal test or only
# given to customers when they have been briefed on the side effects of using them.
INTERNAL_ONLY_PROPERTIES = {
"__module__",
"__doc__",
"create_transaction",
"SESSION_COOKIE_NAME",
"SESSION_COOKIE_HTTPONLY",
"SESSION_COOKIE_SAMESITE",
"DATABASE_SECRET_KEY",
"V22_NAMESPACE_BLACKLIST",
"MAXIMUM_CNR_LAYER_SIZE",
"OCI_NAMESPACE_WHITELIST",
"FEATURE_GENERAL_OCI_SUPPORT",
"FEATURE_HELM_OCI_SUPPORT",
"FEATURE_NAMESPACE_GARBAGE_COLLECTION",
"FEATURE_REPOSITORY_GARBAGE_COLLECTION",
"FEATURE_REPOSITORY_ACTION_COUNTER",
"APP_REGISTRY_PACKAGE_LIST_CACHE_WHITELIST",
"APP_REGISTRY_SHOW_PACKAGE_CACHE_WHITELIST",
"FEATURE_MANIFEST_SIZE_BACKFILL",
"TESTING",
"SEND_FILE_MAX_AGE_DEFAULT",
"DISABLED_FOR_AUDIT_LOGS",
"DISABLED_FOR_PULL_LOGS",
"FEATURE_DISABLE_PULL_LOGS_FOR_FREE_NAMESPACES",
"FEATURE_CLEAR_EXPIRED_RAC_ENTRIES",
"ACTION_LOG_MAX_PAGE",
"NON_RATE_LIMITED_NAMESPACES",
"REPLICATION_QUEUE_NAME",
"DOCKERFILE_BUILD_QUEUE_NAME",
"CHUNK_CLEANUP_QUEUE_NAME",
"SECURITY_SCANNER_ISSUER_NAME",
"NOTIFICATION_QUEUE_NAME",
"REPOSITORY_GC_QUEUE_NAME",
"NAMESPACE_GC_QUEUE_NAME",
"EXPORT_ACTION_LOGS_QUEUE_NAME",
"SECSCAN_V4_NOTIFICATION_QUEUE_NAME",
"FEATURE_BILLING",
"BILLING_TYPE",
"INSTANCE_SERVICE_KEY_LOCATION",
"INSTANCE_SERVICE_KEY_REFRESH",
"INSTANCE_SERVICE_KEY_SERVICE",
"INSTANCE_SERVICE_KEY_KID_LOCATION",
"INSTANCE_SERVICE_KEY_EXPIRATION",
"UNAPPROVED_SERVICE_KEY_TTL_SEC",
"EXPIRED_SERVICE_KEY_TTL_SEC",
"REGISTRY_JWT_AUTH_MAX_FRESH_S",
"SERVICE_LOG_ACCOUNT_ID",
"BUILDLOGS_OPTIONS",
"LIBRARY_NAMESPACE",
"STAGGER_WORKERS",
"QUEUE_WORKER_METRICS_REFRESH_SECONDS",
"PUSH_TEMP_TAG_EXPIRATION_SEC",
"GARBAGE_COLLECTION_FREQUENCY",
"PAGE_TOKEN_KEY",
"BUILD_MANAGER",
"JWTPROXY_AUDIENCE",
"JWTPROXY_SIGNER",
"SECURITY_SCANNER_INDEXING_MIN_ID",
"SECURITY_SCANNER_V4_REINDEX_THRESHOLD",
"STATIC_SITE_BUCKET",
"LABEL_KEY_RESERVED_PREFIXES",
"TEAM_SYNC_WORKER_FREQUENCY",
"JSONIFY_PRETTYPRINT_REGULAR",
"TUF_GUN_PREFIX",
"LOGGING_LEVEL",
"SIGNED_GRANT_EXPIRATION_SEC",
"PROMETHEUS_PUSHGATEWAY_URL",
"DB_TRANSACTION_FACTORY",
"NOTIFICATION_SEND_TIMEOUT",
"QUEUE_METRICS_TYPE",
"MAIL_FAIL_SILENTLY",
"LOCAL_OAUTH_HANDLER",
"USE_CDN",
"ANALYTICS_TYPE",
"LAST_ACCESSED_UPDATE_THRESHOLD_S",
"GREENLET_TRACING",
"EXCEPTION_LOG_TYPE",
"SENTRY_DSN",
"SENTRY_PUBLIC_DSN",
"BILLED_NAMESPACE_MAXIMUM_BUILD_COUNT",
"THREAT_NAMESPACE_MAXIMUM_BUILD_COUNT",
"IP_DATA_API_KEY",
"SECURITY_SCANNER_ENDPOINT_BATCH",
"SECURITY_SCANNER_API_TIMEOUT_SECONDS",
"SECURITY_SCANNER_API_TIMEOUT_POST_SECONDS",
"SECURITY_SCANNER_ENGINE_VERSION_TARGET",
"SECURITY_SCANNER_READONLY_FAILOVER_ENDPOINTS",
"SECURITY_SCANNER_API_VERSION",
"REPO_MIRROR_INTERVAL",
"DATA_MODEL_CACHE_CONFIG",
# TODO: move this into the schema once we support signing in QE.
"FEATURE_SIGNING",
"TUF_SERVER",
"V1_ONLY_DOMAIN",
"LOGS_MODEL",
"LOGS_MODEL_CONFIG",
"APP_REGISTRY_RESULTS_LIMIT",
"V3_UPGRADE_MODE", # Deprecated old flag
"ACCOUNT_RECOVERY_MODE",
}
CONFIG_SCHEMA = {
"type": "object",
"description": "Schema for Quay configuration",
"required": [
"PREFERRED_URL_SCHEME",
"SERVER_HOSTNAME",
"DB_URI",
"AUTHENTICATION_TYPE",
"DISTRIBUTED_STORAGE_CONFIG",
"BUILDLOGS_REDIS",
"USER_EVENTS_REDIS",
"DISTRIBUTED_STORAGE_PREFERENCE",
"DEFAULT_TAG_EXPIRATION",
"TAG_EXPIRATION_OPTIONS",
],
"properties": {
"REGISTRY_STATE": {
"type": "string",
"description": "The state of the registry.",
"enum": ["normal", "readonly"],
"x-example": "readonly",
},
# Hosting.
"PREFERRED_URL_SCHEME": {
"type": "string",
"description": "The URL scheme to use when hitting Quay. If Quay is behind SSL *at all*, this *must* be `https`",
"enum": ["http", "https"],
"x-example": "https",
},
"SERVER_HOSTNAME": {
"type": "string",
"description": "The URL at which Quay is accessible, without the scheme.",
"x-example": "quay.io",
},
"EXTERNAL_TLS_TERMINATION": {
"type": "boolean",
"description": "If TLS is supported, but terminated at a layer before Quay, must be true.",
"x-example": True,
},
# SSL/TLS.
"SSL_CIPHERS": {
"type": "array",
"description": "If specified, the nginx-defined list of SSL ciphers to enabled and disabled",
"x-example": ["CAMELLIA", "!3DES"],
"x-reference": "http://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_ciphers",
},
"SSL_PROTOCOLS": {
"type": "array",
"description": "If specified, the nginx-defined list of SSL protocols to enabled and disabled",
"x-example": ["TLSv1.1", "TLSv1.2"],
"x-reference": "http://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_protocols",
},
# User-visible configuration.
"REGISTRY_TITLE": {
"type": "string",
"description": "If specified, the long-form title for the registry. Defaults to `Red Hat Quay`.",
"x-example": "Corp Container Service",
},
"REGISTRY_TITLE_SHORT": {
"type": "string",
"description": "If specified, the short-form title for the registry. Defaults to `Red Hat Quay`.",
"x-example": "CCS",
},
"CONTACT_INFO": {
"type": "array",
"uniqueItems": True,
"description": "If specified, contact information to display on the contact page. "
+ "If only a single piece of contact information is specified, the contact footer will link directly.",
"items": [
{
"type": "string",
"pattern": "^mailto:(.)+$",
"x-example": "mailto:admin@example.com",
"description": "Adds a link to send an e-mail",
},
{
"type": "string",
"pattern": "^irc://(.)+$",
"x-example": "irc://chat.freenode.net:6665/quay",
"description": "Adds a link to visit an IRC chat room",
},
{
"type": "string",
"pattern": "^tel:(.)+$",
"x-example": "tel:+1-888-930-3475",
"description": "Adds a link to call a phone number",
},
{
"type": "string",
"pattern": "^http(s)?://(.)+$",
"x-example": "https://twitter.com/quayio",
"description": "Adds a link to a defined URL",
},
],
},
"SEARCH_RESULTS_PER_PAGE": {
"type": "number",
"description": "Number of results returned per page by search page. Defaults to 10",
"x-example": 10,
},
"SEARCH_MAX_RESULT_PAGE_COUNT": {
"type": "number",
"description": "Maximum number of pages the user can paginate in search before they are limited. Defaults to 10",
"x-example": 10,
},
# E-mail.
"FEATURE_MAILING": {
"type": "boolean",
"description": "Whether emails are enabled. Defaults to True",
"x-example": True,
},
"MAIL_SERVER": {
"type": "string",
"description": "The SMTP server to use for sending e-mails. Only required if FEATURE_MAILING is set to true.",
"x-example": "smtp.somedomain.com",
},
"MAIL_USE_TLS": {
"type": "boolean",
"description": "If specified, whether to use TLS for sending e-mails.",
"x-example": True,
},
"MAIL_PORT": {
"type": "number",
"description": "The SMTP port to use. If not specified, defaults to 587.",
"x-example": 588,
},
"MAIL_USERNAME": {
"type": ["string", "null"],
"description": "The SMTP username to use when sending e-mails.",
"x-example": "myuser",
},
"MAIL_PASSWORD": {
"type": ["string", "null"],
"description": "The SMTP password to use when sending e-mails.",
"x-example": "mypassword",
},
"MAIL_DEFAULT_SENDER": {
"type": ["string", "null"],
"description": "If specified, the e-mail address used as the `from` when Quay sends e-mails. If none, defaults to `admin@example.com`.",
"x-example": "support@myco.com",
},
# Database.
"DB_URI": {
"type": "string",
"description": "The URI at which to access the database, including any credentials.",
"x-example": "mysql+pymysql://username:password@dns.of.database/quay",
"x-reference": "https://www.postgresql.org/docs/9.3/static/libpq-connect.html#AEN39495",
},
"DB_CONNECTION_ARGS": {
"type": "object",
"description": "If specified, connection arguments for the database such as timeouts and SSL.",
"properties": {
"threadlocals": {
"type": "boolean",
"description": "Whether to use thread-local connections. Should *ALWAYS* be `true`",
},
"autorollback": {
"type": "boolean",
"description": "Whether to use auto-rollback connections. Should *ALWAYS* be `true`",
},
"ssl": {
"type": "object",
"description": "SSL connection configuration",
"properties": {
"ca": {
"type": "string",
"description": "*Absolute container path* to the CA certificate to use for SSL connections",
"x-example": "conf/stack/ssl-ca-cert.pem",
},
},
"required": ["ca"],
},
},
"required": ["threadlocals", "autorollback"],
},
"ALLOW_PULLS_WITHOUT_STRICT_LOGGING": {
"type": "boolean",
"description": "If true, pulls in which the pull audit log entry cannot be written will "
+ "still succeed. Useful if the database can fallback into a read-only state "
+ "and it is desired for pulls to continue during that time. Defaults to False.",
"x-example": True,
},
# Storage.
"FEATURE_STORAGE_REPLICATION": {
"type": "boolean",
"description": "Whether to automatically replicate between storage engines. Defaults to False",
"x-example": False,
},
"FEATURE_PROXY_STORAGE": {
"type": "boolean",
"description": "Whether to proxy all direct download URLs in storage via the registry nginx. Defaults to False",
"x-example": False,
},
"MAXIMUM_LAYER_SIZE": {
"type": "string",
"description": "Maximum allowed size of an image layer. Defaults to 20G",
"x-example": "100G",
"pattern": "^[0-9]+(G|M)$",
},
"DISTRIBUTED_STORAGE_CONFIG": {
"type": "object",
"description": "Configuration for storage engine(s) to use in Quay. Each key is a unique ID"
+ " for a storage engine, with the value being a tuple of the type and "
+ " configuration for that engine.",
"x-example": {
"local_storage": ["LocalStorage", {"storage_path": "some/path/"}],
},
"items": {
"type": "array",
},
},
"DISTRIBUTED_STORAGE_PREFERENCE": {
"type": "array",
"description": "The preferred storage engine(s) (by ID in DISTRIBUTED_STORAGE_CONFIG) to "
+ "use. A preferred engine means it is first checked for pullig and images are "
+ "pushed to it.",
"items": {
"type": "string",
"uniqueItems": True,
},
"x-example": ["s3_us_east", "s3_us_west"],
},
"DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS": {
"type": "array",
"description": "The list of storage engine(s) (by ID in DISTRIBUTED_STORAGE_CONFIG) whose "
+ "images should be fully replicated, by default, to all other storage engines.",
"items": {
"type": "string",
"uniqueItems": True,
},
"x-example": ["s3_us_east", "s3_us_west"],
},
"USERFILES_LOCATION": {
"type": "string",
"description": "ID of the storage engine in which to place user-uploaded files",
"x-example": "s3_us_east",
},
"USERFILES_PATH": {
"type": "string",
"description": "Path under storage in which to place user-uploaded files",
"x-example": "userfiles",
},
"ACTION_LOG_ARCHIVE_LOCATION": {
"type": "string",
"description": "If action log archiving is enabled, the storage engine in which to place the "
+ "archived data.",
"x-example": "s3_us_east",
},
"ACTION_LOG_ARCHIVE_PATH": {
"type": "string",
"description": "If action log archiving is enabled, the path in storage in which to place the "
+ "archived data.",
"x-example": "archives/actionlogs",
},
"ACTION_LOG_ROTATION_THRESHOLD": {
"type": "string",
"description": "If action log archiving is enabled, the time interval after which to "
+ "archive data.",
"x-example": "30d",
},
"LOG_ARCHIVE_LOCATION": {
"type": "string",
"description": "If builds are enabled, the storage engine in which to place the "
+ "archived build logs.",
"x-example": "s3_us_east",
},
"LOG_ARCHIVE_PATH": {
"type": "string",
"description": "If builds are enabled, the path in storage in which to place the "
+ "archived build logs.",
"x-example": "archives/buildlogs",
},
# Authentication.
"AUTHENTICATION_TYPE": {
"type": "string",
"description": "The authentication engine to use for credential authentication.",
"x-example": "Database",
"enum": ["Database", "LDAP", "JWT", "Keystone", "OIDC", "AppToken"],
},
"SUPER_USERS": {
"type": "array",
"description": "Quay usernames of those users to be granted superuser privileges",
"uniqueItems": True,
"items": {
"type": "string",
},
},
"DIRECT_OAUTH_CLIENTID_WHITELIST": {
"type": "array",
"description": "A list of client IDs of *Quay-managed* applications that are allowed "
+ "to perform direct OAuth approval without user approval.",
"x-reference": "https://coreos.com/quay-enterprise/docs/latest/direct-oauth.html",
"uniqueItems": True,
"items": {
"type": "string",
},
},
# Redis.
"BUILDLOGS_REDIS": {
"type": "object",
"description": "Connection information for Redis for build logs caching",
"required": ["host"],
"properties": {
"host": {
"type": "string",
"description": "The hostname at which Redis is accessible",
"x-example": "my.redis.cluster",
},
"port": {
"type": "number",
"description": "The port at which Redis is accessible",
"x-example": 1234,
},
"password": {
"type": "string",
"description": "The password to connect to the Redis instance",
"x-example": "mypassword",
},
},
},
"USER_EVENTS_REDIS": {
"type": "object",
"description": "Connection information for Redis for user event handling",
"required": ["host"],
"properties": {
"host": {
"type": "string",
"description": "The hostname at which Redis is accessible",
"x-example": "my.redis.cluster",
},
"port": {
"type": "number",
"description": "The port at which Redis is accessible",
"x-example": 1234,
},
"password": {
"type": "string",
"description": "The password to connect to the Redis instance",
"x-example": "mypassword",
},
},
},
# OAuth configuration.
"GITHUB_LOGIN_CONFIG": {
"type": ["object", "null"],
"description": "Configuration for using GitHub (Enterprise) as an external login provider",
"required": ["CLIENT_ID", "CLIENT_SECRET"],
"x-reference": "https://coreos.com/quay-enterprise/docs/latest/github-auth.html",
"properties": {
"GITHUB_ENDPOINT": {
"type": "string",
"description": "The endpoint of the GitHub (Enterprise) being hit",
"x-example": "https://github.com/",
},
"API_ENDPOINT": {
"type": "string",
"description": "The endpoint of the GitHub (Enterprise) API to use. Must be overridden for github.com",
"x-example": "https://api.github.com/",
},
"CLIENT_ID": {
"type": "string",
"description": "The registered client ID for this Quay instance; cannot be shared with GITHUB_TRIGGER_CONFIG",
"x-example": "0e8dbe15c4c7630b6780",
"x-reference": "https://coreos.com/quay-enterprise/docs/latest/github-app.html",
},
"CLIENT_SECRET": {
"type": "string",
"description": "The registered client secret for this Quay instance",
"x-example": "e4a58ddd3d7408b7aec109e85564a0d153d3e846",
"x-reference": "https://coreos.com/quay-enterprise/docs/latest/github-app.html",
},
"ORG_RESTRICT": {
"type": "boolean",
"description": "If true, only users within the organization whitelist can login using this provider",
"x-example": True,
},
"ALLOWED_ORGANIZATIONS": {
"type": "array",
"description": "The names of the GitHub (Enterprise) organizations whitelisted to work with the ORG_RESTRICT option",
"uniqueItems": True,
"items": {
"type": "string",
},
},
},
},
"BITBUCKET_TRIGGER_CONFIG": {
"type": ["object", "null"],
"description": "Configuration for using BitBucket for build triggers",
"required": ["CONSUMER_KEY", "CONSUMER_SECRET"],
"x-reference": "https://coreos.com/quay-enterprise/docs/latest/bitbucket-build.html",
"properties": {
"CONSUMER_KEY": {
"type": "string",
"description": "The registered consumer key (client ID) for this Quay instance",
"x-example": "0e8dbe15c4c7630b6780",
},
"CONSUMER_SECRET": {
"type": "string",
"description": "The registered consumer secret (client secret) for this Quay instance",
"x-example": "e4a58ddd3d7408b7aec109e85564a0d153d3e846",
},
},
},
"GITHUB_TRIGGER_CONFIG": {
"type": ["object", "null"],
"description": "Configuration for using GitHub (Enterprise) for build triggers",
"required": ["GITHUB_ENDPOINT", "CLIENT_ID", "CLIENT_SECRET"],
"x-reference": "https://coreos.com/quay-enterprise/docs/latest/github-build.html",
"properties": {
"GITHUB_ENDPOINT": {
"type": "string",
"description": "The endpoint of the GitHub (Enterprise) being hit",
"x-example": "https://github.com/",
},
"API_ENDPOINT": {
"type": "string",
"description": "The endpoint of the GitHub (Enterprise) API to use. Must be overridden for github.com",
"x-example": "https://api.github.com/",
},
"CLIENT_ID": {
"type": "string",
"description": "The registered client ID for this Quay instance; cannot be shared with GITHUB_LOGIN_CONFIG",
"x-example": "0e8dbe15c4c7630b6780",
"x-reference": "https://coreos.com/quay-enterprise/docs/latest/github-app.html",
},
"CLIENT_SECRET": {
"type": "string",
"description": "The registered client secret for this Quay instance",
"x-example": "e4a58ddd3d7408b7aec109e85564a0d153d3e846",
"x-reference": "https://coreos.com/quay-enterprise/docs/latest/github-app.html",
},
},
},
"GOOGLE_LOGIN_CONFIG": {
"type": ["object", "null"],
"description": "Configuration for using Google for external authentication",
"required": ["CLIENT_ID", "CLIENT_SECRET"],
"properties": {
"CLIENT_ID": {
"type": "string",
"description": "The registered client ID for this Quay instance",
"x-example": "0e8dbe15c4c7630b6780",
},
"CLIENT_SECRET": {
"type": "string",
"description": "The registered client secret for this Quay instance",
"x-example": "e4a58ddd3d7408b7aec109e85564a0d153d3e846",
},
},
},
"GITLAB_TRIGGER_CONFIG": {
"type": ["object", "null"],
"description": "Configuration for using Gitlab (Enterprise) for external authentication",
"required": ["GITLAB_ENDPOINT", "CLIENT_ID", "CLIENT_SECRET"],
"properties": {
"GITLAB_ENDPOINT": {
"type": "string",
"description": "The endpoint at which Gitlab(Enterprise) is running",
"x-example": "https://gitlab.com",
},
"CLIENT_ID": {
"type": "string",
"description": "The registered client ID for this Quay instance",
"x-example": "0e8dbe15c4c7630b6780",
},
"CLIENT_SECRET": {
"type": "string",
"description": "The registered client secret for this Quay instance",
"x-example": "e4a58ddd3d7408b7aec109e85564a0d153d3e846",
},
},
},
"BRANDING": {
"type": ["object", "null"],
"description": "Custom branding for logos and URLs in the Quay UI",
"required": ["logo"],
"properties": {
"logo": {
"type": "string",
"description": "Main logo image URL",
"x-example": "/static/img/quay-horizontal-color.svg",
},
"footer_img": {
"type": "string",
"description": "Logo for UI footer",
"x-example": "/static/img/RedHat.svg",
},
"footer_url": {
"type": "string",
"description": "Link for footer image",
"x-example": "https://redhat.com",
},
},
},
"DOCUMENTATION_ROOT": {"type": "string", "description": "Root URL for documentation links"},
# Health.
"HEALTH_CHECKER": {
"description": "The configured health check.",
"x-example": ("RDSAwareHealthCheck", {"access_key": "foo", "secret_key": "bar"}),
},
# Metrics.
"PROMETHEUS_NAMESPACE": {
"type": "string",
"description": "The prefix applied to all exposed Prometheus metrics. Defaults to `quay`",
"x-example": "myregistry",
},
# Misc configuration.
"BLACKLIST_V2_SPEC": {
"type": "string",
"description": "The Docker CLI versions to which Quay will respond that V2 is *unsupported*. Defaults to `<1.6.0`",
"x-reference": "http://pythonhosted.org/semantic_version/reference.html#semantic_version.Spec",
"x-example": "<1.8.0",
},
"USER_RECOVERY_TOKEN_LIFETIME": {
"type": "string",
"description": "The length of time a token for recovering a user accounts is valid. Defaults to 30m.",
"x-example": "10m",
"pattern": "^[0-9]+(w|m|d|h|s)$",
},
"SESSION_COOKIE_SECURE": {
"type": "boolean",
"description": "Whether the `secure` property should be set on session cookies. "
+ "Defaults to False. Recommended to be True for all installations using SSL.",
"x-example": True,
"x-reference": "https://en.wikipedia.org/wiki/Secure_cookies",
},
"PUBLIC_NAMESPACES": {
"type": "array",
"description": "If a namespace is defined in the public namespace list, then it will appear on *all*"
+ " user's repository list pages, regardless of whether that user is a member of the namespace."
+ ' Typically, this is used by an enterprise customer in configuring a set of "well-known"'
+ " namespaces.",
"uniqueItems": True,
"items": {
"type": "string",
},
},
"AVATAR_KIND": {
"type": "string",
"description": "The types of avatars to display, either generated inline (local) or Gravatar (gravatar)",
"enum": ["local", "gravatar"],
},
"V2_PAGINATION_SIZE": {
"type": "number",
"description": "The number of results returned per page in V2 registry APIs",
"x-example": 100,
},
"ENABLE_HEALTH_DEBUG_SECRET": {
"type": ["string", "null"],
"description": "If specified, a secret that can be given to health endpoints to see full debug info when"
+ "not authenticated as a superuser",
"x-example": "somesecrethere",
},
"BROWSER_API_CALLS_XHR_ONLY": {
"type": "boolean",
"description": "If enabled, only API calls marked as being made by an XHR will be allowed from browsers. Defaults to True.",
"x-example": False,
},
# Time machine and tag expiration settings.
"FEATURE_CHANGE_TAG_EXPIRATION": {
"type": "boolean",
"description": "Whether users and organizations are allowed to change the tag expiration for tags in their namespace. Defaults to True.",
"x-example": False,
},
"DEFAULT_TAG_EXPIRATION": {
"type": "string",
"description": "The default, configurable tag expiration time for time machine. Defaults to `2w`.",
"pattern": "^[0-9]+(w|m|d|h|s)$",
},
"TAG_EXPIRATION_OPTIONS": {
"type": "array",
"description": "The options that users can select for expiration of tags in their namespace (if enabled)",
"items": {
"type": "string",
"pattern": "^[0-9]+(w|m|d|h|s)$",
},
},
# Team syncing.
"FEATURE_TEAM_SYNCING": {
"type": "boolean",
"description": "Whether to allow for team membership to be synced from a backing group in the authentication engine (LDAP or Keystone)",
"x-example": True,
},
"TEAM_RESYNC_STALE_TIME": {
"type": "string",
"description": "If team syncing is enabled for a team, how often to check its membership and resync if necessary (Default: 30m)",
"x-example": "2h",
"pattern": "^[0-9]+(w|m|d|h|s)$",
},
"FEATURE_NONSUPERUSER_TEAM_SYNCING_SETUP": {
"type": "boolean",
"description": "If enabled, non-superusers can setup syncing on teams to backing LDAP or Keystone. Defaults To False.",
"x-example": True,
},
# Security scanning.
"FEATURE_SECURITY_SCANNER": {
"type": "boolean",
"description": "Whether to turn of/off the security scanner. Defaults to False",
"x-example": False,
"x-reference": "https://coreos.com/quay-enterprise/docs/latest/security-scanning.html",
},
"FEATURE_SECURITY_NOTIFICATIONS": {
"type": "boolean",
"description": "If the security scanner is enabled, whether to turn of/off security notificaitons. Defaults to False",
"x-example": False,
},
"SECURITY_SCANNER_ENDPOINT": {
"type": "string",
"pattern": "^http(s)?://(.)+$",
"description": "The endpoint for the V2 security scanner",
"x-example": "http://192.168.99.101:6060",
},
"SECURITY_SCANNER_V4_ENDPOINT": {
"type": ["string", "null"],
"pattern": "^http(s)?://(.)+$",
"description": "The endpoint for the V4 security scanner",
"x-example": "http://192.168.99.101:6060",
},
"SECURITY_SCANNER_INDEXING_INTERVAL": {
"type": "number",
"description": "The number of seconds between indexing intervals in the security scanner. Defaults to 30.",
"x-example": 30,
},
"SECURITY_SCANNER_V4_PSK": {
"type": "string",
"description": "A base64 encoded string used to sign JWT(s) on Clair V4 requests. If 'None' jwt signing will not occur.",
"x-example": "PSK",
},
# Repository mirroring
"REPO_MIRROR_INTERVAL": {
"type": "number",
"description": "The number of seconds between checking for repository mirror candidates. Defaults to 30.",
"x-example": 30,
},
# Build
"FEATURE_GITHUB_BUILD": {
"type": "boolean",
"description": "Whether to support GitHub build triggers. Defaults to False",
"x-example": False,
},
"FEATURE_BITBUCKET_BUILD": {
"type": "boolean",
"description": "Whether to support Bitbucket build triggers. Defaults to False",
"x-example": False,
},
"FEATURE_GITLAB_BUILD": {
"type": "boolean",
"description": "Whether to support GitLab build triggers. Defaults to False",
"x-example": False,
},
"FEATURE_BUILD_SUPPORT": {
"type": "boolean",
"description": "Whether to support Dockerfile build. Defaults to True",
"x-example": True,
},
"DEFAULT_NAMESPACE_MAXIMUM_BUILD_COUNT": {
"type": ["number", "null"],
"description": "If not None, the default maximum number of builds that can be queued in a namespace.",
"x-example": 20,
},
"SUCCESSIVE_TRIGGER_INTERNAL_ERROR_DISABLE_THRESHOLD": {
"type": ["number", "null"],
"description": "If not None, the number of successive internal errors that can occur before a build trigger is automatically disabled. Defaults to 5.",
"x-example": 10,
},
"SUCCESSIVE_TRIGGER_FAILURE_DISABLE_THRESHOLD": {
"type": ["number", "null"],
"description": "If not None, the number of successive failures that can occur before a build trigger is automatically disabled. Defaults to 100.",
"x-example": 50,
},
# Nested repository names
"FEATURE_EXTENDED_REPOSITORY_NAMES": {
"type": "boolean",
"description": "Whether repository names can have nested paths (/)",
"x-example": False,
},
# Login
"FEATURE_GITHUB_LOGIN": {
"type": "boolean",
"description": "Whether GitHub login is supported. Defaults to False",
"x-example": False,
},
"FEATURE_GOOGLE_LOGIN": {
"type": "boolean",
"description": "Whether Google login is supported. Defaults to False",
"x-example": False,
},
# Recaptcha
"FEATURE_RECAPTCHA": {
"type": "boolean",
"description": "Whether Recaptcha is necessary for user login and recovery. Defaults to False",
"x-example": False,
"x-reference": "https://www.google.com/recaptcha/intro/",
},
"RECAPTCHA_SITE_KEY": {
"type": ["string", "null"],
"description": "If recaptcha is enabled, the site key for the Recaptcha service",
},
"RECAPTCHA_SECRET_KEY": {
"type": ["string", "null"],
"description": "If recaptcha is enabled, the secret key for the Recaptcha service",
},
# External application tokens.
"FEATURE_APP_SPECIFIC_TOKENS": {
"type": "boolean",
"description": "If enabled, users can create tokens for use by the Docker CLI. Defaults to True",
"x-example": False,
},
"APP_SPECIFIC_TOKEN_EXPIRATION": {
"type": ["string", "null"],
"description": "The expiration for external app tokens. Defaults to None.",
"pattern": "^[0-9]+(w|m|d|h|s)$",
},
"EXPIRED_APP_SPECIFIC_TOKEN_GC": {
"type": ["string", "null"],
"description": "Duration of time expired external app tokens will remain before being garbage collected. Defaults to 1d.",
"pattern": "^[0-9]+(w|m|d|h|s)$",
},
# Feature Flag: Garbage collection.
"FEATURE_GARBAGE_COLLECTION": {
"type": "boolean",
"description": "Whether garbage collection of repositories is enabled. Defaults to True",
"x-example": False,
},
# Feature Flag: Rate limits.
"FEATURE_RATE_LIMITS": {
"type": "boolean",
"description": "Whether to enable rate limits on API and registry endpoints. Defaults to False",
"x-example": True,
},
# Feature Flag: Aggregated log retrieval.
"FEATURE_AGGREGATED_LOG_COUNT_RETRIEVAL": {
"type": "boolean",
"description": "Whether to allow retrieval of aggregated log counts. Defaults to True",
"x-example": True,
},
# Feature Flag: Log export.
"FEATURE_LOG_EXPORT": {
"type": "boolean",
"description": "Whether to allow exporting of action logs. Defaults to True",
"x-example": True,
},
# Feature Flag: User last accessed.
"FEATURE_USER_LAST_ACCESSED": {
"type": "boolean",
"description": "Whether to record the last time a user was accessed. Defaults to True",
"x-example": True,
},
# Feature Flag: Permanent Sessions.
"FEATURE_PERMANENT_SESSIONS": {
"type": "boolean",
"description": "Whether sessions are permanent. Defaults to True",
"x-example": True,
},
# Feature Flag: Super User Support.
"FEATURE_SUPER_USERS": {
"type": "boolean",
"description": "Whether super users are supported. Defaults to True",
"x-example": True,
},
# Feature Flag: Use FIPS compliant cryptography.
"FEATURE_FIPS": {
"type": "boolean",
"description": "If set to true, Quay will run using FIPS compliant hash functions. Defaults to False",
"x-example": True,
},
# Feature Flag: Anonymous Users.
"FEATURE_ANONYMOUS_ACCESS": {
"type": "boolean",
"description": " Whether to allow anonymous users to browse and pull public repositories. Defaults to True",
"x-example": True,
},
# Feature Flag: User Creation.
"FEATURE_USER_CREATION": {
"type": "boolean",
"description": "Whether users can be created (by non-super users). Defaults to True",
"x-example": True,
},
# Feature Flag: Invite Only User Creation.
"FEATURE_INVITE_ONLY_USER_CREATION": {
"type": "boolean",
"description": "Whether users being created must be invited by another user. Defaults to False",
"x-example": False,
},
# Feature Flag: Encrypted Basic Auth.
"FEATURE_REQUIRE_ENCRYPTED_BASIC_AUTH": {
"type": "boolean",
"description": "Whether non-encrypted passwords (as opposed to encrypted tokens) can be used for basic auth. Defaults to False",
"x-example": False,
},
# Feature Flag: Direct Login.
"FEATURE_DIRECT_LOGIN": {
"type": "boolean",
"description": "Whether users can directly login to the UI. Defaults to True",
"x-example": True,
},
# Feature Flag: Advertising V2.
"FEATURE_ADVERTISE_V2": {
"type": "boolean",
"description": "Whether the v2/ endpoint is visible. Defaults to True",
"x-example": True,
},
# Feature Flag: Log Rotation.
"FEATURE_ACTION_LOG_ROTATION": {
"type": "boolean",
"description": "Whether or not to rotate old action logs to storage. Defaults to False",
"x-example": False,
},
# Feature Flag: ACI Conversion.
"FEATURE_ACI_CONVERSION": {
"type": "boolean",
"description": "Whether to enable conversion to ACIs. Defaults to False",
"x-example": False,
},
# Feature Flag: Library Support.
"FEATURE_LIBRARY_SUPPORT": {
"type": "boolean",
"description": 'Whether to allow for "namespace-less" repositories when pulling and pushing from Docker. Defaults to True',
"x-example": True,
},
# Feature Flag: Require Team Invite.
"FEATURE_REQUIRE_TEAM_INVITE": {
"type": "boolean",
"description": "Whether to require invitations when adding a user to a team. Defaults to True",
"x-example": True,
},
# Feature Flag: Collecting and Supporting Metadata.
"FEATURE_USER_METADATA": {
"type": "boolean",
"description": "Whether to collect and support user metadata. Defaults to False",
"x-example": False,
},
# Feature Flag: Support App Registry.
"FEATURE_APP_REGISTRY": {
"type": "boolean",
"description": "Whether to enable support for App repositories. Defaults to False",
"x-example": False,
},
# Feature Flag: Read only app registry.
"FEATURE_READONLY_APP_REGISTRY": {
"type": "boolean",
"description": "Whether to App repositories are read-only. Defaults to False",
"x-example": True,
},
# Feature Flag: Public Reposiotires in _catalog Endpoint.
"FEATURE_PUBLIC_CATALOG": {
"type": "boolean",
"description": "If set to true, the _catalog endpoint returns public repositories. Otherwise, only private repositories can be returned. Defaults to False",
"x-example": False,
},
# Feature Flag: Reader Build Logs.
"FEATURE_READER_BUILD_LOGS": {
"type": "boolean",
"description": "If set to true, build logs may be read by those with read access to the repo, rather than only write access or admin access. Defaults to False",
"x-example": False,
},
# Feature Flag: Usernames Autocomplete.
"FEATURE_PARTIAL_USER_AUTOCOMPLETE": {
"type": "boolean",
"description": "If set to true, autocompletion will apply to partial usernames. Defaults to True",
"x-example": True,
},
# Feature Flag: User log access.
"FEATURE_USER_LOG_ACCESS": {
"type": "boolean",
"description": "If set to true, users will have access to audit logs for their namespace. Defaults to False",
"x-example": True,
},
# Feature Flag: User renaming.
"FEATURE_USER_RENAME": {
"type": "boolean",
"description": "If set to true, users can rename their own namespace. Defaults to False",
"x-example": True,
},
# Feature Flag: Username confirmation.
"FEATURE_USERNAME_CONFIRMATION": {
"type": "boolean",
"description": "If set to true, users can confirm their generated usernames. Defaults to True",
"x-example": False,
},
# Feature Flag: V1 push restriction.
"FEATURE_RESTRICTED_V1_PUSH": {
"type": "boolean",
"description": "If set to true, only namespaces listed in V1_PUSH_WHITELIST support V1 push. Defaults to True",
"x-example": False,
},
# Feature Flag: Support Repository Mirroring.
"FEATURE_REPO_MIRROR": {
"type": "boolean",
"description": "Whether to enable support for repository mirroring. Defaults to False",
"x-example": False,
},
"REPO_MIRROR_TLS_VERIFY": {
"type": "boolean",
"description": "Require HTTPS and verify certificates of Quay registry during mirror. Defaults to True",
"x-example": True,
},
"REPO_MIRROR_SERVER_HOSTNAME": {
"type": ["string", "null"],
"description": "Replaces the SERVER_HOSTNAME as the destination for mirroring. Defaults to unset",
"x-example": "openshift-quay-service",
},
# Feature Flag: V1 push restriction.
"V1_PUSH_WHITELIST": {
"type": "array",
"description": "The array of namespace names that support V1 push if FEATURE_RESTRICTED_V1_PUSH is set to true.",
"x-example": ["some", "namespaces"],
},
# Logs model
"LOGS_MODEL": {
"type": "string",
"description": "Logs model for action logs",
"enum": ["database", "transition_reads_both_writes_es", "elasticsearch"],
"x-example": "database",
},
"LOGS_MODEL_CONFIG": {
"type": "object",
"description": "Logs model config for action logs",
"x-reference": "https://www.elastic.co/guide/en/elasticsearch/guide/master/_index_settings.html",
"properties": {
"producer": {
"type": "string",
"description": "Logs producer if logging to Elasticsearch",
"enum": ["kafka", "elasticsearch", "kinesis_stream"],
"x-example": "kafka",
},
"elasticsearch_config": {
"type": "object",
"description": "Elasticsearch cluster configuration",
"properties": {
"host": {
"type": "string",
"description": "Elasticsearch cluster endpoint",
"x-example": "host.elasticsearch.example",
},
"port": {
"type": "number",
"description": "Elasticsearch cluster endpoint port",
"x-example": 1234,
},
"access_key": {
"type": "string",
"description": "Elasticsearch user (or IAM key for AWS ES)",
"x-example": "some_string",
},
"secret_key": {
"type": "string",
"description": "Elasticsearch password (or IAM secret for AWS ES)",
"x-example": "some_secret_string",
},
"aws_region": {
"type": "string",
"description": "Amazon web service region",
"x-example": "us-east-1",
},
"use_ssl": {
"type": "boolean",
"description": "Use ssl for Elasticsearch. Defaults to True",
"x-example": True,
},
"index_prefix": {
"type": "string",
"description": "Elasticsearch's index prefix",
"x-example": "logentry_",
},
"index_settings": {
"type": "object",
"description": "Elasticsearch's index settings",
},
},
},
"kafka_config": {
"type": "object",
"description": "Kafka cluster configuration",
"properties": {
"bootstrap_servers": {
"type": "array",
"description": "List of Kafka brokers to bootstrap the client from",
"uniqueItems": True,
"items": {
"type": "string",
},
},
"topic": {
"type": "string",
"description": "Kafka topic to publish log entries to",
"x-example": "logentry",
},
"max_block_seconds": {
"type": "number",
"description": "Max number of seconds to block during a `send()`, either because the buffer is full or metadata unavailable",
"x-example": 10,
},
},
},
"kinesis_stream_config": {
"type": "object",
"description": "AWS Kinesis Stream configuration",
"properties": {
"stream_name": {
"type": "string",
"description": "Kinesis stream to send action logs to",
"x-example": "logentry-kinesis-stream",
},
"aws_region": {
"type": "string",
"description": "AWS region",
"x-example": "us-east-1",
},
"aws_access_key": {
"type": "string",
"description": "AWS access key",
"x-example": "some_access_key",
},
"aws_secret_key": {
"type": "string",
"description": "AWS secret key",
"x-example": "some_secret_key",
},
"connect_timeout": {
"type": "number",
"description": "Number of seconds before timeout when attempting to make a connection",
"x-example": 5,
},
"read_timeout": {
"type": "number",
"description": "Number of seconds before timeout when reading from a connection",
"x-example": 5,
},
"retries": {
"type": "number",
"description": "Max number of attempts made on a single request",
"x-example": 5,
},
"max_pool_connections": {
"type": "number",
"description": "The maximum number of connections to keep in a connection pool",
"x-example": 10,
},
},
},
},
},
# Feature Flag: Blacklist Email Domains
"FEATURE_BLACKLISTED_EMAILS": {
"type": "boolean",
"description": "If set to true, no new User accounts may be created if their email domain is blacklisted.",
"x-example": False,
},
# Blacklisted Email Domains
"BLACKLISTED_EMAIL_DOMAINS": {
"type": "array",
"description": "The array of email-address domains that is used if FEATURE_BLACKLISTED_EMAILS is set to true.",
"x-example": ["example.com", "example.org"],
},
"FRESH_LOGIN_TIMEOUT": {
"type": "string",
"description": "The time after which a fresh login requires users to reenter their password",
"x-example": "5m",
},
# Webhook blacklist.
"WEBHOOK_HOSTNAME_BLACKLIST": {
"type": "array",
"description": "The set of hostnames to disallow from webhooks when validating, beyond localhost",
"x-example": ["somexternaldomain.com"],
},
"CREATE_PRIVATE_REPO_ON_PUSH": {
"type": "boolean",
"description": "Whether new repositories created by push are set to private visibility. Defaults to True.",
"x-example": True,
},
"CREATE_NAMESPACE_ON_PUSH": {
"type": "boolean",
"description": "Whether new push to a non-existent organization creates it. Defaults to False.",
"x-example": False,
},
# Allow first user to be initialized via API
"FEATURE_USER_INITIALIZE": {
"type": "boolean",
"description": "If set to true, the first User account may be created via API /api/v1/user/initialize",
"x-example": False,
},
# OCI artifact types
"ALLOWED_OCI_ARTIFACT_TYPES": {
"type": "object",
"description": "The set of allowed OCI artifact mimetypes and the assiciated layer types",
"x-example": {
"application/vnd.cncf.helm.config.v1+json": ["application/tar+gzip"],
"application/vnd.sylabs.sif.config.v1+json": [
"application/vnd.sylabs.sif.layer.v1.sif"
],
},
},
},
}
| internal_only_properties = {'__module__', '__doc__', 'create_transaction', 'SESSION_COOKIE_NAME', 'SESSION_COOKIE_HTTPONLY', 'SESSION_COOKIE_SAMESITE', 'DATABASE_SECRET_KEY', 'V22_NAMESPACE_BLACKLIST', 'MAXIMUM_CNR_LAYER_SIZE', 'OCI_NAMESPACE_WHITELIST', 'FEATURE_GENERAL_OCI_SUPPORT', 'FEATURE_HELM_OCI_SUPPORT', 'FEATURE_NAMESPACE_GARBAGE_COLLECTION', 'FEATURE_REPOSITORY_GARBAGE_COLLECTION', 'FEATURE_REPOSITORY_ACTION_COUNTER', 'APP_REGISTRY_PACKAGE_LIST_CACHE_WHITELIST', 'APP_REGISTRY_SHOW_PACKAGE_CACHE_WHITELIST', 'FEATURE_MANIFEST_SIZE_BACKFILL', 'TESTING', 'SEND_FILE_MAX_AGE_DEFAULT', 'DISABLED_FOR_AUDIT_LOGS', 'DISABLED_FOR_PULL_LOGS', 'FEATURE_DISABLE_PULL_LOGS_FOR_FREE_NAMESPACES', 'FEATURE_CLEAR_EXPIRED_RAC_ENTRIES', 'ACTION_LOG_MAX_PAGE', 'NON_RATE_LIMITED_NAMESPACES', 'REPLICATION_QUEUE_NAME', 'DOCKERFILE_BUILD_QUEUE_NAME', 'CHUNK_CLEANUP_QUEUE_NAME', 'SECURITY_SCANNER_ISSUER_NAME', 'NOTIFICATION_QUEUE_NAME', 'REPOSITORY_GC_QUEUE_NAME', 'NAMESPACE_GC_QUEUE_NAME', 'EXPORT_ACTION_LOGS_QUEUE_NAME', 'SECSCAN_V4_NOTIFICATION_QUEUE_NAME', 'FEATURE_BILLING', 'BILLING_TYPE', 'INSTANCE_SERVICE_KEY_LOCATION', 'INSTANCE_SERVICE_KEY_REFRESH', 'INSTANCE_SERVICE_KEY_SERVICE', 'INSTANCE_SERVICE_KEY_KID_LOCATION', 'INSTANCE_SERVICE_KEY_EXPIRATION', 'UNAPPROVED_SERVICE_KEY_TTL_SEC', 'EXPIRED_SERVICE_KEY_TTL_SEC', 'REGISTRY_JWT_AUTH_MAX_FRESH_S', 'SERVICE_LOG_ACCOUNT_ID', 'BUILDLOGS_OPTIONS', 'LIBRARY_NAMESPACE', 'STAGGER_WORKERS', 'QUEUE_WORKER_METRICS_REFRESH_SECONDS', 'PUSH_TEMP_TAG_EXPIRATION_SEC', 'GARBAGE_COLLECTION_FREQUENCY', 'PAGE_TOKEN_KEY', 'BUILD_MANAGER', 'JWTPROXY_AUDIENCE', 'JWTPROXY_SIGNER', 'SECURITY_SCANNER_INDEXING_MIN_ID', 'SECURITY_SCANNER_V4_REINDEX_THRESHOLD', 'STATIC_SITE_BUCKET', 'LABEL_KEY_RESERVED_PREFIXES', 'TEAM_SYNC_WORKER_FREQUENCY', 'JSONIFY_PRETTYPRINT_REGULAR', 'TUF_GUN_PREFIX', 'LOGGING_LEVEL', 'SIGNED_GRANT_EXPIRATION_SEC', 'PROMETHEUS_PUSHGATEWAY_URL', 'DB_TRANSACTION_FACTORY', 'NOTIFICATION_SEND_TIMEOUT', 'QUEUE_METRICS_TYPE', 
'MAIL_FAIL_SILENTLY', 'LOCAL_OAUTH_HANDLER', 'USE_CDN', 'ANALYTICS_TYPE', 'LAST_ACCESSED_UPDATE_THRESHOLD_S', 'GREENLET_TRACING', 'EXCEPTION_LOG_TYPE', 'SENTRY_DSN', 'SENTRY_PUBLIC_DSN', 'BILLED_NAMESPACE_MAXIMUM_BUILD_COUNT', 'THREAT_NAMESPACE_MAXIMUM_BUILD_COUNT', 'IP_DATA_API_KEY', 'SECURITY_SCANNER_ENDPOINT_BATCH', 'SECURITY_SCANNER_API_TIMEOUT_SECONDS', 'SECURITY_SCANNER_API_TIMEOUT_POST_SECONDS', 'SECURITY_SCANNER_ENGINE_VERSION_TARGET', 'SECURITY_SCANNER_READONLY_FAILOVER_ENDPOINTS', 'SECURITY_SCANNER_API_VERSION', 'REPO_MIRROR_INTERVAL', 'DATA_MODEL_CACHE_CONFIG', 'FEATURE_SIGNING', 'TUF_SERVER', 'V1_ONLY_DOMAIN', 'LOGS_MODEL', 'LOGS_MODEL_CONFIG', 'APP_REGISTRY_RESULTS_LIMIT', 'V3_UPGRADE_MODE', 'ACCOUNT_RECOVERY_MODE'}
config_schema = {'type': 'object', 'description': 'Schema for Quay configuration', 'required': ['PREFERRED_URL_SCHEME', 'SERVER_HOSTNAME', 'DB_URI', 'AUTHENTICATION_TYPE', 'DISTRIBUTED_STORAGE_CONFIG', 'BUILDLOGS_REDIS', 'USER_EVENTS_REDIS', 'DISTRIBUTED_STORAGE_PREFERENCE', 'DEFAULT_TAG_EXPIRATION', 'TAG_EXPIRATION_OPTIONS'], 'properties': {'REGISTRY_STATE': {'type': 'string', 'description': 'The state of the registry.', 'enum': ['normal', 'readonly'], 'x-example': 'readonly'}, 'PREFERRED_URL_SCHEME': {'type': 'string', 'description': 'The URL scheme to use when hitting Quay. If Quay is behind SSL *at all*, this *must* be `https`', 'enum': ['http', 'https'], 'x-example': 'https'}, 'SERVER_HOSTNAME': {'type': 'string', 'description': 'The URL at which Quay is accessible, without the scheme.', 'x-example': 'quay.io'}, 'EXTERNAL_TLS_TERMINATION': {'type': 'boolean', 'description': 'If TLS is supported, but terminated at a layer before Quay, must be true.', 'x-example': True}, 'SSL_CIPHERS': {'type': 'array', 'description': 'If specified, the nginx-defined list of SSL ciphers to enabled and disabled', 'x-example': ['CAMELLIA', '!3DES'], 'x-reference': 'http://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_ciphers'}, 'SSL_PROTOCOLS': {'type': 'array', 'description': 'If specified, the nginx-defined list of SSL protocols to enabled and disabled', 'x-example': ['TLSv1.1', 'TLSv1.2'], 'x-reference': 'http://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_protocols'}, 'REGISTRY_TITLE': {'type': 'string', 'description': 'If specified, the long-form title for the registry. Defaults to `Red Hat Quay`.', 'x-example': 'Corp Container Service'}, 'REGISTRY_TITLE_SHORT': {'type': 'string', 'description': 'If specified, the short-form title for the registry. Defaults to `Red Hat Quay`.', 'x-example': 'CCS'}, 'CONTACT_INFO': {'type': 'array', 'uniqueItems': True, 'description': 'If specified, contact information to display on the contact page. 
' + 'If only a single piece of contact information is specified, the contact footer will link directly.', 'items': [{'type': 'string', 'pattern': '^mailto:(.)+$', 'x-example': 'mailto:admin@example.com', 'description': 'Adds a link to send an e-mail'}, {'type': 'string', 'pattern': '^irc://(.)+$', 'x-example': 'irc://chat.freenode.net:6665/quay', 'description': 'Adds a link to visit an IRC chat room'}, {'type': 'string', 'pattern': '^tel:(.)+$', 'x-example': 'tel:+1-888-930-3475', 'description': 'Adds a link to call a phone number'}, {'type': 'string', 'pattern': '^http(s)?://(.)+$', 'x-example': 'https://twitter.com/quayio', 'description': 'Adds a link to a defined URL'}]}, 'SEARCH_RESULTS_PER_PAGE': {'type': 'number', 'description': 'Number of results returned per page by search page. Defaults to 10', 'x-example': 10}, 'SEARCH_MAX_RESULT_PAGE_COUNT': {'type': 'number', 'description': 'Maximum number of pages the user can paginate in search before they are limited. Defaults to 10', 'x-example': 10}, 'FEATURE_MAILING': {'type': 'boolean', 'description': 'Whether emails are enabled. Defaults to True', 'x-example': True}, 'MAIL_SERVER': {'type': 'string', 'description': 'The SMTP server to use for sending e-mails. Only required if FEATURE_MAILING is set to true.', 'x-example': 'smtp.somedomain.com'}, 'MAIL_USE_TLS': {'type': 'boolean', 'description': 'If specified, whether to use TLS for sending e-mails.', 'x-example': True}, 'MAIL_PORT': {'type': 'number', 'description': 'The SMTP port to use. If not specified, defaults to 587.', 'x-example': 588}, 'MAIL_USERNAME': {'type': ['string', 'null'], 'description': 'The SMTP username to use when sending e-mails.', 'x-example': 'myuser'}, 'MAIL_PASSWORD': {'type': ['string', 'null'], 'description': 'The SMTP password to use when sending e-mails.', 'x-example': 'mypassword'}, 'MAIL_DEFAULT_SENDER': {'type': ['string', 'null'], 'description': 'If specified, the e-mail address used as the `from` when Quay sends e-mails. 
If none, defaults to `admin@example.com`.', 'x-example': 'support@myco.com'}, 'DB_URI': {'type': 'string', 'description': 'The URI at which to access the database, including any credentials.', 'x-example': 'mysql+pymysql://username:password@dns.of.database/quay', 'x-reference': 'https://www.postgresql.org/docs/9.3/static/libpq-connect.html#AEN39495'}, 'DB_CONNECTION_ARGS': {'type': 'object', 'description': 'If specified, connection arguments for the database such as timeouts and SSL.', 'properties': {'threadlocals': {'type': 'boolean', 'description': 'Whether to use thread-local connections. Should *ALWAYS* be `true`'}, 'autorollback': {'type': 'boolean', 'description': 'Whether to use auto-rollback connections. Should *ALWAYS* be `true`'}, 'ssl': {'type': 'object', 'description': 'SSL connection configuration', 'properties': {'ca': {'type': 'string', 'description': '*Absolute container path* to the CA certificate to use for SSL connections', 'x-example': 'conf/stack/ssl-ca-cert.pem'}}, 'required': ['ca']}}, 'required': ['threadlocals', 'autorollback']}, 'ALLOW_PULLS_WITHOUT_STRICT_LOGGING': {'type': 'boolean', 'description': 'If true, pulls in which the pull audit log entry cannot be written will ' + 'still succeed. Useful if the database can fallback into a read-only state ' + 'and it is desired for pulls to continue during that time. Defaults to False.', 'x-example': True}, 'FEATURE_STORAGE_REPLICATION': {'type': 'boolean', 'description': 'Whether to automatically replicate between storage engines. Defaults to False', 'x-example': False}, 'FEATURE_PROXY_STORAGE': {'type': 'boolean', 'description': 'Whether to proxy all direct download URLs in storage via the registry nginx. Defaults to False', 'x-example': False}, 'MAXIMUM_LAYER_SIZE': {'type': 'string', 'description': 'Maximum allowed size of an image layer. 
Defaults to 20G', 'x-example': '100G', 'pattern': '^[0-9]+(G|M)$'}, 'DISTRIBUTED_STORAGE_CONFIG': {'type': 'object', 'description': 'Configuration for storage engine(s) to use in Quay. Each key is a unique ID' + ' for a storage engine, with the value being a tuple of the type and ' + ' configuration for that engine.', 'x-example': {'local_storage': ['LocalStorage', {'storage_path': 'some/path/'}]}, 'items': {'type': 'array'}}, 'DISTRIBUTED_STORAGE_PREFERENCE': {'type': 'array', 'description': 'The preferred storage engine(s) (by ID in DISTRIBUTED_STORAGE_CONFIG) to ' + 'use. A preferred engine means it is first checked for pullig and images are ' + 'pushed to it.', 'items': {'type': 'string', 'uniqueItems': True}, 'x-example': ['s3_us_east', 's3_us_west']}, 'DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS': {'type': 'array', 'description': 'The list of storage engine(s) (by ID in DISTRIBUTED_STORAGE_CONFIG) whose ' + 'images should be fully replicated, by default, to all other storage engines.', 'items': {'type': 'string', 'uniqueItems': True}, 'x-example': ['s3_us_east', 's3_us_west']}, 'USERFILES_LOCATION': {'type': 'string', 'description': 'ID of the storage engine in which to place user-uploaded files', 'x-example': 's3_us_east'}, 'USERFILES_PATH': {'type': 'string', 'description': 'Path under storage in which to place user-uploaded files', 'x-example': 'userfiles'}, 'ACTION_LOG_ARCHIVE_LOCATION': {'type': 'string', 'description': 'If action log archiving is enabled, the storage engine in which to place the ' + 'archived data.', 'x-example': 's3_us_east'}, 'ACTION_LOG_ARCHIVE_PATH': {'type': 'string', 'description': 'If action log archiving is enabled, the path in storage in which to place the ' + 'archived data.', 'x-example': 'archives/actionlogs'}, 'ACTION_LOG_ROTATION_THRESHOLD': {'type': 'string', 'description': 'If action log archiving is enabled, the time interval after which to ' + 'archive data.', 'x-example': '30d'}, 'LOG_ARCHIVE_LOCATION': {'type': 'string', 
'description': 'If builds are enabled, the storage engine in which to place the ' + 'archived build logs.', 'x-example': 's3_us_east'}, 'LOG_ARCHIVE_PATH': {'type': 'string', 'description': 'If builds are enabled, the path in storage in which to place the ' + 'archived build logs.', 'x-example': 'archives/buildlogs'}, 'AUTHENTICATION_TYPE': {'type': 'string', 'description': 'The authentication engine to use for credential authentication.', 'x-example': 'Database', 'enum': ['Database', 'LDAP', 'JWT', 'Keystone', 'OIDC', 'AppToken']}, 'SUPER_USERS': {'type': 'array', 'description': 'Quay usernames of those users to be granted superuser privileges', 'uniqueItems': True, 'items': {'type': 'string'}}, 'DIRECT_OAUTH_CLIENTID_WHITELIST': {'type': 'array', 'description': 'A list of client IDs of *Quay-managed* applications that are allowed ' + 'to perform direct OAuth approval without user approval.', 'x-reference': 'https://coreos.com/quay-enterprise/docs/latest/direct-oauth.html', 'uniqueItems': True, 'items': {'type': 'string'}}, 'BUILDLOGS_REDIS': {'type': 'object', 'description': 'Connection information for Redis for build logs caching', 'required': ['host'], 'properties': {'host': {'type': 'string', 'description': 'The hostname at which Redis is accessible', 'x-example': 'my.redis.cluster'}, 'port': {'type': 'number', 'description': 'The port at which Redis is accessible', 'x-example': 1234}, 'password': {'type': 'string', 'description': 'The password to connect to the Redis instance', 'x-example': 'mypassword'}}}, 'USER_EVENTS_REDIS': {'type': 'object', 'description': 'Connection information for Redis for user event handling', 'required': ['host'], 'properties': {'host': {'type': 'string', 'description': 'The hostname at which Redis is accessible', 'x-example': 'my.redis.cluster'}, 'port': {'type': 'number', 'description': 'The port at which Redis is accessible', 'x-example': 1234}, 'password': {'type': 'string', 'description': 'The password to connect to the Redis 
instance', 'x-example': 'mypassword'}}}, 'GITHUB_LOGIN_CONFIG': {'type': ['object', 'null'], 'description': 'Configuration for using GitHub (Enterprise) as an external login provider', 'required': ['CLIENT_ID', 'CLIENT_SECRET'], 'x-reference': 'https://coreos.com/quay-enterprise/docs/latest/github-auth.html', 'properties': {'GITHUB_ENDPOINT': {'type': 'string', 'description': 'The endpoint of the GitHub (Enterprise) being hit', 'x-example': 'https://github.com/'}, 'API_ENDPOINT': {'type': 'string', 'description': 'The endpoint of the GitHub (Enterprise) API to use. Must be overridden for github.com', 'x-example': 'https://api.github.com/'}, 'CLIENT_ID': {'type': 'string', 'description': 'The registered client ID for this Quay instance; cannot be shared with GITHUB_TRIGGER_CONFIG', 'x-example': '0e8dbe15c4c7630b6780', 'x-reference': 'https://coreos.com/quay-enterprise/docs/latest/github-app.html'}, 'CLIENT_SECRET': {'type': 'string', 'description': 'The registered client secret for this Quay instance', 'x-example': 'e4a58ddd3d7408b7aec109e85564a0d153d3e846', 'x-reference': 'https://coreos.com/quay-enterprise/docs/latest/github-app.html'}, 'ORG_RESTRICT': {'type': 'boolean', 'description': 'If true, only users within the organization whitelist can login using this provider', 'x-example': True}, 'ALLOWED_ORGANIZATIONS': {'type': 'array', 'description': 'The names of the GitHub (Enterprise) organizations whitelisted to work with the ORG_RESTRICT option', 'uniqueItems': True, 'items': {'type': 'string'}}}}, 'BITBUCKET_TRIGGER_CONFIG': {'type': ['object', 'null'], 'description': 'Configuration for using BitBucket for build triggers', 'required': ['CONSUMER_KEY', 'CONSUMER_SECRET'], 'x-reference': 'https://coreos.com/quay-enterprise/docs/latest/bitbucket-build.html', 'properties': {'CONSUMER_KEY': {'type': 'string', 'description': 'The registered consumer key (client ID) for this Quay instance', 'x-example': '0e8dbe15c4c7630b6780'}, 'CONSUMER_SECRET': {'type': 'string', 
'description': 'The registered consumer secret (client secret) for this Quay instance', 'x-example': 'e4a58ddd3d7408b7aec109e85564a0d153d3e846'}}}, 'GITHUB_TRIGGER_CONFIG': {'type': ['object', 'null'], 'description': 'Configuration for using GitHub (Enterprise) for build triggers', 'required': ['GITHUB_ENDPOINT', 'CLIENT_ID', 'CLIENT_SECRET'], 'x-reference': 'https://coreos.com/quay-enterprise/docs/latest/github-build.html', 'properties': {'GITHUB_ENDPOINT': {'type': 'string', 'description': 'The endpoint of the GitHub (Enterprise) being hit', 'x-example': 'https://github.com/'}, 'API_ENDPOINT': {'type': 'string', 'description': 'The endpoint of the GitHub (Enterprise) API to use. Must be overridden for github.com', 'x-example': 'https://api.github.com/'}, 'CLIENT_ID': {'type': 'string', 'description': 'The registered client ID for this Quay instance; cannot be shared with GITHUB_LOGIN_CONFIG', 'x-example': '0e8dbe15c4c7630b6780', 'x-reference': 'https://coreos.com/quay-enterprise/docs/latest/github-app.html'}, 'CLIENT_SECRET': {'type': 'string', 'description': 'The registered client secret for this Quay instance', 'x-example': 'e4a58ddd3d7408b7aec109e85564a0d153d3e846', 'x-reference': 'https://coreos.com/quay-enterprise/docs/latest/github-app.html'}}}, 'GOOGLE_LOGIN_CONFIG': {'type': ['object', 'null'], 'description': 'Configuration for using Google for external authentication', 'required': ['CLIENT_ID', 'CLIENT_SECRET'], 'properties': {'CLIENT_ID': {'type': 'string', 'description': 'The registered client ID for this Quay instance', 'x-example': '0e8dbe15c4c7630b6780'}, 'CLIENT_SECRET': {'type': 'string', 'description': 'The registered client secret for this Quay instance', 'x-example': 'e4a58ddd3d7408b7aec109e85564a0d153d3e846'}}}, 'GITLAB_TRIGGER_CONFIG': {'type': ['object', 'null'], 'description': 'Configuration for using Gitlab (Enterprise) for external authentication', 'required': ['GITLAB_ENDPOINT', 'CLIENT_ID', 'CLIENT_SECRET'], 'properties': 
{'GITLAB_ENDPOINT': {'type': 'string', 'description': 'The endpoint at which Gitlab(Enterprise) is running', 'x-example': 'https://gitlab.com'}, 'CLIENT_ID': {'type': 'string', 'description': 'The registered client ID for this Quay instance', 'x-example': '0e8dbe15c4c7630b6780'}, 'CLIENT_SECRET': {'type': 'string', 'description': 'The registered client secret for this Quay instance', 'x-example': 'e4a58ddd3d7408b7aec109e85564a0d153d3e846'}}}, 'BRANDING': {'type': ['object', 'null'], 'description': 'Custom branding for logos and URLs in the Quay UI', 'required': ['logo'], 'properties': {'logo': {'type': 'string', 'description': 'Main logo image URL', 'x-example': '/static/img/quay-horizontal-color.svg'}, 'footer_img': {'type': 'string', 'description': 'Logo for UI footer', 'x-example': '/static/img/RedHat.svg'}, 'footer_url': {'type': 'string', 'description': 'Link for footer image', 'x-example': 'https://redhat.com'}}}, 'DOCUMENTATION_ROOT': {'type': 'string', 'description': 'Root URL for documentation links'}, 'HEALTH_CHECKER': {'description': 'The configured health check.', 'x-example': ('RDSAwareHealthCheck', {'access_key': 'foo', 'secret_key': 'bar'})}, 'PROMETHEUS_NAMESPACE': {'type': 'string', 'description': 'The prefix applied to all exposed Prometheus metrics. Defaults to `quay`', 'x-example': 'myregistry'}, 'BLACKLIST_V2_SPEC': {'type': 'string', 'description': 'The Docker CLI versions to which Quay will respond that V2 is *unsupported*. Defaults to `<1.6.0`', 'x-reference': 'http://pythonhosted.org/semantic_version/reference.html#semantic_version.Spec', 'x-example': '<1.8.0'}, 'USER_RECOVERY_TOKEN_LIFETIME': {'type': 'string', 'description': 'The length of time a token for recovering a user accounts is valid. Defaults to 30m.', 'x-example': '10m', 'pattern': '^[0-9]+(w|m|d|h|s)$'}, 'SESSION_COOKIE_SECURE': {'type': 'boolean', 'description': 'Whether the `secure` property should be set on session cookies. ' + 'Defaults to False. 
Recommended to be True for all installations using SSL.', 'x-example': True, 'x-reference': 'https://en.wikipedia.org/wiki/Secure_cookies'}, 'PUBLIC_NAMESPACES': {'type': 'array', 'description': 'If a namespace is defined in the public namespace list, then it will appear on *all*' + " user's repository list pages, regardless of whether that user is a member of the namespace." + ' Typically, this is used by an enterprise customer in configuring a set of "well-known"' + ' namespaces.', 'uniqueItems': True, 'items': {'type': 'string'}}, 'AVATAR_KIND': {'type': 'string', 'description': 'The types of avatars to display, either generated inline (local) or Gravatar (gravatar)', 'enum': ['local', 'gravatar']}, 'V2_PAGINATION_SIZE': {'type': 'number', 'description': 'The number of results returned per page in V2 registry APIs', 'x-example': 100}, 'ENABLE_HEALTH_DEBUG_SECRET': {'type': ['string', 'null'], 'description': 'If specified, a secret that can be given to health endpoints to see full debug info when' + 'not authenticated as a superuser', 'x-example': 'somesecrethere'}, 'BROWSER_API_CALLS_XHR_ONLY': {'type': 'boolean', 'description': 'If enabled, only API calls marked as being made by an XHR will be allowed from browsers. Defaults to True.', 'x-example': False}, 'FEATURE_CHANGE_TAG_EXPIRATION': {'type': 'boolean', 'description': 'Whether users and organizations are allowed to change the tag expiration for tags in their namespace. Defaults to True.', 'x-example': False}, 'DEFAULT_TAG_EXPIRATION': {'type': 'string', 'description': 'The default, configurable tag expiration time for time machine. 
Defaults to `2w`.', 'pattern': '^[0-9]+(w|m|d|h|s)$'}, 'TAG_EXPIRATION_OPTIONS': {'type': 'array', 'description': 'The options that users can select for expiration of tags in their namespace (if enabled)', 'items': {'type': 'string', 'pattern': '^[0-9]+(w|m|d|h|s)$'}}, 'FEATURE_TEAM_SYNCING': {'type': 'boolean', 'description': 'Whether to allow for team membership to be synced from a backing group in the authentication engine (LDAP or Keystone)', 'x-example': True}, 'TEAM_RESYNC_STALE_TIME': {'type': 'string', 'description': 'If team syncing is enabled for a team, how often to check its membership and resync if necessary (Default: 30m)', 'x-example': '2h', 'pattern': '^[0-9]+(w|m|d|h|s)$'}, 'FEATURE_NONSUPERUSER_TEAM_SYNCING_SETUP': {'type': 'boolean', 'description': 'If enabled, non-superusers can setup syncing on teams to backing LDAP or Keystone. Defaults To False.', 'x-example': True}, 'FEATURE_SECURITY_SCANNER': {'type': 'boolean', 'description': 'Whether to turn of/off the security scanner. Defaults to False', 'x-example': False, 'x-reference': 'https://coreos.com/quay-enterprise/docs/latest/security-scanning.html'}, 'FEATURE_SECURITY_NOTIFICATIONS': {'type': 'boolean', 'description': 'If the security scanner is enabled, whether to turn of/off security notificaitons. Defaults to False', 'x-example': False}, 'SECURITY_SCANNER_ENDPOINT': {'type': 'string', 'pattern': '^http(s)?://(.)+$', 'description': 'The endpoint for the V2 security scanner', 'x-example': 'http://192.168.99.101:6060'}, 'SECURITY_SCANNER_V4_ENDPOINT': {'type': ['string', 'null'], 'pattern': '^http(s)?://(.)+$', 'description': 'The endpoint for the V4 security scanner', 'x-example': 'http://192.168.99.101:6060'}, 'SECURITY_SCANNER_INDEXING_INTERVAL': {'type': 'number', 'description': 'The number of seconds between indexing intervals in the security scanner. 
Defaults to 30.', 'x-example': 30}, 'SECURITY_SCANNER_V4_PSK': {'type': 'string', 'description': "A base64 encoded string used to sign JWT(s) on Clair V4 requests. If 'None' jwt signing will not occur.", 'x-example': 'PSK'}, 'REPO_MIRROR_INTERVAL': {'type': 'number', 'description': 'The number of seconds between checking for repository mirror candidates. Defaults to 30.', 'x-example': 30}, 'FEATURE_GITHUB_BUILD': {'type': 'boolean', 'description': 'Whether to support GitHub build triggers. Defaults to False', 'x-example': False}, 'FEATURE_BITBUCKET_BUILD': {'type': 'boolean', 'description': 'Whether to support Bitbucket build triggers. Defaults to False', 'x-example': False}, 'FEATURE_GITLAB_BUILD': {'type': 'boolean', 'description': 'Whether to support GitLab build triggers. Defaults to False', 'x-example': False}, 'FEATURE_BUILD_SUPPORT': {'type': 'boolean', 'description': 'Whether to support Dockerfile build. Defaults to True', 'x-example': True}, 'DEFAULT_NAMESPACE_MAXIMUM_BUILD_COUNT': {'type': ['number', 'null'], 'description': 'If not None, the default maximum number of builds that can be queued in a namespace.', 'x-example': 20}, 'SUCCESSIVE_TRIGGER_INTERNAL_ERROR_DISABLE_THRESHOLD': {'type': ['number', 'null'], 'description': 'If not None, the number of successive internal errors that can occur before a build trigger is automatically disabled. Defaults to 5.', 'x-example': 10}, 'SUCCESSIVE_TRIGGER_FAILURE_DISABLE_THRESHOLD': {'type': ['number', 'null'], 'description': 'If not None, the number of successive failures that can occur before a build trigger is automatically disabled. Defaults to 100.', 'x-example': 50}, 'FEATURE_EXTENDED_REPOSITORY_NAMES': {'type': 'boolean', 'description': 'Whether repository names can have nested paths (/)', 'x-example': False}, 'FEATURE_GITHUB_LOGIN': {'type': 'boolean', 'description': 'Whether GitHub login is supported. 
Defaults to False', 'x-example': False}, 'FEATURE_GOOGLE_LOGIN': {'type': 'boolean', 'description': 'Whether Google login is supported. Defaults to False', 'x-example': False}, 'FEATURE_RECAPTCHA': {'type': 'boolean', 'description': 'Whether Recaptcha is necessary for user login and recovery. Defaults to False', 'x-example': False, 'x-reference': 'https://www.google.com/recaptcha/intro/'}, 'RECAPTCHA_SITE_KEY': {'type': ['string', 'null'], 'description': 'If recaptcha is enabled, the site key for the Recaptcha service'}, 'RECAPTCHA_SECRET_KEY': {'type': ['string', 'null'], 'description': 'If recaptcha is enabled, the secret key for the Recaptcha service'}, 'FEATURE_APP_SPECIFIC_TOKENS': {'type': 'boolean', 'description': 'If enabled, users can create tokens for use by the Docker CLI. Defaults to True', 'x-example': False}, 'APP_SPECIFIC_TOKEN_EXPIRATION': {'type': ['string', 'null'], 'description': 'The expiration for external app tokens. Defaults to None.', 'pattern': '^[0-9]+(w|m|d|h|s)$'}, 'EXPIRED_APP_SPECIFIC_TOKEN_GC': {'type': ['string', 'null'], 'description': 'Duration of time expired external app tokens will remain before being garbage collected. Defaults to 1d.', 'pattern': '^[0-9]+(w|m|d|h|s)$'}, 'FEATURE_GARBAGE_COLLECTION': {'type': 'boolean', 'description': 'Whether garbage collection of repositories is enabled. Defaults to True', 'x-example': False}, 'FEATURE_RATE_LIMITS': {'type': 'boolean', 'description': 'Whether to enable rate limits on API and registry endpoints. Defaults to False', 'x-example': True}, 'FEATURE_AGGREGATED_LOG_COUNT_RETRIEVAL': {'type': 'boolean', 'description': 'Whether to allow retrieval of aggregated log counts. Defaults to True', 'x-example': True}, 'FEATURE_LOG_EXPORT': {'type': 'boolean', 'description': 'Whether to allow exporting of action logs. Defaults to True', 'x-example': True}, 'FEATURE_USER_LAST_ACCESSED': {'type': 'boolean', 'description': 'Whether to record the last time a user was accessed. 
Defaults to True', 'x-example': True}, 'FEATURE_PERMANENT_SESSIONS': {'type': 'boolean', 'description': 'Whether sessions are permanent. Defaults to True', 'x-example': True}, 'FEATURE_SUPER_USERS': {'type': 'boolean', 'description': 'Whether super users are supported. Defaults to True', 'x-example': True}, 'FEATURE_FIPS': {'type': 'boolean', 'description': 'If set to true, Quay will run using FIPS compliant hash functions. Defaults to False', 'x-example': True}, 'FEATURE_ANONYMOUS_ACCESS': {'type': 'boolean', 'description': ' Whether to allow anonymous users to browse and pull public repositories. Defaults to True', 'x-example': True}, 'FEATURE_USER_CREATION': {'type': 'boolean', 'description': 'Whether users can be created (by non-super users). Defaults to True', 'x-example': True}, 'FEATURE_INVITE_ONLY_USER_CREATION': {'type': 'boolean', 'description': 'Whether users being created must be invited by another user. Defaults to False', 'x-example': False}, 'FEATURE_REQUIRE_ENCRYPTED_BASIC_AUTH': {'type': 'boolean', 'description': 'Whether non-encrypted passwords (as opposed to encrypted tokens) can be used for basic auth. Defaults to False', 'x-example': False}, 'FEATURE_DIRECT_LOGIN': {'type': 'boolean', 'description': 'Whether users can directly login to the UI. Defaults to True', 'x-example': True}, 'FEATURE_ADVERTISE_V2': {'type': 'boolean', 'description': 'Whether the v2/ endpoint is visible. Defaults to True', 'x-example': True}, 'FEATURE_ACTION_LOG_ROTATION': {'type': 'boolean', 'description': 'Whether or not to rotate old action logs to storage. Defaults to False', 'x-example': False}, 'FEATURE_ACI_CONVERSION': {'type': 'boolean', 'description': 'Whether to enable conversion to ACIs. Defaults to False', 'x-example': False}, 'FEATURE_LIBRARY_SUPPORT': {'type': 'boolean', 'description': 'Whether to allow for "namespace-less" repositories when pulling and pushing from Docker. 
Defaults to True', 'x-example': True}, 'FEATURE_REQUIRE_TEAM_INVITE': {'type': 'boolean', 'description': 'Whether to require invitations when adding a user to a team. Defaults to True', 'x-example': True}, 'FEATURE_USER_METADATA': {'type': 'boolean', 'description': 'Whether to collect and support user metadata. Defaults to False', 'x-example': False}, 'FEATURE_APP_REGISTRY': {'type': 'boolean', 'description': 'Whether to enable support for App repositories. Defaults to False', 'x-example': False}, 'FEATURE_READONLY_APP_REGISTRY': {'type': 'boolean', 'description': 'Whether to App repositories are read-only. Defaults to False', 'x-example': True}, 'FEATURE_PUBLIC_CATALOG': {'type': 'boolean', 'description': 'If set to true, the _catalog endpoint returns public repositories. Otherwise, only private repositories can be returned. Defaults to False', 'x-example': False}, 'FEATURE_READER_BUILD_LOGS': {'type': 'boolean', 'description': 'If set to true, build logs may be read by those with read access to the repo, rather than only write access or admin access. Defaults to False', 'x-example': False}, 'FEATURE_PARTIAL_USER_AUTOCOMPLETE': {'type': 'boolean', 'description': 'If set to true, autocompletion will apply to partial usernames. Defaults to True', 'x-example': True}, 'FEATURE_USER_LOG_ACCESS': {'type': 'boolean', 'description': 'If set to true, users will have access to audit logs for their namespace. Defaults to False', 'x-example': True}, 'FEATURE_USER_RENAME': {'type': 'boolean', 'description': 'If set to true, users can rename their own namespace. Defaults to False', 'x-example': True}, 'FEATURE_USERNAME_CONFIRMATION': {'type': 'boolean', 'description': 'If set to true, users can confirm their generated usernames. Defaults to True', 'x-example': False}, 'FEATURE_RESTRICTED_V1_PUSH': {'type': 'boolean', 'description': 'If set to true, only namespaces listed in V1_PUSH_WHITELIST support V1 push. 
Defaults to True', 'x-example': False}, 'FEATURE_REPO_MIRROR': {'type': 'boolean', 'description': 'Whether to enable support for repository mirroring. Defaults to False', 'x-example': False}, 'REPO_MIRROR_TLS_VERIFY': {'type': 'boolean', 'description': 'Require HTTPS and verify certificates of Quay registry during mirror. Defaults to True', 'x-example': True}, 'REPO_MIRROR_SERVER_HOSTNAME': {'type': ['string', 'null'], 'description': 'Replaces the SERVER_HOSTNAME as the destination for mirroring. Defaults to unset', 'x-example': 'openshift-quay-service'}, 'V1_PUSH_WHITELIST': {'type': 'array', 'description': 'The array of namespace names that support V1 push if FEATURE_RESTRICTED_V1_PUSH is set to true.', 'x-example': ['some', 'namespaces']}, 'LOGS_MODEL': {'type': 'string', 'description': 'Logs model for action logs', 'enum': ['database', 'transition_reads_both_writes_es', 'elasticsearch'], 'x-example': 'database'}, 'LOGS_MODEL_CONFIG': {'type': 'object', 'description': 'Logs model config for action logs', 'x-reference': 'https://www.elastic.co/guide/en/elasticsearch/guide/master/_index_settings.html', 'properties': {'producer': {'type': 'string', 'description': 'Logs producer if logging to Elasticsearch', 'enum': ['kafka', 'elasticsearch', 'kinesis_stream'], 'x-example': 'kafka'}, 'elasticsearch_config': {'type': 'object', 'description': 'Elasticsearch cluster configuration', 'properties': {'host': {'type': 'string', 'description': 'Elasticsearch cluster endpoint', 'x-example': 'host.elasticsearch.example'}, 'port': {'type': 'number', 'description': 'Elasticsearch cluster endpoint port', 'x-example': 1234}, 'access_key': {'type': 'string', 'description': 'Elasticsearch user (or IAM key for AWS ES)', 'x-example': 'some_string'}, 'secret_key': {'type': 'string', 'description': 'Elasticsearch password (or IAM secret for AWS ES)', 'x-example': 'some_secret_string'}, 'aws_region': {'type': 'string', 'description': 'Amazon web service region', 'x-example': 
'us-east-1'}, 'use_ssl': {'type': 'boolean', 'description': 'Use ssl for Elasticsearch. Defaults to True', 'x-example': True}, 'index_prefix': {'type': 'string', 'description': "Elasticsearch's index prefix", 'x-example': 'logentry_'}, 'index_settings': {'type': 'object', 'description': "Elasticsearch's index settings"}}}, 'kafka_config': {'type': 'object', 'description': 'Kafka cluster configuration', 'properties': {'bootstrap_servers': {'type': 'array', 'description': 'List of Kafka brokers to bootstrap the client from', 'uniqueItems': True, 'items': {'type': 'string'}}, 'topic': {'type': 'string', 'description': 'Kafka topic to publish log entries to', 'x-example': 'logentry'}, 'max_block_seconds': {'type': 'number', 'description': 'Max number of seconds to block during a `send()`, either because the buffer is full or metadata unavailable', 'x-example': 10}}}, 'kinesis_stream_config': {'type': 'object', 'description': 'AWS Kinesis Stream configuration', 'properties': {'stream_name': {'type': 'string', 'description': 'Kinesis stream to send action logs to', 'x-example': 'logentry-kinesis-stream'}, 'aws_region': {'type': 'string', 'description': 'AWS region', 'x-example': 'us-east-1'}, 'aws_access_key': {'type': 'string', 'description': 'AWS access key', 'x-example': 'some_access_key'}, 'aws_secret_key': {'type': 'string', 'description': 'AWS secret key', 'x-example': 'some_secret_key'}, 'connect_timeout': {'type': 'number', 'description': 'Number of seconds before timeout when attempting to make a connection', 'x-example': 5}, 'read_timeout': {'type': 'number', 'description': 'Number of seconds before timeout when reading from a connection', 'x-example': 5}, 'retries': {'type': 'number', 'description': 'Max number of attempts made on a single request', 'x-example': 5}, 'max_pool_connections': {'type': 'number', 'description': 'The maximum number of connections to keep in a connection pool', 'x-example': 10}}}}}, 'FEATURE_BLACKLISTED_EMAILS': {'type': 'boolean', 
'description': 'If set to true, no new User accounts may be created if their email domain is blacklisted.', 'x-example': False}, 'BLACKLISTED_EMAIL_DOMAINS': {'type': 'array', 'description': 'The array of email-address domains that is used if FEATURE_BLACKLISTED_EMAILS is set to true.', 'x-example': ['example.com', 'example.org']}, 'FRESH_LOGIN_TIMEOUT': {'type': 'string', 'description': 'The time after which a fresh login requires users to reenter their password', 'x-example': '5m'}, 'WEBHOOK_HOSTNAME_BLACKLIST': {'type': 'array', 'description': 'The set of hostnames to disallow from webhooks when validating, beyond localhost', 'x-example': ['somexternaldomain.com']}, 'CREATE_PRIVATE_REPO_ON_PUSH': {'type': 'boolean', 'description': 'Whether new repositories created by push are set to private visibility. Defaults to True.', 'x-example': True}, 'CREATE_NAMESPACE_ON_PUSH': {'type': 'boolean', 'description': 'Whether new push to a non-existent organization creates it. Defaults to False.', 'x-example': False}, 'FEATURE_USER_INITIALIZE': {'type': 'boolean', 'description': 'If set to true, the first User account may be created via API /api/v1/user/initialize', 'x-example': False}, 'ALLOWED_OCI_ARTIFACT_TYPES': {'type': 'object', 'description': 'The set of allowed OCI artifact mimetypes and the assiciated layer types', 'x-example': {'application/vnd.cncf.helm.config.v1+json': ['application/tar+gzip'], 'application/vnd.sylabs.sif.config.v1+json': ['application/vnd.sylabs.sif.layer.v1.sif']}}}} |
# example file for submodule imports
def divide_me_by_2(x):
return x/2
| def divide_me_by_2(x):
return x / 2 |
class Solution:
def connect(self, root):
nodes = [[root], []]
x = 0
while (nodes[0] and nodes[0][0]) or (nodes[1] and nodes[1][0]):
for i in range(len(nodes[x])):
nodes[x][i].next = None if i == len(nodes[x]) - 1 else nodes[x][i+1]
nodes[(1 + x) % 2].append(nodes[x][i].left)
nodes[(1 + x) % 2].append(nodes[x][i].right)
nodes[x] = []
x = (1 + x) % 2
return root
| class Solution:
def connect(self, root):
nodes = [[root], []]
x = 0
while nodes[0] and nodes[0][0] or (nodes[1] and nodes[1][0]):
for i in range(len(nodes[x])):
nodes[x][i].next = None if i == len(nodes[x]) - 1 else nodes[x][i + 1]
nodes[(1 + x) % 2].append(nodes[x][i].left)
nodes[(1 + x) % 2].append(nodes[x][i].right)
nodes[x] = []
x = (1 + x) % 2
return root |
CELERY_TIMEZONE = 'Europe/Rome'
# The backend used to store task results
CELERY_RESULT_BACKEND = 'rpc://'
# If set to True, result messages will be persistent. This means the messages will not be lost after a broker restart
CELERY_RESULT_PERSISTENT = True
CELERY_ACCEPT_CONTENT=['json', 'pickle']
CELERY_TASK_SERIALIZER='json'
CELERY_RESULT_SERIALIZER='json'
# Broker settings.
BROKER_URL = 'amqp://guest:guest@localhost:5672//'
BROKER_HEARTBEAT = 10.0
BROKER_HEARTBEAT_CHECKRATE = 2.0
CELERY_IMPORTS = [ 'app_celery.tasks' ]
CELERYD_STATE_DB = '/var/celery/db/state'
# Enables error emails.
CELERY_SEND_TASK_ERROR_EMAILS = True
# Name and email addresses of recipients
ADMINS = (
('Administrator Name', 'admin@somedoamin.net'),
)
# Email address used as sender (From field).
SERVER_EMAIL = 'no-reply@somedomain.net'
# Mailserver configuration
EMAIL_HOST = 'localhost'
EMAIL_PORT = 25
# Send events so the worker can be monitored by tools like celerymon.
CELERY_SEND_EVENTS = True
# If enabled the worker pool can be restarted using the pool_restart remote control command.
CELERYD_POOL_RESTARTS = True
| celery_timezone = 'Europe/Rome'
celery_result_backend = 'rpc://'
celery_result_persistent = True
celery_accept_content = ['json', 'pickle']
celery_task_serializer = 'json'
celery_result_serializer = 'json'
broker_url = 'amqp://guest:guest@localhost:5672//'
broker_heartbeat = 10.0
broker_heartbeat_checkrate = 2.0
celery_imports = ['app_celery.tasks']
celeryd_state_db = '/var/celery/db/state'
celery_send_task_error_emails = True
admins = (('Administrator Name', 'admin@somedoamin.net'),)
server_email = 'no-reply@somedomain.net'
email_host = 'localhost'
email_port = 25
celery_send_events = True
celeryd_pool_restarts = True |
# colorcodingfor rows(...)
def colornumber(color):
if color == 'd':
return 0
elif color == 'e':
return 1
elif color == 'f':
return 2
elif color == 'g':
return 3
elif color == 'h':
return 4
elif color == 'i':
return 5
elif color == 'j':
return 6
elif color == 'k':
return 7
elif color == 'l':
return 8
else:
return 9 | def colornumber(color):
if color == 'd':
return 0
elif color == 'e':
return 1
elif color == 'f':
return 2
elif color == 'g':
return 3
elif color == 'h':
return 4
elif color == 'i':
return 5
elif color == 'j':
return 6
elif color == 'k':
return 7
elif color == 'l':
return 8
else:
return 9 |
[
{
'date': '2018-01-01',
'description': "New Year's Day",
'locale': 'en-US',
'notes': '',
'region': '',
'type': 'NF'
},
{
'date': '2018-01-15',
'description': 'Birthday of Martin Luther King, Jr.',
'locale': 'en-US',
'notes': '',
'region': '',
'type': 'NV'
},
{
'date': '2018-02-19',
'description': "Washington's Birthday",
'locale': 'en-US',
'notes': '',
'region': '',
'type': 'NV'
},
{
'date': '2018-04-16',
'description': "Patriots' Day",
'locale': 'en-US',
'notes': '',
'region': 'MA',
'type': 'V'
},
{
'date': '2018-04-16',
'description': "Patriots' Day",
'locale': 'en-US',
'notes': '',
'region': 'ME',
'type': 'V'
},
{
'date': '2018-05-28',
'description': 'Memorial Day',
'locale': 'en-US',
'notes': '',
'region': '',
'type': 'NV'
},
{
'date': '2018-07-04',
'description': 'Independence Day',
'locale': 'en-US',
'notes': '',
'region': '',
'type': 'NF'
},
{
'date': '2018-09-03',
'description': 'Labor Day',
'locale': 'en-US',
'notes': '',
'region': '',
'type': 'NV'
},
{
'date': '2018-10-08',
'description': 'Columbus Day',
'locale': 'en-US',
'notes': '',
'region': '',
'type': 'NV'
},
{
'date': '2018-11-11',
'description': 'Veterans Day',
'locale': 'en-US',
'notes': '',
'region': '',
'type': 'NF'
},
{
'date': '2018-11-22',
'description': 'Thanksgiving Day',
'locale': 'en-US',
'notes': '',
'region': '',
'type': 'NV'
},
{
'date': '2018-11-23',
'description': 'Day after Thanksgiving',
'locale': 'en-US',
'notes': '',
'region': '',
'type': 'NV'
},
{
'date': '2018-12-24',
'description': 'Christmas Eve',
'locale': 'en-US',
'notes': '',
'region': '',
'type': 'NRF'
},
{
'date': '2018-12-25',
'description': 'Christmas Day',
'locale': 'en-US',
'notes': '',
'region': '',
'type': 'NRF'
}
] | [{'date': '2018-01-01', 'description': "New Year's Day", 'locale': 'en-US', 'notes': '', 'region': '', 'type': 'NF'}, {'date': '2018-01-15', 'description': 'Birthday of Martin Luther King, Jr.', 'locale': 'en-US', 'notes': '', 'region': '', 'type': 'NV'}, {'date': '2018-02-19', 'description': "Washington's Birthday", 'locale': 'en-US', 'notes': '', 'region': '', 'type': 'NV'}, {'date': '2018-04-16', 'description': "Patriots' Day", 'locale': 'en-US', 'notes': '', 'region': 'MA', 'type': 'V'}, {'date': '2018-04-16', 'description': "Patriots' Day", 'locale': 'en-US', 'notes': '', 'region': 'ME', 'type': 'V'}, {'date': '2018-05-28', 'description': 'Memorial Day', 'locale': 'en-US', 'notes': '', 'region': '', 'type': 'NV'}, {'date': '2018-07-04', 'description': 'Independence Day', 'locale': 'en-US', 'notes': '', 'region': '', 'type': 'NF'}, {'date': '2018-09-03', 'description': 'Labor Day', 'locale': 'en-US', 'notes': '', 'region': '', 'type': 'NV'}, {'date': '2018-10-08', 'description': 'Columbus Day', 'locale': 'en-US', 'notes': '', 'region': '', 'type': 'NV'}, {'date': '2018-11-11', 'description': 'Veterans Day', 'locale': 'en-US', 'notes': '', 'region': '', 'type': 'NF'}, {'date': '2018-11-22', 'description': 'Thanksgiving Day', 'locale': 'en-US', 'notes': '', 'region': '', 'type': 'NV'}, {'date': '2018-11-23', 'description': 'Day after Thanksgiving', 'locale': 'en-US', 'notes': '', 'region': '', 'type': 'NV'}, {'date': '2018-12-24', 'description': 'Christmas Eve', 'locale': 'en-US', 'notes': '', 'region': '', 'type': 'NRF'}, {'date': '2018-12-25', 'description': 'Christmas Day', 'locale': 'en-US', 'notes': '', 'region': '', 'type': 'NRF'}] |
# A function to get the desired metrics while working with multiple model training procedures
def print_classification_metrics(y_train, train_pred, y_test, test_pred, return_performance=True):
dict_performance = {'Training Accuracy: ': accuracy_score(y_train, train_pred),
'Training f1-score: ': f1_score(y_train, train_pred),
'Accuracy: ': accuracy_score(y_test, test_pred),
'Precision: ': precision_score(y_test, test_pred),
'Recall: ': recall_score(y_test, test_pred),
'f1-score: ': f1_score(y_test, test_pred)}
for key, value in dict_performance.items():
print("{} : {}".format(key, value))
if return_performance:
return dict_performance
| def print_classification_metrics(y_train, train_pred, y_test, test_pred, return_performance=True):
dict_performance = {'Training Accuracy: ': accuracy_score(y_train, train_pred), 'Training f1-score: ': f1_score(y_train, train_pred), 'Accuracy: ': accuracy_score(y_test, test_pred), 'Precision: ': precision_score(y_test, test_pred), 'Recall: ': recall_score(y_test, test_pred), 'f1-score: ': f1_score(y_test, test_pred)}
for (key, value) in dict_performance.items():
print('{} : {}'.format(key, value))
if return_performance:
return dict_performance |
#!/usr/bin/env python
def part_one(values: list[int]) -> int:
count = sum(values[index] < values[index + 1] for index in range(len(values) - 1))
return count
def part_two(values: list[int]) -> int:
summed_list = list(sum(three) for three in zip(values, values[1:], values[2:]))
count = sum(summed_list[index] < summed_list[index + 1] for index in range(len(summed_list) - 1))
return count
if __name__ == '__main__':
values_: list[int] = [int(row) for row in open("../../../input.txt").readlines()]
print(part_one(values=values_))
print(part_two(values=values_))
| def part_one(values: list[int]) -> int:
count = sum((values[index] < values[index + 1] for index in range(len(values) - 1)))
return count
def part_two(values: list[int]) -> int:
summed_list = list((sum(three) for three in zip(values, values[1:], values[2:])))
count = sum((summed_list[index] < summed_list[index + 1] for index in range(len(summed_list) - 1)))
return count
if __name__ == '__main__':
values_: list[int] = [int(row) for row in open('../../../input.txt').readlines()]
print(part_one(values=values_))
print(part_two(values=values_)) |
# Usage: gunicorn ProductCatalog.wsgi --bind 0.0.0.0:$PORT --config deploy/gunicorn.conf.py
# Max number of pending connections.
backlog = 1024
# Number of workers spawned for request handling.
workers = 1
# Standard type of workers.
worker_class = 'sync'
# Kill worker if it does not notify the master process in this number of seconds.
timeout = 30
# Log file location.
logfile = '/var/log/productcatalog-gunicorn.log'
# The granularity of log output.
loglevel = 'info'
| backlog = 1024
workers = 1
worker_class = 'sync'
timeout = 30
logfile = '/var/log/productcatalog-gunicorn.log'
loglevel = 'info' |
# NOTE: This objects are used directly in the external-notification-data and vulnerability-service
# on the frontend, so be careful with changing their existing keys.
PRIORITY_LEVELS = {
"Unknown": {
"title": "Unknown",
"value": "Unknown",
"index": 5,
"level": "info",
"color": "#9B9B9B",
"score": 0,
"description": "Unknown is either a security problem that has not been assigned to a priority"
+ " yet or a priority that our system did not recognize",
"banner_required": False,
},
"Negligible": {
"title": "Negligible",
"value": "Negligible",
"index": 4,
"level": "info",
"color": "#9B9B9B",
"score": 1,
"description": "Negligible is technically a security problem, but is only theoretical "
+ "in nature, requires a very special situation, has almost no install base, "
+ "or does no real damage.",
"banner_required": False,
},
"Low": {
"title": "Low",
"value": "Low",
"index": 3,
"level": "warning",
"color": "#F8CA1C",
"score": 3,
"description": "Low is a security problem, but is hard to exploit due to environment, "
+ "requires a user-assisted attack, a small install base, or does very little"
+ " damage.",
"banner_required": False,
},
"Medium": {
"title": "Medium",
"value": "Medium",
"index": 2,
"level": "warning",
"color": "#FCA657",
"score": 6,
"description": "Medium is a real security problem, and is exploitable for many people. "
+ "Includes network daemon denial of service attacks, cross-site scripting, and "
+ "gaining user privileges.",
"banner_required": False,
},
"High": {
"title": "High",
"value": "High",
"index": 1,
"level": "warning",
"color": "#F77454",
"score": 9,
"description": "High is a real problem, exploitable for many people in a default "
+ "installation. Includes serious remote denial of services, local root "
+ "privilege escalations, or data loss.",
"banner_required": False,
},
"Critical": {
"title": "Critical",
"value": "Critical",
"index": 0,
"level": "error",
"color": "#D64456",
"score": 10,
"description": "Critical is a world-burning problem, exploitable for nearly all people in "
+ "a installation of the package. Includes remote root privilege escalations, "
+ "or massive data loss.",
"banner_required": False,
},
}
def get_priority_for_index(index):
try:
int_index = int(index)
except ValueError:
return "Unknown"
for priority in PRIORITY_LEVELS:
if PRIORITY_LEVELS[priority]["index"] == int_index:
return priority
return "Unknown"
def get_priority_from_cvssscore(score):
try:
if 0 < score < 4:
return PRIORITY_LEVELS["Low"]["value"]
if 4 <= score < 7:
return PRIORITY_LEVELS["Medium"]["value"]
if 7 <= score < 9:
return PRIORITY_LEVELS["High"]["value"]
if 9 <= score < 10:
return PRIORITY_LEVELS["Critical"]["value"]
except ValueError:
return "Unknown"
return "Unknown"
def fetch_vuln_severity(vuln, enrichments):
if (
vuln["normalized_severity"]
and vuln["normalized_severity"] != PRIORITY_LEVELS["Unknown"]["value"]
):
return vuln["normalized_severity"]
if enrichments.get(vuln["id"], {}).get("baseScore", None):
return get_priority_from_cvssscore(enrichments[vuln["id"]]["baseScore"])
return PRIORITY_LEVELS["Unknown"]["value"]
| priority_levels = {'Unknown': {'title': 'Unknown', 'value': 'Unknown', 'index': 5, 'level': 'info', 'color': '#9B9B9B', 'score': 0, 'description': 'Unknown is either a security problem that has not been assigned to a priority' + ' yet or a priority that our system did not recognize', 'banner_required': False}, 'Negligible': {'title': 'Negligible', 'value': 'Negligible', 'index': 4, 'level': 'info', 'color': '#9B9B9B', 'score': 1, 'description': 'Negligible is technically a security problem, but is only theoretical ' + 'in nature, requires a very special situation, has almost no install base, ' + 'or does no real damage.', 'banner_required': False}, 'Low': {'title': 'Low', 'value': 'Low', 'index': 3, 'level': 'warning', 'color': '#F8CA1C', 'score': 3, 'description': 'Low is a security problem, but is hard to exploit due to environment, ' + 'requires a user-assisted attack, a small install base, or does very little' + ' damage.', 'banner_required': False}, 'Medium': {'title': 'Medium', 'value': 'Medium', 'index': 2, 'level': 'warning', 'color': '#FCA657', 'score': 6, 'description': 'Medium is a real security problem, and is exploitable for many people. ' + 'Includes network daemon denial of service attacks, cross-site scripting, and ' + 'gaining user privileges.', 'banner_required': False}, 'High': {'title': 'High', 'value': 'High', 'index': 1, 'level': 'warning', 'color': '#F77454', 'score': 9, 'description': 'High is a real problem, exploitable for many people in a default ' + 'installation. Includes serious remote denial of services, local root ' + 'privilege escalations, or data loss.', 'banner_required': False}, 'Critical': {'title': 'Critical', 'value': 'Critical', 'index': 0, 'level': 'error', 'color': '#D64456', 'score': 10, 'description': 'Critical is a world-burning problem, exploitable for nearly all people in ' + 'a installation of the package. Includes remote root privilege escalations, ' + 'or massive data loss.', 'banner_required': False}}
def get_priority_for_index(index):
try:
int_index = int(index)
except ValueError:
return 'Unknown'
for priority in PRIORITY_LEVELS:
if PRIORITY_LEVELS[priority]['index'] == int_index:
return priority
return 'Unknown'
def get_priority_from_cvssscore(score):
try:
if 0 < score < 4:
return PRIORITY_LEVELS['Low']['value']
if 4 <= score < 7:
return PRIORITY_LEVELS['Medium']['value']
if 7 <= score < 9:
return PRIORITY_LEVELS['High']['value']
if 9 <= score < 10:
return PRIORITY_LEVELS['Critical']['value']
except ValueError:
return 'Unknown'
return 'Unknown'
def fetch_vuln_severity(vuln, enrichments):
if vuln['normalized_severity'] and vuln['normalized_severity'] != PRIORITY_LEVELS['Unknown']['value']:
return vuln['normalized_severity']
if enrichments.get(vuln['id'], {}).get('baseScore', None):
return get_priority_from_cvssscore(enrichments[vuln['id']]['baseScore'])
return PRIORITY_LEVELS['Unknown']['value'] |
class solution:
def findNumbers(self, nums=[]):
even = 0
for num in nums:
numString = str(num)
if len(numString) % 2 == 0:
even += 1
return even
if __name__ == "__main__":
sol = solution()
_ = [int(n) for n in input().split()]
print(sol.findNumbers(_))
| class Solution:
def find_numbers(self, nums=[]):
even = 0
for num in nums:
num_string = str(num)
if len(numString) % 2 == 0:
even += 1
return even
if __name__ == '__main__':
sol = solution()
_ = [int(n) for n in input().split()]
print(sol.findNumbers(_)) |
pressure_arr = [80, 90, 100, 150, 120, 110, 160, 110, 100]
sum = 0
for pressure in pressure_arr:
sum = pressure + sum
length = len(pressure_arr)
mean = sum / length
print("The mean is", mean)
| pressure_arr = [80, 90, 100, 150, 120, 110, 160, 110, 100]
sum = 0
for pressure in pressure_arr:
sum = pressure + sum
length = len(pressure_arr)
mean = sum / length
print('The mean is', mean) |
n = int(input())
families = map(int, input().split())
families = sorted(families)
for i in range(len(families)):
if(i!=len(families)-1):
if(families[i]!=families[i - 1] and families[i]!=families[i + 1]):
print(families[i])
break
else:
print(families[i])
| n = int(input())
families = map(int, input().split())
families = sorted(families)
for i in range(len(families)):
if i != len(families) - 1:
if families[i] != families[i - 1] and families[i] != families[i + 1]:
print(families[i])
break
else:
print(families[i]) |
class Solution:
def expand(self, S: str) -> List[str]:
return sorted(self.dfs(S, ['']))
def dfs(self, s, prev):
if not s:
return prev
n = len(s)
cur = ''
found = False
result = []
for i in range(n):
if s[i].isalpha():
cur += s[i]
continue
if s[i] == '{':
found = True
start = i
break
added = []
for sub in prev:
added.append(sub + cur)
if not found:
return added
end = s.find('}')
chars = s[start + 1: end].split(',')
arr = []
for sub in added:
for ch in chars:
arr.append(sub + ch)
# print(s[end + 1: ])
return self.dfs(s[end + 1: ], arr)
| class Solution:
def expand(self, S: str) -> List[str]:
return sorted(self.dfs(S, ['']))
def dfs(self, s, prev):
if not s:
return prev
n = len(s)
cur = ''
found = False
result = []
for i in range(n):
if s[i].isalpha():
cur += s[i]
continue
if s[i] == '{':
found = True
start = i
break
added = []
for sub in prev:
added.append(sub + cur)
if not found:
return added
end = s.find('}')
chars = s[start + 1:end].split(',')
arr = []
for sub in added:
for ch in chars:
arr.append(sub + ch)
return self.dfs(s[end + 1:], arr) |
def translate(data, char, replacement):
result = data.replace(char, replacement)
print(result)
return result
def includes(data, string):
if string in data:
return True
return False
def start(data, string):
counter = 0
is_it = False
for char in string:
if char == data[counter]:
counter += 1
is_it = True
continue
else:
is_it = False
break
return is_it
def findindex(data, char):
for i in range(0, len(data)):
if char == data[i]:
last_inedx = i
return last_inedx
def remove(data, start_index, count):
start_index = int(start_index)
count = int(count)
stop_index = int(start_index) + int(count)
if len(data) > stop_index:
data = data[0: start_index:] + data[stop_index + 0::]
print(data)
return data
data = input()
command = input()
while command != "End":
command = command.split()
if command[0] == "Lowercase":
data = data.lower()
print(data)
command = input()
continue
elif len(command) == 2:
act = command[0]
a = command[1]
elif len(command) == 3:
act = command[0]
a = command[1]
b = command[2]
if act == "Translate":
data = translate(data, a, b)
elif act == "Includes":
print(includes(data, a))
elif act == "Start":
print(start(data, a))
elif act == "FindIndex":
print(findindex(data, a))
elif act == "Remove":
data = remove(data, a, b)
command = input() | def translate(data, char, replacement):
result = data.replace(char, replacement)
print(result)
return result
def includes(data, string):
if string in data:
return True
return False
def start(data, string):
counter = 0
is_it = False
for char in string:
if char == data[counter]:
counter += 1
is_it = True
continue
else:
is_it = False
break
return is_it
def findindex(data, char):
for i in range(0, len(data)):
if char == data[i]:
last_inedx = i
return last_inedx
def remove(data, start_index, count):
start_index = int(start_index)
count = int(count)
stop_index = int(start_index) + int(count)
if len(data) > stop_index:
data = data[0:start_index] + data[stop_index + 0:]
print(data)
return data
data = input()
command = input()
while command != 'End':
command = command.split()
if command[0] == 'Lowercase':
data = data.lower()
print(data)
command = input()
continue
elif len(command) == 2:
act = command[0]
a = command[1]
elif len(command) == 3:
act = command[0]
a = command[1]
b = command[2]
if act == 'Translate':
data = translate(data, a, b)
elif act == 'Includes':
print(includes(data, a))
elif act == 'Start':
print(start(data, a))
elif act == 'FindIndex':
print(findindex(data, a))
elif act == 'Remove':
data = remove(data, a, b)
command = input() |
L = 25
with open('input') as f:
nums = list(map(int, f.read().split()))
# Part 1
for i in range(L, len(nums)):
pre = nums[i - L:i]
n = nums[i]
d = {}
for p in pre:
if p in d and p != d[p]:
break
d[n - p] = p
else:
print(n)
break
# Part 2
i = 0
j = 2
while j < len(nums):
cont = nums[i:j]
s = sum(cont)
if s > n:
i += 1
elif s < n or j - i < 2:
j += 1
else:
print(min(cont) + max(cont))
break
| l = 25
with open('input') as f:
nums = list(map(int, f.read().split()))
for i in range(L, len(nums)):
pre = nums[i - L:i]
n = nums[i]
d = {}
for p in pre:
if p in d and p != d[p]:
break
d[n - p] = p
else:
print(n)
break
i = 0
j = 2
while j < len(nums):
cont = nums[i:j]
s = sum(cont)
if s > n:
i += 1
elif s < n or j - i < 2:
j += 1
else:
print(min(cont) + max(cont))
break |
class UnexpectedMode(ValueError):
def __init__(self, mode: str) -> None:
super().__init__(
f"Unexpected mode - found '{mode}' but must be 'image' or 'mesh'"
)
| class Unexpectedmode(ValueError):
def __init__(self, mode: str) -> None:
super().__init__(f"Unexpected mode - found '{mode}' but must be 'image' or 'mesh'") |
# Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
{
'includes': [
'../../../../../../common_settings.gypi', # Common settings
],
'targets': [
{
'target_name': 'iLBC',
'type': '<(library)',
'dependencies': [
'../../../../../../common_audio/signal_processing_library/main/source/spl.gyp:spl',
],
'include_dirs': [
'../interface',
],
'direct_dependent_settings': {
'include_dirs': [
'../interface',
],
},
'sources': [
'../interface/ilbc.h',
'abs_quant.c',
'abs_quant_loop.c',
'augmented_cb_corr.c',
'bw_expand.c',
'cb_construct.c',
'cb_mem_energy.c',
'cb_mem_energy_augmentation.c',
'cb_mem_energy_calc.c',
'cb_search.c',
'cb_search_core.c',
'cb_update_best_index.c',
'chebyshev.c',
'comp_corr.c',
'constants.c',
'create_augmented_vec.c',
'decode.c',
'decode_residual.c',
'decoder_interpolate_lsf.c',
'do_plc.c',
'encode.c',
'energy_inverse.c',
'enh_upsample.c',
'enhancer.c',
'enhancer_interface.c',
'filtered_cb_vecs.c',
'frame_classify.c',
'gain_dequant.c',
'gain_quant.c',
'get_cd_vec.c',
'get_lsp_poly.c',
'get_sync_seq.c',
'hp_input.c',
'hp_output.c',
'ilbc.c',
'index_conv_dec.c',
'index_conv_enc.c',
'init_decode.c',
'init_encode.c',
'interpolate.c',
'interpolate_samples.c',
'lpc_encode.c',
'lsf_check.c',
'lsf_interpolate_to_poly_dec.c',
'lsf_interpolate_to_poly_enc.c',
'lsf_to_lsp.c',
'lsf_to_poly.c',
'lsp_to_lsf.c',
'my_corr.c',
'nearest_neighbor.c',
'pack_bits.c',
'poly_to_lsf.c',
'poly_to_lsp.c',
'refiner.c',
'simple_interpolate_lsf.c',
'simple_lpc_analysis.c',
'simple_lsf_dequant.c',
'simple_lsf_quant.c',
'smooth.c',
'smooth_out_data.c',
'sort_sq.c',
'split_vq.c',
'state_construct.c',
'state_search.c',
'swap_bytes.c',
'unpack_bits.c',
'vq3.c',
'vq4.c',
'window32_w32.c',
'xcorr_coef.c',
'abs_quant.h',
'abs_quant_loop.h',
'augmented_cb_corr.h',
'bw_expand.h',
'cb_construct.h',
'cb_mem_energy.h',
'cb_mem_energy_augmentation.h',
'cb_mem_energy_calc.h',
'cb_search.h',
'cb_search_core.h',
'cb_update_best_index.h',
'chebyshev.h',
'comp_corr.h',
'constants.h',
'create_augmented_vec.h',
'decode.h',
'decode_residual.h',
'decoder_interpolate_lsf.h',
'do_plc.h',
'encode.h',
'energy_inverse.h',
'enh_upsample.h',
'enhancer.h',
'enhancer_interface.h',
'filtered_cb_vecs.h',
'frame_classify.h',
'gain_dequant.h',
'gain_quant.h',
'get_cd_vec.h',
'get_lsp_poly.h',
'get_sync_seq.h',
'hp_input.h',
'hp_output.h',
'defines.h',
'index_conv_dec.h',
'index_conv_enc.h',
'init_decode.h',
'init_encode.h',
'interpolate.h',
'interpolate_samples.h',
'lpc_encode.h',
'lsf_check.h',
'lsf_interpolate_to_poly_dec.h',
'lsf_interpolate_to_poly_enc.h',
'lsf_to_lsp.h',
'lsf_to_poly.h',
'lsp_to_lsf.h',
'my_corr.h',
'nearest_neighbor.h',
'pack_bits.h',
'poly_to_lsf.h',
'poly_to_lsp.h',
'refiner.h',
'simple_interpolate_lsf.h',
'simple_lpc_analysis.h',
'simple_lsf_dequant.h',
'simple_lsf_quant.h',
'smooth.h',
'smooth_out_data.h',
'sort_sq.h',
'split_vq.h',
'state_construct.h',
'state_search.h',
'swap_bytes.h',
'unpack_bits.h',
'vq3.h',
'vq4.h',
'window32_w32.h',
'xcorr_coef.h',
],
},
],
}
# Local Variables:
# tab-width:2
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=2 shiftwidth=2:
| {'includes': ['../../../../../../common_settings.gypi'], 'targets': [{'target_name': 'iLBC', 'type': '<(library)', 'dependencies': ['../../../../../../common_audio/signal_processing_library/main/source/spl.gyp:spl'], 'include_dirs': ['../interface'], 'direct_dependent_settings': {'include_dirs': ['../interface']}, 'sources': ['../interface/ilbc.h', 'abs_quant.c', 'abs_quant_loop.c', 'augmented_cb_corr.c', 'bw_expand.c', 'cb_construct.c', 'cb_mem_energy.c', 'cb_mem_energy_augmentation.c', 'cb_mem_energy_calc.c', 'cb_search.c', 'cb_search_core.c', 'cb_update_best_index.c', 'chebyshev.c', 'comp_corr.c', 'constants.c', 'create_augmented_vec.c', 'decode.c', 'decode_residual.c', 'decoder_interpolate_lsf.c', 'do_plc.c', 'encode.c', 'energy_inverse.c', 'enh_upsample.c', 'enhancer.c', 'enhancer_interface.c', 'filtered_cb_vecs.c', 'frame_classify.c', 'gain_dequant.c', 'gain_quant.c', 'get_cd_vec.c', 'get_lsp_poly.c', 'get_sync_seq.c', 'hp_input.c', 'hp_output.c', 'ilbc.c', 'index_conv_dec.c', 'index_conv_enc.c', 'init_decode.c', 'init_encode.c', 'interpolate.c', 'interpolate_samples.c', 'lpc_encode.c', 'lsf_check.c', 'lsf_interpolate_to_poly_dec.c', 'lsf_interpolate_to_poly_enc.c', 'lsf_to_lsp.c', 'lsf_to_poly.c', 'lsp_to_lsf.c', 'my_corr.c', 'nearest_neighbor.c', 'pack_bits.c', 'poly_to_lsf.c', 'poly_to_lsp.c', 'refiner.c', 'simple_interpolate_lsf.c', 'simple_lpc_analysis.c', 'simple_lsf_dequant.c', 'simple_lsf_quant.c', 'smooth.c', 'smooth_out_data.c', 'sort_sq.c', 'split_vq.c', 'state_construct.c', 'state_search.c', 'swap_bytes.c', 'unpack_bits.c', 'vq3.c', 'vq4.c', 'window32_w32.c', 'xcorr_coef.c', 'abs_quant.h', 'abs_quant_loop.h', 'augmented_cb_corr.h', 'bw_expand.h', 'cb_construct.h', 'cb_mem_energy.h', 'cb_mem_energy_augmentation.h', 'cb_mem_energy_calc.h', 'cb_search.h', 'cb_search_core.h', 'cb_update_best_index.h', 'chebyshev.h', 'comp_corr.h', 'constants.h', 'create_augmented_vec.h', 'decode.h', 'decode_residual.h', 'decoder_interpolate_lsf.h', 'do_plc.h', 
'encode.h', 'energy_inverse.h', 'enh_upsample.h', 'enhancer.h', 'enhancer_interface.h', 'filtered_cb_vecs.h', 'frame_classify.h', 'gain_dequant.h', 'gain_quant.h', 'get_cd_vec.h', 'get_lsp_poly.h', 'get_sync_seq.h', 'hp_input.h', 'hp_output.h', 'defines.h', 'index_conv_dec.h', 'index_conv_enc.h', 'init_decode.h', 'init_encode.h', 'interpolate.h', 'interpolate_samples.h', 'lpc_encode.h', 'lsf_check.h', 'lsf_interpolate_to_poly_dec.h', 'lsf_interpolate_to_poly_enc.h', 'lsf_to_lsp.h', 'lsf_to_poly.h', 'lsp_to_lsf.h', 'my_corr.h', 'nearest_neighbor.h', 'pack_bits.h', 'poly_to_lsf.h', 'poly_to_lsp.h', 'refiner.h', 'simple_interpolate_lsf.h', 'simple_lpc_analysis.h', 'simple_lsf_dequant.h', 'simple_lsf_quant.h', 'smooth.h', 'smooth_out_data.h', 'sort_sq.h', 'split_vq.h', 'state_construct.h', 'state_search.h', 'swap_bytes.h', 'unpack_bits.h', 'vq3.h', 'vq4.h', 'window32_w32.h', 'xcorr_coef.h']}]} |
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/lzh/racecar_ws/devel/include;/home/lzh/racecar_ws/src/navigation-melodic-devel/dwa_local_planner/include".split(';') if "/home/lzh/racecar_ws/devel/include;/home/lzh/racecar_ws/src/navigation-melodic-devel/dwa_local_planner/include" != "" else []
PROJECT_CATKIN_DEPENDS = "base_local_planner;dynamic_reconfigure;nav_msgs;pluginlib;sensor_msgs;roscpp;tf2;tf2_ros".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-ldwa_local_planner".split(';') if "-ldwa_local_planner" != "" else []
PROJECT_NAME = "dwa_local_planner"
PROJECT_SPACE_DIR = "/home/lzh/racecar_ws/devel"
PROJECT_VERSION = "1.16.7"
| catkin_package_prefix = ''
project_pkg_config_include_dirs = '/home/lzh/racecar_ws/devel/include;/home/lzh/racecar_ws/src/navigation-melodic-devel/dwa_local_planner/include'.split(';') if '/home/lzh/racecar_ws/devel/include;/home/lzh/racecar_ws/src/navigation-melodic-devel/dwa_local_planner/include' != '' else []
project_catkin_depends = 'base_local_planner;dynamic_reconfigure;nav_msgs;pluginlib;sensor_msgs;roscpp;tf2;tf2_ros'.replace(';', ' ')
pkg_config_libraries_with_prefix = '-ldwa_local_planner'.split(';') if '-ldwa_local_planner' != '' else []
project_name = 'dwa_local_planner'
project_space_dir = '/home/lzh/racecar_ws/devel'
project_version = '1.16.7' |
nome = input("Digite seu nome ").strip().lower()
confirmacao = 'silva' in nome
print(f"Seu nome tem silva {confirmacao}") | nome = input('Digite seu nome ').strip().lower()
confirmacao = 'silva' in nome
print(f'Seu nome tem silva {confirmacao}') |
# *****************************
# Environment specific settings
# *****************************
# DO NOT use "DEBUG = True" in production environments
DEBUG = True
# DO NOT use Unsecure Secrets in production environments
# Generate a safe one with:
# python -c "import os; print repr(os.urandom(24));"
SECRET_KEY = (
'This is an UNSECURE Secret. CHANGE THIS for production environments.'
)
# SQLAlchemy settings
SQLALCHEMY_DATABASE_URI = 'sqlite:///../app.sqlite'
SQLALCHEMY_TRACK_MODIFICATIONS = False # Avoids a SQLAlchemy Warning
| debug = True
secret_key = 'This is an UNSECURE Secret. CHANGE THIS for production environments.'
sqlalchemy_database_uri = 'sqlite:///../app.sqlite'
sqlalchemy_track_modifications = False |
# coding: utf-8
# # Functions (1) - Creating Functions
# In this lesson we're going to learn about functions in Python. Functions are an important tool when programming and their use can be very complex. It's not the aim of this course to teach you how to implement functional programming, instead, this lesson will give you a grounding in how functions work and an insight into how we can use them to help us create charts with Plotly.
#
# ## What is a function?
#
# A function is a block of code which is used to perform a single action. A function should be reusable, and it should behave predictably. We have already used several built-in functions, such as <code>print()</code> and <code>len()</code>, but Python also allows you to create user-defined functions.
#
# ## How to create a function
#
# The syntax of creating a function is relatively straightforward. We first need to tell Python that we're going to define a function using the <code>def</code> keyword; we must then give the function a name followed by some parentheses (<code> () </code>) and a colon. Function names have the same restrictions as variable names (can't start with a number, can only contain letters, numbers and underscores). After the function name has been defined, any code within the function is indented by four spaces (or a tab):
# ````python
# def <function name>():
# <code to run>
# ````
#
# In the cell below, I'm defining a function which prints the string <code>"This is a function"</code> every time it is called:
# In[10]:
def testFunction():
    """Print a fixed message; demonstrates defining and calling a user function."""
    print("This is a function")
# When we have defined a function, we can call the function as we would call any built-in function that we have already used, remembering to include the parentheses:
# In[11]:
testFunction()
# ## Using arguments in a function
#
# When we use the <code>len()</code> function, we have to tell that function which object we want the length of. We are passing that object as an argument to the function:
# In[12]:
len("abcdefg")
# We can do the same with user-defined functions. To do so, we create the function as normal, but inside the parentheses we can put argument names. We can put as many as we like, but each must be separated by a comma:
# ````python
# def <function name>(<arg1>, <arg2>, . . . <argN>):
# <code to run>
# ````
#
# We can then reference these arguments inside the function. In the cell below, I've written a function which prints out two items. Notice that I've converted each item to a string using the <code>str()</code> function - this ensures that the function behaves predictably - without converting an integer to a string, the code wouldn't run.
# In[13]:
def testFunction2(item1, item2):
    """Print both items; each is passed through str() so any type works."""
    print(f"The first item is: {str(item1)}, the second item is: {str(item2)}")
# We can then use this function and pass arguments to it:
# In[14]:
testFunction2('abc', 20)
# The function will create a different output if we pass different arguments to it. This is because the arguments which are passed to a function only endure for the duration of that function.
# In[15]:
testFunction2('howdy', 'partner')
# ## Returning objects from a function
#
# Functions are useful when we use them to create or modify an object. Variables which are created inside a function are not available to the rest of the code, unless we return them (or specifically declare them to be <a href="http://stackoverflow.com/questions/423379/using-global-variables-in-a-function-other-than-the-one-that-created-them">global variables</a>)
#
# We can return an object created inside a function by using the return keyword; we must assign the output of a function to an object and we cannot write any more code after the return statement.
#
# In the cell below, I create a function which returns a list of alternating values. This function takes three arguments, two of which are the values to alternate, whilst the third is the number of times they must be repeated:
# In[16]:
def alternateList(item1, item2, repeats):
    """Return [item1, item2] repeated `repeats` times."""
    return [item1, item2] * repeats
# Because the function returns a value we must assign the output that is returned to a variable:
# In[17]:
repeated1 = alternateList(5, 50, 3)
# There are two variables created inside this function; <code>alternate</code> and <code>altRepeat</code>. These variables exist only within the function and we cannot access them in open code:
# In[18]:
print(alternate)
# In[19]:
print(altRepeat)
# But because we returned the value of the variable <code>altRepeat</code>, creating a new variable with that value, we can now see what the function <code>alternateList()</code> has created:
# In[20]:
repeated1
# We can return two or more variables from a function by separating each variable with a comma. We must assign each to an object:
# In[21]:
def alternateList(item1, item2, repeats):
    """Return both the base pair and the pair repeated `repeats` times."""
    base = [item1, item2]
    return base, base * repeats
pair, rpt = alternateList(77, 99, 5)
print(pair)
# In[22]:
print(rpt)
# ### What have we learnt this lesson?
# In this lesson we've learnt how to define a function using the <code>def</code> keyword, and how to pass arguments to the function. We've seen that these arguments only hold their value within the function, and that we can use a return statement to return one or more values from within the function.
#
# In the next lesson we'll look at how we can use functions to help us make our charts.
# If you have any questions, please ask in the comments section or email <a href="mailto:me@richard-muir.com">me@richard-muir.com</a>
| def test_function():
print('This is a function')
test_function()
len('abcdefg')
def test_function2(item1, item2):
print('The first item is: ' + str(item1) + ', the second item is: ' + str(item2))
test_function2('abc', 20)
test_function2('howdy', 'partner')
def alternate_list(item1, item2, repeats):
    """Return [item1, item2] repeated `repeats` times."""
    alternate = [item1, item2]
    alt_repeat = alternate * repeats
    # Bug fix: previously returned `altRepeat`, a name that no longer exists
    # after the snake_case rename, so every call raised NameError.
    return alt_repeat
repeated1 = alternate_list(5, 50, 3)
print(alternate)
print(altRepeat)
repeated1
def alternate_list(item1, item2, repeats):
    """Return (pair, pair repeated `repeats` times)."""
    alternate = [item1, item2]
    alt_repeat = alternate * repeats
    # Bug fix: previously returned `altRepeat` (NameError after the rename).
    return (alternate, alt_repeat)
(pair, rpt) = alternate_list(77, 99, 5)
print(pair)
print(rpt) |
# Demonstrate LIFO behaviour using a plain list as a stack.
stack = ['a', 'b', 'c']
print('Initial stack')
print(stack)
print('\nElements poped from stack:')
# pop() removes from the right-hand end, so items come back in reverse order.
for _ in range(3):
    print(stack.pop())
print('\nStack after elements are poped:')
print(stack)
| stack = []
stack.append('a')
stack.append('b')
stack.append('c')
print('Initial stack')
print(stack)
print('\nElements poped from stack:')
print(stack.pop())
print(stack.pop())
print(stack.pop())
print('\nStack after elements are poped:')
print(stack) |
def findLongestSubSeq(s):
    """Return the length of the longest repeating subsequence of `s`.

    Classic DP: LCS of the string with itself, disallowing i == j so the two
    occurrences must use different positions. O(n^2) time and space.
    (Parameter renamed from `str`, which shadowed the builtin; positional
    calls are unaffected.)
    """
    n = len(s)
    # dp[i][j]: answer for prefixes s[:i] vs s[:j] using distinct indices.
    dp = [[0] * (n + 1) for _ in range(n + 1)]
    for i in range(1, n + 1):
        for j in range(1, n + 1):
            # Characters match at different positions: extend the subsequence.
            if s[i - 1] == s[j - 1] and i != j:
                dp[i][j] = 1 + dp[i - 1][j - 1]
            else:
                dp[i][j] = max(dp[i][j - 1], dp[i - 1][j])
    return dp[n][n]
| def find_longest_sub_seq(str):
    # NOTE(review): the parameter is named `str`, shadowing the builtin.
    n = len(str)
    # dp[i][j]: longest repeating subsequence of prefixes str[:i] vs str[:j]
    # (LCS of the string with itself, identical indices disallowed).
    dp = [[0 for k in range(n + 1)] for l in range(n + 1)]
    for i in range(1, n + 1):
        for j in range(1, n + 1):
            # Match at different positions extends the subsequence by one.
            if str[i - 1] == str[j - 1] and i != j:
                dp[i][j] = 1 + dp[i - 1][j - 1]
            else:
                dp[i][j] = max(dp[i][j - 1], dp[i - 1][j])
    return dp[n][n]
@singleton
class Database:
    """Demo service wrapped by a `singleton` decorator (defined elsewhere)."""
    def __init__(self):
        print('Loading database')
| @singleton
class Database:
def __init__(self):
print('Loading database') |
#!/usr/bin/env python3
def get_case_data():
    """Read one stdin line and parse it as a list of integers."""
    return [int(i) for i in input().split()]
# Euclid's algorithm, written iteratively.
def get_gcd(a, b):
    while b != 0:
        a, b = b, a % b
    return a
def print_number_or_ok_if_equals(number, guess):
    """Print 'OK' when the guess matches the number, otherwise print the number."""
    if number == guess:
        print("OK")
    else:
        print(number)
# Read the number of cases, then check each proposed GCD against the real one.
number_of_cases = int(input())
for case in range(number_of_cases):
    first_integer, second_integer, proposed_gcd = get_case_data()
    real_gcd = get_gcd(first_integer, second_integer)
    print_number_or_ok_if_equals(real_gcd, proposed_gcd)
| def get_case_data():
return [int(i) for i in input().split()]
def get_gcd(a, b):
return get_gcd(b, a % b) if b != 0 else a
def print_number_or_ok_if_equals(number, guess):
print('OK' if number == guess else number)
number_of_cases = int(input())
for case in range(number_of_cases):
(first_integer, second_integer, proposed_gcd) = get_case_data()
real_gcd = get_gcd(first_integer, second_integer)
print_number_or_ok_if_equals(real_gcd, proposed_gcd) |
# Definition for a binary tree node.
# class TreeNode:
#     def __init__(self, val=0, left=None, right=None):
#         self.val = val
#         self.left = left
#         self.right = right
class Solution:
    def verticalTraversal(self, root: TreeNode) -> List[List[int]]:
        """Vertical-order traversal: columns left-to-right; within a column,
        rows top-to-bottom; ties in the same row ordered by node value."""
        res = defaultdict(list)
        q = [(root, 0)]
        min_col = max_col = 0
        while q:
            # Sort the whole BFS level by (column, value) so same-row ties
            # are appended in ascending value order.
            q.sort(key=lambda x: (x[1], x[0].val))
            min_col = min(min_col, q[0][1])
            max_col = max(max_col, q[-1][1])
            prev_q, q = q, []
            for node, col in prev_q:
                res[col].append(node.val)
                if node.left:
                    q.append((node.left, col - 1))
                if node.right:
                    q.append((node.right, col + 1))
        return [res[col] for col in range(min_col, max_col+1)]
| class Solution:
def vertical_traversal(self, root: TreeNode) -> List[List[int]]:
res = defaultdict(list)
q = [(root, 0)]
min_col = max_col = 0
while q:
q.sort(key=lambda x: (x[1], x[0].val))
min_col = min(min_col, q[0][1])
max_col = max(max_col, q[-1][1])
(prev_q, q) = (q, [])
for (node, col) in prev_q:
res[col].append(node.val)
if node.left:
q.append((node.left, col - 1))
if node.right:
q.append((node.right, col + 1))
return [res[col] for col in range(min_col, max_col + 1)] |
# -*- coding: utf-8 -*-
def main():
    """Swap the digits '1' and '9' in the first three input characters and print the result."""
    swap = {'1': '9', '9': '1'}
    s = input()
    mod = ''
    for i in range(3):
        # Characters other than '1'/'9' contribute nothing (as in the original).
        mod += swap.get(s[i], '')
    print(mod)
if __name__ == '__main__':
main()
| def main():
s = input()
mod = ''
for i in range(3):
if s[i] == '1':
mod += '9'
elif s[i] == '9':
mod += '1'
print(mod)
if __name__ == '__main__':
main() |
class Node:
    """Singly linked list node."""
    def __init__(self, data):
        self.data = data
        self.next = None

def _from_values(values):
    # Build a linked list from a non-empty sequence and return its head.
    head = Node(values[0])
    tail = head
    for v in values[1:]:
        tail.next = Node(v)
        tail = tail.next
    return head

arr = [5, 8, 20]
brr = [4, 11, 15]
root1 = _from_values(arr)
root2 = _from_values(brr)

# Merge the two sorted linked lists into a plain Python list.
newlist = []
while root1 is not None and root2 is not None:
    if root1.data < root2.data:
        newlist.append(root1.data)
        root1 = root1.next
    else:
        newlist.append(root2.data)
        root2 = root2.next
# Drain whichever list still has elements.
while root1 is not None:
    newlist.append(root1.data)
    root1 = root1.next
while root2 is not None:
    newlist.append(root2.data)
    root2 = root2.next
print(newlist)
| class Node:
def __init__(self, data):
self.data = data
self.next = None
# Bug fix: the class defined above is `Node`; the lower-cased calls `node(...)`
# raised NameError. Build both lists, then merge them in sorted order.
arr = [5, 8, 20]
brr = [4, 11, 15]
list1 = Node(arr[0])
root1 = list1
for i in arr[1:]:
    temp = Node(i)
    list1.next = temp
    list1 = list1.next
list2 = Node(brr[0])
root2 = list2
for i in brr[1:]:
    temp = Node(i)
    list2.next = temp
    list2 = list2.next
# Merge the two sorted lists into `newlist`, then drain the remainder.
newlist = []
while root1 != None and root2 != None:
    if root1.data < root2.data:
        newlist.append(root1.data)
        root1 = root1.next
    else:
        newlist.append(root2.data)
        root2 = root2.next
if root1 == None:
    if root2 == None:
        print(newlist)
    else:
        while root2 != None:
            newlist.append(root2.data)
            root2 = root2.next
elif root2 == None:
    if root1 == None:
        print(newlist)
    else:
        while root1 != None:
            newlist.append(root1.data)
            root1 = root1.next
print(newlist)
# Write your solution for 1.4 here!
def is_prime(x):
    """Print and return whether x is prime.

    Bug fix: the original paired the "not prime" message with the inner
    `if`'s else branch, so it printed "is not a prime number" for every
    candidate divisor that did NOT divide x, and never reported primes.
    The verdict now prints exactly once. Returns True/False (the original
    returned None, which no caller used).
    """
    if x <= 1:
        # 0, 1 and negatives are not prime (original printed nothing here).
        return False
    for i in range(2, x):
        if (x % i) == 0:
            print(x, "is not a prime number")
            print(i, "times", x // i, "is", x)
            return False
    print(x, "is a prime number")
    return True
is_prime(5)
| def is_prime(x):
if x > 1:
for i in range(2, x):
if x % i == 0:
print(x, 'is not a prime number')
print(i, 'times', x // i, 'is', x)
else:
print(x, 'is not a prime number')
is_prime(5) |
if __name__ == "__main__":
    # Golfed version of main_debug below: read file "i", split records on blank
    # lines, and count those containing every required passport field (the
    # walrus `r := r + 1` accumulates the count inside the comprehension).
    print((lambda x,r : [r:=r+1 for i in x.split('\n\n') if all(map(lambda x : x in i,['byr','iyr','eyr','hgt','hcl','ecl','pid']))][-1])(open("i").read(),0))
def main_debug(inp):
    """Count blank-line-separated records that contain all required passport fields."""
    required = ('byr', 'iyr', 'eyr', 'hgt', 'hcl', 'ecl', 'pid')
    return sum(1 for record in inp.split('\n\n') if all(f in record for f in required))
| if __name__ == '__main__':
print((lambda x, r: [(r := (r + 1)) for i in x.split('\n\n') if all(map(lambda x: x in i, ['byr', 'iyr', 'eyr', 'hgt', 'hcl', 'ecl', 'pid']))][-1])(open('i').read(), 0))
def main_debug(inp):
    """Return how many records mention every required passport field."""
    fields = ['byr', 'iyr', 'eyr', 'hgt', 'hcl', 'ecl', 'pid']
    count = 0
    for record in inp.split('\n\n'):
        if all(field in record for field in fields):
            count += 1
    return count
def flatten(iterable, result=None):
    """Recursively flatten nested lists/sets/tuples, dropping None values.

    Only exact list/set/tuple containers are descended into; other iterables
    (e.g. generators, dicts) are appended as-is, matching the original.
    The shared `result` accumulator avoids building intermediate lists.
    """
    # Idiom fix: compare to None with `is`, and never use a mutable default.
    if result is None:
        result = []
    for item in iterable:
        if type(item) in (list, set, tuple):
            flatten(item, result)
        else:
            result.append(item)
    return [x for x in result if x is not None]
| def flatten(iterable, result=None):
    # Lazily create the shared accumulator on the outermost call.
    if result == None:
        result = []
    for it in iterable:
        # Only exact list/set/tuple containers are recursed into.
        if type(it) in (list, set, tuple):
            flatten(it, result)
        else:
            result.append(it)
    # None entries are filtered out of the final result.
    return [i for i in result if i is not None]
def palindromo(palavra: str) -> bool:
    """Return True if the word reads the same forwards and backwards."""
    i, j = 0, len(palavra) - 1
    while i < j:
        if palavra[i] != palavra[j]:
            return False
        i += 1
        j -= 1
    return True
# Prompt for a file name and print every palindromic word found in the file.
nome_do_arquivo = input('Digite o nome do entrada de entrada: ')
with open(nome_do_arquivo, 'r', encoding='utf8') as arquivo:
    for linha in arquivo:
        linha = linha.strip()
        palavras = linha.split()
        for palavra in palavras:
            if palindromo(palavra):
                print(palavra)
# print(eh_primo('ama'))
# print(eh_primo('socorrammesubinoonibusemmarroco'))
| def palindromo(palavra: str) -> bool:
    # Base case: empty or single-character strings are palindromes.
    if len(palavra) <= 1:
        return True
    primeira_letra = palavra[0]
    ultima_letra = palavra[-1]
    # Mismatched ends mean it cannot be a palindrome.
    if primeira_letra != ultima_letra:
        return False
    # Recurse on the inner substring with both ends stripped.
    return palindromo(palavra[1:-1])
nome_do_arquivo = input('Digite o nome do entrada de entrada: ')
with open(nome_do_arquivo, 'r', encoding='utf8') as arquivo:
for linha in arquivo:
linha = linha.strip()
palavras = linha.split()
for palavra in palavras:
if palindromo(palavra):
print(palavra) |
def main():
    """Prompt until the user supplies an integer height in [1, 8], then draw the pyramid."""
    isNumber = False
    while not isNumber:
        try:
            size = int(input('Height: '))
            # Only accept heights 1..8, matching the CS50 "mario" spec.
            if size > 0 and size <= 8:
                isNumber = True
                break
        except ValueError:
            # Non-integer input: prompt again.
            isNumber = False
    build(size, size)
def build(size, counter):
    """Recursively print a double-sided pyramid, one row per call; returns 1 when done."""
    if size == 0:
        return 1
    pad = size - 1
    blocks = '#' * (counter - pad)
    print(' ' * pad, end='')
    print(blocks, end=' ')
    print(blocks, end='\n')
    return build(size - 1, counter)
main()
| def main():
is_number = False
while not isNumber:
try:
size = int(input('Height: '))
if size > 0 and size <= 8:
is_number = True
break
except ValueError:
is_number = False
build(size, size)
def build(size, counter):
    """Print one pyramid row per recursive call; `counter` is the total height."""
    if size == 0:
        return 1
    indent = size - 1
    width = counter - indent
    # Left half, gap, right half — identical character stream to the original.
    print(' ' * indent + '#' * width, end=' ')
    print('#' * width)
    return build(size - 1, counter)
main() |
#URLs
ROOTURL = 'https://www.reuters.com/companies/'
FXRATESURL = 'https://www.reuters.com/markets/currencies'
#ADDURLs
INCSTAT_ANN_URL = '/financials/income-statement-annual/'
INCSTAT_QRT_URL = '/financials/income-statement-quarterly/'
BS_ANN_URL = '/financials/balance-sheet-annual/'
BS_QRT_URL = '/financials/balance-sheet-quarterly/'
KEYMETRICS_URL = '/key-metrics/'
#TABLENAMES
STOCKDATA = 'stockdata'
FXRATES = 'fxrates'
INCSTAT_ANN = 'incstat_ann'
INCSTAT_QRT = 'incstat_qrt'
BS_ANN = 'bs_ann'
BS_QRT = 'bs_qrt'
KEYMETRICS = 'km'
#TIMES
YEARS = ['2015', '2016', '2017', '2018', '2019']
QRTS = ['2019Q2', '2019Q3', '2019Q4', '2020Q1', '2020Q2']
#DICTIONARIES
ADDURLS_TO_TABLENAMES = {
INCSTAT_ANN_URL: INCSTAT_ANN,\
INCSTAT_QRT_URL:INCSTAT_QRT,\
BS_ANN_URL: BS_ANN,\
BS_QRT_URL: BS_QRT, \
KEYMETRICS_URL: KEYMETRICS}
TABLENAMES_TO_DATA = {
INCSTAT_ANN: {
'Total Revenue' :[0, 'int64'],\
'Net Income' : [0, 'int64']},\
INCSTAT_QRT:{
'Total Revenue': [0, 'int64'],\
'Net Income': [0, 'int64']},\
BS_ANN: {
'Total Equity' : [0, 'int64'],\
'Total Liabilities' : [0, 'int64']},\
BS_QRT: {
'Total Equity' : [0, 'int64'],\
'Total Liabilities' : [0, 'int64']}, \
KEYMETRICS: {
'Dividend (Per Share Annual)' : [0, 'float64' ],\
'Free Cash Flow (Per Share TTM)' : [0, 'float64'],\
'Current Ratio (Annual)' : [0, 'float64']}
}
FACTORS = {
'Mil': 1000000,
'Thousands': 1000}
COLUMNHEADERSDICT_ANN = {
"Unnamed: 0":"Item", \
"Unnamed: 1":YEARS[-1],\
"Unnamed: 2":YEARS[-2],\
"Unnamed: 3":YEARS[-3],\
"Unnamed: 4":YEARS[-4],\
"Unnamed: 5":YEARS[-5], \
0:"Item", \
1: YEARS[-1]} # Unnamed for financials - 0,1 for non-financials
COLUMNHEADERSDICT_QRT = {
"Unnamed: 0":"Item", \
"Unnamed: 1":QRTS[-1],\
"Unnamed: 2":QRTS[-2],\
"Unnamed: 3":QRTS[-3],\
"Unnamed: 4":QRTS[-4],\
"Unnamed: 5":QRTS[-5], \
0:"Item", \
1: YEARS[-1]} # Unnamed for financials - 0,1 for non-financials
ISIN_TO_COUNTRIES = {
'US' : 'USA',
'DE' : 'Germany',
'GB' : 'UK',
'NL' : 'Netherlands',
'IE' : 'Ireland',
'FR' : 'France',
'CA' : 'Canada',
'CH' : 'Switzerland'
}
#PATHS
RICSCSVPATH = "..\\data\\01_raw\\reuters-shorts.csv"
RAWDATAPATH = "..\\data\\01_raw\\rawdatadb.db"
INTDATAPATH = "..\\data\\02_intermediate\\intdatadb.db"
PROCDATAPATH = '..\\data\\03_processed\\processeddata.feather'
PROCDATAPATHCSV = '..\\data\\03_processed\\processeddata.csv'
CURRENCIES = ['USD', 'EUR', 'GBP','CHF', 'INR'] | rooturl = 'https://www.reuters.com/companies/'
fxratesurl = 'https://www.reuters.com/markets/currencies'
incstat_ann_url = '/financials/income-statement-annual/'
incstat_qrt_url = '/financials/income-statement-quarterly/'
bs_ann_url = '/financials/balance-sheet-annual/'
bs_qrt_url = '/financials/balance-sheet-quarterly/'
keymetrics_url = '/key-metrics/'
stockdata = 'stockdata'
fxrates = 'fxrates'
incstat_ann = 'incstat_ann'
incstat_qrt = 'incstat_qrt'
bs_ann = 'bs_ann'
bs_qrt = 'bs_qrt'
keymetrics = 'km'
years = ['2015', '2016', '2017', '2018', '2019']
qrts = ['2019Q2', '2019Q3', '2019Q4', '2020Q1', '2020Q2']
# Bug fix: these mappings referenced the old UPPER_CASE constant names after
# the definitions above were renamed to lower case, raising NameError at import.
addurls_to_tablenames = {incstat_ann_url: incstat_ann, incstat_qrt_url: incstat_qrt, bs_ann_url: bs_ann, bs_qrt_url: bs_qrt, keymetrics_url: keymetrics}
tablenames_to_data = {incstat_ann: {'Total Revenue': [0, 'int64'], 'Net Income': [0, 'int64']}, incstat_qrt: {'Total Revenue': [0, 'int64'], 'Net Income': [0, 'int64']}, bs_ann: {'Total Equity': [0, 'int64'], 'Total Liabilities': [0, 'int64']}, bs_qrt: {'Total Equity': [0, 'int64'], 'Total Liabilities': [0, 'int64']}, keymetrics: {'Dividend (Per Share Annual)': [0, 'float64'], 'Free Cash Flow (Per Share TTM)': [0, 'float64'], 'Current Ratio (Annual)': [0, 'float64']}}
factors = {'Mil': 1000000, 'Thousands': 1000}
columnheadersdict_ann = {'Unnamed: 0': 'Item', 'Unnamed: 1': years[-1], 'Unnamed: 2': years[-2], 'Unnamed: 3': years[-3], 'Unnamed: 4': years[-4], 'Unnamed: 5': years[-5], 0: 'Item', 1: years[-1]}
columnheadersdict_qrt = {'Unnamed: 0': 'Item', 'Unnamed: 1': qrts[-1], 'Unnamed: 2': qrts[-2], 'Unnamed: 3': qrts[-3], 'Unnamed: 4': qrts[-4], 'Unnamed: 5': qrts[-5], 0: 'Item', 1: years[-1]}
isin_to_countries = {'US': 'USA', 'DE': 'Germany', 'GB': 'UK', 'NL': 'Netherlands', 'IE': 'Ireland', 'FR': 'France', 'CA': 'Canada', 'CH': 'Switzerland'}
ricscsvpath = '..\\data\\01_raw\\reuters-shorts.csv'
rawdatapath = '..\\data\\01_raw\\rawdatadb.db'
intdatapath = '..\\data\\02_intermediate\\intdatadb.db'
procdatapath = '..\\data\\03_processed\\processeddata.feather'
procdatapathcsv = '..\\data\\03_processed\\processeddata.csv'
currencies = ['USD', 'EUR', 'GBP', 'CHF', 'INR'] |
DEFAULT_PORT = 9000
DEFAULT_SECURE_PORT = 9440
DBMS_MIN_REVISION_WITH_TEMPORARY_TABLES = 50264
DBMS_MIN_REVISION_WITH_TOTAL_ROWS_IN_PROGRESS = 51554
DBMS_MIN_REVISION_WITH_BLOCK_INFO = 51903
# Legacy above.
DBMS_MIN_REVISION_WITH_CLIENT_INFO = 54032
DBMS_MIN_REVISION_WITH_SERVER_TIMEZONE = 54058
DBMS_MIN_REVISION_WITH_QUOTA_KEY_IN_CLIENT_INFO = 54060
DBMS_MIN_REVISION_WITH_SERVER_DISPLAY_NAME = 54372
DBMS_MIN_REVISION_WITH_VERSION_PATCH = 54401
DBMS_MIN_REVISION_WITH_SERVER_LOGS = 54406
DBMS_MIN_REVISION_WITH_COLUMN_DEFAULTS_METADATA = 54410
DBMS_MIN_REVISION_WITH_CLIENT_WRITE_INFO = 54420
DBMS_MIN_REVISION_WITH_SETTINGS_SERIALIZED_AS_STRINGS = 54429
# Timeouts
DBMS_DEFAULT_CONNECT_TIMEOUT_SEC = 10
DBMS_DEFAULT_TIMEOUT_SEC = 300
DBMS_DEFAULT_SYNC_REQUEST_TIMEOUT_SEC = 5
DEFAULT_COMPRESS_BLOCK_SIZE = 1048576
DEFAULT_INSERT_BLOCK_SIZE = 1048576
DBMS_NAME = 'ClickHouse'
CLIENT_NAME = 'python-driver'
CLIENT_VERSION_MAJOR = 18
CLIENT_VERSION_MINOR = 10
CLIENT_VERSION_PATCH = 3
CLIENT_REVISION = 54429
BUFFER_SIZE = 1048576
STRINGS_ENCODING = 'utf-8'
| default_port = 9000
default_secure_port = 9440
dbms_min_revision_with_temporary_tables = 50264
dbms_min_revision_with_total_rows_in_progress = 51554
dbms_min_revision_with_block_info = 51903
dbms_min_revision_with_client_info = 54032
dbms_min_revision_with_server_timezone = 54058
dbms_min_revision_with_quota_key_in_client_info = 54060
dbms_min_revision_with_server_display_name = 54372
dbms_min_revision_with_version_patch = 54401
dbms_min_revision_with_server_logs = 54406
dbms_min_revision_with_column_defaults_metadata = 54410
dbms_min_revision_with_client_write_info = 54420
dbms_min_revision_with_settings_serialized_as_strings = 54429
dbms_default_connect_timeout_sec = 10
dbms_default_timeout_sec = 300
dbms_default_sync_request_timeout_sec = 5
default_compress_block_size = 1048576
default_insert_block_size = 1048576
dbms_name = 'ClickHouse'
client_name = 'python-driver'
client_version_major = 18
client_version_minor = 10
client_version_patch = 3
client_revision = 54429
buffer_size = 1048576
strings_encoding = 'utf-8' |
# Count increases between consecutive three-measurement sliding windows
# (appears to be Advent of Code 2021 day 1, part 2 — TODO confirm).
input_file = open("input.txt", "r")
entriesArray = input_file.read().split("\n")
depth_measure_increase = 0
for i in range(3, len(entriesArray), 1):
    # Windows [i-3, i-1] and [i-2, i] share two terms; both are summed in full here.
    first_window = int(entriesArray[i-1]) + int(entriesArray[i-2]) + int(entriesArray[i-3])
    second_window = int(entriesArray[i]) + int(entriesArray[i-1]) + int(entriesArray[i-2])
    if second_window > first_window:
        depth_measure_increase += 1
print(f'{depth_measure_increase=}') | input_file = open('input.txt', 'r')
entries_array = input_file.read().split('\n')
depth_measure_increase = 0
# Bug fix: the list was renamed to `entries_array` above, but this loop still
# referenced `entriesArray`, raising NameError.
for i in range(3, len(entries_array), 1):
    first_window = int(entries_array[i - 1]) + int(entries_array[i - 2]) + int(entries_array[i - 3])
    second_window = int(entries_array[i]) + int(entries_array[i - 1]) + int(entries_array[i - 2])
    if second_window > first_window:
        depth_measure_increase += 1
print(f'depth_measure_increase={depth_measure_increase!r}')
def multiplicationTable(size):
    """Return the size x size multiplication table as a list of rows."""
    table = []
    for row in range(1, size + 1):
        table.append([row * col for col in range(1, size + 1)])
    return table
x = multiplicationTable(5)
print(x)
print()
for i in x:
print(i)
| def multiplication_table(size):
return [[j * i for j in range(1, size + 1)] for i in range(1, size + 1)]
x = multiplication_table(5)
print(x)
print()
for i in x:
print(i) |
def getFrequencyDictForText(sentence):
    """Build a multidict of lower-cased word -> frequency for `sentence`.

    Bug fix: counts were fetched with the original-case word but stored under
    the lower-cased word, so mixed-case repeats were miscounted. Both the
    lookup and the store now use the lower-cased key.
    """
    fullTermsDict = multidict.MultiDict()
    tmpDict = {}
    # making dictionary for counting word frequencies
    for text in sentence.split(" "):
        # remove irrelevant words
        # NOTE(review): re.match anchors only at the start, so any word merely
        # *starting* with a stop word (e.g. "theory") is also dropped — confirm
        # this is intended.
        if re.match("a|the|an|the|to|in|for|of|or|by|with|is|on|that|but|from|than|be", text):
            continue
        key = text.lower()
        tmpDict[key] = tmpDict.get(key, 0) + 1
    for key in tmpDict:
        fullTermsDict.add(key, tmpDict[key])
    return fullTermsDict
def makeImage(text):
    """Render a word cloud from a word->frequency mapping and save it as a dated PNG."""
    wc = WordCloud(width = 3000, height = 1080, background_color="white", colormap = 'Dark2', max_words=200)
    # generate word cloud
    wc.generate_from_frequencies(text)
    # save
    plt.imshow(wc)
    plt.axis("off")
    datestring = date.today().strftime("%b-%d-%Y")
    plt.text(860, -50, 'Date Generated: ' + datestring)
    filename = datestring + '.png'
    # Saved into ../static relative to the current working directory.
    plt.savefig(os.path.join(os.getcwd(), '..', './static', filename), dpi = 400, bbox_inches='tight')
# Get text from the existing word file, compute frequencies, and render the cloud.
tifile = open(os.path.join(os.getcwd(), '..','words.txt'), 'r')
text = tifile.read()
makeImage(getFrequencyDictForText(text))
tifile.close()
| def get_frequency_dict_for_text(sentence):
full_terms_dict = multidict.MultiDict()
tmp_dict = {}
for text in sentence.split(' '):
if re.match('a|the|an|the|to|in|for|of|or|by|with|is|on|that|but|from|than|be', text):
continue
val = tmpDict.get(text, 0)
tmpDict[text.lower()] = val + 1
for key in tmpDict:
fullTermsDict.add(key, tmpDict[key])
return fullTermsDict
def make_image(text):
    """Render a word cloud from a frequency mapping and save it as a dated PNG."""
    # Bug fix: the class from the wordcloud package is `WordCloud`; the
    # lower-cased call `word_cloud(...)` raised NameError.
    wc = WordCloud(width=3000, height=1080, background_color='white', colormap='Dark2', max_words=200)
    wc.generate_from_frequencies(text)
    plt.imshow(wc)
    plt.axis('off')
    datestring = date.today().strftime('%b-%d-%Y')
    plt.text(860, -50, 'Date Generated: ' + datestring)
    filename = datestring + '.png'
    plt.savefig(os.path.join(os.getcwd(), '..', './static', filename), dpi=400, bbox_inches='tight')
tifile = open(os.path.join(os.getcwd(), '..', 'words.txt'), 'r')
text = tifile.read()
make_image(get_frequency_dict_for_text(text))
tifile.close() |
class BoxaugError(Exception):
    """Custom exception type raised by boxaug code."""
    pass
| class Boxaugerror(Exception):
pass |
# short hand if: a single-statement body may share the `if` line
a=23
b=4
if a > b: print("a is greater than b")
# short hand if-else: a conditional expression selects which call runs
print("a is greater ") if a > b else print("b is greater ")
# pass statement: a no-op placeholder so the empty block stays syntactically valid
b=300
if b > a:
    pass
| a = 23
b = 4
if a > b:
print('a is greater than b')
print('a is greater ') if a > b else print('b is greater ')
b = 300
if b > a:
pass |
if args.algo in ['a2c', 'acktr']:
    # Single forward pass over the whole rollout to get values / log-probs.
    values, action_log_probs, dist_entropy, conv_list = actor_critic.evaluate_actions(Variable(rollouts.states[:-1].view(-1, *obs_shape)), Variable(rollouts.actions.view(-1, action_shape)))
    # pre-process
    values = values.view(args.num_steps, num_processes_total, 1)
    action_log_probs = action_log_probs.view(args.num_steps, num_processes_total, 1)
    # compute afs loss
    afs_per_m_temp, afs_loss = actor_critic.get_afs_per_m(
        action_log_probs=action_log_probs,
        conv_list=conv_list,
    )
    if len(afs_per_m_temp)>0:
        afs_per_m += [afs_per_m_temp]
    if (afs_loss is not None) and (afs_loss.data.cpu().numpy()[0]!=0.0):
        afs_loss.backward(mone, retain_graph=True)
        afs_loss_list += [afs_loss.data.cpu().numpy()[0]]
    # Advantage-based actor-critic losses.
    advantages = Variable(rollouts.returns[:-1]) - values
    value_loss = advantages.pow(2).mean()
    action_loss = -(Variable(advantages.data) * action_log_probs).mean()
    final_loss_basic = value_loss * args.value_loss_coef + action_loss - dist_entropy * args.entropy_coef
    # Optional EWC regulariser; skipped on the first task (j == 0).
    ewc_loss = None
    if j != 0:
        if ewc == 1:
            ewc_loss = actor_critic.get_ewc_loss(lam=ewc_lambda)
    if ewc_loss is None:
        final_loss = final_loss_basic
    else:
        final_loss = final_loss_basic + ewc_loss
    basic_loss_list += [final_loss_basic.data.cpu().numpy()[0]]
    final_loss.backward()
    if args.algo == 'a2c':
        nn.utils.clip_grad_norm(actor_critic.parameters(), args.max_grad_norm)
    optimizer.step()
elif args.algo == 'ppo':
    # Normalised advantages for the clipped surrogate objective.
    advantages = rollouts.returns[:-1] - rollouts.value_preds[:-1]
    advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-5)
    old_model.load_state_dict(actor_critic.state_dict())
    if hasattr(actor_critic, 'obs_filter'):
        old_model.obs_filter = actor_critic.obs_filter
    for _ in range(args.ppo_epoch):
        sampler = BatchSampler(SubsetRandomSampler(range(num_processes_total * args.num_steps)), args.batch_size * num_processes_total, drop_last=False)
        for indices in sampler:
            indices = torch.LongTensor(indices)
            if args.cuda:
                indices = indices.cuda()
            states_batch = rollouts.states[:-1].view(-1, *obs_shape)[indices]
            actions_batch = rollouts.actions.view(-1, action_shape)[indices]
            return_batch = rollouts.returns[:-1].view(-1, 1)[indices]
            # Reshape to do in a single forward pass for all steps
            values, action_log_probs, dist_entropy, conv_list = actor_critic.evaluate_actions(Variable(states_batch), Variable(actions_batch))
            _, old_action_log_probs, _, old_conv_list= old_model.evaluate_actions(Variable(states_batch, volatile=True), Variable(actions_batch, volatile=True))
            ratio = torch.exp(action_log_probs - Variable(old_action_log_probs.data))
            adv_targ = Variable(advantages.view(-1, 1)[indices])
            surr1 = ratio * adv_targ
            surr2 = torch.clamp(ratio, 1.0 - args.clip_param, 1.0 + args.clip_param) * adv_targ
            action_loss = -torch.min(surr1, surr2).mean() # PPO's pessimistic surrogate (L^CLIP)
            value_loss = (Variable(return_batch) - values).pow(2).mean()
            optimizer.zero_grad()
            (value_loss + action_loss - dist_entropy * args.entropy_coef).backward()
            optimizer.step()
| if args.algo in ['a2c', 'acktr']:
(values, action_log_probs, dist_entropy, conv_list) = actor_critic.evaluate_actions(variable(rollouts.states[:-1].view(-1, *obs_shape)), variable(rollouts.actions.view(-1, action_shape)))
values = values.view(args.num_steps, num_processes_total, 1)
action_log_probs = action_log_probs.view(args.num_steps, num_processes_total, 1)
(afs_per_m_temp, afs_loss) = actor_critic.get_afs_per_m(action_log_probs=action_log_probs, conv_list=conv_list)
if len(afs_per_m_temp) > 0:
afs_per_m += [afs_per_m_temp]
if afs_loss is not None and afs_loss.data.cpu().numpy()[0] != 0.0:
afs_loss.backward(mone, retain_graph=True)
afs_loss_list += [afs_loss.data.cpu().numpy()[0]]
advantages = variable(rollouts.returns[:-1]) - values
value_loss = advantages.pow(2).mean()
action_loss = -(variable(advantages.data) * action_log_probs).mean()
final_loss_basic = value_loss * args.value_loss_coef + action_loss - dist_entropy * args.entropy_coef
ewc_loss = None
if j != 0:
if ewc == 1:
ewc_loss = actor_critic.get_ewc_loss(lam=ewc_lambda)
if ewc_loss is None:
final_loss = final_loss_basic
else:
final_loss = final_loss_basic + ewc_loss
basic_loss_list += [final_loss_basic.data.cpu().numpy()[0]]
final_loss.backward()
if args.algo == 'a2c':
nn.utils.clip_grad_norm(actor_critic.parameters(), args.max_grad_norm)
optimizer.step()
elif args.algo == 'ppo':
advantages = rollouts.returns[:-1] - rollouts.value_preds[:-1]
advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-05)
old_model.load_state_dict(actor_critic.state_dict())
if hasattr(actor_critic, 'obs_filter'):
old_model.obs_filter = actor_critic.obs_filter
for _ in range(args.ppo_epoch):
sampler = batch_sampler(subset_random_sampler(range(num_processes_total * args.num_steps)), args.batch_size * num_processes_total, drop_last=False)
for indices in sampler:
indices = torch.LongTensor(indices)
if args.cuda:
indices = indices.cuda()
states_batch = rollouts.states[:-1].view(-1, *obs_shape)[indices]
actions_batch = rollouts.actions.view(-1, action_shape)[indices]
return_batch = rollouts.returns[:-1].view(-1, 1)[indices]
(values, action_log_probs, dist_entropy, conv_list) = actor_critic.evaluate_actions(variable(states_batch), variable(actions_batch))
(_, old_action_log_probs, _, old_conv_list) = old_model.evaluate_actions(variable(states_batch, volatile=True), variable(actions_batch, volatile=True))
ratio = torch.exp(action_log_probs - variable(old_action_log_probs.data))
adv_targ = variable(advantages.view(-1, 1)[indices])
surr1 = ratio * adv_targ
surr2 = torch.clamp(ratio, 1.0 - args.clip_param, 1.0 + args.clip_param) * adv_targ
action_loss = -torch.min(surr1, surr2).mean()
value_loss = (variable(return_batch) - values).pow(2).mean()
optimizer.zero_grad()
(value_loss + action_loss - dist_entropy * args.entropy_coef).backward()
optimizer.step() |
'''
Given an array consists of non-negative integers, your task is to count the number of triplets chosen from the array that can make triangles if we take them as side lengths of a triangle.
Example 1:
Input: [2,2,3,4]
Output: 3
Explanation:
Valid combinations are:
2,3,4 (using the first 2)
2,3,4 (using the second 2)
2,2,3
Note:
The length of the given array won't exceed 1000.
The integers in the given array are in the range of [0, 1000].
'''
class Solution:
    def triangleNumber(self, nums: List[int]) -> int:
        """Count index triples whose values can be the sides of a triangle.

        Sort, then for each choice of the largest side use two pointers to
        count pairs whose sum exceeds it. O(n^2) after the O(n log n) sort.
        """
        nums.sort()
        total = 0
        for top in range(len(nums) - 1, 1, -1):
            lo, hi = 0, top - 1
            while lo < hi:
                if nums[lo] + nums[hi] > nums[top]:
                    # Every index in (lo, hi) pairs successfully with hi.
                    total += hi - lo
                    hi -= 1
                else:
                    lo += 1
        return total
| """
Given an array consists of non-negative integers, your task is to count the number of triplets chosen from the array that can make triangles if we take them as side lengths of a triangle.
Example 1:
Input: [2,2,3,4]
Output: 3
Explanation:
Valid combinations are:
2,3,4 (using the first 2)
2,3,4 (using the second 2)
2,2,3
Note:
The length of the given array won't exceed 1000.
The integers in the given array are in the range of [0, 1000].
"""
class Solution:
    def triangle_number(self, nums: List[int]) -> int:
        """Return the number of triangle-forming triplets in nums (sorted in place)."""
        nums.sort()
        count = 0
        n = len(nums)
        # Fix the largest side, then sweep two pointers over the smaller sides.
        for i in range(n - 1, -1, -1):
            left = 0
            right = i - 1
            while left < right:
                if nums[left] + nums[right] <= nums[i]:
                    left += 1
                else:
                    count += right - left
                    right -= 1
        return count
# XOR-decode: each byte is xored with its index to recover the flag text.
a = [0x77, 0x60, 0x76, 0x66, 0x72, 0x77, 0x7D, 0x73, 0x60, 0x3D, 0x64, 0x60, 0x39, 0x52, 0x66, 0x3B, 0x73, 0x7A, 0x23, 0x7D, 0x73, 0x4A, 0x70, 0x78, 0x6A, 0x46, 0x69, 0x2B, 0x76, 0x68, 0x41, 0x77, 0x41, 0x42, 0x49, 0x4A, 0x4A, 0x42, 0x40, 0x48, 0x5A, 0x5A, 0x45, 0x41, 0x59, 0x03, 0x5A, 0x4A, 0x51, 0x5C, 0x4F]
flag = ''.join(chr(byte ^ idx) for idx, byte in enumerate(a))
print(flag) # watevr{th4nk5_h4ck1ng_for_s0ju_hackingforsoju.team} | a = [119, 96, 118, 102, 114, 119, 125, 115, 96, 61, 100, 96, 57, 82, 102, 59, 115, 122, 35, 125, 115, 74, 112, 120, 106, 70, 105, 43, 118, 104, 65, 119, 65, 66, 73, 74, 74, 66, 64, 72, 90, 90, 69, 65, 89, 3, 90, 74, 81, 92, 79]
# Decode the byte list by xoring each value with its position index.
decoded = [chr(value ^ position) for position, value in enumerate(a)]
flag = ''.join(decoded)
print(flag)
# This code is provided by MDS DSCI 531/532
def mds_special():
font = "Arial"
axisColor = "#000000"
gridColor = "#DEDDDD"
return {
"config": {
"title": {
"fontSize": 24,
"font": font,
"anchor": "start", # equivalent of left-aligned.
"fontColor": "#000000"
},
'view': {
"height": 300,
"width": 400
},
"axisX": {
"domain": True,
#"domainColor": axisColor,
"gridColor": gridColor,
"domainWidth": 1,
"grid": False,
"labelFont": font,
"labelFontSize": 12,
"labelAngle": 0,
"tickColor": axisColor,
"tickSize": 5, # default, including it just to show you can change it
"titleFont": font,
"titleFontSize": 16,
"titlePadding": 10, # guessing, not specified in styleguide
"title": "X Axis Title (units)",
},
"axisY": {
"domain": False,
"grid": True,
"gridColor": gridColor,
"gridWidth": 1,
"labelFont": font,
"labelFontSize": 14,
"labelAngle": 0,
#"ticks": False, # even if you don't have a "domain" you need to turn these off.
"titleFont": font,
"titleFontSize": 16,
"titlePadding": 10, # guessing, not specified in styleguide
"title": "Y Axis Title (units)",
# titles are by default vertical left of axis so we need to hack this
#"titleAngle": 0, # horizontal
#"titleY": -10, # move it up
#"titleX": 18, # move it to the right so it aligns with the labels
},
}
} | def mds_special():
font = 'Arial'
axis_color = '#000000'
grid_color = '#DEDDDD'
return {'config': {'title': {'fontSize': 24, 'font': font, 'anchor': 'start', 'fontColor': '#000000'}, 'view': {'height': 300, 'width': 400}, 'axisX': {'domain': True, 'gridColor': gridColor, 'domainWidth': 1, 'grid': False, 'labelFont': font, 'labelFontSize': 12, 'labelAngle': 0, 'tickColor': axisColor, 'tickSize': 5, 'titleFont': font, 'titleFontSize': 16, 'titlePadding': 10, 'title': 'X Axis Title (units)'}, 'axisY': {'domain': False, 'grid': True, 'gridColor': gridColor, 'gridWidth': 1, 'labelFont': font, 'labelFontSize': 14, 'labelAngle': 0, 'titleFont': font, 'titleFontSize': 16, 'titlePadding': 10, 'title': 'Y Axis Title (units)'}}} |
# Writing a method
class Shape:
    """A named polygon with a side count and an optional colour."""

    def __init__(self, name, sides, colour=None):
        self.name = name
        self.sides = sides
        self.colour = colour

    def get_info(self):
        """Return a human-readable '<colour> <name> with <n> sides' string."""
        return f'{self.colour} {self.name} with {self.sides} sides'
# Demo: build a coloured square and print its description.
s = Shape('square', 4, 'green')
print(s.get_info())
# example of classmethod and staticmethod
class Shape:
    """Shape variant showing an alternate constructor and a class utility."""

    def __init__(self, name, sides, colour=None):
        self.name = name
        self.sides = sides
        self.colour = colour

    @classmethod
    def green_shape(cls, name, sides):
        """Alternate constructor: a shape whose colour is fixed to 'green'.

        Prints the receiving class so subclass dispatch is visible.
        """
        print(cls)
        return cls(name, sides, 'green')

    @staticmethod
    def trapezium_area(a, b, height):
        """Area of a trapezium with parallel sides a and b: 0.5(a + b)h."""
        return 0.5 * (a + b) * height
# Demo: classmethod constructor plus a direct staticmethod call.
green = Shape.green_shape('rectangle', 4)
print('{} {} with {} sides'.format(green.colour, green.name, green.sides))
print(Shape.trapezium_area(5, 7, 4))
# demonstrating differences when calling regular methods, classmethods
# and staticmethods
class Shape:
    # Each method prints its implicit first argument (if any) followed by the
    # explicit args, so the binding behaviour of the three kinds can be
    # compared from the calls below.
    def dummy_method(self, *args):
        print('self:', self)
        print('args:', *args)
    @classmethod
    def dummy_classmethod(cls, *args):
        print('cls :', cls)
        print('args:', *args)
    @staticmethod
    def dummy_staticmethod(*args):
        print('args:', *args)
# Exercise each method kind from both an instance and the class, printing
# the bound-method repr after each call.
square = Shape()
# calling regular method from instance
square.dummy_method('arg')
print(repr(square.dummy_method) + '\n')
# calling regular method from class — the plain function receives 'arg' as
# its `self` parameter
Shape.dummy_method('arg')
print(repr(Shape.dummy_method) + '\n')
# calling classmethod from instance
square.dummy_classmethod('arg')
print(repr(square.dummy_classmethod) + '\n')
# calling classmethod from class
Shape.dummy_classmethod('arg')
print(repr(Shape.dummy_classmethod) + '\n')
# calling staticmethod from instance
square.dummy_staticmethod('arg')
print(repr(square.dummy_staticmethod) + '\n')
# calling staticmethod from class
Shape.dummy_staticmethod('arg')
print(repr(Shape.dummy_staticmethod) + '\n')
| class Shape:
def __init__(self, name, sides, colour=None):
self.name = name
self.sides = sides
self.colour = colour
def get_info(self):
return '{} {} with {} sides'.format(self.colour, self.name, self.sides)
s = shape('square', 4, 'green')
print(s.get_info())
class Shape:
def __init__(self, name, sides, colour=None):
self.name = name
self.sides = sides
self.colour = colour
@classmethod
def green_shape(cls, name, sides):
print(cls)
return cls(name, sides, 'green')
@staticmethod
def trapezium_area(a, b, height):
return 0.5 * (a + b) * height
green = Shape.green_shape('rectangle', 4)
print('{} {} with {} sides'.format(green.colour, green.name, green.sides))
print(Shape.trapezium_area(5, 7, 4))
class Shape:
def dummy_method(self, *args):
print('self:', self)
print('args:', *args)
@classmethod
def dummy_classmethod(cls, *args):
print('cls :', cls)
print('args:', *args)
@staticmethod
def dummy_staticmethod(*args):
print('args:', *args)
square = shape()
square.dummy_method('arg')
print(repr(square.dummy_method) + '\n')
Shape.dummy_method('arg')
print(repr(Shape.dummy_method) + '\n')
square.dummy_classmethod('arg')
print(repr(square.dummy_classmethod) + '\n')
Shape.dummy_classmethod('arg')
print(repr(Shape.dummy_classmethod) + '\n')
square.dummy_staticmethod('arg')
print(repr(square.dummy_staticmethod) + '\n')
Shape.dummy_staticmethod('arg')
print(repr(Shape.dummy_staticmethod) + '\n') |
class Solution:
    """Smallest vertex set from which every node of a DAG is reachable."""

    def findSmallestSetOfVertices(self, n: int, edges: List[List[int]]) -> List[int]:
        """Return, in ascending order, every vertex with in-degree zero.

        A vertex is reachable from another vertex iff it is the target of
        some edge, so exactly the zero-in-degree vertices must be chosen.
        Fix: the original bound an unused `u` per edge and kept a full
        degree list; a set of edge targets is the idiomatic equivalent.
        """
        has_incoming = {v for _, v in edges}
        return [i for i in range(n) if i not in has_incoming]
| class Solution:
def find_smallest_set_of_vertices(self, n: int, edges: List[List[int]]) -> List[int]:
degree = [0] * n
for (u, v) in edges:
degree[v] = 1
return [i for (i, d) in enumerate(degree) if d == 0] |
#!/usr/bin/env python
# Minimal Django settings for running the django_tables2_column_shifter test
# suite against an in-memory SQLite database.
DEBUG = True
# Test-only key — never reuse outside this test harness.
SECRET_KEY = 'super-ultra-secret-key'
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': ':memory:',
    }
}
INSTALLED_APPS = [
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.staticfiles',
    'django_tables2',
    'django_tables2_column_shifter',
    'django_tables2_column_shifter.tests',
]
ROOT_URLCONF = 'django_tables2_column_shifter.tests.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
LANGUAGE_CODE = 'en-us'
MEDIA_URL = '/media/'
STATIC_URL = '/static/'
MIDDLEWARE = []
| debug = True
secret_key = 'super-ultra-secret-key'
databases = {'default': {'ENGINE': 'django.db.backends.sqlite3', 'NAME': ':memory:'}}
installed_apps = ['django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.staticfiles', 'django_tables2', 'django_tables2_column_shifter', 'django_tables2_column_shifter.tests']
root_urlconf = 'django_tables2_column_shifter.tests.urls'
templates = [{'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': {'context_processors': ['django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages']}}]
language_code = 'en-us'
media_url = '/media/'
static_url = '/static/'
middleware = [] |
# classical (x, y) position vectors
class Pos:
    """2-D position vector with +, -, scalar *, == and != support."""

    def __init__(self, x, y):
        self.x = x
        self.y = y

    def __add__(self, other):
        return Pos(self.x + other.x, self.y + other.y)

    def __eq__(self, other):
        return self.x == other.x and self.y == other.y

    def __mul__(self, factor):
        return Pos(factor * self.x, factor * self.y)

    def __ne__(self, other):
        return not (self == other)

    def __str__(self):
        return f'({self.x}, {self.y})'

    def __sub__(self, subtrahend):
        # Reuse * and +: p - q == p + (q * -1)
        return self + subtrahend * -1
| class Pos:
def __init__(self, x, y):
self.x = x
self.y = y
def __add__(self, other):
return pos(self.x + other.x, self.y + other.y)
def __eq__(self, other):
return self.x == other.x and self.y == other.y
def __mul__(self, factor):
return pos(factor * self.x, factor * self.y)
def __ne__(self, other):
return not self == other
def __str__(self):
return '(' + str(self.x) + ', ' + str(self.y) + ')'
def __sub__(self, subtrahend):
return self + subtrahend * -1 |
# mmaction2-style training config: C3D recognizer fine-tuned on fatigue clips.

# model settings
model = dict(
    type='Recognizer3D',
    backbone=dict(
        type='C3D',
        # Local copy of the Sports-1M pretrained weights; upstream URL:
        # https://download.openmmlab.com/mmaction/recognition/c3d/c3d_sports1m_pretrain_20201016-dcc47ddc.pth
        pretrained='./work_dirs/fatigue_c3d/c3d_sports1m_pretrain_20201016-dcc47ddc.pth',
        style='pytorch',
        conv_cfg=dict(type='Conv3d'),
        norm_cfg=None,
        act_cfg=dict(type='ReLU'),
        dropout_ratio=0.5,
        init_std=0.005),
    cls_head=dict(
        type='I3DHead',
        num_classes=2,
        in_channels=4096,
        spatial_type=None,
        dropout_ratio=0.5,
        init_std=0.01),
    # model training and testing settings
    train_cfg=None,
    test_cfg=dict(average_clips='score'))

# dataset settings
dataset_type = 'FatigueCleanDataset'
data_root = '/zhourui/workspace/pro/fatigue/data/rawframes/new_clean/fatigue_clips'
data_root_val = '/zhourui/workspace/pro/fatigue/data/rawframes/new_clean/fatigue_clips'
facerect_data_prefix = '/zhourui/workspace/pro/fatigue/data/clean/fatigue_info_from_yolov5'
ann_file_train = '/zhourui/workspace/pro/fatigue/data/clean/fatigue_anns/20210824_fatigue_pl_less_than_50_fatigue_full_info_all_path.json'
ann_file_val = '/zhourui/workspace/pro/fatigue/data/clean/fatigue_anns/20210824_fatigue_pl_less_than_50_fatigue_full_info_all_path.json'
ann_file_test = '/zhourui/workspace/pro/fatigue/data/clean/fatigue_anns/20210824_fatigue_pl_less_than_50_fatigue_full_info_all_path.json'
test_save_results_path = 'work_dirs/fatigue_c3d/valid_results_testone.npy'
test_save_label_path = 'work_dirs/fatigue_c3d/valid_label_testone.npy'
img_norm_cfg = dict(mean=[104, 117, 128], std=[1, 1, 1], to_bgr=False)

# support clip len 16 only!!!
clip_len = 16

train_pipeline = [
    dict(type='SampleFrames', clip_len=clip_len, frame_interval=1,
         num_clips=1, out_of_bound_opt='repeat_last'),
    dict(type='FatigueRawFrameDecode'),
    dict(type='Resize', scale=(112, 112), keep_ratio=False),
    # dict(type='RandomCrop', size=112),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label']),
]
val_pipeline = [
    dict(type='SampleFrames', clip_len=clip_len, frame_interval=1,
         num_clips=1, test_mode=True, out_of_bound_opt='repeat_last'),
    dict(type='FatigueRawFrameDecode'),
    dict(type='Resize', scale=(112, 112), keep_ratio=False),
    # dict(type='CenterCrop', crop_size=112),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs']),
]
test_pipeline = [
    dict(type='SampleFrames', clip_len=clip_len, frame_interval=1,
         num_clips=1, test_mode=True, out_of_bound_opt='repeat_last'),
    dict(type='FatigueRawFrameDecode'),
    dict(type='Resize', scale=(112, 112), keep_ratio=False),
    # dict(type='CenterCrop', crop_size=112),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs']),
]

data = dict(
    videos_per_gpu=40,
    workers_per_gpu=4,
    pin_memory=False,
    train=dict(
        type=dataset_type,
        ann_file=ann_file_train,
        video_data_prefix=data_root,
        facerect_data_prefix=facerect_data_prefix,
        data_phase='train',
        test_mode=False,
        pipeline=train_pipeline,
        min_frames_before_fatigue=clip_len),
    val=dict(
        type=dataset_type,
        ann_file=ann_file_val,
        video_data_prefix=data_root_val,
        facerect_data_prefix=facerect_data_prefix,
        data_phase='valid',
        test_mode=True,
        test_all=False,
        pipeline=val_pipeline,
        min_frames_before_fatigue=clip_len),
    test=dict(
        type=dataset_type,
        ann_file=ann_file_test,
        video_data_prefix=data_root_val,
        facerect_data_prefix=facerect_data_prefix,
        data_phase='valid',
        test_mode=True,
        test_all=False,
        test_save_label_path=test_save_label_path,
        test_save_results_path=test_save_results_path,
        pipeline=test_pipeline,
        min_frames_before_fatigue=clip_len))

evaluation = dict(interval=5, metrics=['top_k_classes'])

# optimizer — this lr is used for 8 gpus
optimizer = dict(type='SGD', lr=0.001, momentum=0.9, weight_decay=0.0005)
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))

# learning policy
lr_config = dict(policy='step', step=[20, 40])
total_epochs = 45
checkpoint_config = dict(interval=1)
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook'),
    ])

# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/fatigue_c3d/'
load_from = None
resume_from = None
workflow = [('train', 1)]
| model = dict(type='Recognizer3D', backbone=dict(type='C3D', pretrained='./work_dirs/fatigue_c3d/c3d_sports1m_pretrain_20201016-dcc47ddc.pth', style='pytorch', conv_cfg=dict(type='Conv3d'), norm_cfg=None, act_cfg=dict(type='ReLU'), dropout_ratio=0.5, init_std=0.005), cls_head=dict(type='I3DHead', num_classes=2, in_channels=4096, spatial_type=None, dropout_ratio=0.5, init_std=0.01), train_cfg=None, test_cfg=dict(average_clips='score'))
dataset_type = 'FatigueCleanDataset'
data_root = '/zhourui/workspace/pro/fatigue/data/rawframes/new_clean/fatigue_clips'
data_root_val = '/zhourui/workspace/pro/fatigue/data/rawframes/new_clean/fatigue_clips'
facerect_data_prefix = '/zhourui/workspace/pro/fatigue/data/clean/fatigue_info_from_yolov5'
ann_file_train = '/zhourui/workspace/pro/fatigue/data/clean/fatigue_anns/20210824_fatigue_pl_less_than_50_fatigue_full_info_all_path.json'
ann_file_val = '/zhourui/workspace/pro/fatigue/data/clean/fatigue_anns/20210824_fatigue_pl_less_than_50_fatigue_full_info_all_path.json'
ann_file_test = '/zhourui/workspace/pro/fatigue/data/clean/fatigue_anns/20210824_fatigue_pl_less_than_50_fatigue_full_info_all_path.json'
test_save_results_path = 'work_dirs/fatigue_c3d/valid_results_testone.npy'
test_save_label_path = 'work_dirs/fatigue_c3d/valid_label_testone.npy'
img_norm_cfg = dict(mean=[104, 117, 128], std=[1, 1, 1], to_bgr=False)
clip_len = 16
train_pipeline = [dict(type='SampleFrames', clip_len=clip_len, frame_interval=1, num_clips=1, out_of_bound_opt='repeat_last'), dict(type='FatigueRawFrameDecode'), dict(type='Resize', scale=(112, 112), keep_ratio=False), dict(type='Flip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='FormatShape', input_format='NCTHW'), dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), dict(type='ToTensor', keys=['imgs', 'label'])]
val_pipeline = [dict(type='SampleFrames', clip_len=clip_len, frame_interval=1, num_clips=1, test_mode=True, out_of_bound_opt='repeat_last'), dict(type='FatigueRawFrameDecode'), dict(type='Resize', scale=(112, 112), keep_ratio=False), dict(type='Normalize', **img_norm_cfg), dict(type='FormatShape', input_format='NCTHW'), dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), dict(type='ToTensor', keys=['imgs'])]
test_pipeline = [dict(type='SampleFrames', clip_len=clip_len, frame_interval=1, num_clips=1, test_mode=True, out_of_bound_opt='repeat_last'), dict(type='FatigueRawFrameDecode'), dict(type='Resize', scale=(112, 112), keep_ratio=False), dict(type='Normalize', **img_norm_cfg), dict(type='FormatShape', input_format='NCTHW'), dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), dict(type='ToTensor', keys=['imgs'])]
data = dict(videos_per_gpu=40, workers_per_gpu=4, pin_memory=False, train=dict(type=dataset_type, ann_file=ann_file_train, video_data_prefix=data_root, facerect_data_prefix=facerect_data_prefix, data_phase='train', test_mode=False, pipeline=train_pipeline, min_frames_before_fatigue=clip_len), val=dict(type=dataset_type, ann_file=ann_file_val, video_data_prefix=data_root_val, facerect_data_prefix=facerect_data_prefix, data_phase='valid', test_mode=True, test_all=False, pipeline=val_pipeline, min_frames_before_fatigue=clip_len), test=dict(type=dataset_type, ann_file=ann_file_test, video_data_prefix=data_root_val, facerect_data_prefix=facerect_data_prefix, data_phase='valid', test_mode=True, test_all=False, test_save_label_path=test_save_label_path, test_save_results_path=test_save_results_path, pipeline=test_pipeline, min_frames_before_fatigue=clip_len))
evaluation = dict(interval=5, metrics=['top_k_classes'])
optimizer = dict(type='SGD', lr=0.001, momentum=0.9, weight_decay=0.0005)
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
lr_config = dict(policy='step', step=[20, 40])
total_epochs = 45
checkpoint_config = dict(interval=1)
log_config = dict(interval=20, hooks=[dict(type='TextLoggerHook')])
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = f'./work_dirs/fatigue_c3d/'
load_from = None
resume_from = None
workflow = [('train', 1)] |
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    """Lowest common ancestor of a binary tree via parent pointers."""

    def lowestCommonAncestor(self, root: 'TreeNode', p: 'TreeNode', q: 'TreeNode') -> 'TreeNode':
        """Iteratively record every node's parent, then climb from *p*
        collecting its ancestor set, and finally climb from *q* until the
        first node that is also an ancestor of *p*.
        """
        if root is None:
            return None
        parent = {root: None}
        pending = deque([root])
        while pending:
            node = pending.pop()
            for child in (node.left, node.right):
                if child:
                    parent[child] = node
                    pending.append(child)
        seen = set()
        while p:
            seen.add(p)
            p = parent[p]
        while q not in seen:
            q = parent[q]
        return q
def lowest_common_ancestor(self, root: 'TreeNode', p: 'TreeNode', q: 'TreeNode') -> 'TreeNode':
if root is None:
return None
stack = deque([root])
parent = {root: None}
while stack:
node = stack.pop()
if node.left:
parent[node.left] = node
stack.append(node.left)
if node.right:
parent[node.right] = node
stack.append(node.right)
ancestors = set()
while p:
ancestors.add(p)
p = parent[p]
while q not in ancestors:
q = parent[q]
return q |
def variance_of_sample_proportion(a, b, c, d, e, f, g, h, j, k):
    """Print and return the sample-proportion variance p(1-p)/n, where p is
    the fraction of the ten supplied ages that are strictly over 80.

    Bug fix: the original counting loop incremented the index twice on the
    `else` branch, so every element following a value <= 80 was skipped and
    the occurrence count (hence p) could be wrong.

    Returns None after printing a message when a value is non-numeric
    (ValueError) — matching the original's error handling.
    """
    try:
        sample = [int(value) for value in (a, b, c, d, e, f, g, h, j, k)]
        # Count how many people are over the age 80 — every element is
        # examined exactly once.
        occurrence = sum(1 for value in sample if value > 80)
        # n is the sample size, p the observed proportion.
        n = len(sample)
        p = occurrence / n
        var_samp_propor = p * (1 - p) / n
        print("variance_of_sample_proportion:", var_samp_propor)
        return var_samp_propor
    except ZeroDivisionError:
        print("Error: Dividing by Zero is not valid!!")
    except ValueError:
        print("Error: Only Numeric Values are valid!!")
try:
a = int(a)
b = int(b)
c = int(c)
d = int(d)
e = int(e)
f = int(f)
g = int(g)
h = int(h)
j = int(j)
k = int(k)
sample = [a, b, c, d, e, f, g, h, j, k]
i = 0
occurrence = 0
while i < len(sample):
if sample[i] > 80:
occurrence = occurrence + 1
else:
i = i + 1
i = i + 1
n = len(sample)
p = float(occurrence / n)
var_samp_propor = float(p * (1 - p) / n)
print('variance_of_sample_proportion:', var_samp_propor)
return var_samp_propor
except ZeroDivisionError:
print('Error: Dividing by Zero is not valid!!')
except ValueError:
print('Error: Only Numeric Values are valid!!') |
class Stack:
    """LIFO stack that tracks its minimum in O(1) time and O(1) extra space.

    Instead of a second stack, a value smaller than the running minimum is
    stored encoded as ``2*item - current_minimum`` (always < item), which
    marks the slot as holding the minimum and lets ``pop`` recover the
    previous minimum arithmetically.
    """

    def __init__(self):
        self.stack = []
        self.current_minimum = float('inf')

    def push(self, item):
        """Push *item*, updating the tracked minimum."""
        if not self.stack:
            self.stack.append(item)
            self.current_minimum = item
        elif item >= self.current_minimum:
            self.stack.append(item)
        else:
            # Encoded marker: strictly below the new minimum, so pop/peek
            # can tell this slot holds the minimum itself.
            self.stack.append(2 * item - self.current_minimum)
            self.current_minimum = item

    def pop(self):
        """Remove and return the top item; raise IndexError when empty."""
        if not self.stack:
            raise IndexError
        item = self.stack.pop()
        if item >= self.current_minimum:
            return item
        # Encoded slot: the real value is the current minimum; restore the
        # previous minimum from the encoding.
        answer = self.current_minimum
        self.current_minimum = 2 * self.current_minimum - item
        return answer

    def peek(self):
        """Return the top item without removing it; raise IndexError when empty."""
        if not self.stack:
            raise IndexError
        item = self.stack[-1]
        if item >= self.current_minimum:
            return item
        return self.current_minimum

    def find_min(self):
        """Return the smallest item on the stack; raise IndexError when empty.

        Bug fix: the original *returned* the IndexError class on an empty
        stack instead of raising it, unlike pop()/peek().
        """
        if not self.stack:
            raise IndexError
        return self.current_minimum

    def __len__(self):
        return len(self.stack)
def __init__(self):
self.stack = []
self.current_minimum = float('inf')
def push(self, item):
if not self.stack:
self.stack.append(item)
self.current_minimum = item
elif item >= self.current_minimum:
self.stack.append(item)
else:
self.stack.append(2 * item - self.current_minimum)
self.current_minimum = item
def pop(self):
if not self.stack:
raise IndexError
else:
item = self.stack.pop()
if item >= self.current_minimum:
return item
else:
answer = self.current_minimum
self.current_minimum = 2 * self.current_minimum - item
return answer
def peek(self):
if not self.stack:
raise IndexError
else:
item = self.stack[-1]
if item >= self.current_minimum:
return item
else:
return self.current_minimum
def find_min(self):
if not self.stack:
return IndexError
return self.current_minimum
def __len__(self):
return len(self.stack) |
# -*- coding: utf-8 -*-
# Return the contents of a file
def load_file(filename):
    """Open *filename* as text and return everything in it."""
    with open(filename, "r") as handle:
        contents = handle.read()
    return contents
# Write contents to a file
def write_file(filename, content):
    """Create (or truncate) *filename* and write *content* to it."""
    with open(filename, "w+") as handle:
        handle.write(content)
# Append contents to a file
def append_file(filename, content):
    """Append *content* to *filename*, creating the file if needed."""
    with open(filename, "a+") as handle:
        handle.write(content)
| def load_file(filename):
with open(filename, 'r') as f:
return f.read()
def write_file(filename, content):
with open(filename, 'w+') as f:
f.write(content)
def append_file(filename, content):
with open(filename, 'a+') as f:
f.write(content) |
def main():
    # NOTE(review): `i32` is not defined in this file — this looks like an
    # LPython/typed-subset snippet where i32 comes from the compiler's
    # builtins. Under CPython the local annotation is never evaluated, so
    # the function still runs; confirm the intended toolchain.
    t: tuple[i32, str]
    # NOTE(review): (1, 2) does not match the declared (i32, str) —
    # presumably an intentional type-checker test case; verify.
    t = (1, 2)
main() | def main():
t: tuple[i32, str]
t = (1, 2)
main() |
# Config
# Square input size (pixels) per model variant — 299 px; presumably an
# Inception-style network's expected input, TODO confirm against the model.
SIZES = {
    'basic': 299
}
NUM_CHANNELS = 3
NUM_CLASSES = 2
GENERATOR_BATCH_SIZE = 32
TOTAL_EPOCHS = 50
STEPS_PER_EPOCH = 100
VALIDATION_STEPS = 50
BASE_DIR = 'C:\\Users\\guilo\\mba-tcc\\data\\' | sizes = {'basic': 299}
num_channels = 3
num_classes = 2
generator_batch_size = 32
total_epochs = 50
steps_per_epoch = 100
validation_steps = 50
base_dir = 'C:\\Users\\guilo\\mba-tcc\\data\\' |
def test_split():
    """Splitting 10 units of cash five ways gives 2 per share."""
    expected = 2
    assert split(10) == expected
def test_string():
    """A quoted literal has type str."""
    city = "String"
    assert isinstance(city, str)
def test_float():
    """A decimal literal has type float."""
    price = 3.45
    assert type(price) is float
def test_int():
    """A whole-number literal has type int."""
    high_score = 1
    assert type(high_score) is int
def test_boolean():
    """True/False literals have type bool."""
    is_having_fun = True
    assert type(is_having_fun) is bool
def split(cash):
    """Divide *cash* evenly among five people, print one share, return it."""
    share = cash / 5
    print(share)
    return share
| def test_split():
assert split(10) == 2
def test_string():
city = 'String'
assert type(city) == str
def test_float():
price = 3.45
assert type(price) == float
def test_int():
high_score = 1
assert type(high_score) == int
def test_boolean():
is_having_fun = True
assert type(is_having_fun) == bool
def split(cash):
bounty = cash / 5
print(bounty)
return bounty |
# Two-location vacuum world: locations A and B, each Dirty or Clean.
A = 'A'
B = 'B'
# Rule number -> action name.
RULE_ACTION = {
    1: 'Suck',
    2: 'Right',
    3: 'Left',
    4: 'NoOp'
}
# Condition-action rules keyed by state tuples.
rules = {
    (A, 'Dirty'): 1,
    (B, 'Dirty'): 1,
    (A, 'Clean'): 2,
    (B, 'Clean'): 3,
    (A, B, 'Clean'): 4
}
# Ex. rule (if location == A && Dirty then 1)
Environment = {
    A: 'Dirty',
    B: 'Dirty',
    'Current': A
}
def INTERPRET_INPUT(input):
    """Identity percept-to-state mapping — no interpretation is needed."""
    return input
def RULE_MATCH(state, rules):
    """Look up the rule number matching *state*; None when nothing matches."""
    return rules.get(tuple(state))
def SIMPLE_REFLEX_AGENT(percept):
    """Classic simple reflex agent: percept -> state -> rule -> action."""
    state = INTERPRET_INPUT(percept)
    matched = RULE_MATCH(state, rules)
    return RULE_ACTION[matched]
def Sensors():
    """Sense the environment: (current location, status at that location)."""
    here = Environment['Current']
    return (here, Environment[here])
def Actuators(action):
    """Apply *action* to the environment: clean here, or move right/left."""
    here = Environment['Current']
    if action == 'Suck':
        Environment[here] = 'Clean'
    elif action == 'Right' and here == A:
        Environment['Current'] = B
    elif action == 'Left' and here == B:
        Environment['Current'] = A
def run(n):
    """Step the agent n-1 times, printing the environment before and after
    each chosen action."""
    print(' Current New')
    print('location status action location status')
    for _ in range(1, n):
        location, status = Sensors()  # environment before acting
        print(f'{location:12s}{status:8s}', end='')
        action = SIMPLE_REFLEX_AGENT(Sensors())
        Actuators(action)
        location, status = Sensors()  # environment after acting
        print(f'{action:8s}{location:12s}{status:8s}')
# Demo entry point: 9 sense-act cycles (range(1, 10)).
if __name__ == '__main__':
    run(10)
| a = 'A'
b = 'B'
rule_action = {1: 'Suck', 2: 'Right', 3: 'Left', 4: 'NoOp'}
rules = {(A, 'Dirty'): 1, (B, 'Dirty'): 1, (A, 'Clean'): 2, (B, 'Clean'): 3, (A, B, 'Clean'): 4}
environment = {A: 'Dirty', B: 'Dirty', 'Current': A}
def interpret_input(input):
return input
def rule_match(state, rules):
rule = rules.get(tuple(state))
return rule
def simple_reflex_agent(percept):
state = interpret_input(percept)
rule = rule_match(state, rules)
action = RULE_ACTION[rule]
return action
def sensors():
location = Environment['Current']
return (location, Environment[location])
def actuators(action):
location = Environment['Current']
if action == 'Suck':
Environment[location] = 'Clean'
elif action == 'Right' and location == A:
Environment['Current'] = B
elif action == 'Left' and location == B:
Environment['Current'] = A
def run(n):
print(' Current New')
print('location status action location status')
for i in range(1, n):
(location, status) = sensors()
print('{:12s}{:8s}'.format(location, status), end='')
action = simple_reflex_agent(sensors())
actuators(action)
(location, status) = sensors()
print('{:8s}{:12s}{:8s}'.format(action, location, status))
if __name__ == '__main__':
run(10) |
def convert(s):
    """Split *s* on single spaces and return the resulting list."""
    return s.split(' ')
def niceprint(s):
    """Print each element of *s* as 'Element #k = value' (1-based k)."""
    for position, element in enumerate(s, start=1):
        print('Element #', position, ' = ', element, sep='')
    return None
c1 = 10   # scratch constants; not referenced by convert()/niceprint() above
c2 = 's'  # NOTE(review): purpose unclear from this file chunk — confirm use
| def convert(s):
s_split = s.split(' ')
return s_split
def niceprint(s):
for (i, elm) in enumerate(s):
print('Element #', i + 1, ' = ', elm, sep='')
return None
c1 = 10
c2 = 's' |
#
# PySNMP MIB module ASCEND-MIBIPSECSPD-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/ASCEND-MIBIPSECSPD-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 17:11:32 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
# NOTE(review): pysmi-generated code — regenerate from the ASN.1 source
# rather than hand-editing. `mibBuilder` is injected by the pysnmp MIB
# loader at import time, so this module only runs when loaded through it.
configuration, = mibBuilder.importSymbols("ASCEND-MIB", "configuration")
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, SingleValueConstraint, ConstraintsUnion, ValueRangeConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "SingleValueConstraint", "ConstraintsUnion", "ValueRangeConstraint", "ConstraintsIntersection")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
Gauge32, Bits, Counter32, iso, IpAddress, Integer32, ModuleIdentity, Unsigned32, Counter64, ObjectIdentity, MibIdentifier, MibScalar, MibTable, MibTableRow, MibTableColumn, TimeTicks, NotificationType = mibBuilder.importSymbols("SNMPv2-SMI", "Gauge32", "Bits", "Counter32", "iso", "IpAddress", "Integer32", "ModuleIdentity", "Unsigned32", "Counter64", "ObjectIdentity", "MibIdentifier", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "TimeTicks", "NotificationType")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
# NOTE(review): this local subclass shadows the DisplayString imported just
# above, replacing its textual-convention behaviour with a plain OctetString.
class DisplayString(OctetString):
    pass
# MIB objects rooted at OID 1.3.6.1.4.1.529.23.168.
mibmibProfIpsecSpd = MibIdentifier((1, 3, 6, 1, 4, 1, 529, 23, 168))
mibmibProfIpsecSpdTable = MibTable((1, 3, 6, 1, 4, 1, 529, 23, 168, 1), )
if mibBuilder.loadTexts: mibmibProfIpsecSpdTable.setStatus('mandatory')
mibmibProfIpsecSpdEntry = MibTableRow((1, 3, 6, 1, 4, 1, 529, 23, 168, 1, 1), ).setIndexNames((0, "ASCEND-MIBIPSECSPD-MIB", "mibProfIpsecSpd-SpdName"))
if mibBuilder.loadTexts: mibmibProfIpsecSpdEntry.setStatus('mandatory')
mibProfIpsecSpd_SpdName = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 168, 1, 1, 1), DisplayString()).setLabel("mibProfIpsecSpd-SpdName").setMaxAccess("readonly")
if mibBuilder.loadTexts: mibProfIpsecSpd_SpdName.setStatus('mandatory')
mibProfIpsecSpd_DefaultFilter = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 168, 1, 1, 2), DisplayString()).setLabel("mibProfIpsecSpd-DefaultFilter").setMaxAccess("readwrite")
if mibBuilder.loadTexts: mibProfIpsecSpd_DefaultFilter.setStatus('mandatory')
mibProfIpsecSpd_Action_o = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 168, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("noAction", 1), ("createProfile", 2), ("deleteProfile", 3)))).setLabel("mibProfIpsecSpd-Action-o").setMaxAccess("readwrite")
if mibBuilder.loadTexts: mibProfIpsecSpd_Action_o.setStatus('mandatory')
mibmibProfIpsecSpd_PolicyTable = MibTable((1, 3, 6, 1, 4, 1, 529, 23, 168, 2), ).setLabel("mibmibProfIpsecSpd-PolicyTable")
if mibBuilder.loadTexts: mibmibProfIpsecSpd_PolicyTable.setStatus('mandatory')
mibmibProfIpsecSpd_PolicyEntry = MibTableRow((1, 3, 6, 1, 4, 1, 529, 23, 168, 2, 1), ).setLabel("mibmibProfIpsecSpd-PolicyEntry").setIndexNames((0, "ASCEND-MIBIPSECSPD-MIB", "mibProfIpsecSpd-Policy-SpdName"), (0, "ASCEND-MIBIPSECSPD-MIB", "mibProfIpsecSpd-Policy-Index-o"))
if mibBuilder.loadTexts: mibmibProfIpsecSpd_PolicyEntry.setStatus('mandatory')
mibProfIpsecSpd_Policy_SpdName = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 168, 2, 1, 1), DisplayString()).setLabel("mibProfIpsecSpd-Policy-SpdName").setMaxAccess("readonly")
if mibBuilder.loadTexts: mibProfIpsecSpd_Policy_SpdName.setStatus('mandatory')
mibProfIpsecSpd_Policy_Index_o = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 168, 2, 1, 2), Integer32()).setLabel("mibProfIpsecSpd-Policy-Index-o").setMaxAccess("readonly")
if mibBuilder.loadTexts: mibProfIpsecSpd_Policy_Index_o.setStatus('mandatory')
mibProfIpsecSpd_Policy = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 168, 2, 1, 3), DisplayString()).setLabel("mibProfIpsecSpd-Policy").setMaxAccess("readwrite")
if mibBuilder.loadTexts: mibProfIpsecSpd_Policy.setStatus('mandatory')
mibBuilder.exportSymbols("ASCEND-MIBIPSECSPD-MIB", mibmibProfIpsecSpdTable=mibmibProfIpsecSpdTable, mibProfIpsecSpd_SpdName=mibProfIpsecSpd_SpdName, mibmibProfIpsecSpd=mibmibProfIpsecSpd, mibProfIpsecSpd_Policy=mibProfIpsecSpd_Policy, mibmibProfIpsecSpd_PolicyEntry=mibmibProfIpsecSpd_PolicyEntry, mibProfIpsecSpd_Policy_Index_o=mibProfIpsecSpd_Policy_Index_o, mibmibProfIpsecSpdEntry=mibmibProfIpsecSpdEntry, mibProfIpsecSpd_Policy_SpdName=mibProfIpsecSpd_Policy_SpdName, mibmibProfIpsecSpd_PolicyTable=mibmibProfIpsecSpd_PolicyTable, mibProfIpsecSpd_Action_o=mibProfIpsecSpd_Action_o, DisplayString=DisplayString, mibProfIpsecSpd_DefaultFilter=mibProfIpsecSpd_DefaultFilter)
# Generated PySNMP module for ASCEND-MIBIPSECSPD-MIB (Ascend IPsec SPD
# profile objects under enterprises.ascend, OID 1.3.6.1.4.1.529.23.168).
#
# FIX(review): this rendition had been mechanically snake_cased on the
# assignment side only (e.g. `mibmib_prof_ipsec_spd_table = mib_table(...)`)
# while every reference — the `setStatus` calls, the `Displaystring` base
# class, and the whole `exportSymbols` call — still used the original
# camelCase identifiers, so importing the module raised NameError. Stray
# markdown-table pipes had also leaked into the first and last lines. All
# identifiers below are restored to the single camelCase spelling that the
# references require.
(configuration,) = mibBuilder.importSymbols('ASCEND-MIB', 'configuration')
(OctetString, Integer, ObjectIdentifier) = mibBuilder.importSymbols('ASN1', 'OctetString', 'Integer', 'ObjectIdentifier')
(NamedValues,) = mibBuilder.importSymbols('ASN1-ENUMERATION', 'NamedValues')
(ValueSizeConstraint, SingleValueConstraint, ConstraintsUnion, ValueRangeConstraint, ConstraintsIntersection) = mibBuilder.importSymbols('ASN1-REFINEMENT', 'ValueSizeConstraint', 'SingleValueConstraint', 'ConstraintsUnion', 'ValueRangeConstraint', 'ConstraintsIntersection')
(NotificationGroup, ModuleCompliance) = mibBuilder.importSymbols('SNMPv2-CONF', 'NotificationGroup', 'ModuleCompliance')
(Gauge32, Bits, Counter32, iso, IpAddress, Integer32, ModuleIdentity, Unsigned32, Counter64, ObjectIdentity, MibIdentifier, MibScalar, MibTable, MibTableRow, MibTableColumn, TimeTicks, NotificationType) = mibBuilder.importSymbols('SNMPv2-SMI', 'Gauge32', 'Bits', 'Counter32', 'iso', 'IpAddress', 'Integer32', 'ModuleIdentity', 'Unsigned32', 'Counter64', 'ObjectIdentity', 'MibIdentifier', 'MibScalar', 'MibTable', 'MibTableRow', 'MibTableColumn', 'TimeTicks', 'NotificationType')
(DisplayString, TextualConvention) = mibBuilder.importSymbols('SNMPv2-TC', 'DisplayString', 'TextualConvention')

class DisplayString(OctetString):
    # Module-local DisplayString deliberately shadows the SNMPv2-TC import,
    # matching the structure of the generated code; it is the type used by
    # the string-valued columns below and is re-exported at the bottom.
    pass

# Module root and SPD profile table.
mibmibProfIpsecSpd = MibIdentifier((1, 3, 6, 1, 4, 1, 529, 23, 168))
mibmibProfIpsecSpdTable = MibTable((1, 3, 6, 1, 4, 1, 529, 23, 168, 1))
if mibBuilder.loadTexts:
    mibmibProfIpsecSpdTable.setStatus('mandatory')
mibmibProfIpsecSpdEntry = MibTableRow((1, 3, 6, 1, 4, 1, 529, 23, 168, 1, 1)).setIndexNames((0, 'ASCEND-MIBIPSECSPD-MIB', 'mibProfIpsecSpd-SpdName'))
if mibBuilder.loadTexts:
    mibmibProfIpsecSpdEntry.setStatus('mandatory')
# Columns of the SPD profile table, indexed by SpdName.
mibProfIpsecSpd_SpdName = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 168, 1, 1, 1), DisplayString()).setLabel('mibProfIpsecSpd-SpdName').setMaxAccess('readonly')
if mibBuilder.loadTexts:
    mibProfIpsecSpd_SpdName.setStatus('mandatory')
mibProfIpsecSpd_DefaultFilter = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 168, 1, 1, 2), DisplayString()).setLabel('mibProfIpsecSpd-DefaultFilter').setMaxAccess('readwrite')
if mibBuilder.loadTexts:
    mibProfIpsecSpd_DefaultFilter.setStatus('mandatory')
# Row-action column: writing createProfile/deleteProfile manages the profile.
mibProfIpsecSpd_Action_o = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 168, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(('noAction', 1), ('createProfile', 2), ('deleteProfile', 3)))).setLabel('mibProfIpsecSpd-Action-o').setMaxAccess('readwrite')
if mibBuilder.loadTexts:
    mibProfIpsecSpd_Action_o.setStatus('mandatory')
# Per-SPD policy table, indexed by (SpdName, policy index).
mibmibProfIpsecSpd_PolicyTable = MibTable((1, 3, 6, 1, 4, 1, 529, 23, 168, 2)).setLabel('mibmibProfIpsecSpd-PolicyTable')
if mibBuilder.loadTexts:
    mibmibProfIpsecSpd_PolicyTable.setStatus('mandatory')
mibmibProfIpsecSpd_PolicyEntry = MibTableRow((1, 3, 6, 1, 4, 1, 529, 23, 168, 2, 1)).setLabel('mibmibProfIpsecSpd-PolicyEntry').setIndexNames((0, 'ASCEND-MIBIPSECSPD-MIB', 'mibProfIpsecSpd-Policy-SpdName'), (0, 'ASCEND-MIBIPSECSPD-MIB', 'mibProfIpsecSpd-Policy-Index-o'))
if mibBuilder.loadTexts:
    mibmibProfIpsecSpd_PolicyEntry.setStatus('mandatory')
mibProfIpsecSpd_Policy_SpdName = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 168, 2, 1, 1), DisplayString()).setLabel('mibProfIpsecSpd-Policy-SpdName').setMaxAccess('readonly')
if mibBuilder.loadTexts:
    mibProfIpsecSpd_Policy_SpdName.setStatus('mandatory')
mibProfIpsecSpd_Policy_Index_o = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 168, 2, 1, 2), Integer32()).setLabel('mibProfIpsecSpd-Policy-Index-o').setMaxAccess('readonly')
if mibBuilder.loadTexts:
    mibProfIpsecSpd_Policy_Index_o.setStatus('mandatory')
mibProfIpsecSpd_Policy = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 168, 2, 1, 3), DisplayString()).setLabel('mibProfIpsecSpd-Policy').setMaxAccess('readwrite')
if mibBuilder.loadTexts:
    mibProfIpsecSpd_Policy.setStatus('mandatory')
# Register the module's symbols so other MIB modules can import them.
mibBuilder.exportSymbols('ASCEND-MIBIPSECSPD-MIB', mibmibProfIpsecSpdTable=mibmibProfIpsecSpdTable, mibProfIpsecSpd_SpdName=mibProfIpsecSpd_SpdName, mibmibProfIpsecSpd=mibmibProfIpsecSpd, mibProfIpsecSpd_Policy=mibProfIpsecSpd_Policy, mibmibProfIpsecSpd_PolicyEntry=mibmibProfIpsecSpd_PolicyEntry, mibProfIpsecSpd_Policy_Index_o=mibProfIpsecSpd_Policy_Index_o, mibmibProfIpsecSpdEntry=mibmibProfIpsecSpdEntry, mibProfIpsecSpd_Policy_SpdName=mibProfIpsecSpd_Policy_SpdName, mibmibProfIpsecSpd_PolicyTable=mibmibProfIpsecSpd_PolicyTable, mibProfIpsecSpd_Action_o=mibProfIpsecSpd_Action_o, DisplayString=DisplayString, mibProfIpsecSpd_DefaultFilter=mibProfIpsecSpd_DefaultFilter)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.