| code | language | source | repo | path |
|---|---|---|---|---|
"""settings that allow changing of the license
clause used in askbot instances"""
from askbot import const
from askbot.conf.settings_wrapper import settings
from askbot.conf.super_groups import CONTENT_AND_UI
from askbot.deps import livesettings
from askbot.skins import utils as skin_utils
from django.utils.translation import ugettext as _
# NOTE(review): django_settings appears unused in this module -- confirm
# before removing (other revisions of this file may reference it).
from django.conf import settings as django_settings

# Live-settings group that collects every license-related option under the
# "Content and UI" super group of the admin interface.
LICENSE_SETTINGS = livesettings.ConfigurationGroup(
    'LICENSE_SETTINGS',
    _('Content License'),
    super_group = CONTENT_AND_UI
)

# Master switch: when disabled, no license clause is rendered at all.
settings.register(
    livesettings.BooleanValue(
        LICENSE_SETTINGS,
        'USE_LICENSE',
        description = _('Show license clause in the site footer'),
        default = True
    )
)

# Short identifier shown inline (e.g. in the footer).
settings.register(
    livesettings.StringValue(
        LICENSE_SETTINGS,
        'LICENSE_ACRONYM',
        description = _('Short name for the license'),
        default = 'cc-by-sa'
    )
)

# Human-readable license title; the default itself is translatable.
settings.register(
    livesettings.StringValue(
        LICENSE_SETTINGS,
        'LICENSE_TITLE',
        description = _('Full name of the license'),
        default = _('Creative Commons Attribution Share Alike 3.0'),
    )
)

# Whether the license text links to LICENSE_URL below.
settings.register(
    livesettings.BooleanValue(
        LICENSE_SETTINGS,
        'LICENSE_USE_URL',
        description = _('Add link to the license page'),
        default = True
    )
)

# Target of the license link; defaults to the canonical CC-BY-SA page.
settings.register(
    livesettings.URLValue(
        LICENSE_SETTINGS,
        'LICENSE_URL',
        description = _('License homepage'),
        help_text = _(
            'URL of the official page with all the license legal clauses'
        ),
        default = const.DEPENDENCY_URLS['cc-by-sa']
    )
)

# Whether to render the license badge/logo image.
settings.register(
    livesettings.BooleanValue(
        LICENSE_SETTINGS,
        'LICENSE_USE_LOGO',
        description = _('Use license logo'),
        default = True
    )
)

# Logo image path, resolved through the skin media URL helper.
settings.register(
    livesettings.ImageValue(
        LICENSE_SETTINGS,
        'LICENSE_LOGO_URL',
        description = _('License logo image'),
        default = '/images/cc-by-sa.png',
        url_resolver = skin_utils.get_media_url
    )
) | unknown | codeparrot/codeparrot-clean | ||
'use server'

// Transform-fixture: a cached server function ('use cache' directive)
// awaited from a server component in the same "use server" module.
async function fn() {
  'use cache'
  return 'foo'
}

async function Component() {
  const data = await fn()
  return <div>{data}</div>
} | javascript | github | https://github.com/vercel/next.js | crates/next-custom-transforms/tests/fixture/server-actions/server-graph/37/input.js
package kotlinx.coroutines.flow
import kotlinx.coroutines.testing.*
import kotlinx.coroutines.*
import kotlin.test.*
/**
 * Tests for the `Flow.onEmpty` operator: the action must run only when the
 * upstream completes without emitting anything, and emissions made from the
 * action must obey exception-transparency and cancellation rules.
 *
 * The numbered expect()/finish() calls pin the exact interleaving of events.
 */
class OnEmptyTest : TestBase() {

    @Test
    fun testOnEmptyInvoked() = runTest {
        // Empty upstream -> the onEmpty action supplies the single value.
        val flow = emptyFlow<Int>().onEmpty { emit(1) }
        assertEquals(1, flow.single())
    }

    @Test
    fun testOnEmptyNotInvoked() = runTest {
        // Upstream emitted a value, so the fallback emission is skipped.
        val flow = flowOf(1).onEmpty { emit(2) }
        assertEquals(1, flow.single())
    }

    @Test
    fun testOnEmptyNotInvokedOnError() = runTest {
        // A failing upstream is not "empty": the action must never run.
        val flow = flow<Int> {
            throw TestException()
        }.onEmpty { expectUnreached() }
        assertFailsWith<TestException>(flow)
    }

    @Test
    fun testOnEmptyNotInvokedOnCancellation() = runTest {
        // Cancelling a hung (never-emitting) upstream must not count as
        // emptiness; hang's block observes the cancellation at step 4.
        val flow = flow<Int> {
            expect(2)
            hang { expect(4) }
        }.onEmpty { expectUnreached() }
        expect(1)
        val job = flow.onEach { expectUnreached() }.launchIn(this)
        yield()
        expect(3)
        job.cancelAndJoin()
        finish(5)
    }

    @Test
    fun testOnEmptyCancellation() = runTest {
        // The onEmpty action itself must be cancellable while suspended;
        // emit(1) after the hang is never reached.
        val flow = emptyFlow<Int>().onEmpty {
            expect(2)
            hang { expect(4) }
            emit(1)
        }
        expect(1)
        val job = flow.onEach { expectUnreached() }.launchIn(this)
        yield()
        expect(3)
        job.cancelAndJoin()
        finish(5)
    }

    @Test
    fun testTransparencyViolation() = runTest {
        // Emitting into the collector from a foreign coroutine inside the
        // onEmpty action violates exception transparency and must throw
        // IllegalStateException, leaving the flow without a value.
        val flow = emptyFlow<Int>().onEmpty {
            expect(2)
            coroutineScope {
                launch {
                    try {
                        emit(1)
                    } catch (e: IllegalStateException) {
                        expect(3)
                    }
                }
            }
        }
        expect(1)
        assertNull(flow.singleOrNull())
        finish(4)
    }
} | kotlin | github | https://github.com/Kotlin/kotlinx.coroutines | kotlinx-coroutines-core/common/test/flow/operators/OnEmptyTest.kt
import sys
import random
MAX_R = 1000
MAX_D = 25
MAX_K = 20
MAX_C = 2000
MAX_B = 100
case_no = 1
def next_file(suffix=None, desc=None):
    """Open and return the next numbered ``.in`` test file for writing.

    The basename is the global case counter, zero-padded to two digits,
    with ``-<suffix>`` appended when a suffix is given.  When *desc* is
    provided, a matching ``.desc`` file holding that text plus a trailing
    newline is written as well.  Advances the counter as a side effect;
    the caller is responsible for closing the returned file handle.
    """
    global case_no
    if suffix is None:
        stem = '%02d' % case_no
    else:
        stem = '%02d-%s' % (case_no, suffix)
    handle = open(stem + '.in', 'w')
    if desc is not None:
        with open(stem + '.desc', 'w') as desc_file:
            desc_file.write('%s\n' % desc)
    case_no += 1
    return handle
def write_case(ingreds, starters, mains, desserts, conflicts, suffix=None, desc=None):
    # Emit one test case in the judge input format:
    #   line 1: R S M D C (counts of ingredients, dishes per course, conflicts)
    #   line 2: the R ingredient values
    #   next S+M+D lines: dish size, then its 1-based ingredient indices
    #   last C lines: conflicting dish pairs (dishes numbered consecutively
    #   across starters, mains, desserts)
    f = next_file(suffix, desc)
    f.write('%d %d %d %d %d\n' % (len(ingreds), len(starters), len(mains), len(desserts), len(conflicts)))
    f.write('%s\n' % ' '.join(map(str, ingreds)))
    for d in starters + mains + desserts:
        f.write('%d %s\n' % (len(d), ' '.join(map(str, d))))
    for (a,b) in conflicts:
        f.write('%d %d\n' % (a,b))
    f.close()
def random_dish(r, min_k, max_k):
    """Return a random dish: k distinct 1-based ingredient indices.

    k is drawn uniformly from [min_k, max_k].  Fix: under Python 3,
    ``range`` returns an immutable range object which ``random.shuffle``
    cannot operate on, so the range is materialized into a list first
    (this preserves the Python 2 shuffle stream for a fixed seed).
    """
    idxs = list(range(1, r + 1))
    k = random.randint(min_k, max_k)
    random.shuffle(idxs)
    return idxs[:k]
def gen_random(r, s, m, d, c, max_b, max_k, min_b=1, min_k=1):
    # Generate a fully random case: r ingredient values in [min_b, max_b],
    # s+m+d random dishes, and c distinct cross-course conflict pairs.
    ingreds = [random.randint(min_b, max_b) for i in range(r)]
    dishes = [random_dish(r, min_k, max_k) for i in range(s+m+d)]
    conf = []
    for i in range(c):
        # Rejection-sample a conflict between two dishes of *different*
        # course types (t1 != t2) until an unused unordered pair is found.
        # The caller must keep c <= s*m + m*d + s*d or this loops forever.
        while True:
            t1 = random.randint(0,2)
            t2 = (t1+random.randint(1,2)) % 3
            # Map a within-course index to the global dish numbering
            # (starters: 1..s, mains: s+1..s+m, desserts: s+m+1..s+m+d).
            a = random.randint(1, [s,m,d][t1]) + [0,s,s+m][t1]
            b = random.randint(1, [s,m,d][t2]) + [0,s,s+m][t2]
            if (a,b) not in conf and (b,a) not in conf:
                break
        conf.append((a,b))
    write_case(ingreds, dishes[:s], dishes[s:s+m], dishes[s+m:], conf, suffix='random', desc='random case with %d ingreds, %d starters, %d mains, %d desserts, %d conflicts' % (r, s, m, d, c))
def gen_special():
    """Hand-crafted edge cases around the 10^18 answer cap and 64-bit
    overflow behaviour.

    NOTE(review): this function relies on Python 2-only ``range(...) + list``
    concatenation and will raise TypeError under Python 3 -- confirm the
    intended interpreter before porting.
    """
    # answer = 10^18
    # 18 fives and 18 twos: the forced ingredients multiply to exactly 10^18.
    ingreds = [5]*18 + [2]*18
    random.shuffle(ingreds)
    s = m = d = 1
    starters = [list(set(range(1,13) + random_dish(len(ingreds), 1, MAX_K-12))) for i in range(s)]
    mains = [list(set(range(13,25) + random_dish(len(ingreds), 1, MAX_K-12))) for i in range(m)]
    desserts = [list(set(range(25,37) + random_dish(len(ingreds), 1, MAX_K-12))) for i in range(d)]
    write_case(ingreds, starters, mains, desserts, [], suffix='maxans', desc='answer = 10^18')
    # answer = 19*10^18 (19 terms of value 10^18, should take out some overflow errs)
    ingreds = [5]*18 + [2]*18
    random.shuffle(ingreds)
    # NOTE(review): 'ingres' looks like a typo for 'ingreds' -- the [1]*42
    # padding is assigned to a dead variable and silently dropped, so the
    # extra filler ingredients never make it into the case.
    ingres = ingreds + [1]*42
    s = 1
    m = 1
    d = 19
    starters = [list(set(range(1,13) + random_dish(len(ingreds), 1, MAX_K-12))) for i in range(s)]
    mains = [list(set(range(13,25) + random_dish(len(ingreds), 1, MAX_K-12))) for i in range(m)]
    desserts = [list(set(range(25,37) + random_dish(len(ingreds), 1, MAX_K-12))) for i in range(d)]
    write_case(ingreds, starters, mains, desserts, [], suffix='overflow', desc='answer = 19*10^18 -- 19 terms of value 10^18')
    # answer = 10^18 (but with 25x25x25 dish combos)
    # note this has a bunch of constants hard-coded to MAX_D=25
    ingreds = [5]*12 + [2]*18
    random.shuffle(ingreds)
    ingreds = ingreds + [1]*123
    s = m = d = MAX_D
    starters = [list(set(range(1,11) + random_dish(len(ingreds), 1, MAX_K-10))) for i in range(s)]
    mains = [list(set(range(11,21) + random_dish(len(ingreds), 1, MAX_K-10))) for i in range(m)]
    desserts = [list(set(range(21,31) + random_dish(len(ingreds), 1, MAX_K-10))) for i in range(d)]
    write_case(ingreds, starters, mains, desserts, [], suffix='maxans', desc='ans = 10^18, coming from 25^3 different dish combos')
    # answer = 10^18+1
    ingreds = [5]*18 + [2]*18
    random.shuffle(ingreds)
    ingreds = ingreds + [1]*(3*MAX_K)
    s = m = d = 1
    # Each course gets an extra all-filler dish; the conflicts below rule out
    # all but one additional combination, producing the +1 term.
    starters = [list(set(range(1,13) + random_dish(len(ingreds), 1, MAX_K-12))) for i in range(s)] + [range(37, 37+MAX_K)]
    mains = [list(set(range(13,25) + random_dish(len(ingreds), 1, MAX_K-12))) for i in range(m)] + [range(37+MAX_K, 37+2*MAX_K)]
    desserts = [list(set(range(25,37) + random_dish(len(ingreds), 1, MAX_K-12))) for i in range(d)] + [range(37+2*MAX_K, 37+3*MAX_K)]
    conf = [(1,4), (6, 3), (2, 5)]
    write_case(ingreds, starters, mains, desserts, conf, suffix='overflow', desc='ans = 10^18 + 1')
    gen_1875()
    # overflow, but all partial products when multiplied as longs are
    # within range
    ingreds = [92, 92, 92, 92, 79, 92, 92, 92, 92, 92]
    starters = [random_dish(len(ingreds), 1, MAX_K)]
    mains = [range(1, len(ingreds)+1)]
    desserts = [random_dish(len(ingreds), 1, MAX_K)]
    write_case(ingreds, starters, mains, desserts, [], suffix='overflow', desc='overflow, but when computing with signed 64-bit integers,\neach partial product is larger than the previous, and smaller than 10^18')
    # similar case as before but kills a solution that (for whatever
    # reason...) multplies the numbers from largest to smallest
    ingreds = [38, 38, 38, 38, 38, 80, 38, 38, 38, 38, 38, 38]
    write_case(ingreds, starters, mains, desserts, [], suffix='overflow', desc='overflow, but when computing with signed 64-bit integers,\neach partial product is larger than the previous, and smaller than 10^18')
def gen_1875():
    """Emit the all-conflicts case (answer = 0): every starter/main and
    main/dessert and starter/dessert pair is forbidden, so no valid meal
    exists."""
    ingreds = [1]
    dishes = [[1]] * 25
    conf = [
        pair
        for i in range(1, 26)
        for j in range(1, 26)
        for pair in ((i, j + 25), (i, j + 50), (i + 25, j + 50))
    ]
    write_case(ingreds, dishes, dishes, dishes, conf, suffix='maxconf',
               desc='all pairs of dishes in conflict')
# --- driver: generate the whole test set ---------------------------------
random.seed(42)  # fixed seed for reproducible test data
gen_special()
gen_random(17, 5, 9, 8, 23, 5, 11)
# 25 fully random cases with parameters drawn up to the problem limits.
for i in range(25):
    r = random.randint(1, MAX_R)
    s = random.randint(1, MAX_D)
    m = random.randint(1, MAX_D)
    d = random.randint(1, MAX_D)
    # never request more conflicts than there are cross-course pairs
    max_c = min(s*m+m*d+s*d, MAX_C)
    c = random.randint(0, max_c)
    max_b = random.randint(1, 20)
    max_k = random.randint(1, MAX_K)
    gen_random(r, s, m, d, c, max_b, max_k)
# Extremal cases at the problem limits.
# NOTE(review): MAX_D*MAX_D*3/2 relies on Python 2 integer division; under
# Python 3 it yields a float and gen_random's range(c) would raise --
# confirm the intended interpreter.
gen_random(MAX_R, MAX_D, MAX_D, MAX_D, 0, 2, MAX_K, min_k=MAX_K)
gen_random(MAX_R, MAX_D, MAX_D, MAX_D, 0, MAX_B, MAX_K, min_k=MAX_K, min_b=MAX_B)
gen_random(MAX_R, MAX_D, MAX_D, MAX_D, MAX_D*MAX_D*3/2, 2, MAX_K, min_k=MAX_K)
gen_random(MAX_R, MAX_D, MAX_D, MAX_D, MAX_D*MAX_D*3/2, MAX_B, MAX_K, min_k=MAX_K, min_b=MAX_B)
gen_random(MAX_R, MAX_D, MAX_D, MAX_D, MAX_D*MAX_D*3-3*MAX_D, 2, MAX_K, min_k=MAX_K)
gen_random(MAX_R, MAX_D, MAX_D, MAX_D, MAX_D*MAX_D*3-3*MAX_D, MAX_B, MAX_K, min_k=MAX_K, min_b=MAX_B) | unknown | codeparrot/codeparrot-clean | ||
#encoding: UTF-8
'''
Created on Nov 24, 2015
@author: coffeemakr
'''
import unittest
import re
import itertools
import random
import string
import sys, os
sys.path.insert(0, os.path.abspath(os.path.join(os.path.split(__file__)[0], "..")))
from uci.backend.originalreader import OriginalReader
class TestRegularExpressions(unittest.TestCase):
    """Exercises OriginalReader's UCI-config line parsing helpers:
    detection of unterminated quotes and extraction of section
    type/name tokens."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def testReName(self):
        # Placeholder: name-regex behaviour is not covered yet.
        pass

    def test_has_unclosed_quotes(self):
        # Inputs that leave a single or double quote open, including
        # escaped-quote combinations and embedded newlines.
        unclosed = ['some text "',
                    'some text \'',
                    'some \'text',
                    'some "text',
                    '"some text',
                    '\'some text',
                    'some "proper" \'unproper',
                    'some "proper" "unproper',
                    'some \'proper\' \'unproper',
                    'some \'proper\' "unproper',
                    'some "unproper\" \'proper',
                    r'some "unproper\\\' \"proper',
                    r'some \'unproper\\" \\\'proper',
                    'some muliline " unquote text \n the new line',
                    'some muliline " unquote text \n\r the new line',
                    'some muliline " unquote text \r\n the new line',
                    'some muliline " unquote text \n the \n new line',
                    ]
        # Properly balanced inputs; quotes after a '#' comment marker do
        # not count, and a quote may close on a later line.
        closed = ['"some" "quotes"',
                  '\'some\' \'quotes\'',
                  '\'some\' "quotes"',
                  '"some" \'quotes\'',
                  ' some "some" "quotes" quotes',
                  ' multiline: " properly closed \n on the new line"',
                  ' Properly close because the # quote " ist commented ',
                  'Properly closed because the " # comment is ignored is the quotes" ']
        for value in unclosed:
            result = OriginalReader.has_quotes_left_open(value + '\n')
            self.assertTrue(result, "Unclosed quotes not detected: %s" % value)
        for value in closed:
            result = OriginalReader.has_quotes_left_open(value + '\n')
            self.assertFalse(result, "Closed quotes faulty detected: %s" % value)

    def test_get_section_name_and_type(self):
        # Usually you do not need to enclose identifiers or values in quotes.
        # Quotes are only required if the enclosed value contains spaces or tabs.
        # Also it's legal to use double- instead of single-quotes when typing configuration options.
        values = [
            'a',
            'abc',
            'abcdef',
            'abcdefghijk',
            'A',
            'ABC',
            'ABCDEFGHIJK',
            'Abcefg',
            'aBCEFG',
            '0',
            '1',
            '12',
            '_abcdef',
            'ab_cdef',
            'abcedef_',
            '_',
            '5pe¢îalVälũe',
            '%',
            '=',
            '=+/()=*+'
        ]
        # Each value is tried with double, single and no quoting for both
        # the section type and the section name.
        # NOTE(review): the locals 'input' and 'type' shadow builtins here.
        for value in values:
            for type_escape in ('"', "'", ''):
                for name_escape in ('"', "'", ''):
                    input = 'config ' + type_escape + value + type_escape + " " + name_escape + value + name_escape
                    name, type = OriginalReader.get_section_name_and_type(input)
                    self.assertEqual(value, name)
                    self.assertEqual(value, type)
        # Whitespace-bearing values must be quoted.  The content of this
        # block string is the test data itself (leading whitespace inside
        # it is significant -- TODO confirm it survived transit intact).
        values = """abcdef
abcdef
a b c d e f
a b cdef
abcdef
\tabcdef
abcdef\t
abcd\tef
\ta b c d\te\t\t\t\tf d """
        values = values.splitlines()
        for value in values:
            for type_escape in ('"', "'"):
                for name_escape in ('"', "'"):
                    input = 'config ' + type_escape + value + type_escape + " " + name_escape + value + name_escape
                    name, type = OriginalReader.get_section_name_and_type(input)
                    self.assertEqual(value, name)
                    self.assertEqual(value, type)
if __name__ == '__main__':
unittest.main() | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies simple actions when using an explicit build target of 'all'.
"""
import glob
import os
import TestGyp
test = TestGyp.TestGyp(workdir='workarea_all')
test.run_gyp('actions.gyp', chdir='src')
test.relocate('src', 'relocate/src')
# Some gyp files use an action that mentions an output but never
# writes it as a means to making the action run on every build. That
# doesn't mesh well with ninja's semantics. TODO(evan): figure out
# how to work always-run actions in to ninja.
if test.format in ['ninja', 'xcode-ninja']:
test.build('actions.gyp', test.ALL, chdir='relocate/src')
else:
# Test that an "always run" action increases a counter on multiple
# invocations, and that a dependent action updates in step.
test.build('actions.gyp', test.ALL, chdir='relocate/src')
test.must_match('relocate/src/subdir1/actions-out/action-counter.txt', '1')
test.must_match('relocate/src/subdir1/actions-out/action-counter_2.txt', '1')
test.build('actions.gyp', test.ALL, chdir='relocate/src')
test.must_match('relocate/src/subdir1/actions-out/action-counter.txt', '2')
test.must_match('relocate/src/subdir1/actions-out/action-counter_2.txt', '2')
# The "always run" action only counts to 2, but the dependent target
# will count forever if it's allowed to run. This verifies that the
# dependent target only runs when the "always run" action generates
# new output, not just because the "always run" ran.
test.build('actions.gyp', test.ALL, chdir='relocate/src')
test.must_match('relocate/src/subdir1/actions-out/action-counter.txt', '2')
test.must_match('relocate/src/subdir1/actions-out/action-counter_2.txt', '2')
expect = """\
Hello from program.c
Hello from make-prog1.py
Hello from make-prog2.py
"""
if test.format == 'xcode':
chdir = 'relocate/src/subdir1'
else:
chdir = 'relocate/src'
test.run_built_executable('program', chdir=chdir, stdout=expect)
test.must_match('relocate/src/subdir2/file.out', "Hello from make-file.py\n")
expect = "Hello from generate_main.py\n"
if test.format == 'xcode':
chdir = 'relocate/src/subdir3'
else:
chdir = 'relocate/src'
test.run_built_executable('null_input', chdir=chdir, stdout=expect)
# Clean out files which may have been created if test.ALL was run.
def clean_dep_files():
    # Remove dep_*.txt and deps_all_done_*.txt artifacts so that later
    # dependency-ordering assertions start from a clean slate.
    for file in (glob.glob('relocate/src/dep_*.txt') +
                 glob.glob('relocate/src/deps_all_done_*.txt')):
        if os.path.exists(file):
            os.remove(file)
# Confirm our clean.
clean_dep_files()
test.must_not_exist('relocate/src/dep_1.txt')
test.must_not_exist('relocate/src/deps_all_done_first_123.txt')
# Make sure all deps finish before an action is run on a 'None' target.
# If using the Make builder, add -j to make things more difficult.
arguments = []
if test.format == 'make':
arguments = ['-j']
test.build('actions.gyp', 'action_with_dependencies_123', chdir='relocate/src',
arguments=arguments)
test.must_exist('relocate/src/deps_all_done_first_123.txt')
# Try again with a target that has deps in reverse. Output files from
# previous tests deleted. Confirm this execution did NOT run the ALL
# target which would mess up our dep tests.
clean_dep_files()
test.build('actions.gyp', 'action_with_dependencies_321', chdir='relocate/src',
arguments=arguments)
test.must_exist('relocate/src/deps_all_done_first_321.txt')
test.must_not_exist('relocate/src/deps_all_done_first_123.txt')
test.pass_test() | unknown | codeparrot/codeparrot-clean | ||
---
"Test cat nodes output":
- do:
cat.nodes: {}
- match:
$body: |
/ #ip heap.percent ram.percent cpu load_1m load_5m load_15m node.role master name
^ ((\d{1,3}\.){3}\d{1,3} \s+ \d+ \s+ \d* \s+ (-)?\d* \s+ ((-)?\d*(\.\d+)?)? \s+ ((-)?\d*(\.\d+)?)?\s+ ((-)?\d*(\.\d+)?)? \s+ (-|[cdfhilmrstvw]{1,11}) \s+ [-*x] \s+ .* \n)+ $/
- do:
cat.nodes:
v: true
- match:
$body: |
/^ ip \s+ heap\.percent \s+ ram\.percent \s+ cpu \s+ load_1m \s+ load_5m \s+ load_15m \s+ node\.role \s+ master \s+ name \n
((\d{1,3}\.){3}\d{1,3} \s+ \d+ \s+ \d* \s+ (-)?\d* \s+ ((-)?\d*(\.\d+)?)? \s+ ((-)?\d*(\.\d+)?)? \s+ ((-)?\d*(\.\d+)?)? \s+ (-|[cdfhilmrstvw]{1,11}) \s+ [-*x] \s+ .* \n)+ $/
- do:
cat.nodes:
h: heap.current,heap.percent,heap.max
v: true
- match:
$body: |
/^ heap\.current \s+ heap\.percent \s+ heap\.max \n
(\s+ \d+(\.\d+)?[ptgmk]?b \s+ \d+ \s+ \d+(\.\d+)?[ptgmk]?b \n)+ $/
- do:
cat.nodes:
h: heap.*
v: true
- match:
$body: |
/^ heap\.current \s+ heap\.percent \s+ heap\.max \n
(\s+ \d+(\.\d+)?[ptgmk]?b \s+ \d+ \s+ \d+(\.\d+)?[ptgmk]?b \n)+ $/
- do:
cat.nodes:
h: file_desc.current,file_desc.percent,file_desc.max
v: true
- match:
# Windows reports -1 for the file descriptor counts.
$body: |
/^ file_desc\.current \s+ file_desc\.percent \s+ file_desc\.max \n
(\s+ (-1|\d+) \s+ \d+ \s+ (-1|\d+) \n)+ $/
- do:
cat.nodes:
h: http
v: true
- match:
$body: |
/^ http \n ((\d{1,3}\.){3}\d{1,3}:\d{1,5}\n)+ $/
---
"Additional disk information":
- do:
cat.nodes:
h: diskAvail,diskTotal,diskUsed,diskUsedPercent
v: true
- match:
# leading whitespace on columns and optional whitespace on values is necessary
# because `diskAvail` is right aligned and text representation of disk size might be
# longer so it's padded with leading whitespace
$body: |
/^ \s* diskAvail \s+ diskTotal \s+ diskUsed \s+ diskUsedPercent \n
(\s* \d+(\.\d+)?[ptgmk]?b \s+ \d+(\.\d+)?[ptgmk]?b \s+ \d+(\.\d+)?[ptgmk]?b\s+ (100\.00 | \d{1,2}\.\d{2}) \n)+ $/
- do:
cat.nodes:
h: disk,dt,du,dup
v: true
- match:
# leading whitespace on columns and optional whitespace on values is necessary
# because `disk` is right aligned and text representation of disk size might be
# longer so it's padded with leading whitespace
$body: |
/^ \s* disk \s+ dt \s+ du \s+ dup \n
(\s* \d+(\.\d+)?[ptgmk]?b \s+ \d+(\.\d+)?[ptgmk]?b \s+ \d+(\.\d+)?[ptgmk]?b\s+ (100\.00 | \d{1,2}\.\d{2}) \n)+ $/
---
"Test cat nodes output with full_id set":
- do:
cat.nodes:
h: id
# check for a 4 char non-whitespace character string
- match:
$body: |
/^(\S{4}\n)+$/
- do:
cat.nodes:
h: id
full_id: true
# check for a 5+ char non-whitespace character string
- match:
$body: |
/^(\S{5,}\n)+$/ | unknown | github | https://github.com/elastic/elasticsearch | distribution/docker/src/yamlRestTest/resources/rest-api-spec/test/11_nodes.yml |
#!/bin/python3
class Node:
    """Singly linked list node holding an integer payload."""

    def __init__(self, data):
        self.data = data
        self.next = None


class Solution:
    """Operations on a singly linked list."""

    def insert(self, head, data):
        """Append ``data`` at the tail and return the (possibly new) head.

        Simplified from the original three-way branch: the explicit
        ``head.next is None`` case was redundant with the tail walk.
        """
        node = Node(data)
        if head is None:
            return node
        tail = head
        while tail.next is not None:
            tail = tail.next
        tail.next = node
        return head

    def display(self, head):
        """Print node values separated by spaces (no trailing newline)."""
        current = head
        while current:
            print(current.data, end=' ')
            current = current.next

    def removeDuplicates(self, head):
        """Drop nodes whose value was already seen, keeping first occurrences.

        Uses a set for O(1) membership tests; the original tracked seen
        values in a list, making the whole pass O(n^2).
        Returns the head of the deduplicated list (None for an empty list).
        """
        seen = set()
        current = head
        while current is not None:
            seen.add(current.data)
            # Splice out any run of already-seen successors.
            while current.next is not None and current.next.data in seen:
                current.next = current.next.next
            current = current.next
        return head
# Driver: read T values from stdin, append each to the list, remove
# duplicate values, and print the surviving list.
mylist= Solution()
T=int(input())
head=None
for i in range(T):
    data=int(input())
    head=mylist.insert(head,data)
head=mylist.removeDuplicates(head)
mylist.display(head); | unknown | codeparrot/codeparrot-clean | ||
"""
===============================
Wikipedia principal eigenvector
===============================
A classical way to assert the relative importance of vertices in a
graph is to compute the principal eigenvector of the adjacency matrix
so as to assign to each vertex the values of the components of the first
eigenvector as a centrality score:
https://en.wikipedia.org/wiki/Eigenvector_centrality
On the graph of webpages and links those values are called the PageRank
scores by Google.
The goal of this example is to analyze the graph of links inside
wikipedia articles to rank articles by relative importance according to
this eigenvector centrality.
The traditional way to compute the principal eigenvector is to use the
power iteration method:
https://en.wikipedia.org/wiki/Power_iteration
Here the computation is achieved thanks to Martinsson's Randomized SVD
algorithm implemented in scikit-learn.
The graph data is fetched from the DBpedia dumps. DBpedia is an extraction
of the latent structured data of the Wikipedia content.
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from bz2 import BZ2File
import os
from datetime import datetime
from pprint import pprint
from time import time
import numpy as np
from scipy import sparse
from joblib import Memory
from sklearn.decomposition import randomized_svd
from urllib.request import urlopen
print(__doc__)
# #############################################################################
# Where to download the data, if not already on disk
redirects_url = "http://downloads.dbpedia.org/3.5.1/en/redirects_en.nt.bz2"
redirects_filename = redirects_url.rsplit("/", 1)[1]
page_links_url = "http://downloads.dbpedia.org/3.5.1/en/page_links_en.nt.bz2"
page_links_filename = page_links_url.rsplit("/", 1)[1]
resources = [
(redirects_url, redirects_filename),
(page_links_url, page_links_filename),
]
for url, filename in resources:
if not os.path.exists(filename):
print("Downloading data from '%s', please wait..." % url)
opener = urlopen(url)
open(filename, 'wb').write(opener.read())
print()
# #############################################################################
# Loading the redirect files
memory = Memory(cachedir=".")
def index(redirects, index_map, k):
    """Return the integer index of article ``k`` after following its
    redirect (if any), registering a fresh dense index on first sight."""
    target = redirects.get(k, k)
    if target not in index_map:
        index_map[target] = len(index_map)
    return index_map[target]
# Length of the shared DBpedia URI prefix, plus a precomputed slice that
# strips the surrounding '<...>' markers together with that prefix.
DBPEDIA_RESOURCE_PREFIX_LEN = len("http://dbpedia.org/resource/")
SHORTNAME_SLICE = slice(DBPEDIA_RESOURCE_PREFIX_LEN + 1, -1)


def short_name(nt_uri):
    """Strip the '<'/'>' URI markers and the common DBpedia resource
    prefix, leaving only the short article name."""
    start = DBPEDIA_RESOURCE_PREFIX_LEN + 1
    return nt_uri[start:-1]
def get_redirects(redirects_filename):
    """Parse the redirections and build a transitively closed map out of it"""
    # NOTE(review): BZ2File yields bytes under Python 3, so keys/values here
    # would be bytes and the str concatenation in the log line below would
    # fail -- this code appears to target Python 2; confirm before porting.
    redirects = {}
    print("Parsing the NT redirect file")
    for l, line in enumerate(BZ2File(redirects_filename)):
        # N-Triples lines have exactly 4 tokens: <src> <pred> <target> .
        split = line.split()
        if len(split) != 4:
            print("ignoring malformed line: " + line)
            continue
        redirects[short_name(split[0])] = short_name(split[2])
        if l % 1000000 == 0:
            print("[%s] line: %08d" % (datetime.now().isoformat(), l))
    # compute the transitive closure
    print("Computing the transitive closure of the redirect relation")
    for l, source in enumerate(redirects.keys()):
        transitive_target = None
        target = redirects[source]
        # Follow the redirect chain until it terminates or loops back;
        # `seen` guards against redirect cycles.
        seen = {source}
        while True:
            transitive_target = target
            target = redirects.get(target)
            if target is None or target in seen:
                break
            seen.add(target)
        redirects[source] = transitive_target
        if l % 1000000 == 0:
            print("[%s] line: %08d" % (datetime.now().isoformat(), l))
    return redirects
# disabling joblib as the pickling of large dicts seems much too slow
#@memory.cache
def get_adjacency_matrix(redirects_filename, page_links_filename, limit=None):
    """Extract the adjacency graph as a scipy sparse matrix

    Redirects are resolved first.

    Returns X, the scipy sparse adjacency matrix, redirects as python
    dict from article names to article names and index_map a python dict
    from article names to python int (article indexes).

    `limit`, when given, stops parsing after that many link lines so the
    graph fits in RAM.
    """
    print("Computing the redirect map")
    redirects = get_redirects(redirects_filename)
    print("Computing the integer index map")
    index_map = dict()
    links = list()
    for l, line in enumerate(BZ2File(page_links_filename)):
        split = line.split()
        if len(split) != 4:
            print("ignoring malformed line: " + line)
            continue
        # Map both endpoints through the redirect table and assign dense
        # integer indices on first sight.
        i = index(redirects, index_map, short_name(split[0]))
        j = index(redirects, index_map, short_name(split[2]))
        links.append((i, j))
        if l % 1000000 == 0:
            print("[%s] line: %08d" % (datetime.now().isoformat(), l))
        if limit is not None and l >= limit - 1:
            break
    print("Computing the adjacency matrix")
    # LIL format is efficient for incremental construction; converted to
    # CSR below for fast arithmetic.
    X = sparse.lil_matrix((len(index_map), len(index_map)), dtype=np.float32)
    for i, j in links:
        X[i, j] = 1.0
    del links
    print("Converting to CSR representation")
    X = X.tocsr()
    print("CSR conversion done")
    return X, redirects, index_map
# stop after 5M links to make it possible to work in RAM
X, redirects, index_map = get_adjacency_matrix(
redirects_filename, page_links_filename, limit=5000000)
names = {i: name for name, i in index_map.items()}
print("Computing the principal singular vectors using randomized_svd")
t0 = time()
U, s, V = randomized_svd(X, 5, n_iter=3)
print("done in %0.3fs" % (time() - t0))
# print the names of the wikipedia related strongest components of the
# principal singular vector which should be similar to the highest eigenvector
print("Top wikipedia pages according to principal singular vectors")
pprint([names[i] for i in np.abs(U.T[0]).argsort()[-10:]])
pprint([names[i] for i in np.abs(V[0]).argsort()[-10:]])
def centrality_scores(X, alpha=0.85, max_iter=100, tol=1e-10):
    """Power iteration computation of the principal eigenvector

    This method is also known as Google PageRank and the implementation
    is based on the one from the NetworkX project (BSD licensed too)
    with copyrights by:

      Aric Hagberg <hagberg@lanl.gov>
      Dan Schult <dschult@colgate.edu>
      Pieter Swart <swart@lanl.gov>

    `alpha` is the damping factor, `max_iter` caps the number of power
    iterations, and `tol` is the per-node convergence tolerance.
    """
    n = X.shape[0]
    X = X.copy()
    incoming_counts = np.asarray(X.sum(axis=1)).ravel()
    print("Normalizing the graph")
    # Row-normalize so each non-empty row of X sums to 1 (stochastic matrix);
    # operates directly on the CSR data array for speed.
    for i in incoming_counts.nonzero()[0]:
        X.data[X.indptr[i]:X.indptr[i + 1]] *= 1.0 / incoming_counts[i]
    # Dangling nodes (rows that sum to ~0) redistribute their mass
    # uniformly over all nodes.
    dangle = np.asarray(np.where(np.isclose(X.sum(axis=1), 0),
                                 1.0 / n, 0)).ravel()
    scores = np.full(n, 1. / n, dtype=np.float32)  # initial guess
    for i in range(max_iter):
        print("power iteration #%d" % i)
        prev_scores = scores
        # Damped update: follow links with probability alpha, otherwise
        # teleport uniformly.
        scores = (alpha * (scores * X + np.dot(dangle, prev_scores))
                  + (1 - alpha) * prev_scores.sum() / n)
        # check convergence: normalized l_inf norm
        scores_max = np.abs(scores).max()
        if scores_max == 0.0:
            scores_max = 1.0
        err = np.abs(scores - prev_scores).max() / scores_max
        print("error: %0.6f" % err)
        if err < n * tol:
            return scores
    return scores
print("Computing principal eigenvector score using a power iteration method")
t0 = time()
scores = centrality_scores(X, max_iter=100, tol=1e-10)
print("done in %0.3fs" % (time() - t0))
pprint([names[i] for i in np.abs(scores).argsort()[-10:]]) | unknown | codeparrot/codeparrot-clean | ||
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# This module copyright (C) 2013 Therp BV (<http://therp.nl>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv.orm import Model
class publisher_warranty_contract(Model):
    """Override that turns the publisher-warranty 'update notification'
    into a no-op, so the periodic phone-home to the upstream server is
    suppressed."""
    _inherit = 'publisher_warranty.contract'

    def update_notification(self, cr, uid, ids, cron_mode=True, context=None):
        # Intentionally empty: disables the scheduled notification call.
        pass | unknown | codeparrot/codeparrot-clean | ||
import functools
from importlib import util
from typing import Any
from langchain_core.embeddings import Embeddings
from langchain_core.runnables import Runnable
_SUPPORTED_PROVIDERS = {
"azure_openai": "langchain_openai",
"bedrock": "langchain_aws",
"cohere": "langchain_cohere",
"google_genai": "langchain_google_genai",
"google_vertexai": "langchain_google_vertexai",
"huggingface": "langchain_huggingface",
"mistralai": "langchain_mistralai",
"ollama": "langchain_ollama",
"openai": "langchain_openai",
}
def _get_provider_list() -> str:
"""Get formatted list of providers and their packages."""
return "\n".join(
f" - {p}: {pkg.replace('_', '-')}" for p, pkg in _SUPPORTED_PROVIDERS.items()
)
def _parse_model_string(model_name: str) -> tuple[str, str]:
    """Parse a model string into provider and model name components.

    The model string should be in the format 'provider:model-name', where provider
    is one of the supported providers.

    Args:
        model_name: A model string in the format 'provider:model-name'

    Returns:
        A tuple of (provider, model_name)

    ```python
    _parse_model_string("openai:text-embedding-3-small")
    # Returns: ("openai", "text-embedding-3-small")

    _parse_model_string("bedrock:amazon.titan-embed-text-v1")
    # Returns: ("bedrock", "amazon.titan-embed-text-v1")
    ```

    Raises:
        ValueError: If the model string is not in the correct format or
            the provider is unsupported
    """
    if ":" not in model_name:
        providers = _SUPPORTED_PROVIDERS
        # NOTE(review): this message interpolates the raw dict, while the
        # unsupported-provider branch below uses the formatted
        # _get_provider_list() -- consider unifying the two.
        msg = (
            f"Invalid model format '{model_name}'.\n"
            f"Model name must be in format 'provider:model-name'\n"
            f"Example valid model strings:\n"
            f" - openai:text-embedding-3-small\n"
            f" - bedrock:amazon.titan-embed-text-v1\n"
            f" - cohere:embed-english-v3.0\n"
            f"Supported providers: {providers}"
        )
        raise ValueError(msg)
    # Split only on the first ':' so model names may themselves contain ':'.
    provider, model = model_name.split(":", 1)
    # Normalize: provider keys are lower-case; strip stray whitespace.
    provider = provider.lower().strip()
    model = model.strip()
    if provider not in _SUPPORTED_PROVIDERS:
        msg = (
            f"Provider '{provider}' is not supported.\n"
            f"Supported providers and their required packages:\n"
            f"{_get_provider_list()}"
        )
        raise ValueError(msg)
    if not model:
        msg = "Model name cannot be empty"
        raise ValueError(msg)
    return provider, model
def _infer_model_and_provider(
    model: str,
    *,
    provider: str | None = None,
) -> tuple[str, str]:
    """Resolve a (provider, model_name) pair from an explicit provider or a
    'provider:model-name' string.

    Raises:
        ValueError: If the model string is empty, the provider cannot be
            determined, or the provider is unsupported.
    """
    if not model.strip():
        msg = "Model name cannot be empty"
        raise ValueError(msg)
    # Only parse 'provider:model' syntax when no explicit provider was given;
    # an explicit provider takes precedence over anything in the string.
    if provider is None and ":" in model:
        provider, model_name = _parse_model_string(model)
    else:
        model_name = model
    if not provider:
        providers = _SUPPORTED_PROVIDERS
        msg = (
            "Must specify either:\n"
            "1. A model string in format 'provider:model-name'\n"
            " Example: 'openai:text-embedding-3-small'\n"
            "2. Or explicitly set provider from: "
            f"{providers}"
        )
        raise ValueError(msg)
    if provider not in _SUPPORTED_PROVIDERS:
        msg = (
            f"Provider '{provider}' is not supported.\n"
            f"Supported providers and their required packages:\n"
            f"{_get_provider_list()}"
        )
        raise ValueError(msg)
    return provider, model_name
@functools.lru_cache(maxsize=len(_SUPPORTED_PROVIDERS))
def _check_pkg(pkg: str) -> None:
    """Check if a package is installed."""
    # Cached per package name so repeated provider lookups probe the
    # import machinery only once.
    if util.find_spec(pkg):
        return
    msg = (
        f"Could not import {pkg} python package. "
        f"Please install it with `pip install {pkg}`"
    )
    raise ImportError(msg)
def init_embeddings(
    model: str,
    *,
    provider: str | None = None,
    **kwargs: Any,
) -> Embeddings | Runnable[Any, list[float]]:
    """Create an embeddings model from a model name and optional provider.

    The integration package for the chosen provider must be installed; each
    provider branch below imports it lazily so unused integrations are never
    loaded.

    Args:
        model: Either a full model string such as
            `"openai:text-embedding-3-small"`, or a bare model name to be
            combined with the `provider` argument.
        provider: Explicit provider name. When omitted, it is parsed from the
            `model` string. Supported providers (each requiring its
            integration package): `openai`, `azure_openai` (both via
            `langchain-openai`), `bedrock` (`langchain-aws`), `cohere`,
            `google_genai`, `google_vertexai`, `huggingface`, `mistralai`,
            `ollama`.
        **kwargs: Provider-specific keyword arguments forwarded unchanged to
            the embedding class constructor (e.g. `api_key`).

    Returns:
        An `Embeddings` instance for the resolved provider and model.

    Raises:
        ValueError: If no model is given, the provider cannot be determined,
            or the provider is unsupported.
        ImportError: If the required provider package is not installed.

    ???+ note "Example Usage"

        ```python
        # Using a model string
        model = init_embeddings("openai:text-embedding-3-small")
        model.embed_query("Hello, world!")

        # Using explicit provider
        model = init_embeddings(model="text-embedding-3-small", provider="openai")
        model.embed_documents(["Hello, world!", "Goodbye, world!"])

        # With additional parameters
        model = init_embeddings("openai:text-embedding-3-small", api_key="sk-...")
        ```

    !!! version-added "Added in `langchain` 0.3.9"
    """
    if not model:
        providers = _SUPPORTED_PROVIDERS.keys()
        msg = (
            f"Must specify model name. Supported providers are: {', '.join(providers)}"
        )
        raise ValueError(msg)

    provider, model_name = _infer_model_and_provider(model, provider=provider)
    # Fail fast with an install hint when the integration package is missing.
    _check_pkg(_SUPPORTED_PROVIDERS[provider])

    if provider == "openai":
        from langchain_openai import OpenAIEmbeddings

        return OpenAIEmbeddings(model=model_name, **kwargs)
    if provider == "azure_openai":
        from langchain_openai import AzureOpenAIEmbeddings

        return AzureOpenAIEmbeddings(model=model_name, **kwargs)
    if provider == "google_genai":
        from langchain_google_genai import GoogleGenerativeAIEmbeddings

        return GoogleGenerativeAIEmbeddings(model=model_name, **kwargs)
    if provider == "google_vertexai":
        from langchain_google_vertexai import VertexAIEmbeddings

        return VertexAIEmbeddings(model=model_name, **kwargs)
    if provider == "bedrock":
        from langchain_aws import BedrockEmbeddings

        # Bedrock's constructor takes ``model_id`` rather than ``model``.
        return BedrockEmbeddings(model_id=model_name, **kwargs)
    if provider == "cohere":
        from langchain_cohere import CohereEmbeddings

        return CohereEmbeddings(model=model_name, **kwargs)
    if provider == "mistralai":
        from langchain_mistralai import MistralAIEmbeddings

        return MistralAIEmbeddings(model=model_name, **kwargs)
    if provider == "huggingface":
        from langchain_huggingface import HuggingFaceEmbeddings

        # HuggingFace's constructor takes ``model_name`` rather than ``model``.
        return HuggingFaceEmbeddings(model_name=model_name, **kwargs)
    if provider == "ollama":
        from langchain_ollama import OllamaEmbeddings

        return OllamaEmbeddings(model=model_name, **kwargs)

    # Defensive: _infer_model_and_provider already rejects unknown providers.
    msg = (
        f"Provider '{provider}' is not supported.\n"
        f"Supported providers and their required packages:\n"
        f"{_get_provider_list()}"
    )
    raise ValueError(msg)
# Public API of this module. ``Embeddings`` is re-exported purely so that
# older code importing it from here keeps working.
__all__ = [
    "Embeddings",  # Re-exported for backwards compatibility only
    "init_embeddings",
]
# -*- coding: utf-8 -*-
##############################################################################
#
# Author: David BEAL, Copyright 2014 Akretion
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import re
from openerp.osv import orm, fields
class AbstractConfigSettings(orm.AbstractModel):
    """Abstract settings model that mirrors fields of ``res.company``.

    Concrete subclasses set ``_companyObject`` to their company model class;
    at registry load time every company column whose name starts with
    ``_prefix`` gets a matching ``fields.related`` column on this model
    (with the prefix stripped), so the settings form reads/writes company
    values transparently.
    """
    _name = 'abstract.config.settings'
    _description = 'Abstract configuration settings'
    # prefix field name to differentiate fields in company with those in config
    _prefix = 'setting_'
    # this is the class name to import in your module
    # (it should be ResCompany or res_company, depends of your code)
    _companyObject = None

    def _filter_field(self, field_key):
        """Hook: override in your module to exclude company fields that
        should NOT get a matching related field (return False to skip)."""
        return True

    def __init__(self, pool, cr):
        # Build the related columns dynamically from the company model's
        # column definitions (old-API osv column objects).
        super(AbstractConfigSettings, self).__init__(pool, cr)
        if self._companyObject:
            for field_key in self._companyObject._columns:
                # allows to exclude some field
                if self._filter_field(field_key):
                    args = ('company_id', field_key)
                    kwargs = {
                        'string': self._companyObject._columns[field_key].string,
                        'help': self._companyObject._columns[field_key].help,
                        'type': self._companyObject._columns[field_key]._type,
                    }
                    # Carry over the comodel for relational columns.
                    if '_obj' in self._companyObject._columns[field_key].__dict__.keys():
                        kwargs['relation'] = \
                            self._companyObject._columns[field_key]._obj
                    if '_domain' in \
                            self._companyObject._columns[field_key].__dict__.keys():
                        kwargs['domain'] = \
                            self._companyObject._columns[field_key]._domain
                    # Strip the prefix: company 'setting_foo' -> local 'foo'.
                    field_key = re.sub('^' + self._prefix, '', field_key)
                    self._columns[field_key] = \
                        fields.related(*args, **kwargs)

    _columns = {
        'company_id': fields.many2one(
            'res.company',
            'Company',
            required=True),
    }

    def _default_company(self, cr, uid, context=None):
        # Default to the current user's company.
        user = self.pool['res.users'].browse(cr, uid, uid, context=context)
        return user.company_id.id

    _defaults = {
        'company_id': _default_company,
    }

    def field_to_populate_as_related(self, cr, uid, field, company_cols, context=None):
        """Only fields which come from the company with the right prefix
        must be populated as related fields."""
        if self._prefix + field in company_cols:
            return True
        return False

    def onchange_company_id(self, cr, uid, ids, company_id, context=None):
        " update related fields "
        values = {}
        # NOTE(review): 'currency_id' is reset unconditionally here even
        # though it is not declared on this model — presumably expected on
        # subclasses; confirm before relying on it.
        values['currency_id'] = False
        if not company_id:
            return {'value': values}
        company_m = self.pool['res.company']
        company = company_m.browse(
            cr, uid, company_id, context=context)
        company_cols = company_m._columns.keys()
        for field in self._columns:
            if self.field_to_populate_as_related(
                    cr, uid, field, company_cols, context=context):
                # Last element of the related chain is the company column name.
                cpny_field = self._columns[field].arg[-1]
                if self._columns[field]._type == 'many2one':
                    values[field] = company[cpny_field]['id'] or False
                else:
                    values[field] = company[cpny_field]
        return {'value': values}

    def create(self, cr, uid, values, context=None):
        id = super(AbstractConfigSettings, self).create(
            cr, uid, values, context=context)
        # Hack: to avoid some nasty bug, related fields are not written
        # upon record creation. Hence we write on those fields here.
        vals = {}
        for fname, field in self._columns.iteritems():
            if isinstance(field, fields.related) and fname in values:
                vals[fname] = values[fname]
        self.write(cr, uid, [id], vals, context)
        return id
---
title: svelte/attachments
tags: attachments
---
> MODULE: svelte/attachments | unknown | github | https://github.com/sveltejs/svelte | documentation/docs/98-reference/21-svelte-attachments.md |
/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_CORE_PLATFORM_RANDOM_H_
#define TENSORFLOW_CORE_PLATFORM_RANDOM_H_

#include "tensorflow/core/platform/types.h"
#include "tsl/platform/random.h"

namespace tensorflow {
namespace random {

// Forwarding aliases: the implementations live in TSL (tsl::random). These
// using-declarations keep the historical tensorflow::random spelling working.
using tsl::random::New64;             // NOLINT
using tsl::random::New64DefaultSeed;  // NOLINT

}  // namespace random
}  // namespace tensorflow

#endif  // TENSORFLOW_CORE_PLATFORM_RANDOM_H_
import warnings
from mongoengine.common import _import_class
from mongoengine.errors import InvalidDocumentError
from mongoengine.python_support import PY3
from mongoengine.queryset import (DO_NOTHING, DoesNotExist,
MultipleObjectsReturned,
QuerySetManager)
from mongoengine.base.common import _document_registry, ALLOW_INHERITANCE
from mongoengine.base.fields import BaseField, ComplexBaseField, ObjectIdField
__all__ = ('DocumentMetaclass', 'TopLevelDocumentMetaclass')
class DocumentMetaclass(type):
    """Metaclass for all documents.

    Collects fields from the class body and all bases, wires up db_field
    maps, registers the class in the global document registry and applies
    reverse-delete rules. ``TopLevelDocumentMetaclass`` builds on top of
    this for collection-backed documents.
    """

    def __new__(cls, name, bases, attrs):
        flattened_bases = cls._get_bases(bases)
        super_new = super(DocumentMetaclass, cls).__new__

        # If a base class just call super
        metaclass = attrs.get('my_metaclass')
        if metaclass and issubclass(metaclass, DocumentMetaclass):
            return super_new(cls, name, bases, attrs)

        attrs['_is_document'] = attrs.get('_is_document', False)
        attrs['_cached_reference_fields'] = []

        # EmbeddedDocuments could have meta data for inheritance
        if 'meta' in attrs:
            attrs['_meta'] = attrs.pop('meta')

        # EmbeddedDocuments should inherit meta data
        if '_meta' not in attrs:
            meta = MetaDict()
            for base in flattened_bases[::-1]:
                # Add any mixin metadata from plain objects
                if hasattr(base, 'meta'):
                    meta.merge(base.meta)
                elif hasattr(base, '_meta'):
                    meta.merge(base._meta)
            attrs['_meta'] = meta
            attrs['_meta']['abstract'] = False  # 789: EmbeddedDocument shouldn't inherit abstract

        # Inheritable documents store their class path in a '_cls' field.
        if attrs['_meta'].get('allow_inheritance', ALLOW_INHERITANCE):
            StringField = _import_class('StringField')
            attrs['_cls'] = StringField()

        # Handle document Fields

        # Merge all fields from subclasses
        doc_fields = {}
        for base in flattened_bases[::-1]:
            if hasattr(base, '_fields'):
                doc_fields.update(base._fields)

            # Standard object mixin - merge in any Fields
            if not hasattr(base, '_meta'):
                base_fields = {}
                for attr_name, attr_value in base.__dict__.iteritems():
                    if not isinstance(attr_value, BaseField):
                        continue
                    attr_value.name = attr_name
                    if not attr_value.db_field:
                        attr_value.db_field = attr_name
                    base_fields[attr_name] = attr_value
                doc_fields.update(base_fields)

        # Discover any document fields
        field_names = {}
        for attr_name, attr_value in attrs.iteritems():
            if not isinstance(attr_value, BaseField):
                continue
            attr_value.name = attr_name
            # db_field defaults to the attribute name when not given.
            if not attr_value.db_field:
                attr_value.db_field = attr_name
            doc_fields[attr_name] = attr_value

            # Count names to ensure no db_field redefinitions
            field_names[attr_value.db_field] = field_names.get(
                attr_value.db_field, 0) + 1

        # Ensure no duplicate db_fields
        duplicate_db_fields = [k for k, v in field_names.items() if v > 1]
        if duplicate_db_fields:
            msg = ("Multiple db_fields defined for: %s " %
                   ", ".join(duplicate_db_fields))
            raise InvalidDocumentError(msg)

        # Set _fields and db_field maps
        attrs['_fields'] = doc_fields
        attrs['_db_field_map'] = dict([(k, getattr(v, 'db_field', k))
                                       for k, v in doc_fields.iteritems()])
        attrs['_reverse_db_field_map'] = dict(
            (v, k) for k, v in attrs['_db_field_map'].iteritems())

        # Fields ordered by declaration order (creation_counter).
        attrs['_fields_ordered'] = tuple(i[1] for i in sorted(
            (v.creation_counter, v.name)
            for v in doc_fields.itervalues()))

        #
        # Set document hierarchy
        #
        superclasses = ()
        class_name = [name]
        for base in flattened_bases:
            if (not getattr(base, '_is_base_cls', True) and
                    not getattr(base, '_meta', {}).get('abstract', True)):
                # Collate hierarchy for _cls and _subclasses
                class_name.append(base.__name__)

            if hasattr(base, '_meta'):
                # Warn if allow_inheritance isn't set and prevent
                # inheritance of classes where inheritance is set to False
                allow_inheritance = base._meta.get('allow_inheritance',
                                                   ALLOW_INHERITANCE)
                if (allow_inheritance is not True and
                        not base._meta.get('abstract')):
                    raise ValueError('Document %s may not be subclassed' %
                                     base.__name__)

        # Get superclasses from last base superclass
        document_bases = [b for b in flattened_bases
                          if hasattr(b, '_class_name')]
        if document_bases:
            superclasses = document_bases[0]._superclasses
            superclasses += (document_bases[0]._class_name, )

        # '_cls' is the dotted path from root document to this class.
        _cls = '.'.join(reversed(class_name))
        attrs['_class_name'] = _cls
        attrs['_superclasses'] = superclasses
        attrs['_subclasses'] = (_cls, )
        attrs['_types'] = attrs['_subclasses']  # TODO depreciate _types

        # Create the new_class
        new_class = super_new(cls, name, bases, attrs)

        # Set _subclasses
        for base in document_bases:
            if _cls not in base._subclasses:
                base._subclasses += (_cls,)
            base._types = base._subclasses  # TODO depreciate _types

        (Document, EmbeddedDocument, DictField,
         CachedReferenceField) = cls._import_classes()

        if issubclass(new_class, Document):
            new_class._collection = None

        # Add class to the _document_registry
        _document_registry[new_class._class_name] = new_class

        # In Python 2, User-defined methods objects have special read-only
        # attributes 'im_func' and 'im_self' which contain the function obj
        # and class instance object respectively. With Python 3 these special
        # attributes have been replaced by __func__ and __self__. The Blinker
        # module continues to use im_func and im_self, so the code below
        # copies __func__ into im_func and __self__ into im_self for
        # classmethod objects in Document derived classes.
        if PY3:
            for key, val in new_class.__dict__.items():
                if isinstance(val, classmethod):
                    f = val.__get__(new_class)
                    if hasattr(f, '__func__') and not hasattr(f, 'im_func'):
                        f.__dict__.update({'im_func': getattr(f, '__func__')})
                    if hasattr(f, '__self__') and not hasattr(f, 'im_self'):
                        f.__dict__.update({'im_self': getattr(f, '__self__')})

        # Handle delete rules
        for field in new_class._fields.itervalues():
            f = field
            if f.owner_document is None:
                f.owner_document = new_class
            delete_rule = getattr(f, 'reverse_delete_rule', DO_NOTHING)
            if isinstance(f, CachedReferenceField):
                if issubclass(new_class, EmbeddedDocument):
                    raise InvalidDocumentError(
                        "CachedReferenceFields is not allowed in EmbeddedDocuments")
                if not f.document_type:
                    raise InvalidDocumentError(
                        "Document is not available to sync")
                if f.auto_sync:
                    f.start_listener()
                f.document_type._cached_reference_fields.append(f)

            # For container fields the delete rule lives on the inner field.
            if isinstance(f, ComplexBaseField) and hasattr(f, 'field'):
                delete_rule = getattr(f.field,
                                      'reverse_delete_rule',
                                      DO_NOTHING)
                if isinstance(f, DictField) and delete_rule != DO_NOTHING:
                    msg = ("Reverse delete rules are not supported "
                           "for %s (field: %s)" %
                           (field.__class__.__name__, field.name))
                    raise InvalidDocumentError(msg)
                f = field.field

            if delete_rule != DO_NOTHING:
                if issubclass(new_class, EmbeddedDocument):
                    msg = ("Reverse delete rules are not supported for "
                           "EmbeddedDocuments (field: %s)" % field.name)
                    raise InvalidDocumentError(msg)
                f.document_type.register_delete_rule(new_class,
                                                     field.name, delete_rule)

            # Reject field names that would shadow Document methods.
            if (field.name and hasattr(Document, field.name) and
                    EmbeddedDocument not in new_class.mro()):
                msg = ("%s is a document method and not a valid "
                       "field name" % field.name)
                raise InvalidDocumentError(msg)

        return new_class

    def add_to_class(self, name, value):
        # Convenience: set an attribute on the (already built) class.
        setattr(self, name, value)

    @classmethod
    def _get_bases(cls, bases):
        # Flatten the inheritance tree into an ordered, de-duplicated tuple.
        if isinstance(bases, BasesTuple):
            return bases
        seen = []
        bases = cls.__get_bases(bases)
        unique_bases = (b for b in bases if not (b in seen or seen.append(b)))
        return BasesTuple(unique_bases)

    @classmethod
    def __get_bases(cls, bases):
        # Depth-first traversal of all base classes, skipping ``object``.
        for base in bases:
            if base is object:
                continue
            yield base
            for child_base in cls.__get_bases(base.__bases__):
                yield child_base

    @classmethod
    def _import_classes(cls):
        # Late imports via the registry to avoid circular imports.
        Document = _import_class('Document')
        EmbeddedDocument = _import_class('EmbeddedDocument')
        DictField = _import_class('DictField')
        CachedReferenceField = _import_class('CachedReferenceField')
        return Document, EmbeddedDocument, DictField, CachedReferenceField
class TopLevelDocumentMetaclass(DocumentMetaclass):
    """Metaclass for top-level documents (i.e. documents that have their own
    collection in the database).

    Extends :class:`DocumentMetaclass` with meta merging across bases,
    collection naming, index specs, default queryset manager, primary key
    selection and per-class ``DoesNotExist``/``MultipleObjectsReturned``
    exception types.
    """

    def __new__(cls, name, bases, attrs):
        flattened_bases = cls._get_bases(bases)
        super_new = super(TopLevelDocumentMetaclass, cls).__new__

        # Set default _meta data if base class, otherwise get user defined meta
        if attrs.get('my_metaclass') == TopLevelDocumentMetaclass:
            # defaults
            attrs['_meta'] = {
                'abstract': True,
                'max_documents': None,
                'max_size': None,
                'ordering': [],  # default ordering applied at runtime
                'indexes': [],  # indexes to be ensured at runtime
                'id_field': None,
                'index_background': False,
                'index_drop_dups': False,
                'index_opts': None,
                'delete_rules': None,
                'allow_inheritance': None,
            }
            attrs['_is_base_cls'] = True
            attrs['_meta'].update(attrs.get('meta', {}))
        else:
            attrs['_meta'] = attrs.get('meta', {})
            # Explicitly set abstract to false unless set
            attrs['_meta']['abstract'] = attrs['_meta'].get('abstract', False)
            attrs['_is_base_cls'] = False

        # Set flag marking as document class - as opposed to an object mixin
        attrs['_is_document'] = True

        # Ensure queryset_class is inherited
        if 'objects' in attrs:
            manager = attrs['objects']
            if hasattr(manager, 'queryset_class'):
                attrs['_meta']['queryset_class'] = manager.queryset_class

        # Clean up top level meta
        if 'meta' in attrs:
            del attrs['meta']

        # Find the parent document class
        parent_doc_cls = [b for b in flattened_bases
                          if b.__class__ == TopLevelDocumentMetaclass]
        parent_doc_cls = None if not parent_doc_cls else parent_doc_cls[0]

        # Prevent classes setting collection different to their parents
        # If parent wasn't an abstract class
        if (parent_doc_cls and 'collection' in attrs.get('_meta', {}) and
                not parent_doc_cls._meta.get('abstract', True)):
            msg = "Trying to set a collection on a subclass (%s)" % name
            warnings.warn(msg, SyntaxWarning)
            del attrs['_meta']['collection']

        # Ensure abstract documents have abstract bases
        if attrs.get('_is_base_cls') or attrs['_meta'].get('abstract'):
            if (parent_doc_cls and
                    not parent_doc_cls._meta.get('abstract', False)):
                msg = "Abstract document cannot have non-abstract base"
                raise ValueError(msg)
            # Abstract classes need none of the collection machinery below.
            return super_new(cls, name, bases, attrs)

        # Merge base class metas.
        # Uses a special MetaDict that handles various merging rules
        meta = MetaDict()
        for base in flattened_bases[::-1]:
            # Add any mixin metadata from plain objects
            if hasattr(base, 'meta'):
                meta.merge(base.meta)
            elif hasattr(base, '_meta'):
                meta.merge(base._meta)

            # Set collection in the meta if its callable
            if (getattr(base, '_is_document', False) and
                    not base._meta.get('abstract')):
                collection = meta.get('collection', None)
                if callable(collection):
                    meta['collection'] = collection(base)

        meta.merge(attrs.get('_meta', {}))  # Top level meta

        # Only simple classes (direct subclasses of Document)
        # may set allow_inheritance to False
        simple_class = all([b._meta.get('abstract')
                            for b in flattened_bases if hasattr(b, '_meta')])
        if (not simple_class and meta['allow_inheritance'] is False and
                not meta['abstract']):
            raise ValueError('Only direct subclasses of Document may set '
                             '"allow_inheritance" to False')

        # Set default collection name
        if 'collection' not in meta:
            # CamelCase class name -> snake_case collection name.
            meta['collection'] = ''.join('_%s' % c if c.isupper() else c
                                         for c in name).strip('_').lower()
        attrs['_meta'] = meta

        # Call super and get the new class
        new_class = super_new(cls, name, bases, attrs)

        meta = new_class._meta

        # Set index specifications
        meta['index_specs'] = new_class._build_index_specs(meta['indexes'])

        # If collection is a callable - call it and set the value
        collection = meta.get('collection')
        if callable(collection):
            new_class._meta['collection'] = collection(new_class)

        # Provide a default queryset unless exists or one has been set
        if 'objects' not in dir(new_class):
            new_class.objects = QuerySetManager()

        # Validate the fields and set primary key if needed
        for field_name, field in new_class._fields.iteritems():
            if field.primary_key:
                # Ensure only one primary key is set
                current_pk = new_class._meta.get('id_field')
                if current_pk and current_pk != field_name:
                    raise ValueError('Cannot override primary key field')

                # Set primary key
                if not current_pk:
                    new_class._meta['id_field'] = field_name
                    new_class.id = field

        # Set primary key if not defined by the document
        new_class._auto_id_field = getattr(parent_doc_cls,
                                           '_auto_id_field', False)
        if not new_class._meta.get('id_field'):
            # After 0.10, find not existing names, instead of overwriting
            id_name, id_db_name = cls.get_auto_id_names(new_class)
            new_class._auto_id_field = True
            new_class._meta['id_field'] = id_name
            new_class._fields[id_name] = ObjectIdField(db_field=id_db_name)
            new_class._fields[id_name].name = id_name
            new_class.id = new_class._fields[id_name]
            new_class._db_field_map[id_name] = id_db_name
            new_class._reverse_db_field_map[id_db_name] = id_name

            # Prepend id field to _fields_ordered
            new_class._fields_ordered = (id_name, ) + new_class._fields_ordered

        # Merge in exceptions with parent hierarchy
        exceptions_to_merge = (DoesNotExist, MultipleObjectsReturned)
        module = attrs.get('__module__')
        for exc in exceptions_to_merge:
            name = exc.__name__
            parents = tuple(getattr(base, name) for base in flattened_bases
                            if hasattr(base, name)) or (exc,)

            # Create new exception and set to new_class
            exception = type(name, parents, {'__module__': module})
            setattr(new_class, name, exception)

        return new_class

    @classmethod
    def get_auto_id_names(cls, new_class):
        """Pick attribute/db names for the auto primary key, avoiding any
        name already used by a declared field ('id'/'_id' when free,
        otherwise 'auto_id_N'/'_auto_id_N')."""
        id_name, id_db_name = ('id', '_id')
        if id_name not in new_class._fields and \
                id_db_name not in (v.db_field for v in new_class._fields.values()):
            return id_name, id_db_name
        id_basename, id_db_basename, i = 'auto_id', '_auto_id', 0
        while id_name in new_class._fields or \
                id_db_name in (v.db_field for v in new_class._fields.values()):
            id_name = '{0}_{1}'.format(id_basename, i)
            id_db_name = '{0}_{1}'.format(id_db_basename, i)
            i += 1
        return id_name, id_db_name
class MetaDict(dict):
    """Custom dictionary for meta classes.
    Handles the merging of set indexes.
    """

    # Keys whose list values are concatenated on merge instead of overwritten.
    _merge_options = ('indexes',)

    def merge(self, new_options):
        # Merge ``new_options`` into self: keys listed in _merge_options are
        # appended to, every other key is simply replaced.
        for k, v in new_options.iteritems():
            if k in self._merge_options:
                self[k] = self.get(k, []) + v
            else:
                self[k] = v
class BasesTuple(tuple):
    """Marker tuple subclass used to recognise an already-flattened bases
    tuple during class construction (see ``DocumentMetaclass._get_bases``)."""

    pass
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package configs
import (
"testing"
"github.com/hashicorp/hcl/v2"
"github.com/hashicorp/hcl/v2/hclsyntax"
)
// TestVariableInvalidDefault verifies that a variable block whose default
// value does not conform to its declared type constraint produces the
// "Invalid default value for variable" diagnostic with the expected detail
// message.
func TestVariableInvalidDefault(t *testing.T) {
	src := `
variable foo {
  type = map(object({
    foo = bool
  }))

  default = {
    "thingy" = {
      foo = "string where bool is expected"
    }
  }
}
`
	hclF, diags := hclsyntax.ParseConfig([]byte(src), "test.tf", hcl.InitialPos)
	if diags.HasErrors() {
		t.Fatal(diags.Error())
	}

	_, diags = parseConfigFile(hclF.Body, nil, false, false)
	if !diags.HasErrors() {
		t.Fatal("unexpected success; want error")
	}

	for _, diag := range diags {
		if diag.Severity != hcl.DiagError {
			continue
		}
		if diag.Summary != "Invalid default value for variable" {
			t.Errorf("unexpected diagnostic summary: %q", diag.Summary)
			continue
		}
		if got, want := diag.Detail, `This default value is not compatible with the variable's type constraint: ["thingy"].foo: a bool is required.`; got != want {
			// (Fixed typo in failure message: was "wrong diagnostic detault".)
			t.Errorf("wrong diagnostic detail\ngot: %s\nwant: %s", got, want)
		}
	}
}
// TestOutputDeprecation checks that the "deprecated" argument on an output
// block is parsed and recorded on the resulting Output value.
func TestOutputDeprecation(t *testing.T) {
	const src = `
output "foo" {
  value      = "bar"
  deprecated = "This output is deprecated"
}
`
	file, diags := hclsyntax.ParseConfig([]byte(src), "test.tf", hcl.InitialPos)
	if diags.HasErrors() {
		t.Fatal(diags.Error())
	}

	parsed, diags := parseConfigFile(file.Body, nil, false, false)
	if diags.HasErrors() {
		t.Fatalf("unexpected error: %q", diags)
	}

	out := parsed.Outputs[0]
	if !out.DeprecatedSet {
		t.Fatalf("expected output to be deprecated")
	}
	if out.Deprecated != "This output is deprecated" {
		t.Fatalf("expected output to have deprecation message")
	}
}
func TestVariableDeprecation(t *testing.T) {
src := `
variable "foo" {
type = string
deprecated = "This variable is deprecated, use bar instead"
}
`
hclF, diags := hclsyntax.ParseConfig([]byte(src), "test.tf", hcl.InitialPos)
if diags.HasErrors() {
t.Fatal(diags.Error())
}
b, diags := parseConfigFile(hclF.Body, nil, false, false)
if diags.HasErrors() {
t.Fatalf("unexpected error: %q", diags)
}
if !b.Variables[0].DeprecatedSet {
t.Fatalf("expected variable to be deprecated")
}
if b.Variables[0].Deprecated != "This variable is deprecated, use bar instead" {
t.Fatalf("expected variable to have deprecation message")
}
} | go | github | https://github.com/hashicorp/terraform | internal/configs/named_values_test.go |
# -*- coding: utf-8 -*-
import operator
import simplejson
import urllib2
import openerp
from openerp import tools
from openerp import SUPERUSER_ID
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT
from openerp.tools.translate import _
from openerp.http import request
from datetime import datetime, timedelta
from dateutil import parser
import pytz
from openerp.osv import fields, osv
import logging
_logger = logging.getLogger(__name__)
def status_response(status, substr=False):
    """Interpret an HTTP status code.

    With ``substr=True``, return the leading digit of the status code as an
    int (e.g. 2 for 200/201). With the default ``substr=False``, return True
    when the status belongs to the 2xx success class.
    """
    leading_digit = int(str(status)[0])
    if substr:
        return leading_digit
    return leading_digit == 2
class Meta(type):
    """Metaclass that turns a class into a simple record ("struct").

    Class attributes declared on the body become per-instance defaults that
    are (re)assigned in the generated __init__, so instances never share
    state through class-level ('static') attributes. Keyword arguments at
    instantiation override the defaults, but only for names declared on the
    class (enforced by an assert).
    """
    def __new__(typ, name, parents, attrs):
        # Split the class body into methods and plain (data) attributes.
        methods = dict((k, v) for k, v in attrs.iteritems()
                       if callable(v))
        attrs = dict((k, v) for k, v in attrs.iteritems()
                     if not callable(v))

        def init(self, **kw):
            # Assign every declared attribute on the instance, then apply
            # keyword overrides; unknown names are rejected.
            for k, v in attrs.iteritems():
                setattr(self, k, v)
            for k, v in kw.iteritems():
                assert k in attrs
                setattr(self, k, v)

        methods['__init__'] = init
        # Allow dict-style access: obj['name'] behaves like obj.name
        methods['__getitem__'] = getattr
        return type.__new__(typ, name, parents, methods)
# Base record type: subclasses declare plain class attributes and the Meta
# metaclass generates __init__/__getitem__ for them (Python 2 metaclass hook).
class Struct(object):
    __metaclass__ = Meta
# Synchronization state of one event as known on the OpenERP side. All
# attributes default to False and are filled in during synchronization.
class OpenerpEvent(Struct):
    event = False         # the OpenERP event record, once matched
    found = False         # True when a matching event exists in OpenERP
    event_id = False
    isRecurrence = False  # event defines a recurrence
    isInstance = False    # event is a single occurrence of a recurrence
    update = False        # last-update stamp, compared in SyncEvent.compute_OP
    status = False        # active flag (False once deleted)
    attendee_id = False
    synchro = False       # last synchronization stamp
# Synchronization state of one event as known on the Google Calendar side.
class GmailEvent(Struct):
    event = False         # the Google event payload, once matched
    found = False         # True when a matching event exists in Google
    isRecurrence = False  # event defines a recurrence
    isInstance = False    # event is a single occurrence of a recurrence
    update = False        # last-update stamp, compared in SyncEvent.compute_OP
    status = False        # active flag (False once deleted/cancelled)
class SyncEvent(object):
    """Pairs the OpenERP-side and Google-side view of one calendar event and
    decides which SyncOperation (self.OP) must be applied to reconcile them.
    """

    def __init__(self):
        self.OE = OpenerpEvent()
        self.GG = GmailEvent()
        self.OP = None  # the SyncOperation chosen by compute_OP()

    def __getitem__(self, key):
        # Allow self['OE'] / self['GG'] style access (used with tmpSrc below).
        return getattr(self, key)

    def compute_OP(self, modeFull=True):
        # Decide the operation to perform; result is stored in self.OP.
        # modeFull=False indicates a partial sync where OpenERP-side deletes
        # must still be pushed to Gmail.
        #If event are already in Gmail and in OpenERP
        if self.OE.found and self.GG.found:
            #If the event has been deleted from one side, we delete on other side !
            if self.OE.status != self.GG.status:
                self.OP = Delete((self.OE.status and "OE") or (self.GG.status and "GG"),
                                 'The event has been deleted from one side, we delete on other side !')
            #If event is not deleted !
            elif self.OE.status and self.GG.status:
                # Compare update stamps (fractional part stripped) to pick the
                # freshest side as the source of truth.
                if self.OE.update.split('.')[0] != self.GG.update.split('.')[0]:
                    if self.OE.update < self.GG.update:
                        tmpSrc = 'GG'
                    elif self.OE.update > self.GG.update:
                        tmpSrc = 'OE'
                    assert tmpSrc in ['GG', 'OE']

                    #if self.OP.action == None:
                    if self[tmpSrc].isRecurrence:
                        if self[tmpSrc].status:
                            self.OP = Update(tmpSrc, 'Only need to update, because i\'m active')
                        else:
                            self.OP = Exclude(tmpSrc, 'Need to Exclude (Me = First event from recurrence) from recurrence')
                    elif self[tmpSrc].isInstance:
                        self.OP = Update(tmpSrc, 'Only need to update, because already an exclu')
                    else:
                        self.OP = Update(tmpSrc, 'Simply Update... I\'m a single event')
                else:
                    # Same stamp on both sides: only re-push if OpenERP was
                    # changed after the last synchronization.
                    if not self.OE.synchro or self.OE.synchro.split('.')[0] < self.OE.update.split('.')[0]:
                        self.OP = Update('OE', 'Event already updated by another user, but not synchro with my google calendar')
                    else:
                        self.OP = NothingToDo("", 'Not update needed')
            else:
                self.OP = NothingToDo("", "Both are already deleted")

        # New in openERP... Create on create_events of synchronize function
        elif self.OE.found and not self.GG.found:
            if self.OE.status:
                self.OP = Delete('OE', 'Update or delete from GOOGLE')
            else:
                if not modeFull:
                    self.OP = Delete('GG', 'Deleted from OpenERP, need to delete it from Gmail if already created')
                else:
                    self.OP = NothingToDo("", "Already Deleted in gmail and unlinked in OpenERP")

        # Exists only on the Google side: create (or exclude) in OpenERP.
        elif self.GG.found and not self.OE.found:
            tmpSrc = 'GG'
            if not self.GG.status and not self.GG.isInstance:
                # don't need to make something... because event has been created and deleted before the synchronization
                self.OP = NothingToDo("", 'Nothing to do... Create and Delete directly')
            else:
                if self.GG.isInstance:
                    if self[tmpSrc].status:
                        self.OP = Exclude(tmpSrc, 'Need to create the new exclu')
                    else:
                        self.OP = Exclude(tmpSrc, 'Need to copy and Exclude')
                else:
                    self.OP = Create(tmpSrc, 'New EVENT CREATE from GMAIL')

    def __str__(self):
        return self.__repr__()

    def __repr__(self):
        # Debug dump of both sides and the chosen operation.
        myPrint = "\n\n---- A SYNC EVENT ---"
        myPrint += "\n ID OE: %s " % (self.OE.event and self.OE.event.id)
        myPrint += "\n ID GG: %s " % (self.GG.event and self.GG.event.get('id', False))
        myPrint += "\n Name OE: %s " % (self.OE.event and self.OE.event.name.encode('utf8'))
        myPrint += "\n Name GG: %s " % (self.GG.event and self.GG.event.get('summary', '').encode('utf8'))
        myPrint += "\n Found OE:%5s vs GG: %5s" % (self.OE.found, self.GG.found)
        myPrint += "\n Recurrence OE:%5s vs GG: %5s" % (self.OE.isRecurrence, self.GG.isRecurrence)
        myPrint += "\n Instance OE:%5s vs GG: %5s" % (self.OE.isInstance, self.GG.isInstance)
        myPrint += "\n Synchro OE: %10s " % (self.OE.synchro)
        myPrint += "\n Update OE: %10s " % (self.OE.update)
        myPrint += "\n Update GG: %10s " % (self.GG.update)
        myPrint += "\n Status OE:%5s vs GG: %5s" % (self.OE.status, self.GG.status)
        if (self.OP is None):
            myPrint += "\n Action %s" % "---!!!---NONE---!!!---"
        else:
            myPrint += "\n Action %s" % type(self.OP).__name__
            myPrint += "\n Source %s" % (self.OP.src)
            myPrint += "\n comment %s" % (self.OP.info)
        return myPrint
class SyncOperation(object):
    """Base class for a synchronization action between OpenERP and Google.

    ``src`` names the side the data should be taken from ('OE' or 'GG', as
    used by SyncEvent.compute_OP) and ``info`` is a human-readable
    justification; any extra keyword arguments are attached verbatim as
    attributes.
    """

    def __init__(self, src, info, **kw):
        self.src = src
        self.info = info
        for attr_name, attr_value in kw.items():
            setattr(self, attr_name, attr_value)

    def __str__(self):
        return 'in__STR__'
# Concrete operation verdicts produced by SyncEvent.compute_OP(). The class
# itself carries the meaning; no behaviour is added beyond SyncOperation.
class Create(SyncOperation):
    pass


class Update(SyncOperation):
    pass


class Delete(SyncOperation):
    pass


class NothingToDo(SyncOperation):
    pass


class Exclude(SyncOperation):
    pass
class google_calendar(osv.AbstractModel):
STR_SERVICE = 'calendar'
_name = 'google.%s' % STR_SERVICE
    def generate_data(self, cr, uid, event, isCreating=False, context=None):
        """Build the JSON-serializable payload describing ``event`` for the
        Google Calendar API.

        All-day events send only dates; on creation one day is added to the
        end date (presumably because Google treats the end date as exclusive
        — TODO confirm). Timed events send full ISO datetimes localized via
        the user's context. The unused date/dateTime variant is explicitly
        set to None so a previous value is cleared on update.
        """
        if event.allday:
            # Date-only start/end; note the end is derived from start+duration.
            start_date = fields.datetime.context_timestamp(cr, uid, datetime.strptime(event.start, tools.DEFAULT_SERVER_DATETIME_FORMAT), context=context).isoformat('T').split('T')[0]
            final_date = fields.datetime.context_timestamp(cr, uid, datetime.strptime(event.start, tools.DEFAULT_SERVER_DATETIME_FORMAT) + timedelta(hours=event.duration) + timedelta(days=isCreating and 1 or 0), context=context).isoformat('T').split('T')[0]
            type = 'date'
            vstype = 'dateTime'
        else:
            start_date = fields.datetime.context_timestamp(cr, uid, datetime.strptime(event.start, tools.DEFAULT_SERVER_DATETIME_FORMAT), context=context).isoformat('T')
            final_date = fields.datetime.context_timestamp(cr, uid, datetime.strptime(event.stop, tools.DEFAULT_SERVER_DATETIME_FORMAT), context=context).isoformat('T')
            type = 'dateTime'
            vstype = 'date'
        attendee_list = []
        for attendee in event.attendee_ids:
            attendee_list.append({
                'email': attendee.email or 'NoEmail@mail.com',
                'displayName': attendee.partner_id.name,
                'responseStatus': attendee.state or 'needsAction',
            })
        data = {
            "summary": event.name or '',
            "description": event.description or '',
            "start": {
                type: start_date,
                vstype: None,
                'timeZone': 'UTC'
            },
            "end": {
                type: final_date,
                vstype: None,
                'timeZone': 'UTC'
            },
            "attendees": attendee_list,
            "location": event.location or '',
            "visibility": event['class'] or 'public',
        }
        if event.recurrency and event.rrule:
            data["recurrence"] = ["RRULE:" + event.rrule]
        if not event.active:
            data["state"] = "cancelled"
        # Attendee synchronization can be disabled via configuration.
        if not self.get_need_synchro_attendee(cr, uid, context=context):
            data.pop("attendees")
        return data
def create_an_event(self, cr, uid, event, context=None):
gs_pool = self.pool['google.service']
data = self.generate_data(cr, uid, event, isCreating=True, context=context)
url = "/calendar/v3/calendars/%s/events?fields=%s&access_token=%s" % ('primary', urllib2.quote('id,updated'), self.get_token(cr, uid, context))
headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
data_json = simplejson.dumps(data)
return gs_pool._do_request(cr, uid, url, data_json, headers, type='POST', context=context)
def delete_an_event(self, cr, uid, event_id, context=None):
gs_pool = self.pool['google.service']
params = {
'access_token': self.get_token(cr, uid, context)
}
headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
url = "/calendar/v3/calendars/%s/events/%s" % ('primary', event_id)
return gs_pool._do_request(cr, uid, url, params, headers, type='DELETE', context=context)
def get_calendar_primary_id(self, cr, uid, context=None):
params = {
'fields': 'id',
'access_token': self.get_token(cr, uid, context)
}
headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
url = "/calendar/v3/calendars/primary"
try:
st, content = self.pool['google.service']._do_request(cr, uid, url, params, headers, type='GET', context=context)
except Exception, e:
if (e.code == 401): # Token invalid / Acces unauthorized
error_msg = "Your token is invalid or has been revoked !"
registry = openerp.modules.registry.RegistryManager.get(request.session.db)
with registry.cursor() as cur:
self.pool['res.users'].write(cur, uid, [uid], {'google_calendar_token': False, 'google_calendar_token_validity': False}, context=context)
raise self.pool.get('res.config.settings').get_config_warning(cr, _(error_msg), context=context)
raise
return status_response(st) and content['id'] or False
def get_event_synchro_dict(self, cr, uid, lastSync=False, token=False, nextPageToken=False, context=None):
if not token:
token = self.get_token(cr, uid, context)
params = {
'fields': 'items,nextPageToken',
'access_token': token,
'maxResults': 1000,
#'timeMin': self.get_minTime(cr, uid, context=context).strftime("%Y-%m-%dT%H:%M:%S.%fz"),
}
if lastSync:
params['updatedMin'] = lastSync.strftime("%Y-%m-%dT%H:%M:%S.%fz")
params['showDeleted'] = True
else:
params['timeMin'] = self.get_minTime(cr, uid, context=context).strftime("%Y-%m-%dT%H:%M:%S.%fz")
headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
url = "/calendar/v3/calendars/%s/events" % 'primary'
if nextPageToken:
params['pageToken'] = nextPageToken
status, content = self.pool['google.service']._do_request(cr, uid, url, params, headers, type='GET', context=context)
google_events_dict = {}
for google_event in content['items']:
google_events_dict[google_event['id']] = google_event
if content.get('nextPageToken'):
google_events_dict.update(
self.get_event_synchro_dict(cr, uid, lastSync=lastSync, token=token, nextPageToken=content['nextPageToken'], context=context)
)
return google_events_dict
def get_one_event_synchro(self, cr, uid, google_id, context=None):
token = self.get_token(cr, uid, context)
params = {
'access_token': token,
'maxResults': 1000,
'showDeleted': True,
}
headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
url = "/calendar/v3/calendars/%s/events/%s" % ('primary', google_id)
try:
status, content = self.pool['google.service']._do_request(cr, uid, url, params, headers, type='GET', context=context)
except:
_logger.info("Calendar Synchro - In except of get_one_event_synchro")
pass
return status_response(status) and content or False
def update_to_google(self, cr, uid, oe_event, google_event, context):
calendar_event = self.pool['calendar.event']
url = "/calendar/v3/calendars/%s/events/%s?fields=%s&access_token=%s" % ('primary', google_event['id'], 'id,updated', self.get_token(cr, uid, context))
headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
data = self.generate_data(cr, uid, oe_event, context)
data['sequence'] = google_event.get('sequence', 0)
data_json = simplejson.dumps(data)
status, content = self.pool['google.service']._do_request(cr, uid, url, data_json, headers, type='PATCH', context=context)
update_date = datetime.strptime(content['updated'], "%Y-%m-%dT%H:%M:%S.%fz")
calendar_event.write(cr, uid, [oe_event.id], {'oe_update_date': update_date})
if context['curr_attendee']:
self.pool['calendar.attendee'].write(cr, uid, [context['curr_attendee']], {'oe_synchro_date': update_date}, context)
def update_an_event(self, cr, uid, event, context=None):
data = self.generate_data(cr, uid, event, context=context)
url = "/calendar/v3/calendars/%s/events/%s" % ('primary', event.google_internal_event_id)
headers = {}
data['access_token'] = self.get_token(cr, uid, context)
status, response = self.pool['google.service']._do_request(cr, uid, url, data, headers, type='GET', context=context)
#TO_CHECK : , if http fail, no event, do DELETE ?
return response
def update_recurrent_event_exclu(self, cr, uid, instance_id, event_ori_google_id, event_new, context=None):
gs_pool = self.pool['google.service']
data = self.generate_data(cr, uid, event_new, context=context)
data['recurringEventId'] = event_ori_google_id
data['originalStartTime'] = event_new.recurrent_id_date
url = "/calendar/v3/calendars/%s/events/%s?access_token=%s" % ('primary', instance_id, self.get_token(cr, uid, context))
headers = {'Content-type': 'application/json'}
data['sequence'] = self.get_sequence(cr, uid, instance_id, context)
data_json = simplejson.dumps(data)
return gs_pool._do_request(cr, uid, url, data_json, headers, type='PUT', context=context)
def update_from_google(self, cr, uid, event, single_event_dict, type, context):
if context is None:
context = []
calendar_event = self.pool['calendar.event']
res_partner_obj = self.pool['res.partner']
calendar_attendee_obj = self.pool['calendar.attendee']
user_obj = self.pool['res.users']
myPartnerID = user_obj.browse(cr, uid, uid, context).partner_id.id
attendee_record = []
partner_record = [(4, myPartnerID)]
result = {}
if single_event_dict.get('attendees', False):
for google_attendee in single_event_dict['attendees']:
if type == "write":
for oe_attendee in event['attendee_ids']:
if oe_attendee.email == google_attendee['email']:
calendar_attendee_obj.write(cr, uid, [oe_attendee.id], {'state': google_attendee['responseStatus']}, context=context)
google_attendee['found'] = True
continue
if google_attendee.get('found', False):
continue
if self.get_need_synchro_attendee(cr, uid, context=context):
attendee_id = res_partner_obj.search(cr, uid, [('email', '=', google_attendee['email'])], context=context)
if not attendee_id:
data = {
'email': google_attendee['email'],
'customer': False,
'name': google_attendee.get("displayName", False) or google_attendee['email']
}
attendee_id = [res_partner_obj.create(cr, uid, data, context=context)]
attendee = res_partner_obj.read(cr, uid, attendee_id[0], ['email'], context=context)
partner_record.append((4, attendee.get('id')))
attendee['partner_id'] = attendee.pop('id')
attendee['state'] = google_attendee['responseStatus']
attendee_record.append((0, 0, attendee))
UTC = pytz.timezone('UTC')
if single_event_dict.get('start') and single_event_dict.get('end'): # If not cancelled
if single_event_dict['start'].get('dateTime', False) and single_event_dict['end'].get('dateTime', False):
date = parser.parse(single_event_dict['start']['dateTime'])
stop = parser.parse(single_event_dict['end']['dateTime'])
date = str(date.astimezone(UTC))[:-6]
stop = str(stop.astimezone(UTC))[:-6]
allday = False
else:
date = (single_event_dict['start']['date'])
stop = (single_event_dict['end']['date'])
d_end = datetime.strptime(stop, DEFAULT_SERVER_DATE_FORMAT)
allday = True
d_end = d_end + timedelta(days=-1)
stop = d_end.strftime(DEFAULT_SERVER_DATE_FORMAT)
update_date = datetime.strptime(single_event_dict['updated'], "%Y-%m-%dT%H:%M:%S.%fz")
result.update({
'start': date,
'stop': stop,
'allday': allday
})
result.update({
'attendee_ids': attendee_record,
'partner_ids': list(set(partner_record)),
'name': single_event_dict.get('summary', 'Event'),
'description': single_event_dict.get('description', False),
'location': single_event_dict.get('location', False),
'class': single_event_dict.get('visibility', 'public'),
'oe_update_date': update_date,
})
if single_event_dict.get("recurrence", False):
rrule = [rule for rule in single_event_dict["recurrence"] if rule.startswith("RRULE:")][0][6:]
result['rrule'] = rrule
if type == "write":
res = calendar_event.write(cr, uid, event['id'], result, context=context)
elif type == "copy":
result['recurrence'] = True
res = calendar_event.write(cr, uid, [event['id']], result, context=context)
elif type == "create":
res = calendar_event.create(cr, uid, result, context=context)
if context['curr_attendee']:
self.pool['calendar.attendee'].write(cr, uid, [context['curr_attendee']], {'oe_synchro_date': update_date, 'google_internal_event_id': single_event_dict.get('id', False)}, context)
return res
def remove_references(self, cr, uid, context=None):
current_user = self.pool['res.users'].browse(cr, uid, uid, context=context)
reset_data = {
'google_calendar_rtoken': False,
'google_calendar_token': False,
'google_calendar_token_validity': False,
'google_calendar_last_sync_date': False,
'google_calendar_cal_id': False,
}
all_my_attendees = self.pool['calendar.attendee'].search(cr, uid, [('partner_id', '=', current_user.partner_id.id)], context=context)
self.pool['calendar.attendee'].write(cr, uid, all_my_attendees, {'oe_synchro_date': False, 'google_internal_event_id': False}, context=context)
current_user.write(reset_data, context=context)
return True
def synchronize_events(self, cr, uid, ids, lastSync=True, context=None):
if context is None:
context = {}
# def isValidSync(syncToken):
# gs_pool = self.pool['google.service']
# params = {
# 'maxResults': 1,
# 'fields': 'id',
# 'access_token': self.get_token(cr, uid, context),
# 'syncToken': syncToken,
# }
# url = "/calendar/v3/calendars/primary/events"
# status, response = gs_pool._do_request(cr, uid, url, params, type='GET', context=context)
# return int(status) != 410
current_user = self.pool['res.users'].browse(cr, uid, uid, context=context)
context_with_time = dict(context.copy(), ask_time=True)
current_google = self.get_calendar_primary_id(cr, uid, context=context_with_time)
if current_user.google_calendar_cal_id:
if current_google != current_user.google_calendar_cal_id:
return {
"status": "need_reset",
"info": {
"old_name": current_user.google_calendar_cal_id,
"new_name": current_google
},
"url": ''
}
if lastSync and self.get_last_sync_date(cr, uid, context=context) and not self.get_disable_since_synchro(cr, uid, context=context):
lastSync = self.get_last_sync_date(cr, uid, context)
_logger.info("Calendar Synchro - MODE SINCE_MODIFIED : %s !" % lastSync.strftime(DEFAULT_SERVER_DATETIME_FORMAT))
else:
lastSync = False
_logger.info("Calendar Synchro - MODE FULL SYNCHRO FORCED")
else:
current_user.write({'google_calendar_cal_id': current_google}, context=context)
lastSync = False
_logger.info("Calendar Synchro - MODE FULL SYNCHRO - NEW CAL ID")
new_ids = []
new_ids += self.create_new_events(cr, uid, context=context)
new_ids += self.bind_recurring_events_to_google(cr, uid, context)
res = self.update_events(cr, uid, lastSync, context)
current_user.write({'google_calendar_last_sync_date': context_with_time.get('ask_time')}, context=context)
return {
"status": res and "need_refresh" or "no_new_event_form_google",
"url": ''
}
def create_new_events(self, cr, uid, context=None):
if context is None:
context = {}
new_ids = []
ev_obj = self.pool['calendar.event']
att_obj = self.pool['calendar.attendee']
user_obj = self.pool['res.users']
myPartnerID = user_obj.browse(cr, uid, uid, context=context).partner_id.id
context_norecurrent = context.copy()
context_norecurrent['virtual_id'] = False
my_att_ids = att_obj.search(cr, uid, [('partner_id', '=', myPartnerID),
('google_internal_event_id', '=', False),
'|',
('event_id.stop', '>', self.get_minTime(cr, uid, context=context).strftime(DEFAULT_SERVER_DATETIME_FORMAT)),
('event_id.final_date', '>', self.get_minTime(cr, uid, context=context).strftime(DEFAULT_SERVER_DATETIME_FORMAT)),
], context=context_norecurrent)
for att in att_obj.browse(cr, uid, my_att_ids, context=context):
if not att.event_id.recurrent_id or att.event_id.recurrent_id == 0:
st, response = self.create_an_event(cr, uid, att.event_id, context=context)
if status_response(st):
update_date = datetime.strptime(response['updated'], "%Y-%m-%dT%H:%M:%S.%fz")
ev_obj.write(cr, uid, att.event_id.id, {'oe_update_date': update_date})
new_ids.append(response['id'])
att_obj.write(cr, uid, [att.id], {'google_internal_event_id': response['id'], 'oe_synchro_date': update_date})
cr.commit()
else:
_logger.warning("Impossible to create event %s. [%s]" % (att.event_id.id, st))
_logger.warning("Response : %s" % response)
return new_ids
def get_context_no_virtual(self, context):
context_norecurrent = context.copy()
context_norecurrent['virtual_id'] = False
context_norecurrent['active_test'] = False
return context_norecurrent
def bind_recurring_events_to_google(self, cr, uid, context=None):
if context is None:
context = {}
new_ids = []
ev_obj = self.pool['calendar.event']
att_obj = self.pool['calendar.attendee']
user_obj = self.pool['res.users']
myPartnerID = user_obj.browse(cr, uid, uid, context=context).partner_id.id
context_norecurrent = self.get_context_no_virtual(context)
my_att_ids = att_obj.search(cr, uid, [('partner_id', '=', myPartnerID), ('google_internal_event_id', '=', False)], context=context_norecurrent)
for att in att_obj.browse(cr, uid, my_att_ids, context=context):
if att.event_id.recurrent_id and att.event_id.recurrent_id > 0:
new_google_internal_event_id = False
source_event_record = ev_obj.browse(cr, uid, att.event_id.recurrent_id, context)
source_attendee_record_id = att_obj.search(cr, uid, [('partner_id', '=', myPartnerID), ('event_id', '=', source_event_record.id)], context=context)
source_attendee_record = att_obj.browse(cr, uid, source_attendee_record_id, context)[0]
if att.event_id.recurrent_id_date and source_event_record.allday and source_attendee_record.google_internal_event_id:
new_google_internal_event_id = source_attendee_record.google_internal_event_id + '_' + att.event_id.recurrent_id_date.split(' ')[0].replace('-', '')
elif att.event_id.recurrent_id_date and source_attendee_record.google_internal_event_id:
new_google_internal_event_id = source_attendee_record.google_internal_event_id + '_' + att.event_id.recurrent_id_date.replace('-', '').replace(' ', 'T').replace(':', '') + 'Z'
if new_google_internal_event_id:
#TODO WARNING, NEED TO CHECK THAT EVENT and ALL instance NOT DELETE IN GMAIL BEFORE !
try:
st, response = self.update_recurrent_event_exclu(cr, uid, new_google_internal_event_id, source_attendee_record.google_internal_event_id, att.event_id, context=context)
if status_response(st):
att_obj.write(cr, uid, [att.id], {'google_internal_event_id': new_google_internal_event_id}, context=context)
new_ids.append(new_google_internal_event_id)
cr.commit()
else:
_logger.warning("Impossible to create event %s. [%s]" % (att.event_id.id, st))
_logger.warning("Response : %s" % response)
except:
pass
return new_ids
def update_events(self, cr, uid, lastSync=False, context=None):
context = dict(context or {})
calendar_event = self.pool['calendar.event']
user_obj = self.pool['res.users']
att_obj = self.pool['calendar.attendee']
myPartnerID = user_obj.browse(cr, uid, uid, context=context).partner_id.id
context_novirtual = self.get_context_no_virtual(context)
if lastSync:
try:
all_event_from_google = self.get_event_synchro_dict(cr, uid, lastSync=lastSync, context=context)
except urllib2.HTTPError, e:
if e.code == 410: # GONE, Google is lost.
# we need to force the rollback from this cursor, because it locks my res_users but I need to write in this tuple before to raise.
cr.rollback()
registry = openerp.modules.registry.RegistryManager.get(request.session.db)
with registry.cursor() as cur:
self.pool['res.users'].write(cur, uid, [uid], {'google_calendar_last_sync_date': False}, context=context)
error_key = simplejson.loads(e.read())
error_key = error_key.get('error', {}).get('message', 'nc')
error_msg = "Google are lost... the next synchro will be a full synchro. \n\n %s" % error_key
raise self.pool.get('res.config.settings').get_config_warning(cr, _(error_msg), context=context)
my_google_att_ids = att_obj.search(cr, uid, [
('partner_id', '=', myPartnerID),
('google_internal_event_id', 'in', all_event_from_google.keys())
], context=context_novirtual)
my_openerp_att_ids = att_obj.search(cr, uid, [
('partner_id', '=', myPartnerID),
('event_id.oe_update_date', '>', lastSync and lastSync.strftime(DEFAULT_SERVER_DATETIME_FORMAT) or self.get_minTime(cr, uid, context).strftime(DEFAULT_SERVER_DATETIME_FORMAT)),
('google_internal_event_id', '!=', False),
], context=context_novirtual)
my_openerp_googleinternal_ids = att_obj.read(cr, uid, my_openerp_att_ids, ['google_internal_event_id', 'event_id'], context=context_novirtual)
if self.get_print_log(cr, uid, context=context):
_logger.info("Calendar Synchro - \n\nUPDATE IN GOOGLE\n%s\n\nRETRIEVE FROM OE\n%s\n\nUPDATE IN OE\n%s\n\nRETRIEVE FROM GG\n%s\n\n" % (all_event_from_google, my_google_att_ids, my_openerp_att_ids, my_openerp_googleinternal_ids))
for giid in my_openerp_googleinternal_ids:
active = True # if not sure, we request google
if giid.get('event_id'):
active = calendar_event.browse(cr, uid, int(giid.get('event_id')[0]), context=context_novirtual).active
if giid.get('google_internal_event_id') and not all_event_from_google.get(giid.get('google_internal_event_id')) and active:
one_event = self.get_one_event_synchro(cr, uid, giid.get('google_internal_event_id'), context=context)
if one_event:
all_event_from_google[one_event['id']] = one_event
my_att_ids = list(set(my_google_att_ids + my_openerp_att_ids))
else:
domain = [
('partner_id', '=', myPartnerID),
('google_internal_event_id', '!=', False),
'|',
('event_id.stop', '>', self.get_minTime(cr, uid, context).strftime(DEFAULT_SERVER_DATETIME_FORMAT)),
('event_id.final_date', '>', self.get_minTime(cr, uid, context).strftime(DEFAULT_SERVER_DATETIME_FORMAT)),
]
# Select all events from OpenERP which have been already synchronized in gmail
my_att_ids = att_obj.search(cr, uid, domain, context=context_novirtual)
all_event_from_google = self.get_event_synchro_dict(cr, uid, lastSync=False, context=context)
event_to_synchronize = {}
for att in att_obj.browse(cr, uid, my_att_ids, context=context):
event = att.event_id
base_event_id = att.google_internal_event_id.rsplit('_', 1)[0]
if base_event_id not in event_to_synchronize:
event_to_synchronize[base_event_id] = {}
if att.google_internal_event_id not in event_to_synchronize[base_event_id]:
event_to_synchronize[base_event_id][att.google_internal_event_id] = SyncEvent()
ev_to_sync = event_to_synchronize[base_event_id][att.google_internal_event_id]
ev_to_sync.OE.attendee_id = att.id
ev_to_sync.OE.event = event
ev_to_sync.OE.found = True
ev_to_sync.OE.event_id = event.id
ev_to_sync.OE.isRecurrence = event.recurrency
ev_to_sync.OE.isInstance = bool(event.recurrent_id and event.recurrent_id > 0)
ev_to_sync.OE.update = event.oe_update_date
ev_to_sync.OE.status = event.active
ev_to_sync.OE.synchro = att.oe_synchro_date
for event in all_event_from_google.values():
event_id = event.get('id')
base_event_id = event_id.rsplit('_', 1)[0]
if base_event_id not in event_to_synchronize:
event_to_synchronize[base_event_id] = {}
if event_id not in event_to_synchronize[base_event_id]:
event_to_synchronize[base_event_id][event_id] = SyncEvent()
ev_to_sync = event_to_synchronize[base_event_id][event_id]
ev_to_sync.GG.event = event
ev_to_sync.GG.found = True
ev_to_sync.GG.isRecurrence = bool(event.get('recurrence', ''))
ev_to_sync.GG.isInstance = bool(event.get('recurringEventId', 0))
ev_to_sync.GG.update = event.get('updated', None) # if deleted, no date without browse event
if ev_to_sync.GG.update:
ev_to_sync.GG.update = ev_to_sync.GG.update.replace('T', ' ').replace('Z', '')
ev_to_sync.GG.status = (event.get('status') != 'cancelled')
######################
# PRE-PROCESSING #
######################
for base_event in event_to_synchronize:
for current_event in event_to_synchronize[base_event]:
event_to_synchronize[base_event][current_event].compute_OP(modeFull=not lastSync)
if self.get_print_log(cr, uid, context=context):
if not isinstance(event_to_synchronize[base_event][current_event].OP, NothingToDo):
_logger.info(event_to_synchronize[base_event])
######################
# DO ACTION #
######################
for base_event in event_to_synchronize:
event_to_synchronize[base_event] = sorted(event_to_synchronize[base_event].iteritems(), key=operator.itemgetter(0))
for current_event in event_to_synchronize[base_event]:
cr.commit()
event = current_event[1] # event is an Sync Event !
actToDo = event.OP
actSrc = event.OP.src
context['curr_attendee'] = event.OE.attendee_id
if isinstance(actToDo, NothingToDo):
continue
elif isinstance(actToDo, Create):
context_tmp = context.copy()
context_tmp['NewMeeting'] = True
if actSrc == 'GG':
res = self.update_from_google(cr, uid, False, event.GG.event, "create", context=context_tmp)
event.OE.event_id = res
meeting = calendar_event.browse(cr, uid, res, context=context)
attendee_record_id = att_obj.search(cr, uid, [('partner_id', '=', myPartnerID), ('event_id', '=', res)], context=context)
self.pool['calendar.attendee'].write(cr, uid, attendee_record_id, {'oe_synchro_date': meeting.oe_update_date, 'google_internal_event_id': event.GG.event['id']}, context=context_tmp)
elif actSrc == 'OE':
raise "Should be never here, creation for OE is done before update !"
#TODO Add to batch
elif isinstance(actToDo, Update):
if actSrc == 'GG':
self.update_from_google(cr, uid, event.OE.event, event.GG.event, 'write', context)
elif actSrc == 'OE':
self.update_to_google(cr, uid, event.OE.event, event.GG.event, context)
elif isinstance(actToDo, Exclude):
if actSrc == 'OE':
self.delete_an_event(cr, uid, current_event[0], context=context)
elif actSrc == 'GG':
new_google_event_id = event.GG.event['id'].rsplit('_', 1)[1]
if 'T' in new_google_event_id:
new_google_event_id = new_google_event_id.replace('T', '')[:-1]
else:
new_google_event_id = new_google_event_id + "000000"
if event.GG.status:
parent_event = {}
if not event_to_synchronize[base_event][0][1].OE.event_id:
main_ev = att_obj.search_read(cr, uid, [('google_internal_event_id', '=', event.GG.event['id'].rsplit('_', 1)[0])], fields=['event_id'], context=context_novirtual)
event_to_synchronize[base_event][0][1].OE.event_id = main_ev[0].get('event_id')[0]
parent_event['id'] = "%s-%s" % (event_to_synchronize[base_event][0][1].OE.event_id, new_google_event_id)
res = self.update_from_google(cr, uid, parent_event, event.GG.event, "copy", context)
else:
parent_oe_id = event_to_synchronize[base_event][0][1].OE.event_id
calendar_event.unlink(cr, uid, "%s-%s" % (parent_oe_id, new_google_event_id), can_be_deleted=True, context=context)
elif isinstance(actToDo, Delete):
if actSrc == 'GG':
try:
self.delete_an_event(cr, uid, current_event[0], context=context)
except Exception, e:
error = simplejson.loads(e.read())
error_nr = error.get('error', {}).get('code')
# if already deleted from gmail or never created
if error_nr in (404, 410,):
pass
else:
raise e
elif actSrc == 'OE':
calendar_event.unlink(cr, uid, event.OE.event_id, can_be_deleted=False, context=context)
return True
def check_and_sync(self, cr, uid, oe_event, google_event, context):
if datetime.strptime(oe_event.oe_update_date, "%Y-%m-%d %H:%M:%S.%f") > datetime.strptime(google_event['updated'], "%Y-%m-%dT%H:%M:%S.%fz"):
self.update_to_google(cr, uid, oe_event, google_event, context)
elif datetime.strptime(oe_event.oe_update_date, "%Y-%m-%d %H:%M:%S.%f") < datetime.strptime(google_event['updated'], "%Y-%m-%dT%H:%M:%S.%fz"):
self.update_from_google(cr, uid, oe_event, google_event, 'write', context)
def get_sequence(self, cr, uid, instance_id, context=None):
gs_pool = self.pool['google.service']
params = {
'fields': 'sequence',
'access_token': self.get_token(cr, uid, context)
}
headers = {'Content-type': 'application/json'}
url = "/calendar/v3/calendars/%s/events/%s" % ('primary', instance_id)
st, content = gs_pool._do_request(cr, uid, url, params, headers, type='GET', context=context)
return content.get('sequence', 0)
#################################
## MANAGE CONNEXION TO GMAIL ##
#################################
def get_token(self, cr, uid, context=None):
current_user = self.pool['res.users'].browse(cr, uid, uid, context=context)
if not current_user.google_calendar_token_validity or \
datetime.strptime(current_user.google_calendar_token_validity.split('.')[0], DEFAULT_SERVER_DATETIME_FORMAT) < (datetime.now() + timedelta(minutes=1)):
self.do_refresh_token(cr, uid, context=context)
current_user.refresh()
return current_user.google_calendar_token
def get_last_sync_date(self, cr, uid, context=None):
current_user = self.pool['res.users'].browse(cr, uid, uid, context=context)
return current_user.google_calendar_last_sync_date and datetime.strptime(current_user.google_calendar_last_sync_date, DEFAULT_SERVER_DATETIME_FORMAT) + timedelta(minutes=0) or False
def do_refresh_token(self, cr, uid, context=None):
current_user = self.pool['res.users'].browse(cr, uid, uid, context=context)
gs_pool = self.pool['google.service']
all_token = gs_pool._refresh_google_token_json(cr, uid, current_user.google_calendar_rtoken, self.STR_SERVICE, context=context)
vals = {}
vals['google_%s_token_validity' % self.STR_SERVICE] = datetime.now() + timedelta(seconds=all_token.get('expires_in'))
vals['google_%s_token' % self.STR_SERVICE] = all_token.get('access_token')
self.pool['res.users'].write(cr, SUPERUSER_ID, uid, vals, context=context)
def need_authorize(self, cr, uid, context=None):
current_user = self.pool['res.users'].browse(cr, uid, uid, context=context)
return current_user.google_calendar_rtoken is False
def get_calendar_scope(self, RO=False):
readonly = RO and '.readonly' or ''
return 'https://www.googleapis.com/auth/calendar%s' % (readonly)
def authorize_google_uri(self, cr, uid, from_url='http://www.openerp.com', context=None):
url = self.pool['google.service']._get_authorize_uri(cr, uid, from_url, self.STR_SERVICE, scope=self.get_calendar_scope(), context=context)
return url
def can_authorize_google(self, cr, uid, context=None):
return self.pool['res.users'].has_group(cr, uid, 'base.group_erp_manager')
def set_all_tokens(self, cr, uid, authorization_code, context=None):
gs_pool = self.pool['google.service']
all_token = gs_pool._get_google_token_json(cr, uid, authorization_code, self.STR_SERVICE, context=context)
vals = {}
vals['google_%s_rtoken' % self.STR_SERVICE] = all_token.get('refresh_token')
vals['google_%s_token_validity' % self.STR_SERVICE] = datetime.now() + timedelta(seconds=all_token.get('expires_in'))
vals['google_%s_token' % self.STR_SERVICE] = all_token.get('access_token')
self.pool['res.users'].write(cr, SUPERUSER_ID, uid, vals, context=context)
def get_minTime(self, cr, uid, context=None):
number_of_week = self.pool['ir.config_parameter'].get_param(cr, uid, 'calendar.week_synchro', default=13)
return datetime.now() - timedelta(weeks=number_of_week)
def get_need_synchro_attendee(self, cr, uid, context=None):
return self.pool['ir.config_parameter'].get_param(cr, uid, 'calendar.block_synchro_attendee', default=True)
def get_disable_since_synchro(self, cr, uid, context=None):
return self.pool['ir.config_parameter'].get_param(cr, uid, 'calendar.block_since_synchro', default=False)
def get_print_log(self, cr, uid, context=None):
return self.pool['ir.config_parameter'].get_param(cr, uid, 'calendar.debug_print', default=False)
class res_users(osv.Model):
_inherit = 'res.users'
_columns = {
'google_calendar_rtoken': fields.char('Refresh Token'),
'google_calendar_token': fields.char('User token'),
'google_calendar_token_validity': fields.datetime('Token Validity'),
'google_calendar_last_sync_date': fields.datetime('Last synchro date'),
'google_calendar_cal_id': fields.char('Calendar ID', help='Last Calendar ID who has been synchronized. If it is changed, we remove \
all links between GoogleID and OpenERP Google Internal ID')
}
class calendar_event(osv.Model):
_inherit = "calendar.event"
def get_fields_need_update_google(self, cr, uid, context=None):
return ['name', 'description', 'allday', 'date', 'date_end', 'stop', 'attendee_ids', 'location', 'class', 'active']
def write(self, cr, uid, ids, vals, context=None):
if context is None:
context = {}
sync_fields = set(self.get_fields_need_update_google(cr, uid, context))
if (set(vals.keys()) & sync_fields) and 'oe_update_date' not in vals.keys() and 'NewMeeting' not in context:
vals['oe_update_date'] = datetime.now()
return super(calendar_event, self).write(cr, uid, ids, vals, context=context)
def copy(self, cr, uid, id, default=None, context=None):
default = default or {}
if default.get('write_type', False):
del default['write_type']
elif default.get('recurrent_id', False):
default['oe_update_date'] = datetime.now()
else:
default['oe_update_date'] = False
return super(calendar_event, self).copy(cr, uid, id, default, context)
def unlink(self, cr, uid, ids, can_be_deleted=False, context=None):
return super(calendar_event, self).unlink(cr, uid, ids, can_be_deleted=can_be_deleted, context=context)
_columns = {
'oe_update_date': fields.datetime('OpenERP Update Date'),
}
class calendar_attendee(osv.Model):
    """Extend calendar.attendee with the Google-side event id and a
    per-attendee synchronization timestamp.
    """
    _inherit = 'calendar.attendee'

    _columns = {
        # Google Calendar event identifier this attendee is linked to.
        'google_internal_event_id': fields.char('Google Calendar Event Id'),
        # When this attendee record was last synchronized.
        'oe_synchro_date': fields.datetime('OpenERP Synchro Date'),
    }

    # One Google event id per (partner, event) pair.
    _sql_constraints = [('google_id_uniq', 'unique(google_internal_event_id,partner_id,event_id)', 'Google ID should be unique!')]

    def write(self, cr, uid, ids, vals, context=None):
        # Touching an attendee marks its parent event as locally modified
        # (so the next sync pushes it), unless the write originates from
        # the synchronizer itself ('curr_attendee'/'NewMeeting' flags).
        if context is None:
            context = {}
        for id in ids:
            ref = vals.get('event_id', self.browse(cr, uid, id, context=context).event_id.id)
            # If attendees are updated, we need to specify that next synchro need an action
            # Except if it come from an update_from_google
            if not context.get('curr_attendee', False) and not context.get('NewMeeting', False):
                self.pool['calendar.event'].write(cr, uid, ref, {'oe_update_date': datetime.now()}, context)
        return super(calendar_attendee, self).write(cr, uid, ids, vals, context=context)
/*-------------------------------------------------------------------------
*
* dshash.c
* Concurrent hash tables backed by dynamic shared memory areas.
*
* This is an open hashing hash table, with a linked list at each table
* entry. It supports dynamic resizing, as required to prevent the linked
* lists from growing too long on average. Currently, only growing is
* supported: the hash table never becomes smaller.
*
* To deal with concurrency, it has a fixed size set of partitions, each of
* which is independently locked. Each bucket maps to a partition; so insert,
* find and iterate operations normally only acquire one lock. Therefore,
* good concurrency is achieved whenever such operations don't collide at the
* lock partition level. However, when a resize operation begins, all
* partition locks must be acquired simultaneously for a brief period. This
* is only expected to happen a small number of times until a stable size is
* found, since growth is geometric.
*
* Future versions may support iterators and incremental resizing; for now
* the implementation is minimalist.
*
* Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* src/backend/lib/dshash.c
*
*-------------------------------------------------------------------------
*/
#include "postgres.h"
#include <limits.h>
#include "common/hashfn.h"
#include "lib/dshash.h"
#include "storage/lwlock.h"
#include "utils/dsa.h"
/*
* An item in the hash table. This wraps the user's entry object in an
* envelop that holds a pointer back to the bucket and a pointer to the next
* item in the bucket.
*/
struct dshash_table_item
{
/* The next item in the same bucket. */
dsa_pointer next;
/* The hashed key, to avoid having to recompute it. */
dshash_hash hash;
/* The user's entry object follows here. See ENTRY_FROM_ITEM(item). */
};
/*
* The number of partitions for locking purposes. This is set to match
* NUM_BUFFER_PARTITIONS for now, on the basis that whatever's good enough for
* the buffer pool must be good enough for any other purpose. This could
* become a runtime parameter in future.
*/
#define DSHASH_NUM_PARTITIONS_LOG2 7
#define DSHASH_NUM_PARTITIONS (1 << DSHASH_NUM_PARTITIONS_LOG2)
/* A magic value used to identify our hash tables. */
#define DSHASH_MAGIC 0x75ff6a20
/*
* Tracking information for each lock partition. Initially, each partition
* corresponds to one bucket, but each time the hash table grows, the buckets
* covered by each partition split so the number of buckets covered doubles.
*
* We might want to add padding here so that each partition is on a different
* cache line, but doing so would bloat this structure considerably.
*/
typedef struct dshash_partition
{
LWLock lock; /* Protects all buckets in this partition. */
size_t count; /* # of items in this partition's buckets */
} dshash_partition;
/*
* The head object for a hash table. This will be stored in dynamic shared
* memory.
*/
typedef struct dshash_table_control
{
dshash_table_handle handle;
uint32 magic;
dshash_partition partitions[DSHASH_NUM_PARTITIONS];
int lwlock_tranche_id;
/*
* The following members are written to only when ALL partitions locks are
* held. They can be read when any one partition lock is held.
*/
/* Number of buckets expressed as power of 2 (8 = 256 buckets). */
size_t size_log2; /* log2(number of buckets) */
dsa_pointer buckets; /* current bucket array */
} dshash_table_control;
/*
* Per-backend state for a dynamic hash table.
*/
struct dshash_table
{
dsa_area *area; /* Backing dynamic shared memory area. */
dshash_parameters params; /* Parameters. */
void *arg; /* User-supplied data pointer. */
dshash_table_control *control; /* Control object in DSM. */
dsa_pointer *buckets; /* Current bucket pointers in DSM. */
size_t size_log2; /* log2(number of buckets) */
};
/* Given a pointer to an item, find the entry (user data) it holds. */
#define ENTRY_FROM_ITEM(item) \
((char *)(item) + MAXALIGN(sizeof(dshash_table_item)))
/* Given a pointer to an entry, find the item that holds it. */
#define ITEM_FROM_ENTRY(entry) \
((dshash_table_item *)((char *)(entry) - \
MAXALIGN(sizeof(dshash_table_item))))
/* How many resize operations (bucket splits) have there been? */
#define NUM_SPLITS(size_log2) \
(size_log2 - DSHASH_NUM_PARTITIONS_LOG2)
/* How many buckets are there in a given size? */
#define NUM_BUCKETS(size_log2) \
(((size_t) 1) << (size_log2))
/* How many buckets are there in each partition at a given size? */
#define BUCKETS_PER_PARTITION(size_log2) \
(((size_t) 1) << NUM_SPLITS(size_log2))
/* Max entries before we need to grow. Half + quarter = 75% load factor. */
#define MAX_COUNT_PER_PARTITION(hash_table) \
(BUCKETS_PER_PARTITION(hash_table->size_log2) / 2 + \
BUCKETS_PER_PARTITION(hash_table->size_log2) / 4)
/* Choose partition based on the highest order bits of the hash. */
#define PARTITION_FOR_HASH(hash) \
(hash >> ((sizeof(dshash_hash) * CHAR_BIT) - DSHASH_NUM_PARTITIONS_LOG2))
/*
* Find the bucket index for a given hash and table size. Each time the table
* doubles in size, the appropriate bucket for a given hash value doubles and
* possibly adds one, depending on the newly revealed bit, so that all buckets
* are split.
*/
#define BUCKET_INDEX_FOR_HASH_AND_SIZE(hash, size_log2) \
(hash >> ((sizeof(dshash_hash) * CHAR_BIT) - (size_log2)))
/* The index of the first bucket in a given partition. */
#define BUCKET_INDEX_FOR_PARTITION(partition, size_log2) \
((partition) << NUM_SPLITS(size_log2))
/* Choose partition based on bucket index. */
#define PARTITION_FOR_BUCKET_INDEX(bucket_idx, size_log2) \
((bucket_idx) >> NUM_SPLITS(size_log2))
/* The head of the active bucket for a given hash value (lvalue). */
#define BUCKET_FOR_HASH(hash_table, hash) \
(hash_table->buckets[ \
BUCKET_INDEX_FOR_HASH_AND_SIZE(hash, \
hash_table->size_log2)])
static void delete_item(dshash_table *hash_table,
dshash_table_item *item);
static void resize(dshash_table *hash_table, size_t new_size_log2);
static inline void ensure_valid_bucket_pointers(dshash_table *hash_table);
static inline dshash_table_item *find_in_bucket(dshash_table *hash_table,
const void *key,
dsa_pointer item_pointer);
static void insert_item_into_bucket(dshash_table *hash_table,
dsa_pointer item_pointer,
dshash_table_item *item,
dsa_pointer *bucket);
static dshash_table_item *insert_into_bucket(dshash_table *hash_table,
const void *key,
dsa_pointer *bucket);
static bool delete_key_from_bucket(dshash_table *hash_table,
const void *key,
dsa_pointer *bucket_head);
static bool delete_item_from_bucket(dshash_table *hash_table,
dshash_table_item *item,
dsa_pointer *bucket_head);
static inline dshash_hash hash_key(dshash_table *hash_table, const void *key);
static inline bool equal_keys(dshash_table *hash_table,
const void *a, const void *b);
static inline void copy_key(dshash_table *hash_table, void *dest,
const void *src);
#define PARTITION_LOCK(hash_table, i) \
(&(hash_table)->control->partitions[(i)].lock)
#define ASSERT_NO_PARTITION_LOCKS_HELD_BY_ME(hash_table) \
Assert(!LWLockAnyHeldByMe(&(hash_table)->control->partitions[0].lock, \
DSHASH_NUM_PARTITIONS, sizeof(dshash_partition)))
/*
 * Create a new hash table backed by the given dynamic shared area, with the
 * given parameters.  The returned object is allocated in backend-local memory
 * using the current MemoryContext.  'arg' will be passed through to the
 * compare, hash, and copy functions.
 */
dshash_table *
dshash_create(dsa_area *area, const dshash_parameters *params, void *arg)
{
	dshash_table *hash_table;
	dsa_pointer control;

	/* Allocate the backend-local object representing the hash table. */
	hash_table = palloc_object(dshash_table);

	/* Allocate the control object in shared memory. */
	control = dsa_allocate(area, sizeof(dshash_table_control));

	/* Set up the local and shared hash table structs. */
	hash_table->area = area;
	hash_table->params = *params;
	hash_table->arg = arg;
	hash_table->control = dsa_get_address(area, control);
	/* The handle IS the dsa_pointer of the control object. */
	hash_table->control->handle = control;
	hash_table->control->magic = DSHASH_MAGIC;
	hash_table->control->lwlock_tranche_id = params->tranche_id;

	/* Set up the array of lock partitions. */
	{
		dshash_partition *partitions = hash_table->control->partitions;
		int			tranche_id = hash_table->control->lwlock_tranche_id;
		int			i;

		for (i = 0; i < DSHASH_NUM_PARTITIONS; ++i)
		{
			LWLockInitialize(&partitions[i].lock, tranche_id);
			partitions[i].count = 0;
		}
	}

	/*
	 * Set up the initial array of buckets.  Our initial size is the same as
	 * the number of partitions.
	 */
	hash_table->control->size_log2 = DSHASH_NUM_PARTITIONS_LOG2;
	hash_table->control->buckets =
		dsa_allocate_extended(area,
							  sizeof(dsa_pointer) * DSHASH_NUM_PARTITIONS,
							  DSA_ALLOC_NO_OOM | DSA_ALLOC_ZERO);
	if (!DsaPointerIsValid(hash_table->control->buckets))
	{
		/*
		 * NO_OOM allocation failed: free the control object before raising
		 * so no shared memory leaks.
		 */
		dsa_free(area, control);
		ereport(ERROR,
				(errcode(ERRCODE_OUT_OF_MEMORY),
				 errmsg("out of memory"),
				 errdetail("Failed on DSA request of size %zu.",
						   sizeof(dsa_pointer) * DSHASH_NUM_PARTITIONS)));
	}
	hash_table->buckets = dsa_get_address(area,
										  hash_table->control->buckets);
	hash_table->size_log2 = hash_table->control->size_log2;

	return hash_table;
}
/*
 * Attach to an existing hash table using a handle.  The returned object is
 * allocated in backend-local memory using the current MemoryContext.  'arg'
 * will be passed through to the compare and hash functions.
 */
dshash_table *
dshash_attach(dsa_area *area, const dshash_parameters *params,
			  dshash_table_handle handle, void *arg)
{
	dshash_table *hash_table;
	dsa_pointer control;

	/* Allocate the backend-local object representing the hash table. */
	hash_table = palloc_object(dshash_table);

	/* Find the control object in shared memory. */
	control = handle;

	/* Set up the local hash table struct. */
	hash_table->area = area;
	hash_table->params = *params;
	hash_table->arg = arg;
	hash_table->control = dsa_get_address(area, control);
	/* Detect attachment to something that is not (or no longer) a dshash. */
	Assert(hash_table->control->magic == DSHASH_MAGIC);

	/*
	 * These will later be set to the correct values by
	 * ensure_valid_bucket_pointers(), at which time we'll be holding a
	 * partition lock for interlocking against concurrent resizing.
	 */
	hash_table->buckets = NULL;
	hash_table->size_log2 = 0;

	return hash_table;
}
/*
 * Detach from a hash table.  This frees backend-local resources associated
 * with the hash table, but the hash table will continue to exist until it is
 * either explicitly destroyed (by a backend that is still attached to it), or
 * the area that backs it is returned to the operating system.
 */
void
dshash_detach(dshash_table *hash_table)
{
	/* Detaching while holding a partition lock would strand the lock. */
	ASSERT_NO_PARTITION_LOCKS_HELD_BY_ME(hash_table);

	/* The hash table may have been destroyed.  Just free local memory. */
	pfree(hash_table);
}
/*
 * Destroy a hash table, returning all memory to the area.  The caller must be
 * certain that no other backend will attempt to access the hash table before
 * calling this function.  Other backend must explicitly call dshash_detach to
 * free up backend-local memory associated with the hash table.  The backend
 * that calls dshash_destroy must not call dshash_detach.
 */
void
dshash_destroy(dshash_table *hash_table)
{
	size_t		size;
	size_t		i;

	Assert(hash_table->control->magic == DSHASH_MAGIC);
	/* No locking: the caller guarantees exclusive access (see above). */
	ensure_valid_bucket_pointers(hash_table);

	/* Free all the entries. */
	size = NUM_BUCKETS(hash_table->size_log2);
	for (i = 0; i < size; ++i)
	{
		dsa_pointer item_pointer = hash_table->buckets[i];

		while (DsaPointerIsValid(item_pointer))
		{
			dshash_table_item *item;
			dsa_pointer next_item_pointer;

			item = dsa_get_address(hash_table->area, item_pointer);
			/* Save the link before freeing the node it lives in. */
			next_item_pointer = item->next;
			dsa_free(hash_table->area, item_pointer);
			item_pointer = next_item_pointer;
		}
	}

	/*
	 * Vandalize the control block to help catch programming errors where
	 * other backends access the memory formerly occupied by this hash table.
	 */
	hash_table->control->magic = 0;

	/* Free the active table and control object. */
	dsa_free(hash_table->area, hash_table->control->buckets);
	dsa_free(hash_table->area, hash_table->control->handle);

	pfree(hash_table);
}
/*
 * Get a handle that can be used by other processes to attach to this hash
 * table.  The handle is simply the dsa_pointer of the control object, set
 * up by dshash_create().
 */
dshash_table_handle
dshash_get_hash_table_handle(dshash_table *hash_table)
{
	Assert(hash_table->control->magic == DSHASH_MAGIC);

	return hash_table->control->handle;
}
/*
 * Look up an entry, given a key.  Returns a pointer to an entry if one can be
 * found with the given key.  Returns NULL if the key is not found.  If a
 * non-NULL value is returned, the entry is locked and must be released by
 * calling dshash_release_lock.  If an error is raised before
 * dshash_release_lock is called, the lock will be released automatically, but
 * the caller must take care to ensure that the entry is not left corrupted.
 * The lock mode is either shared or exclusive depending on 'exclusive'.
 *
 * The caller must not hold a lock already.
 *
 * Note that the lock held is in fact an LWLock, so interrupts will be held on
 * return from this function, and not resumed until dshash_release_lock is
 * called.  It is a very good idea for the caller to release the lock quickly.
 */
void *
dshash_find(dshash_table *hash_table, const void *key, bool exclusive)
{
	dshash_hash hash;
	size_t		partition;
	dshash_table_item *item;

	hash = hash_key(hash_table, key);
	partition = PARTITION_FOR_HASH(hash);

	Assert(hash_table->control->magic == DSHASH_MAGIC);
	ASSERT_NO_PARTITION_LOCKS_HELD_BY_ME(hash_table);

	/* One partition lock covers the whole lookup; also blocks resize(). */
	LWLockAcquire(PARTITION_LOCK(hash_table, partition),
				  exclusive ? LW_EXCLUSIVE : LW_SHARED);
	/* Safe to refresh bucket pointers now that a partition lock is held. */
	ensure_valid_bucket_pointers(hash_table);

	/* Search the active bucket. */
	item = find_in_bucket(hash_table, key, BUCKET_FOR_HASH(hash_table, hash));

	if (!item)
	{
		/* Not found. */
		LWLockRelease(PARTITION_LOCK(hash_table, partition));
		return NULL;
	}
	else
	{
		/* The caller will free the lock by calling dshash_release_lock. */
		return ENTRY_FROM_ITEM(item);
	}
}
/*
 * Returns a pointer to an exclusively locked item which must be released with
 * dshash_release_lock.  If the key is found in the hash table, 'found' is set
 * to true and a pointer to the existing entry is returned.  If the key is not
 * found, 'found' is set to false, and a pointer to a newly created entry is
 * returned.
 *
 * Notes above dshash_find() regarding locking and error handling equally
 * apply here.
 */
void *
dshash_find_or_insert(dshash_table *hash_table,
					  const void *key,
					  bool *found)
{
	dshash_hash hash;
	size_t		partition_index;
	dshash_partition *partition;
	dshash_table_item *item;

	hash = hash_key(hash_table, key);
	partition_index = PARTITION_FOR_HASH(hash);
	partition = &hash_table->control->partitions[partition_index];

	Assert(hash_table->control->magic == DSHASH_MAGIC);
	ASSERT_NO_PARTITION_LOCKS_HELD_BY_ME(hash_table);

restart:
	LWLockAcquire(PARTITION_LOCK(hash_table, partition_index),
				  LW_EXCLUSIVE);
	ensure_valid_bucket_pointers(hash_table);

	/* Search the active bucket. */
	item = find_in_bucket(hash_table, key, BUCKET_FOR_HASH(hash_table, hash));

	if (item)
		*found = true;
	else
	{
		*found = false;

		/* Check if we are getting too full. */
		if (partition->count > MAX_COUNT_PER_PARTITION(hash_table))
		{
			/*
			 * The load factor (= keys / buckets) for all buckets protected by
			 * this partition is > 0.75.  Presumably the same applies
			 * generally across the whole hash table (though we don't attempt
			 * to track that directly to avoid contention on some kind of
			 * central counter; we just assume that this partition is
			 * representative).  This is a good time to resize.
			 *
			 * Give up our existing lock first, because resizing needs to
			 * reacquire all the locks in the right order to avoid deadlocks.
			 */
			LWLockRelease(PARTITION_LOCK(hash_table, partition_index));
			resize(hash_table, hash_table->size_log2 + 1);

			/* Re-lock and re-search: another backend may have inserted. */
			goto restart;
		}

		/* Finally we can try to insert the new item. */
		item = insert_into_bucket(hash_table, key,
								  &BUCKET_FOR_HASH(hash_table, hash));
		item->hash = hash;
		/* Adjust per-lock-partition counter for load factor knowledge. */
		++partition->count;
	}

	/* The caller must release the lock with dshash_release_lock. */
	return ENTRY_FROM_ITEM(item);
}
/*
 * Remove an entry by key.  Returns true if the key was found and the
 * corresponding entry was removed.
 *
 * To delete an entry that you already have a pointer to, see
 * dshash_delete_entry.
 */
bool
dshash_delete_key(dshash_table *hash_table, const void *key)
{
	dshash_hash hash;
	size_t		partition;
	bool		found;

	Assert(hash_table->control->magic == DSHASH_MAGIC);
	ASSERT_NO_PARTITION_LOCKS_HELD_BY_ME(hash_table);

	hash = hash_key(hash_table, key);
	partition = PARTITION_FOR_HASH(hash);

	LWLockAcquire(PARTITION_LOCK(hash_table, partition), LW_EXCLUSIVE);
	ensure_valid_bucket_pointers(hash_table);

	if (delete_key_from_bucket(hash_table, key,
							   &BUCKET_FOR_HASH(hash_table, hash)))
	{
		/* Keep the per-partition load-factor counter in step. */
		Assert(hash_table->control->partitions[partition].count > 0);
		found = true;
		--hash_table->control->partitions[partition].count;
	}
	else
		found = false;

	LWLockRelease(PARTITION_LOCK(hash_table, partition));

	return found;
}
/*
 * Remove an entry.  The entry must already be exclusively locked, and must
 * have been obtained by dshash_find or dshash_find_or_insert.  Note that this
 * function releases the lock just like dshash_release_lock.
 *
 * To delete an entry by key, see dshash_delete_key.
 */
void
dshash_delete_entry(dshash_table *hash_table, void *entry)
{
	dshash_table_item *item = ITEM_FROM_ENTRY(entry);
	/* The stored hash tells us which partition lock covers this entry. */
	size_t		partition = PARTITION_FOR_HASH(item->hash);

	Assert(hash_table->control->magic == DSHASH_MAGIC);
	Assert(LWLockHeldByMeInMode(PARTITION_LOCK(hash_table, partition),
								LW_EXCLUSIVE));

	delete_item(hash_table, item);
	LWLockRelease(PARTITION_LOCK(hash_table, partition));
}
/*
 * Unlock an entry which was locked by dshash_find or dshash_find_or_insert.
 * The partition to unlock is recovered from the entry's stored hash.
 */
void
dshash_release_lock(dshash_table *hash_table, void *entry)
{
	dshash_table_item *item = ITEM_FROM_ENTRY(entry);
	size_t		partition_index = PARTITION_FOR_HASH(item->hash);

	Assert(hash_table->control->magic == DSHASH_MAGIC);

	LWLockRelease(PARTITION_LOCK(hash_table, partition_index));
}
/*
 * Default comparator for fixed-size binary keys: delegates to memcmp over
 * the full key size.  'arg' (the user data pointer) is not used.
 */
int
dshash_memcmp(const void *a, const void *b, size_t size, void *arg)
{
	(void) arg;					/* unused */
	return memcmp(a, b, size);
}
/*
 * A hash function that forwards to tag_hash, hashing 'size' bytes of binary
 * key data.  'arg' (the user data pointer) is ignored.
 */
dshash_hash
dshash_memhash(const void *v, size_t size, void *arg)
{
	return tag_hash(v, size);
}
/*
 * Default key-copy routine for fixed-size binary keys: a straight memcpy of
 * 'size' bytes.  'arg' (the user data pointer) is not used.
 */
void
dshash_memcpy(void *dest, const void *src, size_t size, void *arg)
{
	(void) arg;					/* unused */
	memcpy(dest, src, size);
}
/*
* A compare function that forwards to strcmp.
*/
int
dshash_strcmp(const void *a, const void *b, size_t size, void *arg)
{
Assert(strlen((const char *) a) < size);
Assert(strlen((const char *) b) < size);
return strcmp((const char *) a, (const char *) b);
}
/*
 * A hash function that forwards to string_hash for NUL-terminated string
 * keys.  The string must fit in a 'size'-byte buffer (asserted).  'arg'
 * (the user data pointer) is ignored.
 */
dshash_hash
dshash_strhash(const void *v, size_t size, void *arg)
{
	Assert(strlen((const char *) v) < size);

	return string_hash((const char *) v, size);
}
/*
* A copy function that forwards to strcpy.
*/
void
dshash_strcpy(void *dest, const void *src, size_t size, void *arg)
{
Assert(strlen((const char *) src) < size);
(void) strcpy((char *) dest, (const char *) src);
}
/*
 * Prepare 'status' for a sequential scan over 'hash_table', returning the
 * elements one at a time via dshash_seq_next() until it yields NULL.
 *
 * No partition lock is taken here; dshash_seq_next() acquires the first one
 * lazily (curpartition == -1 marks the not-yet-started state).  When the
 * scan is over, dshash_seq_term() must be called.  Entries may be deleted
 * mid-scan with dshash_delete_current() only if 'exclusive' is true.
 */
void
dshash_seq_init(dshash_seq_status *status, dshash_table *hash_table,
				bool exclusive)
{
	status->hash_table = hash_table;
	status->exclusive = exclusive;

	/* Scan position: nothing visited yet. */
	status->curpartition = -1;
	status->curbucket = 0;
	status->nbuckets = 0;
	status->curitem = NULL;
	status->pnextitem = InvalidDsaPointer;
}
/*
 * Returns the next element.
 *
 * Returned elements are locked and the caller may not release the lock. It is
 * released by future calls to dshash_seq_next() or dshash_seq_term().
 */
void *
dshash_seq_next(dshash_seq_status *status)
{
	dsa_pointer next_item_pointer;

	/*
	 * Not yet holding any partition locks.  Need to determine the size of the
	 * hash table, it could have been resized since we were looking last.
	 * Since we iterate in partition order, we can start by unconditionally
	 * lock partition 0.
	 *
	 * Once we hold the lock, no resizing can happen until the scan ends. So
	 * we don't need to repeatedly call ensure_valid_bucket_pointers().
	 */
	if (status->curpartition == -1)
	{
		/* First call after dshash_seq_init(): acquire the initial lock. */
		Assert(status->curbucket == 0);
		ASSERT_NO_PARTITION_LOCKS_HELD_BY_ME(status->hash_table);

		status->curpartition = 0;

		LWLockAcquire(PARTITION_LOCK(status->hash_table,
									 status->curpartition),
					  status->exclusive ? LW_EXCLUSIVE : LW_SHARED);

		ensure_valid_bucket_pointers(status->hash_table);

		/* Table size is now frozen for the duration of the scan. */
		status->nbuckets =
			NUM_BUCKETS(status->hash_table->control->size_log2);
		next_item_pointer = status->hash_table->buckets[status->curbucket];
	}
	else
		/* Resume from the link saved on the previous call. */
		next_item_pointer = status->pnextitem;

	Assert(LWLockHeldByMeInMode(PARTITION_LOCK(status->hash_table,
											   status->curpartition),
								status->exclusive ? LW_EXCLUSIVE : LW_SHARED));

	/* Move to the next bucket if we finished the current bucket */
	while (!DsaPointerIsValid(next_item_pointer))
	{
		int			next_partition;

		if (++status->curbucket >= status->nbuckets)
		{
			/* all buckets have been scanned. finish. */
			return NULL;
		}

		/* Check if move to the next partition */
		next_partition =
			PARTITION_FOR_BUCKET_INDEX(status->curbucket,
									   status->hash_table->size_log2);

		if (status->curpartition != next_partition)
		{
			/*
			 * Move to the next partition. Lock the next partition then
			 * release the current, not in the reverse order to avoid
			 * concurrent resizing.  Avoid dead lock by taking lock in the
			 * same order with resize().
			 */
			LWLockAcquire(PARTITION_LOCK(status->hash_table,
										 next_partition),
						  status->exclusive ? LW_EXCLUSIVE : LW_SHARED);
			LWLockRelease(PARTITION_LOCK(status->hash_table,
										 status->curpartition));
			status->curpartition = next_partition;
		}

		next_item_pointer = status->hash_table->buckets[status->curbucket];
	}

	status->curitem =
		dsa_get_address(status->hash_table->area, next_item_pointer);

	/*
	 * The caller may delete the item. Store the next item in case of
	 * deletion.
	 */
	status->pnextitem = status->curitem->next;

	return ENTRY_FROM_ITEM(status->curitem);
}
/*
 * Terminates the seqscan and release all locks.
 *
 * Needs to be called after finishing or when exiting a seqscan.
 * Safe to call even if the scan never advanced (curpartition still -1),
 * in which case no lock is held and nothing is released.
 */
void
dshash_seq_term(dshash_seq_status *status)
{
	if (status->curpartition >= 0)
		LWLockRelease(PARTITION_LOCK(status->hash_table, status->curpartition));
}
/*
 * Remove the current entry of the seq scan.  Only valid during an exclusive
 * scan; the lock on the entry's partition is already held by the scan and
 * is NOT released here (unlike dshash_delete_entry).
 */
void
dshash_delete_current(dshash_seq_status *status)
{
	dshash_table *hash_table = status->hash_table;
	dshash_table_item *item = status->curitem;
	size_t		partition PG_USED_FOR_ASSERTS_ONLY;

	partition = PARTITION_FOR_HASH(item->hash);

	Assert(status->exclusive);
	Assert(hash_table->control->magic == DSHASH_MAGIC);
	Assert(LWLockHeldByMeInMode(PARTITION_LOCK(hash_table, partition),
								LW_EXCLUSIVE));

	/* pnextitem was saved by dshash_seq_next(), so the scan can continue. */
	delete_item(hash_table, item);
}
/*
 * Print debugging information about the internal state of the hash table to
 * stderr.  The caller must hold no partition locks.
 */
void
dshash_dump(dshash_table *hash_table)
{
	size_t		i;
	size_t		j;

	Assert(hash_table->control->magic == DSHASH_MAGIC);
	ASSERT_NO_PARTITION_LOCKS_HELD_BY_ME(hash_table);

	/*
	 * Take all partition locks (in index order, same as resize()) so we see
	 * a consistent snapshot of the whole table.
	 */
	for (i = 0; i < DSHASH_NUM_PARTITIONS; ++i)
	{
		Assert(!LWLockHeldByMe(PARTITION_LOCK(hash_table, i)));
		LWLockAcquire(PARTITION_LOCK(hash_table, i), LW_SHARED);
	}

	ensure_valid_bucket_pointers(hash_table);

	fprintf(stderr,
			"hash table size = %zu\n", (size_t) 1 << hash_table->size_log2);

	for (i = 0; i < DSHASH_NUM_PARTITIONS; ++i)
	{
		dshash_partition *partition = &hash_table->control->partitions[i];
		/* Range of bucket indexes covered by this partition. */
		size_t		begin = BUCKET_INDEX_FOR_PARTITION(i, hash_table->size_log2);
		size_t		end = BUCKET_INDEX_FOR_PARTITION(i + 1, hash_table->size_log2);

		fprintf(stderr, "  partition %zu\n", i);
		fprintf(stderr,
				"    active buckets (key count = %zu)\n", partition->count);

		for (j = begin; j < end; ++j)
		{
			size_t		count = 0;
			dsa_pointer bucket = hash_table->buckets[j];

			/* Walk the chain just to count the entries in this bucket. */
			while (DsaPointerIsValid(bucket))
			{
				dshash_table_item *item;

				item = dsa_get_address(hash_table->area, bucket);

				bucket = item->next;
				++count;
			}
			fprintf(stderr, "      bucket %zu (key count = %zu)\n", j, count);
		}
	}

	for (i = 0; i < DSHASH_NUM_PARTITIONS; ++i)
		LWLockRelease(PARTITION_LOCK(hash_table, i));
}
/*
 * Delete a locked item to which we have a pointer.  The caller must hold
 * the item's partition lock (asserted); the per-partition entry counter is
 * decremented to keep load-factor tracking accurate.
 */
static void
delete_item(dshash_table *hash_table, dshash_table_item *item)
{
	size_t		hash = item->hash;
	size_t		partition = PARTITION_FOR_HASH(hash);

	Assert(LWLockHeldByMe(PARTITION_LOCK(hash_table, partition)));

	if (delete_item_from_bucket(hash_table, item,
								&BUCKET_FOR_HASH(hash_table, hash)))
	{
		Assert(hash_table->control->partitions[partition].count > 0);
		--hash_table->control->partitions[partition].count;
	}
	else
	{
		/* A locked item must be reachable from its bucket. */
		Assert(false);
	}
}
/*
 * Grow the hash table if necessary to the requested number of buckets.  The
 * requested size must be double some previously observed size.
 *
 * Must be called without any partition lock held.
 */
static void
resize(dshash_table *hash_table, size_t new_size_log2)
{
	dsa_pointer old_buckets;
	dsa_pointer new_buckets_shared;
	dsa_pointer *new_buckets;
	size_t		size;
	size_t		new_size = ((size_t) 1) << new_size_log2;
	size_t		i;

	/*
	 * Acquire the locks for all lock partitions.  This is expensive, but we
	 * shouldn't have to do it many times.
	 *
	 * Locks are taken in ascending partition order; every other code path
	 * that takes more than one partition lock uses the same order, which is
	 * what makes this deadlock-free.
	 */
	for (i = 0; i < DSHASH_NUM_PARTITIONS; ++i)
	{
		Assert(!LWLockHeldByMe(PARTITION_LOCK(hash_table, i)));

		LWLockAcquire(PARTITION_LOCK(hash_table, i), LW_EXCLUSIVE);
		if (i == 0 && hash_table->control->size_log2 >= new_size_log2)
		{
			/*
			 * Another backend has already increased the size; we can avoid
			 * obtaining all the locks and return early.
			 */
			LWLockRelease(PARTITION_LOCK(hash_table, 0));
			return;
		}
	}

	Assert(new_size_log2 == hash_table->control->size_log2 + 1);

	/* Allocate the space for the new table. */
	new_buckets_shared =
		dsa_allocate_extended(hash_table->area,
							  sizeof(dsa_pointer) * new_size,
							  DSA_ALLOC_HUGE | DSA_ALLOC_ZERO);
	new_buckets = dsa_get_address(hash_table->area, new_buckets_shared);

	/*
	 * We've allocated the new bucket array; all that remains to do now is to
	 * reinsert all items, which amounts to adjusting all the pointers.
	 */
	size = ((size_t) 1) << hash_table->control->size_log2;
	for (i = 0; i < size; ++i)
	{
		dsa_pointer item_pointer = hash_table->buckets[i];

		while (DsaPointerIsValid(item_pointer))
		{
			dshash_table_item *item;
			dsa_pointer next_item_pointer;

			item = dsa_get_address(hash_table->area, item_pointer);
			/* Save the link before it is overwritten by the reinsertion. */
			next_item_pointer = item->next;
			insert_item_into_bucket(hash_table, item_pointer, item,
									&new_buckets[BUCKET_INDEX_FOR_HASH_AND_SIZE(item->hash,
																				new_size_log2)]);
			item_pointer = next_item_pointer;
		}
	}

	/* Swap the hash table into place and free the old one. */
	old_buckets = hash_table->control->buckets;
	hash_table->control->buckets = new_buckets_shared;
	hash_table->control->size_log2 = new_size_log2;
	hash_table->buckets = new_buckets;
	dsa_free(hash_table->area, old_buckets);

	/* Release all the locks. */
	for (i = 0; i < DSHASH_NUM_PARTITIONS; ++i)
		LWLockRelease(PARTITION_LOCK(hash_table, i));
}
/*
 * Make sure that our backend-local bucket pointers are up to date.  The
 * caller must have locked one lock partition, which prevents resize() from
 * running concurrently.
 *
 * The cached size_log2 doubles as a "cache valid" flag: it only differs
 * from the shared value after a resize (or right after dshash_attach()).
 */
static inline void
ensure_valid_bucket_pointers(dshash_table *hash_table)
{
	if (hash_table->size_log2 != hash_table->control->size_log2)
	{
		hash_table->buckets = dsa_get_address(hash_table->area,
											  hash_table->control->buckets);
		hash_table->size_log2 = hash_table->control->size_log2;
	}
}
/*
 * Scan a locked bucket chain for an entry whose key matches 'key', using the
 * table's configured compare function.  'item_pointer' is the head of the
 * chain; returns the matching item, or NULL if no entry matches.
 */
static inline dshash_table_item *
find_in_bucket(dshash_table *hash_table, const void *key,
			   dsa_pointer item_pointer)
{
	dsa_pointer p;

	for (p = item_pointer; DsaPointerIsValid(p);)
	{
		dshash_table_item *candidate;

		candidate = dsa_get_address(hash_table->area, p);
		if (equal_keys(hash_table, key, ENTRY_FROM_ITEM(candidate)))
			return candidate;
		p = candidate->next;
	}
	return NULL;
}
/*
 * Insert an already-allocated item into a bucket, by pushing it onto the
 * front of the chain.  'item' must be the backend-local address of
 * 'item_pointer' (asserted).
 */
static void
insert_item_into_bucket(dshash_table *hash_table,
						dsa_pointer item_pointer,
						dshash_table_item *item,
						dsa_pointer *bucket)
{
	Assert(item == dsa_get_address(hash_table->area, item_pointer));

	item->next = *bucket;
	*bucket = item_pointer;
}
/*
 * Allocate space for an entry with the given key and insert it into the
 * provided bucket.  The allocation covers the item header (MAXALIGNed so
 * the user entry is suitably aligned) plus the caller-declared entry_size;
 * only the key portion of the new entry is initialized here.
 */
static dshash_table_item *
insert_into_bucket(dshash_table *hash_table,
				   const void *key,
				   dsa_pointer *bucket)
{
	dsa_pointer item_pointer;
	dshash_table_item *item;

	item_pointer = dsa_allocate(hash_table->area,
								hash_table->params.entry_size +
								MAXALIGN(sizeof(dshash_table_item)));
	item = dsa_get_address(hash_table->area, item_pointer);
	copy_key(hash_table, ENTRY_FROM_ITEM(item), key);
	insert_item_into_bucket(hash_table, item_pointer, item, bucket);

	return item;
}
/*
 * Walk a bucket chain looking for an entry that matches 'key'; if found,
 * unlink it, free it, and return true.  Returns false if no entry matches.
 * 'bucket_head' points at the chain head and is advanced link-by-link.
 */
static bool
delete_key_from_bucket(dshash_table *hash_table,
					   const void *key,
					   dsa_pointer *bucket_head)
{
	dsa_pointer *link = bucket_head;

	while (DsaPointerIsValid(*link))
	{
		dshash_table_item *item = dsa_get_address(hash_table->area, *link);

		if (equal_keys(hash_table, key, ENTRY_FROM_ITEM(item)))
		{
			dsa_pointer victim = *link;

			/* Unlink first (item->next is read before the node is freed). */
			*link = item->next;
			dsa_free(hash_table->area, victim);
			return true;
		}
		link = &item->next;
	}
	return false;
}
/*
 * Walk a bucket chain looking for the specific item 'item' (compared by
 * backend-local address); if found, unlink it, free it, and return true.
 * Returns false if the item is not on this chain.
 */
static bool
delete_item_from_bucket(dshash_table *hash_table,
						dshash_table_item *item,
						dsa_pointer *bucket_head)
{
	dsa_pointer *link = bucket_head;

	while (DsaPointerIsValid(*link))
	{
		dshash_table_item *candidate;

		candidate = dsa_get_address(hash_table->area, *link);
		if (candidate == item)
		{
			dsa_pointer victim = *link;

			/* Unlink first (item->next is read before the node is freed). */
			*link = item->next;
			dsa_free(hash_table->area, victim);
			return true;
		}
		link = &candidate->next;
	}
	return false;
}
/*
 * Compute the hash value for a key, using the table's configured hash
 * function and passing through the user-supplied 'arg'.
 */
static inline dshash_hash
hash_key(dshash_table *hash_table, const void *key)
{
	return hash_table->params.hash_function(key,
											hash_table->params.key_size,
											hash_table->arg);
}
/*
 * Check whether two keys compare equal, using the table's configured
 * compare function (a 0 result means equal, as with memcmp/strcmp).
 */
static inline bool
equal_keys(dshash_table *hash_table, const void *a, const void *b)
{
	return hash_table->params.compare_function(a, b,
											   hash_table->params.key_size,
											   hash_table->arg) == 0;
}
/*
 * Copy a key into an entry, using the table's configured copy function and
 * passing through the user-supplied 'arg'.
 */
static inline void
copy_key(dshash_table *hash_table, void *dest, const void *src)
{
	hash_table->params.copy_function(dest, src,
									 hash_table->params.key_size,
									 hash_table->arg);
}
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
if sys.version_info < (2, 7):
pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7")
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_configsync_action import Parameters
from library.modules.bigip_configsync_action import ModuleManager
from library.modules.bigip_configsync_action import ArgumentSpec
# In Ansible 2.8, Ansible changed import paths.
from test.units.compat import unittest
from test.units.compat.mock import Mock
from test.units.compat.mock import patch
from test.units.modules.utils import set_module_args
except ImportError:
from ansible.modules.network.f5.bigip_configsync_action import Parameters
from ansible.modules.network.f5.bigip_configsync_action import ModuleManager
from ansible.modules.network.f5.bigip_configsync_action import ArgumentSpec
# Ansible 2.8 imports
from units.compat import unittest
from units.compat.mock import Mock
from units.compat.mock import patch
from units.modules.utils import set_module_args
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
    """Load a test fixture from the fixtures directory, caching the result.

    The raw file contents are parsed as JSON when possible; anything that is
    not valid JSON is returned as the plain string contents.  Results are
    memoized in the module-level ``fixture_data`` dict keyed by full path, so
    each fixture file is read from disk at most once per test run.

    :param name: file name of the fixture, relative to ``fixture_path``
    :return: parsed JSON object, or the raw string contents of the file
    """
    path = os.path.join(fixture_path, name)

    if path in fixture_data:
        return fixture_data[path]

    with open(path) as f:
        data = f.read()

    try:
        data = json.loads(data)
    except ValueError:
        # Deliberate best-effort parse: non-JSON fixtures are kept as raw
        # text.  ValueError (parent of json.JSONDecodeError) is narrow enough
        # that genuine programming errors are no longer swallowed.
        pass

    fixture_data[path] = data
    return data
class TestParameters(unittest.TestCase):
    """Unit tests for the Parameters adapter of bigip_configsync_action."""

    def test_module_parameters(self):
        # Boolean inputs should be exposed unchanged by the adapter.
        params = Parameters(params=dict(
            sync_device_to_group=True,
            sync_group_to_device=True,
            overwrite_config=True,
            device_group="foo",
        ))

        assert params.sync_device_to_group is True
        assert params.sync_group_to_device is True
        assert params.overwrite_config is True
        assert params.device_group == 'foo'

    def test_module_parameters_yes_no(self):
        # 'yes'/'no' strings are passed through verbatim; no boolean
        # coercion happens at this layer.
        params = Parameters(params=dict(
            sync_device_to_group='yes',
            sync_group_to_device='no',
            overwrite_config='yes',
            device_group="foo",
        ))

        assert params.sync_device_to_group == 'yes'
        assert params.sync_group_to_device == 'no'
        assert params.overwrite_config == 'yes'
        assert params.device_group == 'foo'
class TestManager(unittest.TestCase):
    """Unit tests for the ModuleManager of bigip_configsync_action."""

    def setUp(self):
        self.spec = ArgumentSpec()
        # Patch time.sleep so the module's sync-status polling loop does
        # not introduce real delays into the test run.
        self.patcher1 = patch('time.sleep')
        self.patcher1.start()

    def tearDown(self):
        self.patcher1.stop()

    def test_sync_device_to_group(self, *args):
        # Renamed from test_update_agent_status_traps, which was copy/pasted
        # from an unrelated SNMP test and did not describe this behavior.
        # Verifies that a device-to-group sync reports a change once the
        # device group reaches the 'In Sync' state.
        set_module_args(dict(
            sync_device_to_group='yes',
            device_group="foo",
            provider=dict(
                server='localhost',
                password='password',
                user='admin'
            )
        ))

        module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode,
            mutually_exclusive=self.spec.mutually_exclusive,
            required_one_of=self.spec.required_one_of
        )
        mm = ModuleManager(module=module)

        # Override methods to force specific logic in the module to happen
        mm._device_group_exists = Mock(return_value=True)
        mm._sync_to_group_required = Mock(return_value=False)
        mm.execute_on_device = Mock(return_value=True)
        mm.read_current_from_device = Mock(return_value=None)
        mm._get_status_from_resource = Mock()
        # Simulate the device group progressing through sync states until
        # it settles; the polling loop should stop at 'In Sync'.
        mm._get_status_from_resource.side_effect = [
            'Changes Pending', 'Awaiting Initial Sync', 'In Sync'
        ]

        results = mm.exec_module()

        assert results['changed'] is True
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// +k8s:deepcopy-gen=package
// +k8s:conversion-gen=k8s.io/kubernetes/pkg/controller/endpointslicemirroring/config
// +k8s:conversion-gen-external-types=k8s.io/kube-controller-manager/config/v1alpha1
package v1alpha1 | go | github | https://github.com/kubernetes/kubernetes | pkg/controller/endpointslicemirroring/config/v1alpha1/doc.go |
{
"annotations": {
"list": [
{
"builtIn": 1,
"datasource": {
"type": "datasource",
"uid": "grafana"
},
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations \u0026 Alerts",
"target": {
"limit": 100,
"matchAny": false,
"tags": [],
"type": "dashboard"
},
"type": "dashboard"
}
]
},
"editable": true,
"fiscalYearStartMonth": 0,
"graphTooltip": 0,
"links": [],
"liveNow": false,
"panels": [
{
"datasource": {
"type": "grafana-testdata-datasource"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"fillOpacity": 80,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineWidth": 1
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green"
},
{
"color": "red",
"value": 80
}
]
}
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 0,
"y": 0
},
"id": 4,
"options": {
"bucketOffset": 0,
"combine": false,
"legend": {
"calcs": [
"sum"
],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
}
},
"targets": [
{
"datasource": {
"type": "grafana-testdata-datasource"
},
"refId": "A",
"scenarioId": "random_walk",
"spread": 10
}
],
"title": "Time series + Auto buckets",
"type": "histogram"
},
{
"datasource": {
"type": "datasource",
"uid": "-- Dashboard --"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"fillOpacity": 80,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineWidth": 1
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green"
},
{
"color": "red",
"value": 80
}
]
}
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
"y": 0
},
"id": 3,
"options": {
"bucketOffset": 0,
"bucketSize": 3,
"combine": false,
"legend": {
"calcs": [
"sum"
],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
}
},
"targets": [
{
"datasource": {
"type": "datasource",
"uid": "-- Dashboard --"
},
"panelId": 4,
"refId": "A"
}
],
"title": "Time series + bucket size 3",
"type": "histogram"
},
{
"datasource": {
"type": "grafana-testdata-datasource"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"fillOpacity": 80,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineWidth": 1
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green"
},
{
"color": "red",
"value": 80
}
]
}
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 0,
"y": 8
},
"id": 5,
"options": {
"bucketOffset": 0,
"bucketSize": 1,
"combine": false,
"legend": {
"calcs": [
"sum"
],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
}
},
"targets": [
{
"csvFileName": "weight_height.csv",
"datasource": {
"type": "grafana-testdata-datasource"
},
"refId": "A",
"scenarioId": "csv_file"
}
],
"title": "People height distribution",
"transformations": [
{
"id": "filterFieldsByName",
"options": {
"include": {
"names": [
"Height"
]
}
}
}
],
"type": "histogram"
},
{
"datasource": {
"type": "grafana-testdata-datasource"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"fillOpacity": 80,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineWidth": 1
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green"
},
{
"color": "red",
"value": 80
}
]
}
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
"y": 8
},
"id": 6,
"options": {
"bucketOffset": 0,
"bucketSize": 5,
"combine": false,
"legend": {
"calcs": [
"sum"
],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
}
},
"targets": [
{
"csvFileName": "weight_height.csv",
"datasource": {
"type": "grafana-testdata-datasource"
},
"refId": "A",
"scenarioId": "csv_file"
}
],
"title": "People weight distribution",
"transformations": [
{
"id": "filterFieldsByName",
"options": {
"include": {
"names": [
"Weight"
]
}
}
}
],
"type": "histogram"
},
{
"datasource": {
"type": "grafana-testdata-datasource"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"align": "auto",
"cellOptions": {
"type": "auto"
},
"inspect": false
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green"
},
{
"color": "red",
"value": 80
}
]
}
},
"overrides": []
},
"gridPos": {
"h": 9,
"w": 12,
"x": 0,
"y": 16
},
"id": 8,
"options": {
"cellHeight": "sm",
"footer": {
"countRows": false,
"fields": [],
"reducer": [
"sum"
],
"show": false
},
"showHeader": true,
"sortBy": [
{
"desc": true,
"displayName": "Height"
}
]
},
"pluginVersion": "10.4.0-pre",
"targets": [
{
"csvFileName": "weight_height.csv",
"datasource": {
"type": "grafana-testdata-datasource"
},
"refId": "A",
"scenarioId": "csv_file"
}
],
"title": "Standalone transform - Height",
"transformations": [
{
"disabled": true,
"id": "filterFieldsByName",
"options": {
"include": {
"names": [
"Height"
]
}
}
},
{
"disabled": true,
"id": "histogram",
"options": {
"combine": true,
"fields": {}
}
}
],
"type": "table"
},
{
"datasource": {
"type": "grafana-testdata-datasource"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"align": "auto",
"cellOptions": {
"type": "auto"
},
"inspect": false
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green"
},
{
"color": "red",
"value": 80
}
]
}
},
"overrides": []
},
"gridPos": {
"h": 9,
"w": 12,
"x": 12,
"y": 16
},
"id": 9,
"options": {
"cellHeight": "sm",
"footer": {
"countRows": false,
"fields": [],
"reducer": [
"sum"
],
"show": false
},
"showHeader": true
},
"pluginVersion": "10.4.0-pre",
"targets": [
{
"csvFileName": "weight_height.csv",
"datasource": {
"type": "grafana-testdata-datasource"
},
"refId": "A",
"scenarioId": "csv_file"
}
],
"title": "Standalone transform - Weight",
"transformations": [
{
"id": "filterFieldsByName",
"options": {
"include": {
"names": [
"Weight"
]
}
}
},
{
"id": "histogram",
"options": {
"combine": true,
"fields": {}
}
}
],
"type": "table"
},
{
"datasource": {
"type": "grafana-testdata-datasource"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"fillOpacity": 20,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineWidth": 1
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green"
},
{
"color": "red",
"value": 80
}
]
}
},
"overrides": [
{
"matcher": {
"id": "byName",
"options": "count1"
},
"properties": [
{
"id": "color",
"value": {
"fixedColor": "red",
"mode": "fixed"
}
}
]
}
]
},
"gridPos": {
"h": 8,
"w": 8,
"x": 0,
"y": 25
},
"id": 11,
"options": {
"bucketOffset": 0,
"legend": {
"calcs": [
"sum"
],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
}
},
"targets": [
{
"csvContent": "xMin,xMax,count1,abcd\n1,2,10,123\n2,3,20,265\n3,4,30,73\n4,5,10,24",
"datasource": {
"type": "grafana-testdata-datasource"
},
"refId": "A",
"scenarioId": "csv_content"
}
],
"title": "Explicit xMin,xMax",
"type": "histogram"
},
{
"datasource": {
"type": "grafana-testdata-datasource"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"fillOpacity": 20,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineWidth": 1
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green"
},
{
"color": "red",
"value": 80
}
]
}
},
"overrides": [
{
"matcher": {
"id": "byName",
"options": "count1"
},
"properties": [
{
"id": "color",
"value": {
"fixedColor": "red",
"mode": "fixed"
}
}
]
}
]
},
"gridPos": {
"h": 8,
"w": 8,
"x": 8,
"y": 25
},
"id": 12,
"options": {
"bucketOffset": 0,
"legend": {
"calcs": [
"sum"
],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
}
},
"targets": [
{
"csvContent": "xMin,count1,abcd\n1,10,123\n2,20,265\n3,30,73\n4,10,24",
"datasource": {
"type": "grafana-testdata-datasource"
},
"refId": "A",
"scenarioId": "csv_content"
}
],
"title": "Implict xMax",
"type": "histogram"
},
{
"datasource": {
"type": "grafana-testdata-datasource"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"fillOpacity": 20,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineWidth": 1
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green"
},
{
"color": "red",
"value": 80
}
]
}
},
"overrides": [
{
"matcher": {
"id": "byName",
"options": "count1"
},
"properties": [
{
"id": "color",
"value": {
"fixedColor": "red",
"mode": "fixed"
}
}
]
}
]
},
"gridPos": {
"h": 8,
"w": 8,
"x": 16,
"y": 25
},
"id": 13,
"options": {
"bucketOffset": 0,
"legend": {
"calcs": [
"sum"
],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
}
},
"targets": [
{
"csvContent": "xMax,count1,abcd\n1,10,123\n2,20,265\n3,30,73\n4,10,24",
"datasource": {
"type": "grafana-testdata-datasource"
},
"refId": "A",
"scenarioId": "csv_content"
}
],
"title": "Implict xMin",
"type": "histogram"
},
{
"datasource": {
"type": "grafana-testdata-datasource"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"fillOpacity": 80,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineWidth": 1
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green"
},
{
"color": "red",
"value": 80
}
]
}
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 0,
"y": 33
},
"id": 14,
"options": {
"bucketOffset": 0,
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
}
},
"targets": [
{
"datasource": {
"type": "grafana-testdata-datasource"
},
"rawFrameContent": "[\n {\n \"schema\": {\n \"refId\": \"A\",\n \"meta\": {\n \"custom\": {\n \"resultType\": \"matrix\"\n },\n \"type\": \"heatmap-rows\",\n \"typeVersion\": [\n 0,\n 1\n ]\n },\n \"name\": \"0.005\",\n \"fields\": [\n {\n \"config\": {\n \"interval\": 1200000\n },\n \"name\": \"Time\",\n \"type\": \"time\",\n \"typeInfo\": {\n \"frame\": \"time.Time\"\n }\n },\n {\n \"config\": {\n \"displayNameFromDS\": \"0.005\"\n },\n \"labels\": {\n \"le\": \"0.005\"\n },\n \"name\": \"0.005\",\n \"type\": \"number\",\n \"typeInfo\": {\n \"frame\": \"float64\"\n }\n },\n {\n \"config\": {\n \"displayNameFromDS\": \"0.01\"\n },\n \"labels\": {\n \"le\": \"0.01\"\n },\n \"name\": \"0.01\",\n \"type\": \"number\",\n \"typeInfo\": {\n \"frame\": \"float64\"\n }\n },\n {\n \"config\": {\n \"displayNameFromDS\": \"0.025\"\n },\n \"labels\": {\n \"le\": \"0.025\"\n },\n \"name\": \"0.025\",\n \"type\": \"number\",\n \"typeInfo\": {\n \"frame\": \"float64\"\n }\n },\n {\n \"config\": {\n \"displayNameFromDS\": \"0.05\"\n },\n \"labels\": {\n \"le\": \"0.05\"\n },\n \"name\": \"0.05\",\n \"type\": \"number\",\n \"typeInfo\": {\n \"frame\": \"float64\"\n }\n },\n {\n \"config\": {\n \"displayNameFromDS\": \"0.1\"\n },\n \"labels\": {\n \"le\": \"0.1\"\n },\n \"name\": \"0.1\",\n \"type\": \"number\",\n \"typeInfo\": {\n \"frame\": \"float64\"\n }\n },\n {\n \"config\": {\n \"displayNameFromDS\": \"0.25\"\n },\n \"labels\": {\n \"le\": \"0.25\"\n },\n \"name\": \"0.25\",\n \"type\": \"number\",\n \"typeInfo\": {\n \"frame\": \"float64\"\n }\n },\n {\n \"config\": {\n \"displayNameFromDS\": \"0.5\"\n },\n \"labels\": {\n \"le\": \"0.5\"\n },\n \"name\": \"0.5\",\n \"type\": \"number\",\n \"typeInfo\": {\n \"frame\": \"float64\"\n }\n },\n {\n \"config\": {\n \"displayNameFromDS\": \"1.0\"\n },\n \"labels\": {\n \"le\": \"1.0\"\n },\n \"name\": \"1.0\",\n \"type\": \"number\",\n \"typeInfo\": {\n \"frame\": \"float64\"\n }\n },\n {\n \"config\": {\n \"displayNameFromDS\": 
\"2.5\"\n },\n \"labels\": {\n \"le\": \"2.5\"\n },\n \"name\": \"2.5\",\n \"type\": \"number\",\n \"typeInfo\": {\n \"frame\": \"float64\"\n }\n },\n {\n \"config\": {\n \"displayNameFromDS\": \"5.0\"\n },\n \"labels\": {\n \"le\": \"5.0\"\n },\n \"name\": \"5.0\",\n \"type\": \"number\",\n \"typeInfo\": {\n \"frame\": \"float64\"\n }\n },\n {\n \"config\": {\n \"displayNameFromDS\": \"10.0\"\n },\n \"labels\": {\n \"le\": \"10.0\"\n },\n \"name\": \"10.0\",\n \"type\": \"number\",\n \"typeInfo\": {\n \"frame\": \"float64\"\n }\n },\n {\n \"config\": {\n \"displayNameFromDS\": \"25.0\"\n },\n \"labels\": {\n \"le\": \"25.0\"\n },\n \"name\": \"25.0\",\n \"type\": \"number\",\n \"typeInfo\": {\n \"frame\": \"float64\"\n }\n },\n {\n \"config\": {\n \"displayNameFromDS\": \"50.0\"\n },\n \"labels\": {\n \"le\": \"50.0\"\n },\n \"name\": \"50.0\",\n \"type\": \"number\",\n \"typeInfo\": {\n \"frame\": \"float64\"\n }\n },\n {\n \"config\": {\n \"displayNameFromDS\": \"100.0\"\n },\n \"labels\": {\n \"le\": \"100.0\"\n },\n \"name\": \"100.0\",\n \"type\": \"number\",\n \"typeInfo\": {\n \"frame\": \"float64\"\n }\n },\n {\n \"config\": {\n \"displayNameFromDS\": \"+Inf\"\n },\n \"labels\": {\n \"le\": \"+Inf\"\n },\n \"name\": \"+Inf\",\n \"type\": \"number\",\n \"typeInfo\": {\n \"frame\": \"float64\"\n }\n }\n ]\n },\n \"data\": {\n \"values\": [\n [\n 1706456400000,\n 1706457600000,\n 1706458800000,\n 1706460000000,\n 1706461200000,\n 1706462400000,\n 1706463600000,\n 1706464800000,\n 1706466000000,\n 1706467200000,\n 1706468400000,\n 1706469600000,\n 1706470800000,\n 1706472000000,\n 1706473200000,\n 1706474400000,\n 1706475600000,\n 1706476800000,\n 1706478000000\n ],\n [\n 0.19357429718875502,\n 0.18072289156626506,\n 0.18313253012048192,\n 0.18955823293172688,\n 0.18634538152610441,\n 0.19518072289156624,\n 0.20080321285140562,\n 0.18313253012048192,\n 0.19678714859437751,\n 0.18795180722891563,\n 0.18473895582329317,\n 0.19357429718875502,\n 
0.19116465863453813,\n 0.19196787148594374,\n 0.19437751004016063,\n 0.19759036144578312,\n 0.19839357429718874,\n 0.19357429718875502,\n 0.18634538152610441\n ],\n [\n 0.22248995983935738,\n 0.229718875502008,\n 0.22248995983935738,\n 0.22168674698795174,\n 0.2305220883534136,\n 0.21285140562248994,\n 0.2128514056224899,\n 0.22891566265060237,\n 0.22570281124497987,\n 0.22088353413654616,\n 0.22088353413654618,\n 0.21927710843373488,\n 0.21686746987951805,\n 0.22248995983935738,\n 0.21847389558232927,\n 0.21124497991967867,\n 0.216867469879518,\n 0.2200803212851405,\n 0.22329317269076304\n ],\n [\n 0.017670682730923704,\n 0.02329317269076303,\n 0.027309236947791138,\n 0.02168674698795181,\n 0.016867469879518093,\n 0.025702811244979917,\n 0.02008032128514059,\n 0.018473895582329314,\n 0.01124497991967871,\n 0.024899598393574307,\n 0.025702811244979862,\n 0.0208835341365462,\n 0.02409638554216864,\n 0.01927710843373498,\n 0.0208835341365462,\n 0.02248995983935742,\n 0.016867469879518093,\n 0.02008032128514059,\n 0.02329317269076303\n ],\n [\n 0,\n 0,\n 0.0008032128514056658,\n 0.0008032128514056658,\n 0,\n 0,\n 0,\n 0.0032128514056224966,\n 0,\n 0,\n 0.0024096385542168863,\n 0,\n 0.001606425702811276,\n 0,\n 0,\n 0.0024096385542168863,\n 0.001606425702811276,\n 0,\n 0.0008032128514056103\n ],\n [\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0\n ],\n [\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0\n ],\n [\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0\n ],\n [\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0\n ],\n [\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0\n ],\n [\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0\n ],\n [\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 
0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0\n ],\n [\n 0.06666666666666665,\n 0.06666666666666665,\n 0.06666666666666665,\n 0.06666666666666665,\n 0.06666666666666665,\n 0.06666666666666665,\n 0.06666666666666665,\n 0.06666666666666665,\n 0.06666666666666665,\n 0.06666666666666665,\n 0.06666666666666665,\n 0.06666666666666665,\n 0.06666666666666665,\n 0.06666666666666665,\n 0.06666666666666665,\n 0.06666666666666665,\n 0.06666666666666665,\n 0.06666666666666665,\n 0.06666666666666665\n ],\n [\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0\n ],\n [\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0\n ],\n [\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0\n ]\n ]\n }\n }\n]",
"refId": "A",
"scenarioId": "raw_frame"
}
],
"title": "heatmap-rows frame",
"type": "histogram"
},
{
"datasource": {
"type": "grafana-testdata-datasource"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"fillOpacity": 80,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineWidth": 1
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green"
},
{
"color": "red",
"value": 80
}
]
}
},
"overrides": [
{
"matcher": {
"id": "byName",
"options": "sum(rate(cortex_request_duration_seconds{container=~\"compactor\", route=~\"(debug_pprof|metrics|ready)\"}[4m0s]))"
},
"properties": [
{
"id": "unit",
"value": "s"
}
]
}
]
},
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
"y": 33
},
"id": 15,
"options": {
"bucketOffset": 0,
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
}
},
"targets": [
{
"datasource": {
"type": "grafana-testdata-datasource"
},
"rawFrameContent": "[\n {\n \"schema\": {\n \"refId\": \"A\",\n \"meta\": {\n \"type\": \"heatmap-cells\",\n \"typeVersion\": [\n 0,\n 0\n ],\n \"executedQueryString\": \"Expr: sum(rate(cortex_request_duration_seconds{container=~\\\"compactor\\\", route=~\\\"(debug_pprof|metrics|ready)\\\"}[4m0s]))\\nStep: 1m0s\",\n \"preferredVisualisationType\": \"graph\"\n },\n \"fields\": [\n {\n \"name\": \"xMax\",\n \"type\": \"time\",\n \"typeInfo\": {\n \"frame\": \"time.Time\"\n },\n \"config\": {\n \"interval\": 60000\n }\n },\n {\n \"name\": \"yMin\",\n \"type\": \"number\",\n \"typeInfo\": {\n \"frame\": \"float64\"\n },\n \"labels\": {},\n \"config\": {\n \"displayNameFromDS\": \"sum(rate(cortex_request_duration_seconds{container=~\\\"compactor\\\", route=~\\\"(debug_pprof|metrics|ready)\\\"}[4m0s]))\"\n }\n },\n {\n \"name\": \"yMax\",\n \"type\": \"number\",\n \"typeInfo\": {\n \"frame\": \"float64\"\n },\n \"config\": {}\n },\n {\n \"name\": \"count\",\n \"type\": \"number\",\n \"typeInfo\": {\n \"frame\": \"float64\"\n },\n \"config\": {}\n },\n {\n \"name\": \"yLayout\",\n \"type\": \"number\",\n \"typeInfo\": {\n \"frame\": \"int8\"\n },\n \"config\": {}\n }\n ]\n },\n \"data\": {\n \"values\": [\n [\n 1706483460000,\n 1706483460000,\n 1706483460000,\n 1706483460000,\n 1706483460000,\n 1706483460000,\n 1706483460000,\n 1706483460000,\n 1706483460000,\n 1706483460000,\n 1706483460000,\n 1706483460000,\n 1706483460000,\n 1706483460000,\n 1706483460000,\n 1706483460000,\n 1706483460000,\n 1706483460000,\n 1706483460000,\n 1706483460000,\n 1706483460000,\n 1706483460000,\n 1706483460000,\n 1706483460000,\n 1706483460000,\n 1706483460000,\n 1706483460000,\n 1706483460000,\n 1706483460000,\n 1706483460000,\n 1706483460000,\n 1706483460000,\n 1706483460000,\n 1706483520000,\n 1706483520000,\n 1706483520000,\n 1706483520000,\n 1706483520000,\n 1706483520000,\n 1706483520000,\n 1706483520000,\n 1706483520000,\n 1706483520000,\n 1706483520000,\n 1706483520000,\n 
1706483520000,\n 1706483520000,\n 1706483520000,\n 1706483520000,\n 1706483520000,\n 1706483520000,\n 1706483520000,\n 1706483520000,\n 1706483520000,\n 1706483520000,\n 1706483520000,\n 1706483520000,\n 1706483520000,\n 1706483520000,\n 1706483520000,\n 1706483520000,\n 1706483520000,\n 1706483520000,\n 1706483520000,\n 1706483520000,\n 1706483580000,\n 1706483580000,\n 1706483580000,\n 1706483580000,\n 1706483580000,\n 1706483580000,\n 1706483580000,\n 1706483580000,\n 1706483580000,\n 1706483580000,\n 1706483580000,\n 1706483580000,\n 1706483580000,\n 1706483580000,\n 1706483580000,\n 1706483580000,\n 1706483580000,\n 1706483580000,\n 1706483580000,\n 1706483580000,\n 1706483580000,\n 1706483580000,\n 1706483580000,\n 1706483580000,\n 1706483580000,\n 1706483580000,\n 1706483580000,\n 1706483580000,\n 1706483580000,\n 1706483640000,\n 1706483640000,\n 1706483640000,\n 1706483640000,\n 1706483640000,\n 1706483640000,\n 1706483640000,\n 1706483640000,\n 1706483640000,\n 1706483640000,\n 1706483640000,\n 1706483640000,\n 1706483640000,\n 1706483640000,\n 1706483640000,\n 1706483640000,\n 1706483640000,\n 1706483640000,\n 1706483640000,\n 1706483640000,\n 1706483640000,\n 1706483640000,\n 1706483640000,\n 1706483640000,\n 1706483640000,\n 1706483640000,\n 1706483640000,\n 1706483640000,\n 1706483640000,\n 1706483640000,\n 1706483700000,\n 1706483700000,\n 1706483700000,\n 1706483700000,\n 1706483700000,\n 1706483700000,\n 1706483700000,\n 1706483700000,\n 1706483700000,\n 1706483700000,\n 1706483700000,\n 1706483700000,\n 1706483700000,\n 1706483700000,\n 1706483700000,\n 1706483700000,\n 1706483700000,\n 1706483700000,\n 1706483700000,\n 1706483700000,\n 1706483700000,\n 1706483700000,\n 1706483700000,\n 1706483700000,\n 1706483700000,\n 1706483700000,\n 1706483700000,\n 1706483700000,\n 1706483700000,\n 1706483760000,\n 1706483760000,\n 1706483760000,\n 1706483760000,\n 1706483760000,\n 1706483760000,\n 1706483760000,\n 1706483760000,\n 1706483760000,\n 
1706483760000,\n 1706483760000,\n 1706483760000,\n 1706483760000,\n 1706483760000,\n 1706483760000,\n 1706483760000,\n 1706483760000,\n 1706483760000,\n 1706483760000,\n 1706483760000,\n 1706483760000,\n 1706483760000,\n 1706483760000,\n 1706483760000,\n 1706483760000,\n 1706483760000,\n 1706483760000,\n 1706483760000,\n 1706483760000,\n 1706483760000,\n 1706483760000\n ],\n [\n 0.000012831061023768835,\n 0.000013992371264719713,\n 0.000016639827463764308,\n 0.0000181458605194507,\n 0.000019788201212326197,\n 0.000021579186437577742,\n 0.00002566212204753767,\n 0.000039576402424652394,\n 0.00020529697638030136,\n 0.0002238779402355154,\n 0.00026623723942022893,\n 0.0002903337683112112,\n 0.00031661121939721915,\n 0.00048828125,\n 0.002532889755177753,\n 0.002762135864009951,\n 0.00390625,\n 0.004259795830723663,\n 0.004645340292979379,\n 0.005065779510355506,\n 0.005524271728019902,\n 0.0060242610367497685,\n 0.006569503244169644,\n 0.007164094087536493,\n 0.0078125,\n 0.009290680585958758,\n 0.010131559020711013,\n 0.012048522073499537,\n 0.014328188175072986,\n 0.01703918332289465,\n 0.018581361171917516,\n 0.020263118041422026,\n 13.45434264405943,\n 0.000012831061023768835,\n 0.000013992371264719713,\n 0.000016639827463764308,\n 0.0000181458605194507,\n 0.000019788201212326197,\n 0.000021579186437577742,\n 0.00002566212204753767,\n 0.000039576402424652394,\n 0.0002238779402355154,\n 0.000244140625,\n 0.00026623723942022893,\n 0.0002903337683112112,\n 0.00048828125,\n 0.0021298979153618314,\n 0.002532889755177753,\n 0.002762135864009951,\n 0.00390625,\n 0.004259795830723663,\n 0.004645340292979379,\n 0.005065779510355506,\n 0.005524271728019902,\n 0.0060242610367497685,\n 0.006569503244169644,\n 0.007164094087536493,\n 0.0078125,\n 0.009290680585958758,\n 0.010131559020711013,\n 0.011048543456039804,\n 0.012048522073499537,\n 0.014328188175072986,\n 0.01703918332289465,\n 13.45434264405943,\n 0.000012831061023768835,\n 0.000013992371264719713,\n 
0.000016639827463764308,\n 0.0000181458605194507,\n 0.000019788201212326197,\n 0.000021579186437577742,\n 0.00002566212204753767,\n 0.000244140625,\n 0.00026623723942022893,\n 0.0002903337683112112,\n 0.0021298979153618314,\n 0.002532889755177753,\n 0.002762135864009951,\n 0.00390625,\n 0.004259795830723663,\n 0.004645340292979379,\n 0.005065779510355506,\n 0.005524271728019902,\n 0.0060242610367497685,\n 0.006569503244169644,\n 0.007164094087536493,\n 0.0078125,\n 0.009290680585958758,\n 0.010131559020711013,\n 0.011048543456039804,\n 0.012048522073499537,\n 0.014328188175072986,\n 0.01703918332289465,\n 13.45434264405943,\n 0.000012831061023768835,\n 0.000013992371264719713,\n 0.0000152587890625,\n 0.000016639827463764308,\n 0.0000181458605194507,\n 0.000019788201212326197,\n 0.000021579186437577742,\n 0.00002566212204753767,\n 0.000244140625,\n 0.00026623723942022893,\n 0.0002903337683112112,\n 0.0021298979153618314,\n 0.002762135864009951,\n 0.00390625,\n 0.004259795830723663,\n 0.004645340292979379,\n 0.005065779510355506,\n 0.005524271728019902,\n 0.0060242610367497685,\n 0.006569503244169644,\n 0.007164094087536493,\n 0.0078125,\n 0.008519591661447326,\n 0.009290680585958758,\n 0.010131559020711013,\n 0.011048543456039804,\n 0.012048522073499537,\n 0.014328188175072986,\n 0.01703918332289465,\n 13.45434264405943,\n 0.000012831061023768835,\n 0.000013992371264719713,\n 0.0000152587890625,\n 0.000016639827463764308,\n 0.0000181458605194507,\n 0.000019788201212326197,\n 0.000021579186437577742,\n 0.00002566212204753767,\n 0.000244140625,\n 0.00026623723942022893,\n 0.0002903337683112112,\n 0.0005806675366224224,\n 0.0021298979153618314,\n 0.002762135864009951,\n 0.00390625,\n 0.004259795830723663,\n 0.004645340292979379,\n 0.005065779510355506,\n 0.005524271728019902,\n 0.0060242610367497685,\n 0.006569503244169644,\n 0.007164094087536493,\n 0.0078125,\n 0.008519591661447326,\n 0.009290680585958758,\n 0.010131559020711013,\n 0.011048543456039804,\n 0.015625,\n 
13.45434264405943,\n 0.000012831061023768835,\n 0.000013992371264719713,\n 0.0000152587890625,\n 0.000016639827463764308,\n 0.0000181458605194507,\n 0.000019788201212326197,\n 0.000021579186437577742,\n 0.00002566212204753767,\n 0.000244140625,\n 0.00026623723942022893,\n 0.0002903337683112112,\n 0.00031661121939721915,\n 0.0003452669830012439,\n 0.0005806675366224224,\n 0.0021298979153618314,\n 0.002762135864009951,\n 0.00390625,\n 0.004259795830723663,\n 0.004645340292979379,\n 0.005065779510355506,\n 0.005524271728019902,\n 0.0060242610367497685,\n 0.006569503244169644,\n 0.007164094087536493,\n 0.0078125,\n 0.008519591661447326,\n 0.009290680585958758,\n 0.010131559020711013,\n 0.011048543456039804,\n 0.015625,\n 13.45434264405943\n ],\n [\n 0.000013992371264719713,\n 0.0000152587890625,\n 0.0000181458605194507,\n 0.000019788201212326197,\n 0.000021579186437577742,\n 0.000023532269674803783,\n 0.000027984742529439426,\n 0.000043158372875155485,\n 0.0002238779402355154,\n 0.000244140625,\n 0.0002903337683112112,\n 0.00031661121939721915,\n 0.0003452669830012439,\n 0.0005324744788404579,\n 0.002762135864009951,\n 0.0030121305183748843,\n 0.004259795830723663,\n 0.004645340292979379,\n 0.005065779510355506,\n 0.005524271728019902,\n 0.0060242610367497685,\n 0.006569503244169644,\n 0.007164094087536493,\n 0.0078125,\n 0.008519591661447326,\n 0.010131559020711013,\n 0.011048543456039804,\n 0.013139006488339287,\n 0.015625,\n 0.018581361171917516,\n 0.020263118041422026,\n 0.022097086912079608,\n 14.672064691274738,\n 0.000013992371264719713,\n 0.0000152587890625,\n 0.0000181458605194507,\n 0.000019788201212326197,\n 0.000021579186437577742,\n 0.000023532269674803783,\n 0.000027984742529439426,\n 0.000043158372875155485,\n 0.000244140625,\n 0.00026623723942022893,\n 0.0002903337683112112,\n 0.00031661121939721915,\n 0.0005324744788404579,\n 0.0023226701464896895,\n 0.002762135864009951,\n 0.0030121305183748843,\n 0.004259795830723663,\n 0.004645340292979379,\n 
0.005065779510355506,\n 0.005524271728019902,\n 0.0060242610367497685,\n 0.006569503244169644,\n 0.007164094087536493,\n 0.0078125,\n 0.008519591661447326,\n 0.010131559020711013,\n 0.011048543456039804,\n 0.012048522073499537,\n 0.013139006488339287,\n 0.015625,\n 0.018581361171917516,\n 14.672064691274738,\n 0.000013992371264719713,\n 0.0000152587890625,\n 0.0000181458605194507,\n 0.000019788201212326197,\n 0.000021579186437577742,\n 0.000023532269674803783,\n 0.000027984742529439426,\n 0.00026623723942022893,\n 0.0002903337683112112,\n 0.00031661121939721915,\n 0.0023226701464896895,\n 0.002762135864009951,\n 0.0030121305183748843,\n 0.004259795830723663,\n 0.004645340292979379,\n 0.005065779510355506,\n 0.005524271728019902,\n 0.0060242610367497685,\n 0.006569503244169644,\n 0.007164094087536493,\n 0.0078125,\n 0.008519591661447326,\n 0.010131559020711013,\n 0.011048543456039804,\n 0.012048522073499537,\n 0.013139006488339287,\n 0.015625,\n 0.018581361171917516,\n 14.672064691274738,\n 0.000013992371264719713,\n 0.0000152587890625,\n 0.000016639827463764308,\n 0.0000181458605194507,\n 0.000019788201212326197,\n 0.000021579186437577742,\n 0.000023532269674803783,\n 0.000027984742529439426,\n 0.00026623723942022893,\n 0.0002903337683112112,\n 0.00031661121939721915,\n 0.0023226701464896895,\n 0.0030121305183748843,\n 0.004259795830723663,\n 0.004645340292979379,\n 0.005065779510355506,\n 0.005524271728019902,\n 0.0060242610367497685,\n 0.006569503244169644,\n 0.007164094087536493,\n 0.0078125,\n 0.008519591661447326,\n 0.009290680585958758,\n 0.010131559020711013,\n 0.011048543456039804,\n 0.012048522073499537,\n 0.013139006488339287,\n 0.015625,\n 0.018581361171917516,\n 14.672064691274738,\n 0.000013992371264719713,\n 0.0000152587890625,\n 0.000016639827463764308,\n 0.0000181458605194507,\n 0.000019788201212326197,\n 0.000021579186437577742,\n 0.000023532269674803783,\n 0.000027984742529439426,\n 0.00026623723942022893,\n 0.0002903337683112112,\n 
0.00031661121939721915,\n 0.0006332224387944383,\n 0.0023226701464896895,\n 0.0030121305183748843,\n 0.004259795830723663,\n 0.004645340292979379,\n 0.005065779510355506,\n 0.005524271728019902,\n 0.0060242610367497685,\n 0.006569503244169644,\n 0.007164094087536493,\n 0.0078125,\n 0.008519591661447326,\n 0.009290680585958758,\n 0.010131559020711013,\n 0.011048543456039804,\n 0.012048522073499537,\n 0.01703918332289465,\n 14.672064691274738,\n 0.000013992371264719713,\n 0.0000152587890625,\n 0.000016639827463764308,\n 0.0000181458605194507,\n 0.000019788201212326197,\n 0.000021579186437577742,\n 0.000023532269674803783,\n 0.000027984742529439426,\n 0.00026623723942022893,\n 0.0002903337683112112,\n 0.00031661121939721915,\n 0.0003452669830012439,\n 0.00037651631479686053,\n 0.0006332224387944383,\n 0.0023226701464896895,\n 0.0030121305183748843,\n 0.004259795830723663,\n 0.004645340292979379,\n 0.005065779510355506,\n 0.005524271728019902,\n 0.0060242610367497685,\n 0.006569503244169644,\n 0.007164094087536493,\n 0.0078125,\n 0.008519591661447326,\n 0.009290680585958758,\n 0.010131559020711013,\n 0.011048543456039804,\n 0.012048522073499537,\n 0.01703918332289465,\n 14.672064691274738\n ],\n [\n 0.0044444444444444444,\n 0.017777777777777778,\n 0.0044444444444444444,\n 0.035555555555555556,\n 0.017777777777777778,\n 0.013333333333333332,\n 0.0044444444444444444,\n 0.0044444444444444444,\n 0.0044444444444444444,\n 0.0044444444444444444,\n 0.0044444444444444444,\n 0.008888888888888889,\n 0.0044444444444444444,\n 0.0044444444444444444,\n 0.008888888888888889,\n 0.0044444444444444444,\n 0.017777777777777778,\n 0.03111111111111111,\n 0.013333333333333332,\n 0.05333333333333334,\n 0.035555555555555556,\n 0.022222222222222223,\n 0.022222222222222223,\n 0.022222222222222223,\n 0.017777777777777778,\n 0.013333333333333332,\n 0.013333333333333332,\n 0.008888888888888889,\n 0.0044444444444444444,\n 0.0044444444444444444,\n 0.0044444444444444444,\n 0.0044444444444444444,\n 
0.06666666666666667,\n 0.008888888888888889,\n 0.017777777777777778,\n 0.008888888888888889,\n 0.04,\n 0.0044444444444444444,\n 0.013333333333333332,\n 0.0044444444444444444,\n 0.0044444444444444444,\n 0.0044444444444444444,\n 0.0044444444444444444,\n 0.0044444444444444444,\n 0.013333333333333332,\n 0.0044444444444444444,\n 0.0044444444444444444,\n 0.008888888888888889,\n 0.0044444444444444444,\n 0.026666666666666665,\n 0.035555555555555556,\n 0.008888888888888889,\n 0.044444444444444446,\n 0.035555555555555556,\n 0.026666666666666665,\n 0.022222222222222223,\n 0.022222222222222223,\n 0.013333333333333332,\n 0.008888888888888889,\n 0.017777777777777778,\n 0.0044444444444444444,\n 0.008888888888888889,\n 0.0044444444444444444,\n 0.0044444444444444444,\n 0.06666666666666667,\n 0.008888888888888889,\n 0.017777777777777778,\n 0.017777777777777778,\n 0.04,\n 0.0044444444444444444,\n 0.008888888888888889,\n 0.0044444444444444444,\n 0.0044444444444444444,\n 0.008888888888888889,\n 0.017777777777777778,\n 0.0044444444444444444,\n 0.0044444444444444444,\n 0.008888888888888889,\n 0.022222222222222223,\n 0.03111111111111111,\n 0.008888888888888889,\n 0.044444444444444446,\n 0.035555555555555556,\n 0.026666666666666665,\n 0.026666666666666665,\n 0.022222222222222223,\n 0.017777777777777778,\n 0.008888888888888889,\n 0.017777777777777778,\n 0.0044444444444444444,\n 0.008888888888888889,\n 0.0044444444444444444,\n 0.0044444444444444444,\n 0.06666666666666667,\n 0.008888888888888889,\n 0.017777777777777778,\n 0.0044444444444444444,\n 0.017777777777777778,\n 0.03111111111111111,\n 0.008888888888888889,\n 0.008888888888888889,\n 0.0044444444444444444,\n 0.008888888888888889,\n 0.013333333333333332,\n 0.008888888888888889,\n 0.008888888888888889,\n 0.008888888888888889,\n 0.022222222222222223,\n 0.013333333333333332,\n 0.008888888888888889,\n 0.044444444444444446,\n 0.04,\n 0.022222222222222223,\n 0.022222222222222223,\n 0.022222222222222223,\n 0.03111111111111111,\n 
0.0044444444444444444,\n 0.013333333333333332,\n 0.017777777777777778,\n 0.008888888888888889,\n 0.0044444444444444444,\n 0.0044444444444444444,\n 0.0044444444444444444,\n 0.06666666666666667,\n 0.008888888888888889,\n 0.026666666666666665,\n 0.0044444444444444444,\n 0.013333333333333332,\n 0.022222222222222223,\n 0.008888888888888889,\n 0.013333333333333332,\n 0.0044444444444444444,\n 0.008888888888888889,\n 0.008888888888888889,\n 0.008888888888888889,\n 0.0044444444444444444,\n 0.008888888888888889,\n 0.008888888888888889,\n 0.026666666666666665,\n 0.0044444444444444444,\n 0.0044444444444444444,\n 0.035555555555555556,\n 0.057777777777777775,\n 0.026666666666666665,\n 0.03111111111111111,\n 0.03111111111111111,\n 0.022222222222222223,\n 0.0044444444444444444,\n 0.013333333333333332,\n 0.013333333333333332,\n 0.008888888888888889,\n 0.0044444444444444444,\n 0.06666666666666667,\n 0.0044444444444444444,\n 0.026666666666666665,\n 0.0044444444444444444,\n 0.022222222222222223,\n 0.022222222222222223,\n 0.0044444444444444444,\n 0.013333333333333332,\n 0.0044444444444444444,\n 0.0044444444444444444,\n 0.0044444444444444444,\n 0.008888888888888889,\n 0.0044444444444444444,\n 0.0044444444444444444,\n 0.0044444444444444444,\n 0.0044444444444444444,\n 0.013333333333333332,\n 0.017777777777777778,\n 0.013333333333333332,\n 0.008888888888888889,\n 0.035555555555555556,\n 0.05333333333333333,\n 0.022222222222222223,\n 0.026666666666666665,\n 0.035555555555555556,\n 0.022222222222222223,\n 0.008888888888888889,\n 0.02222222222222222,\n 0.008888888888888889,\n 0.0044444444444444444,\n 0.0044444444444444444,\n 0.06666666666666667\n ],\n [\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 
0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0\n ]\n ]\n }\n }\n]",
"refId": "A",
"scenarioId": "raw_frame"
}
],
"title": "heatmap-cells frame",
"type": "histogram"
}
],
"refresh": "",
"schemaVersion": 42,
"tags": [
"gdev",
"panel-tests",
"graph-ng"
],
"templating": {
"list": []
},
"time": {
"from": "now-6h",
"to": "now"
},
"timepicker": {},
"timezone": "",
"title": "Panel Tests - Histogram",
"uid": "UTv--wqMk",
"weekStart": ""
} | json | github | https://github.com/grafana/grafana | apps/dashboard/pkg/migration/conversion/testdata/input/migrated_dev_dashboards/panel-histogram/v1beta1.histogram_tests.v42.json |
-- Skip this whole test when the server lacks NUMA support
-- (pg_numa_available() is false); \gset stores the boolean into the
-- psql variable :skip_test consumed by \if below.
SELECT NOT(pg_numa_available()) AS skip_test \gset
\if :skip_test
\quit
\endif
-- We expect at least one entry for each buffer
select count(*) >= (select setting::bigint
from pg_settings
where name = 'shared_buffers')
from pg_buffercache_numa;
-- Check that the functions / views can't be accessed by default. To avoid
-- having to create a dedicated user, use the pg_database_owner pseudo-role.
SET ROLE pg_database_owner;
SELECT count(*) > 0 FROM pg_buffercache_numa;
RESET role;
-- Check that pg_monitor is allowed to query view / function
SET ROLE pg_monitor;
SELECT count(*) > 0 FROM pg_buffercache_numa;
RESET role;
// boost cxx11_char_types.hpp --------------------------------------------------------//
// Copyright Beman Dawes 2011
// Distributed under the Boost Software License, Version 1.0.
// See http://www.boost.org/LICENSE_1_0.txt
//--------------------------------------------------------------------------------------//
// //
// The purpose of this header is to emulate the C++11 char16_t and char32_t //
// character and string types so that they can be used in both C++11 and C++03 //
// programs. //
// //
// The emulation names use char16/char32 rather than char16_t/char32_t to avoid use //
// of names that are keywords in C++11. //
// //
// The emulation names are placed in namespace boost, as is usual for Boost C++11 //
// emulation names such as those in header <boost/cstdint.hpp>. //
// //
//  An alternative would have been to place the C++11 emulation names at global          //
//  scope, and put the C++11 string types in namespace std. That is the approach taken   //
//  by Microsoft Visual Studio 2010, but is controversial with some Boost users and      //
//  developers, and runs counter to usual Boost practice.                                //
// //
// Thanks to Mathias Gaunard and others for discussions leading to the final form //
// of these typedefs. //
// //
// Boost C++11 C++03 //
// ---------------- -------------- -------------------------------- //
// boost::char16 char16_t uint16_t //
// boost::char32 char32_t uint32_t //
// boost::u16string std::u16string std::basic_string<boost::char16> //
// boost::u32string std::u32string std::basic_string<boost::char32> //
// //
// Uses the typedefs provided by Microsoft Visual C++ 2010 if present //
// //
// Thanks to Mathias Gaunard and others for discussions leading to the final form //
// of these typedefs. //
// //
//--------------------------------------------------------------------------------------//
#if !defined(BOOST_CXX11_CHAR_TYPES_HPP)
# define BOOST_CXX11_CHAR_TYPES_HPP
# include <boost/config.hpp>
# include <boost/cstdint.hpp>
# include <string>
namespace boost
{
// char16 / u16string: use the native C++11 types when the compiler provides
// char16_t; otherwise fall back to a 16-bit unsigned integer type and a
// basic_string instantiated over it. MSVC 10 provided these typedefs before
// full C++11 conformance, hence the explicit _MSC_VER check.
# if defined(BOOST_NO_CXX11_CHAR16_T) && (!defined(_MSC_VER) || _MSC_VER < 1600) // 1600 == VC++10
typedef boost::uint_least16_t char16;
typedef std::basic_string<boost::char16> u16string;
# else
typedef char16_t char16;
typedef std::u16string u16string;
# endif
// char32 / u32string: same scheme for the 32-bit character type.
# if defined(BOOST_NO_CXX11_CHAR32_T) && (!defined(_MSC_VER) || _MSC_VER < 1600) // 1600 == VC++10
typedef boost::uint_least32_t char32;
typedef std::basic_string<boost::char32> u32string;
# else
typedef char32_t char32;
typedef std::u32string u32string;
# endif
} // namespace boost
#endif // !defined(BOOST_CXX11_CHAR_TYPES_HPP)
def binomial_coeffi(n, k):
    """Return the binomial coefficient C(n, k).

    Replaces the original exponential two-branch recursion with the
    iterative multiplicative formula (O(k) time, O(1) space).  The
    recursion also never terminated for k > n; that case now returns 0,
    matching the mathematical convention.
    """
    # Out of range: there are no subsets of size k.
    if k < 0 or k > n:
        return 0
    # Exploit the symmetry C(n, k) == C(n, n - k) to shorten the loop.
    k = min(k, n - k)
    result = 1
    for i in range(k):
        # Multiply before dividing: the running product is always an exact
        # integer because it equals C(n, i + 1) at each step.
        result = result * (n - i) // (i + 1)
    return result
def rencontres_number(n, m):
    """Return the rencontres number D(n, m): the count of permutations of
    n items with exactly m fixed points.

    Computed as C(n, m) * !(n - m), where !(j) is the derangement number,
    built iteratively via the recurrence !(j) = (j - 1) * (!(j-1) + !(j-2))
    with !(0) = 1 and !(1) = 0.  This replaces the original
    doubly-exponential mutual recursion and makes the function
    self-contained.
    """
    from math import comb

    j = n - m  # items that must all move (none may stay fixed)
    if j == 0:
        derangements = 1
    elif j == 1:
        derangements = 0
    else:
        # Roll the two-term recurrence forward from !(0)=1, !(1)=0.
        d_prev2, d_prev1 = 1, 0
        for i in range(2, j + 1):
            d_prev2, d_prev1 = d_prev1, (i - 1) * (d_prev1 + d_prev2)
        derangements = d_prev1
    return comb(n, m) * derangements
from __future__ import print_function
import gc
import os
import torch
import torch.optim as optim
from torch import nn
from model.model import TreeLSTMSentiment
from model.sentiment_trainer import SentimentTrainer
from data.embeddings import load_embedding_model
def choose_optimizer(args, model):
    """Build the optimizer selected by ``args.optim`` for ``model``.

    Args:
        args: namespace with ``optim`` ('adam' or 'adagrad'), ``lr``
            (learning rate) and ``wd`` (weight decay).
        model: the module whose parameters will be optimized.

    Returns:
        A configured ``torch.optim`` optimizer.

    Raises:
        ValueError: for an unrecognized ``args.optim`` (previously this
            silently returned ``None``, deferring the failure to the
            first optimizer call).
    """
    if args.optim == 'adam':
        # Only optimize parameters that require gradients (e.g. frozen
        # embeddings are skipped).
        return optim.Adam(filter(lambda p: p.requires_grad, model.parameters()),
                          lr=args.lr, weight_decay=args.wd)
    elif args.optim == 'adagrad':
        return optim.Adagrad([
            {'params': model.parameters(), 'lr': args.lr}
        ], lr=args.lr, weight_decay=args.wd)
    else:
        raise ValueError('unsupported optimizer: %r' % (args.optim,))
def train(train_dataset, dev_dataset, vocab, args):
    """Train a TreeLSTM sentiment model, checkpointing after every epoch.

    Saves one model snapshot per epoch under ``<saved>/models_<name>/``,
    writes the run configuration to ``config.txt`` there, and tracks the
    epoch with the best dev accuracy.

    Args:
        train_dataset: dataset used for training.
        dev_dataset: dataset used for per-epoch evaluation.
        vocab: vocabulary used to build the embedding model.
        args: parsed command-line arguments (cuda, lr, wd, epochs, ...).

    Returns:
        (best_epoch, best_dev_accuracy, path_to_best_model_file)
    """
    # Optionally reweight loss per class to the distribution of classes in
    # the public dataset
    weight = torch.Tensor([1/0.024, 1/0.820, 1/0.156]) if args.reweight else None
    criterion = nn.NLLLoss(weight=weight)
    # initialize model, criterion/loss_function, optimizer
    embedding_model = load_embedding_model(args,vocab)
    model = TreeLSTMSentiment(args=args, criterion=criterion, embeddings=embedding_model, vocab=vocab)
    if args.cuda:
        model.cuda()
        criterion.cuda()
    optimizer = choose_optimizer(args,model)
    # create trainer object for training and testing
    trainer = SentimentTrainer(args, model ,criterion, optimizer, embedding_model)
    experiment_dir = os.path.join(os.getcwd(), args.saved, "models_" + args.name)
    if not os.path.exists(experiment_dir):
        os.makedirs(experiment_dir)
    # Persist the full configuration next to the checkpoints for
    # reproducibility.
    open(experiment_dir+"/"+"config.txt", "w+").write(str(args))
    max_dev = 0
    max_dev_epoch = 0
    for epoch in range(args.epochs):
        train_loss = trainer.train(train_dataset)
        # NOTE(review): when args.use_full_training_set is false, dev_acc is
        # never assigned and the statements below raise NameError — confirm
        # the intended control flow / indentation with the authors.
        if args.use_full_training_set:
            dev_loss, dev_acc, _, _ = trainer.test(dev_dataset)
        dev_acc = torch.mean(dev_acc)
        print('==> Train loss : %f \t' % train_loss, end="")
        print('Epoch ', epoch, 'dev percentage ', dev_acc)
        # Checkpoint every epoch; only the best one is reported at the end.
        model_filename = experiment_dir + '/' +'model_' + str(epoch) + '.pth'
        torch.save(model, model_filename)
        if dev_acc > max_dev:
            max_dev = dev_acc
            max_dev_epoch = epoch
            max_model_filename = model_filename
        gc.collect()
    print('epoch ' + str(max_dev_epoch) + ' dev score of ' + str(max_dev))
    return max_dev_epoch, max_dev, max_model_filename
"""
Outputs the object class tree read from LDAPv3 schema
of a given server
Usage: schema_oc_tree.py [--html] [LDAP URL]
"""
import sys,getopt,ldap,ldap.schema
ldap.trace_level = 1
def PrintSchemaTree(schema,se_class,se_tree,se_oid,level):
    """ASCII text output for console

    Recursively prints the schema-element tree rooted at se_oid, one
    '+---'-prefixed line per element, indented by recursion depth.
    """
    se_obj = schema.get_obj(se_class,se_oid)
    if se_obj!=None:
        # Print the element's names and OID behind tree-drawing characters.
        print '| '*(level-1)+'+---'*(level>0), \
              ', '.join(se_obj.names), \
              '(%s)' % se_obj.oid
    # Recurse into every child of this OID.
    for sub_se_oid in se_tree[se_oid]:
        print '| '*(level+1)
        PrintSchemaTree(schema,se_class,se_tree,sub_se_oid,level+1)
def HTMLSchemaTree(schema,se_class,se_tree,se_oid,level):
    """HTML output for browser

    Emits nested <dl>/<dt>/<dd> lists for the schema-element tree rooted
    at se_oid; output goes to stdout.
    """
    se_obj = schema.get_obj(se_class,se_oid)
    if se_obj!=None:
        # Element heading: names, OID and description.
        print """
    <dt><strong>%s (%s)</strong></dt>
    <dd>
    %s
    """ % (', '.join(se_obj.names),se_obj.oid,se_obj.desc)
        # Children, if any, become a nested definition list.
        if se_tree[se_oid]:
            print '<dl>'
            for sub_se_oid in se_tree[se_oid]:
                HTMLSchemaTree(schema,se_class,se_tree,sub_se_oid,level+1)
            print '</dl>'
        print '</dd>'
# Silence libldap debug output for the actual run.
ldap.set_option(ldap.OPT_DEBUG_LEVEL,0)
ldap._trace_level = 0
# Fetch the subschema subentry from the server named by the last CLI argument.
subschemasubentry_dn,schema = ldap.schema.urlfetch(sys.argv[-1],ldap.trace_level)
if subschemasubentry_dn is None:
    print 'No sub schema sub entry found!'
    sys.exit(1)
try:
options,args=getopt.getopt(sys.argv[1:],'',['html'])
except getopt.error,e:
print 'Error: %s\nUsage: schema_oc_tree.py [--html] [LDAP URL]'
html_output = options and options[0][0]=='--html'
# Build parent -> children OID maps for object classes and attribute types.
oc_tree = schema.tree(ldap.schema.ObjectClass)
at_tree = schema.tree(ldap.schema.AttributeType)
# Debug helpers, intentionally left commented out:
#for k,v in oc_tree.items():
#  print k,'->',v
#for k,v in at_tree.items():
#  print k,'->',v
# Render both trees, either as HTML or as plain ASCII, starting from the
# 'top' object class (OID 2.5.6.0).
if html_output:
    print """<html>
<head>
<title>Object class tree</title>
</head>
<body bgcolor="#ffffff">
<h1>Object class tree</h1>
<dl>
"""
    HTMLSchemaTree(schema,ldap.schema.ObjectClass,oc_tree,'2.5.6.0',0)
    print """</dl>
<h1>Attribute type tree</h1>
<dl>
"""
    # Attribute types have no single root; print every non-leaf entry.
    for a in schema.listall(ldap.schema.AttributeType):
        if at_tree[a]:
            HTMLSchemaTree(schema,ldap.schema.AttributeType,at_tree,a,0)
            print
    print """</dl>
</body>
</html>
"""
else:
    print '*** Object class tree ***\n'
    print
    PrintSchemaTree(schema,ldap.schema.ObjectClass,oc_tree,'2.5.6.0',0)
    print '\n*** Attribute types tree ***\n'
    PrintSchemaTree(schema,ldap.schema.AttributeType,at_tree,'_',0)
package imagebackend
import (
"io"
"net/http"
"github.com/moby/moby/api/types/container"
imagetypes "github.com/moby/moby/api/types/image"
"github.com/moby/moby/api/types/registry"
"github.com/moby/moby/api/types/storage"
"github.com/moby/moby/v2/daemon/internal/filters"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)
// PullOptions holds the parameters used when pulling an image from a
// registry.
type PullOptions struct {
	// Platforms is the list of platforms requested for the pull
	// (semantics of an empty list depend on the backend — presumably
	// the default platform; confirm with the implementation).
	Platforms []ocispec.Platform
	// MetaHeaders contains extra HTTP headers sent with registry requests.
	MetaHeaders http.Header
	// AuthConfig holds the registry credentials, if any.
	AuthConfig *registry.AuthConfig
	// OutStream receives the progress/status output of the pull.
	OutStream io.Writer
}
// PushOptions holds the parameters used when pushing an image to a
// registry.
type PushOptions struct {
	// Platforms is the list of platforms requested for the push.
	Platforms []ocispec.Platform
	// MetaHeaders contains extra HTTP headers sent with registry requests.
	MetaHeaders http.Header
	// AuthConfig holds the registry credentials, if any.
	AuthConfig *registry.AuthConfig
	// OutStream receives the progress/status output of the push.
	OutStream io.Writer
}
// RemoveOptions holds the parameters for removing an image, or a subset
// of its platform variants.
type RemoveOptions struct {
	// Platforms limits removal to the given platform variants, if set.
	Platforms []ocispec.Platform
	// Force bypasses removal safety checks (exact semantics are
	// backend-defined).
	Force bool
	// PruneChildren requests that dependent child images also be removed.
	PruneChildren bool
}
// ListOptions holds the parameters for listing images.
type ListOptions struct {
	// All controls whether all images in the graph are filtered, or just
	// the heads.
	All bool
	// Filters is a JSON-encoded set of filter arguments.
	Filters filters.Args
	// SharedSize indicates whether the shared size of images should be computed.
	SharedSize bool
	// Manifests indicates whether the image manifests should be returned.
	Manifests bool
}
// GetImageOpts holds parameters to retrieve image information
// from the backend.
type GetImageOpts struct {
	// Platform optionally selects a platform-specific variant of the
	// image; nil leaves platform selection to the backend.
	Platform *ocispec.Platform
}
// ImageInspectOpts holds parameters to inspect an image.
type ImageInspectOpts struct {
	// Manifests requests that the image's manifest summaries be included
	// in the response.
	Manifests bool
	// Identity — TODO confirm: presumably requests the image's identity
	// (content-addressed) information in the response.
	Identity bool
	// Platform optionally selects a platform-specific variant of the image.
	Platform *ocispec.Platform
}
// InspectData is the backend's image-inspect result: the public API
// InspectResponse plus legacy fields that are only populated for, or
// serialized to, older API versions.
type InspectData struct {
	imagetypes.InspectResponse
	// Parent is the ID of the parent image.
	//
	// Depending on how the image was created, this field may be empty and
	// is only set for images that were built/created locally. This field
	// is omitted if the image was pulled from an image registry.
	//
	// This field is deprecated with the legacy builder, but returned by the API if present.
	Parent string `json:",omitempty"`
	// DockerVersion is the version of Docker that was used to build the image.
	//
	// Depending on how the image was created, this field may be omitted.
	//
	// This field is deprecated with the legacy builder, but returned by the API if present.
	DockerVersion string `json:",omitempty"`
	// Container is the ID of the container that was used to create the image.
	//
	// Depending on how the image was created, this field may be empty.
	//
	// This field is removed in API v1.45, but used for API <= v1.44 responses.
	Container string
	// ContainerConfig is an optional field containing the configuration of the
	// container that was last committed when creating the image.
	//
	// Previous versions of Docker builder used this field to store build cache,
	// and it is not in active use anymore.
	//
	// This field is removed in API v1.45, but used for API <= v1.44 responses.
	ContainerConfig *container.Config
	// GraphDriverLegacy is used for API versions < v1.52, which included the
	// name of the snapshotter the GraphDriver field.
	GraphDriverLegacy *storage.DriverData
}
# zerovec [](https://crates.io/crates/zerovec)
<!-- cargo-rdme start -->
Zero-copy vector abstractions for arbitrary types, backed by byte slices.
`zerovec` enables a far wider range of types — beyond just `&[u8]` and `&str` — to participate in
zero-copy deserialization from byte slices. It is `serde` compatible and comes equipped with
proc macros
Clients upgrading to `zerovec` benefit from zero heap allocations when deserializing
read-only data.
This crate has four main types:
- [`ZeroVec<'a, T>`] (and [`ZeroSlice<T>`](ZeroSlice)) for fixed-width types like `u32`
- [`VarZeroVec<'a, T>`] (and [`VarZeroSlice<T>`](VarZeroSlice)) for variable-width types like `str`
- [`ZeroMap<'a, K, V>`] to map from `K` to `V`
- [`ZeroMap2d<'a, K0, K1, V>`] to map from the pair `(K0, K1)` to `V`
The first two are intended as close-to-drop-in replacements for `Vec<T>` in Serde structs. The third and fourth are
intended as a replacement for `HashMap` or [`LiteMap`](https://docs.rs/litemap). When used with Serde derives, **be sure to apply
`#[serde(borrow)]` to these types**, same as one would for [`Cow<'a, T>`].
[`ZeroVec<'a, T>`], [`VarZeroVec<'a, T>`], [`ZeroMap<'a, K, V>`], and [`ZeroMap2d<'a, K0, K1, V>`] all behave like
[`Cow<'a, T>`] in that they abstract over either borrowed or owned data. When performing deserialization
from human-readable formats (like `json` and `xml`), typically these types will allocate and fully own their data, whereas if deserializing
from binary formats like `bincode` and `postcard`, these types will borrow data directly from the buffer being deserialized from,
avoiding allocations and only performing validity checks. As such, this crate can be pretty fast (see [below](#Performance) for more information)
on deserialization.
See [the design doc](https://github.com/unicode-org/icu4x/blob/main/utils/zerovec/design_doc.md) for details on how this crate
works under the hood.
## Cargo features
This crate has several optional Cargo features:
- `serde`: Allows serializing and deserializing `zerovec`'s abstractions via [`serde`](https://docs.rs/serde)
- `yoke`: Enables implementations of `Yokeable` from the [`yoke`](https://docs.rs/yoke/) crate, which is also useful
in situations involving a lot of zero-copy deserialization.
- `derive`: Makes it easier to use custom types in these collections by providing the `#[make_ule]` and
`#[make_varule]` proc macros, which generate appropriate [`ULE`](https://docs.rs/zerovec/latest/zerovec/ule/trait.ULE.html) and
[`VarULE`](https://docs.rs/zerovec/latest/zerovec/ule/trait.VarULE.html)-conformant types for a given "normal" type.
- `std`: Enabled `std::Error` implementations for error types. This crate is by default `no_std` with a dependency on `alloc`.
[`ZeroVec<'a, T>`]: ZeroVec
[`VarZeroVec<'a, T>`]: VarZeroVec
[`ZeroMap<'a, K, V>`]: ZeroMap
[`ZeroMap2d<'a, K0, K1, V>`]: ZeroMap2d
[`Cow<'a, T>`]: alloc::borrow::Cow
## Examples
Serialize and deserialize a struct with ZeroVec and VarZeroVec with Bincode:
```rust
use zerovec::{VarZeroVec, ZeroVec};
// This example requires the "serde" feature
#[derive(serde::Serialize, serde::Deserialize)]
pub struct DataStruct<'data> {
#[serde(borrow)]
nums: ZeroVec<'data, u32>,
#[serde(borrow)]
chars: ZeroVec<'data, char>,
#[serde(borrow)]
strs: VarZeroVec<'data, str>,
}
let data = DataStruct {
nums: ZeroVec::from_slice_or_alloc(&[211, 281, 421, 461]),
chars: ZeroVec::alloc_from_slice(&['ö', '冇', 'म']),
strs: VarZeroVec::from(&["hello", "world"]),
};
let bincode_bytes =
bincode::serialize(&data).expect("Serialization should be successful");
assert_eq!(bincode_bytes.len(), 63);
let deserialized: DataStruct = bincode::deserialize(&bincode_bytes)
.expect("Deserialization should be successful");
assert_eq!(deserialized.nums.first(), Some(211));
assert_eq!(deserialized.chars.get(1), Some('冇'));
assert_eq!(deserialized.strs.get(1), Some("world"));
// The deserialization will not have allocated anything
assert!(!deserialized.nums.is_owned());
```
Use custom types inside of ZeroVec:
```rust
use zerovec::{ZeroVec, VarZeroVec, ZeroMap};
use std::borrow::Cow;
use zerovec::ule::encode_varule_to_box;
// custom fixed-size ULE type for ZeroVec
#[zerovec::make_ule(DateULE)]
#[derive(Copy, Clone, PartialEq, Eq, Ord, PartialOrd, serde::Serialize, serde::Deserialize)]
struct Date {
y: u64,
m: u8,
d: u8
}
// custom variable sized VarULE type for VarZeroVec
#[zerovec::make_varule(PersonULE)]
#[zerovec::derive(Serialize, Deserialize)] // add Serde impls to PersonULE
#[derive(Clone, PartialEq, Eq, Ord, PartialOrd, serde::Serialize, serde::Deserialize)]
struct Person<'a> {
birthday: Date,
favorite_character: char,
#[serde(borrow)]
name: Cow<'a, str>,
}
#[derive(serde::Serialize, serde::Deserialize)]
struct Data<'a> {
#[serde(borrow)]
important_dates: ZeroVec<'a, Date>,
// note: VarZeroVec always must reference the ULE type directly
#[serde(borrow)]
important_people: VarZeroVec<'a, PersonULE>,
#[serde(borrow)]
birthdays_to_people: ZeroMap<'a, Date, PersonULE>
}
let person1 = Person {
birthday: Date { y: 1990, m: 9, d: 7},
favorite_character: 'π',
name: Cow::from("Kate")
};
let person2 = Person {
birthday: Date { y: 1960, m: 5, d: 25},
favorite_character: '冇',
name: Cow::from("Jesse")
};
let important_dates = ZeroVec::alloc_from_slice(&[Date { y: 1943, m: 3, d: 20}, Date { y: 1976, m: 8, d: 2}, Date { y: 1998, m: 2, d: 15}]);
let important_people = VarZeroVec::from(&[&person1, &person2]);
let mut birthdays_to_people: ZeroMap<Date, PersonULE> = ZeroMap::new();
// `.insert_var_v()` is slightly more convenient over `.insert()` for custom ULE types
birthdays_to_people.insert_var_v(&person1.birthday, &person1);
birthdays_to_people.insert_var_v(&person2.birthday, &person2);
let data = Data { important_dates, important_people, birthdays_to_people };
let bincode_bytes = bincode::serialize(&data)
.expect("Serialization should be successful");
assert_eq!(bincode_bytes.len(), 160);
let deserialized: Data = bincode::deserialize(&bincode_bytes)
.expect("Deserialization should be successful");
assert_eq!(deserialized.important_dates.get(0).unwrap().y, 1943);
assert_eq!(&deserialized.important_people.get(1).unwrap().name, "Jesse");
assert_eq!(&deserialized.important_people.get(0).unwrap().name, "Kate");
assert_eq!(&deserialized.birthdays_to_people.get(&person1.birthday).unwrap().name, "Kate");
// (this example requires the "serde" and "derive" features)
```
## Performance
`zerovec` is designed for fast deserialization from byte buffers with zero memory allocations
while minimizing performance regressions for common vector operations.
Benchmark results on x86_64:
| Operation | `Vec<T>` | `zerovec` |
|---|---|---|
| Deserialize vec of 100 `u32` | 233.18 ns | 14.120 ns |
| Compute sum of vec of 100 `u32` (read every element) | 8.7472 ns | 10.775 ns |
| Binary search vec of 1000 `u32` 50 times | 442.80 ns | 472.51 ns |
| Deserialize vec of 100 strings | 7.3740 μs\* | 1.4495 μs |
| Count chars in vec of 100 strings (read every element) | 747.50 ns | 955.28 ns |
| Binary search vec of 500 strings 10 times | 466.09 ns | 790.33 ns |
\* *This result is reported for `Vec<String>`. However, Serde also supports deserializing to the partially-zero-copy `Vec<&str>`; this gives 1.8420 μs, much faster than `Vec<String>` but a bit slower than `zerovec`.*
| Operation | `HashMap<K,V>` | `LiteMap<K,V>` | `ZeroMap<K,V>` |
|---|---|---|---|
| Deserialize a small map | 2.72 μs | 1.28 μs | 480 ns |
| Deserialize a large map | 50.5 ms | 18.3 ms | 3.74 ms |
| Look up from a small deserialized map | 49 ns | 42 ns | 54 ns |
| Look up from a large deserialized map | 51 ns | 155 ns | 213 ns |
Small = 16 elements, large = 131,072 elements. Maps contain `<String, String>`.
The benches used to generate the above table can be found in the `benches` directory in the project repository.
`zeromap` benches are named by convention, e.g. `zeromap/deserialize/small`, `zeromap/lookup/large`. The type
is appended for baseline comparisons, e.g. `zeromap/lookup/small/hashmap`.
<!-- cargo-rdme end -->
## More Information
For more information on development, authorship, contributing etc. please visit [`ICU4X home page`](https://github.com/unicode-org/icu4x). | unknown | github | https://github.com/nodejs/node | deps/crates/vendor/zerovec/README.md |
'''
Created on 4 aug. 2012
Copyright: Jev Kuznetsov
License: BSD
a module for downloading historic data from IB
'''
import ib
import pandas
from ib.ext.Contract import Contract
from ib.opt import ibConnection, message
from time import sleep
import tradingWithPython.lib.logger as logger
from pandas import DataFrame, Index
import datetime as dt
from timeKeeper import TimeKeeper
import time
timeFormat = "%Y%m%d %H:%M:%S"
class DataHandler(object):
    ''' handles incoming messages

    Subscribes to TWS HistoricalData messages and accumulates the bars of
    the current request; `dataReady` flips to True when the terminating
    "finished" message arrives.
    '''
    def __init__(self,tws):
        self._log = logger.getLogger('DH')
        # Only listen for historical-data messages on this connection.
        tws.register(self.msgHandler,message.HistoricalData)
        self.reset()
    def reset(self):
        ''' clear all accumulated bars before starting a new request '''
        self._log.debug('Resetting data')
        self.dataReady = False
        self._timestamp = []
        self._data = {'open':[],'high':[],'low':[],'close':[],'volume':[],'count':[],'WAP':[]}
    def msgHandler(self,msg):
        ''' accumulate one HistoricalData message; the message whose date
        starts with "finished" marks the end of the download '''
        #print '[msg]', msg
        if msg.date[:8] == 'finished':
            self._log.debug('Data recieved')
            self.dataReady = True
            return
        # Parse the bar timestamp, then append every tracked OHLCV field.
        self._timestamp.append(dt.datetime.strptime(msg.date,timeFormat))
        for k in self._data.keys():
            self._data[k].append(getattr(msg, k))
    @property
    def data(self):
        ''' return downloaded data as a DataFrame '''
        df = DataFrame(data=self._data,index=Index(self._timestamp))
        return df
class Downloader(object):
    ''' downloads historic bar data from Interactive Brokers via TWS,
    throttling requests to stay under the IB pacing limits '''
    def __init__(self,debug=False):
        self._log = logger.getLogger('DLD')
        # NOTE(review): 'dwonloader' below is a typo inside a runtime log
        # string; deliberately left untouched in this documentation pass.
        self._log.debug('Initializing data dwonloader. Pandas version={0}, ibpy version:{1}'.format(pandas.__version__,ib.version))
        self.tws = ibConnection()
        self._dataHandler = DataHandler(self.tws)
        if debug:
            # Echo every message type except HistoricalData (too noisy).
            self.tws.registerAll(self._debugHandler)
            self.tws.unregister(self._debugHandler,message.HistoricalData)
        self._log.debug('Connecting to tws')
        self.tws.connect()
        self._timeKeeper = TimeKeeper() # keep track of past requests
        self._reqId = 1 # current request id
    def _debugHandler(self,msg):
        ''' print every raw TWS message (debug mode only) '''
        print '[debug]', msg
    def requestData(self,contract,endDateTime,durationStr='1800 S',barSizeSetting='1 secs',whatToShow='TRADES',useRTH=1,formatDate=1):
        ''' request one chunk of historical bars ending at endDateTime and
        block until the data arrives or the wait times out; returns the
        received bars as a DataFrame '''
        self._log.debug('Requesting data for %s end time %s.' % (contract.m_symbol,endDateTime))
        # IB pacing rule: keep under 60 historical requests per 10 minutes.
        while self._timeKeeper.nrRequests(timeSpan=600) > 59:
            print 'Too many requests done. Waiting... '
            time.sleep(1)
        self._timeKeeper.addRequest()
        self._dataHandler.reset()
        self.tws.reqHistoricalData(self._reqId,contract,endDateTime,durationStr,barSizeSetting,whatToShow,useRTH,formatDate)
        self._reqId+=1
        #wait for data
        startTime = time.time()
        timeout = 3
        # NOTE(review): the poll sleeps 2s per iteration against a 3s
        # timeout, so the effective wait is up to ~4s — confirm intended.
        while not self._dataHandler.dataReady and (time.time()-startTime < timeout):
            sleep(2)
        if not self._dataHandler.dataReady:
            self._log.error('Data timeout')
        # NOTE(review): stray print of the full DataFrame on every request;
        # consider removing or demoting to logger.debug.
        print self._dataHandler.data
        return self._dataHandler.data
    def getIntradayData(self,contract, dateTuple ):
        ''' get full day data on 1-s interval
        date: a tuple of (yyyy,mm,dd)
        '''
        # Fetch 16:00 through 22:00 on the given date (presumably the US
        # market session in the author's timezone — confirm) in 30-minute
        # chunks, then concatenate into one DataFrame.
        openTime = dt.datetime(*dateTuple)+dt.timedelta(hours=16)
        closeTime = dt.datetime(*dateTuple)+dt.timedelta(hours=22)
        timeRange = pandas.date_range(openTime,closeTime,freq='30min')
        datasets = []
        for t in timeRange:
            datasets.append(self.requestData(contract,t.strftime(timeFormat)))
        return pandas.concat(datasets)
    def disconnect(self):
        ''' close the TWS connection '''
        self.tws.disconnect()
if __name__=='__main__':
    # Manual smoke test: download one full day of 1-second SPY bars and
    # dump them to CSV (requires a running TWS instance).
    dl = Downloader(debug=True)
    c = Contract()
    c.m_symbol = 'SPY'
    c.m_secType = 'STK'
    c.m_exchange = 'SMART'
    c.m_currency = 'USD'
    df = dl.getIntradayData(c, (2012,8,6))
    df.to_csv('test.csv')
    # Single-chunk alternatives, kept for reference:
    # df = dl.requestData(c, '20120803 22:00:00')
    # df.to_csv('test1.csv')
    # df = dl.requestData(c, '20120803 21:30:00')
    # df.to_csv('test2.csv')
    dl.disconnect()
    print 'Done.'
/*
* Copyright (C) 2013 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.io;
import static com.google.common.base.StandardSystemProperty.OS_NAME;
import static com.google.common.io.RecursiveDeleteOption.ALLOW_INSECURE;
import static com.google.common.jimfs.Feature.SECURE_DIRECTORY_STREAM;
import static com.google.common.jimfs.Feature.SYMBOLIC_LINKS;
import static com.google.common.truth.Truth.assertThat;
import static java.nio.charset.StandardCharsets.UTF_8;
import static java.nio.file.LinkOption.NOFOLLOW_LINKS;
import static java.util.concurrent.Executors.newSingleThreadExecutor;
import static org.junit.Assert.assertThrows;
import com.google.common.collect.ObjectArrays;
import com.google.common.jimfs.Configuration;
import com.google.common.jimfs.Feature;
import com.google.common.jimfs.Jimfs;
import java.io.IOException;
import java.nio.file.FileAlreadyExistsException;
import java.nio.file.FileSystem;
import java.nio.file.FileSystemException;
import java.nio.file.FileSystems;
import java.nio.file.FileVisitResult;
import java.nio.file.Files;
import java.nio.file.NoSuchFileException;
import java.nio.file.Path;
import java.nio.file.SimpleFileVisitor;
import java.nio.file.attribute.BasicFileAttributes;
import java.nio.file.attribute.FileTime;
import java.util.EnumSet;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import junit.framework.TestCase;
import junit.framework.TestSuite;
import org.jspecify.annotations.NullUnmarked;
/**
 * Tests for {@link MoreFiles}.
 *
 * <p>Note: {@link MoreFiles#fileTraverser()} is tested in {@link MoreFilesFileTraverserTest}.
 *
 * @author Colin Decker
 */
@NullUnmarked
public class MoreFilesTest extends TestCase {
  // Combines the shared ByteSource/ByteSink/CharSource/CharSink contract tests
  // (run against the Path-based factories) with the unit tests in this class.
  public static TestSuite suite() {
    TestSuite suite = new TestSuite();
    suite.addTest(
        ByteSourceTester.tests(
            "MoreFiles.asByteSource[Path]", SourceSinkFactories.pathByteSourceFactory(), true));
    suite.addTest(
        ByteSinkTester.tests(
            "MoreFiles.asByteSink[Path]", SourceSinkFactories.pathByteSinkFactory()));
    suite.addTest(
        ByteSinkTester.tests(
            "MoreFiles.asByteSink[Path, APPEND]",
            SourceSinkFactories.appendingPathByteSinkFactory()));
    suite.addTest(
        CharSourceTester.tests(
            "MoreFiles.asCharSource[Path, Charset]",
            SourceSinkFactories.pathCharSourceFactory(),
            false));
    suite.addTest(
        CharSinkTester.tests(
            "MoreFiles.asCharSink[Path, Charset]", SourceSinkFactories.pathCharSinkFactory()));
    suite.addTest(
        CharSinkTester.tests(
            "MoreFiles.asCharSink[Path, Charset, APPEND]",
            SourceSinkFactories.appendingPathCharSinkFactory()));
    suite.addTestSuite(MoreFilesTest.class);
    return suite;
  }
  // The default (real) file system; tests that need symlinks or configurable
  // features use an in-memory Jimfs file system instead (see newTestFileSystem).
  private static final FileSystem FS = FileSystems.getDefault();
  // First root directory of the default file system ("/" on Unix).
  private static Path root() {
    return FS.getRootDirectories().iterator().next();
  }
  // Fresh per-test temp directory; created in setUp, recursively deleted in tearDown.
  private Path tempDir;
  @Override
  protected void setUp() throws Exception {
    tempDir = Files.createTempDirectory("MoreFilesTest");
  }
  @Override
  protected void tearDown() throws Exception {
    if (tempDir != null) {
      // delete tempDir and its contents
      Files.walkFileTree(
          tempDir,
          new SimpleFileVisitor<Path>() {
            @Override
            public FileVisitResult visitFile(Path file, BasicFileAttributes attrs)
                throws IOException {
              Files.deleteIfExists(file);
              return FileVisitResult.CONTINUE;
            }
            @Override
            public FileVisitResult postVisitDirectory(Path dir, IOException exc)
                throws IOException {
              if (exc != null) {
                return FileVisitResult.TERMINATE;
              }
              Files.deleteIfExists(dir);
              return FileVisitResult.CONTINUE;
            }
          });
    }
  }
  // Creates an empty temporary file inside tempDir.
  private Path createTempFile() throws IOException {
    return Files.createTempFile(tempDir, "test", ".test");
  }
  // A directory has no meaningful byte size: sizeIfKnown is absent, size() throws.
  public void testByteSource_size_ofDirectory() throws IOException {
    try (FileSystem fs = Jimfs.newFileSystem(Configuration.unix())) {
      Path dir = fs.getPath("dir");
      Files.createDirectory(dir);
      ByteSource source = MoreFiles.asByteSource(dir);
      assertThat(source.sizeIfKnown()).isAbsent();
      assertThrows(IOException.class, () -> source.size());
    }
  }
  public void testByteSource_size_ofSymlinkToDirectory() throws IOException {
    try (FileSystem fs = Jimfs.newFileSystem(Configuration.unix())) {
      Path dir = fs.getPath("dir");
      Files.createDirectory(dir);
      Path link = fs.getPath("link");
      Files.createSymbolicLink(link, dir);
      ByteSource source = MoreFiles.asByteSource(link);
      assertThat(source.sizeIfKnown()).isAbsent();
      assertThrows(IOException.class, () -> source.size());
    }
  }
  public void testByteSource_size_ofSymlinkToRegularFile() throws IOException {
    try (FileSystem fs = Jimfs.newFileSystem(Configuration.unix())) {
      Path file = fs.getPath("file");
      Files.write(file, new byte[10]);
      Path link = fs.getPath("link");
      Files.createSymbolicLink(link, file);
      ByteSource source = MoreFiles.asByteSource(link);
      assertEquals(10L, (long) source.sizeIfKnown().get());
      assertEquals(10L, source.size());
    }
  }
  // With NOFOLLOW_LINKS the symlink itself is inspected, so size is unavailable.
  public void testByteSource_size_ofSymlinkToRegularFile_nofollowLinks() throws IOException {
    try (FileSystem fs = Jimfs.newFileSystem(Configuration.unix())) {
      Path file = fs.getPath("file");
      Files.write(file, new byte[10]);
      Path link = fs.getPath("link");
      Files.createSymbolicLink(link, file);
      ByteSource source = MoreFiles.asByteSource(link, NOFOLLOW_LINKS);
      assertThat(source.sizeIfKnown()).isAbsent();
      assertThrows(IOException.class, () -> source.size());
    }
  }
  public void testEqual() throws IOException {
    try (FileSystem fs = Jimfs.newFileSystem(Configuration.unix())) {
      Path fooPath = fs.getPath("foo");
      Path barPath = fs.getPath("bar");
      MoreFiles.asCharSink(fooPath, UTF_8).write("foo");
      MoreFiles.asCharSink(barPath, UTF_8).write("barbar");
      assertThat(MoreFiles.equal(fooPath, barPath)).isFalse();
      assertThat(MoreFiles.equal(fooPath, fooPath)).isTrue();
      assertThat(MoreFiles.asByteSource(fooPath).contentEquals(MoreFiles.asByteSource(fooPath)))
          .isTrue();
      Path fooCopy = Files.copy(fooPath, fs.getPath("fooCopy"));
      assertThat(Files.isSameFile(fooPath, fooCopy)).isFalse();
      assertThat(MoreFiles.equal(fooPath, fooCopy)).isTrue();
      MoreFiles.asCharSink(fooCopy, UTF_8).write("boo");
      assertThat(MoreFiles.asByteSource(fooPath).size())
          .isEqualTo(MoreFiles.asByteSource(fooCopy).size());
      assertThat(MoreFiles.equal(fooPath, fooCopy)).isFalse();
      // should also assert that a Path that erroneously reports a size 0 can still be compared,
      // not sure how to do that with the Path API
    }
  }
  // equal() compares contents, so symlinks and hard links to the same file are equal.
  public void testEqual_links() throws IOException {
    try (FileSystem fs = Jimfs.newFileSystem(Configuration.unix())) {
      Path fooPath = fs.getPath("foo");
      MoreFiles.asCharSink(fooPath, UTF_8).write("foo");
      Path fooSymlink = fs.getPath("symlink");
      Files.createSymbolicLink(fooSymlink, fooPath);
      Path fooHardlink = fs.getPath("hardlink");
      Files.createLink(fooHardlink, fooPath);
      assertThat(MoreFiles.equal(fooPath, fooSymlink)).isTrue();
      assertThat(MoreFiles.equal(fooPath, fooHardlink)).isTrue();
      assertThat(MoreFiles.equal(fooSymlink, fooHardlink)).isTrue();
    }
  }
  // touch() creates the file if missing and is a no-op-safe call when it exists.
  public void testTouch() throws IOException {
    Path temp = createTempFile();
    assertTrue(Files.exists(temp));
    Files.delete(temp);
    assertFalse(Files.exists(temp));
    MoreFiles.touch(temp);
    assertTrue(Files.exists(temp));
    MoreFiles.touch(temp);
    assertTrue(Files.exists(temp));
  }
  // touch() on an existing file updates its last-modified time.
  public void testTouchTime() throws IOException {
    Path temp = createTempFile();
    assertTrue(Files.exists(temp));
    Files.setLastModifiedTime(temp, FileTime.fromMillis(0));
    assertEquals(0, Files.getLastModifiedTime(temp).toMillis());
    MoreFiles.touch(temp);
    assertThat(Files.getLastModifiedTime(temp).toMillis()).isNotEqualTo(0);
  }
  public void testCreateParentDirectories_root() throws IOException {
    // We use a fake filesystem to sidestep flaky problems with Windows (b/136041958).
    try (FileSystem fs = Jimfs.newFileSystem(Configuration.unix())) {
      Path root = fs.getRootDirectories().iterator().next();
      assertThat(root.getParent()).isNull();
      assertThat(root.toRealPath().getParent()).isNull();
      MoreFiles.createParentDirectories(root); // test that there's no exception
    }
  }
  public void testCreateParentDirectories_relativePath() throws IOException {
    Path path = FS.getPath("nonexistent.file");
    assertThat(path.getParent()).isNull();
    assertThat(path.toAbsolutePath().getParent()).isNotNull();
    MoreFiles.createParentDirectories(path); // test that there's no exception
  }
  public void testCreateParentDirectories_noParentsNeeded() throws IOException {
    Path path = tempDir.resolve("nonexistent.file");
    assertTrue(Files.exists(path.getParent()));
    MoreFiles.createParentDirectories(path); // test that there's no exception
  }
  public void testCreateParentDirectories_oneParentNeeded() throws IOException {
    Path path = tempDir.resolve("parent/nonexistent.file");
    Path parent = path.getParent();
    assertFalse(Files.exists(parent));
    MoreFiles.createParentDirectories(path);
    assertTrue(Files.exists(parent));
  }
  public void testCreateParentDirectories_multipleParentsNeeded() throws IOException {
    Path path = tempDir.resolve("grandparent/parent/nonexistent.file");
    Path parent = path.getParent();
    Path grandparent = parent.getParent();
    assertFalse(Files.exists(grandparent));
    assertFalse(Files.exists(parent));
    MoreFiles.createParentDirectories(path);
    assertTrue(Files.exists(parent));
    assertTrue(Files.exists(grandparent));
  }
  // Creating a directory directly under the file system root should be denied
  // for a non-privileged user; skipped on Windows (see TODO).
  public void testCreateParentDirectories_noPermission() {
    if (isWindows()) {
      return; // TODO: b/136041958 - Create/find a directory that we don't have permissions on?
    }
    Path file = root().resolve("parent/nonexistent.file");
    Path parent = file.getParent();
    assertFalse(Files.exists(parent));
    assertThrows(IOException.class, () -> MoreFiles.createParentDirectories(file));
  }
  public void testCreateParentDirectories_nonDirectoryParentExists() throws IOException {
    Path parent = createTempFile();
    assertTrue(Files.isRegularFile(parent));
    Path file = parent.resolve("foo");
    assertThrows(IOException.class, () -> MoreFiles.createParentDirectories(file));
  }
  public void testCreateParentDirectories_symlinkParentExists() throws IOException {
    /*
     * We use a fake filesystem to sidestep:
     *
     *  - flaky problems with Windows (b/136041958)
     *
     *  - the lack of support for symlinks in the default filesystem under Android's desugared
     *    java.nio.file
     */
    try (FileSystem fs = Jimfs.newFileSystem(Configuration.unix())) {
      Path symlink = fs.getPath("linkToDir");
      Files.createSymbolicLink(symlink, fs.getRootDirectories().iterator().next());
      Path file = symlink.resolve("foo");
      MoreFiles.createParentDirectories(file);
    }
  }
  public void testGetFileExtension() {
    assertEquals("txt", MoreFiles.getFileExtension(FS.getPath(".txt")));
    assertEquals("txt", MoreFiles.getFileExtension(FS.getPath("blah.txt")));
    assertEquals("txt", MoreFiles.getFileExtension(FS.getPath("blah..txt")));
    assertEquals("txt", MoreFiles.getFileExtension(FS.getPath(".blah.txt")));
    assertEquals("txt", MoreFiles.getFileExtension(root().resolve("tmp/blah.txt")));
    assertEquals("gz", MoreFiles.getFileExtension(FS.getPath("blah.tar.gz")));
    assertEquals("", MoreFiles.getFileExtension(root()));
    assertEquals("", MoreFiles.getFileExtension(FS.getPath(".")));
    assertEquals("", MoreFiles.getFileExtension(FS.getPath("..")));
    assertEquals("", MoreFiles.getFileExtension(FS.getPath("...")));
    assertEquals("", MoreFiles.getFileExtension(FS.getPath("blah")));
    assertEquals("", MoreFiles.getFileExtension(FS.getPath("blah.")));
    assertEquals("", MoreFiles.getFileExtension(FS.getPath(".blah.")));
    assertEquals("", MoreFiles.getFileExtension(root().resolve("foo.bar/blah")));
    assertEquals("", MoreFiles.getFileExtension(root().resolve("foo/.bar/blah")));
  }
  public void testGetNameWithoutExtension() {
    assertEquals("", MoreFiles.getNameWithoutExtension(FS.getPath(".txt")));
    assertEquals("blah", MoreFiles.getNameWithoutExtension(FS.getPath("blah.txt")));
    assertEquals("blah.", MoreFiles.getNameWithoutExtension(FS.getPath("blah..txt")));
    assertEquals(".blah", MoreFiles.getNameWithoutExtension(FS.getPath(".blah.txt")));
    assertEquals("blah", MoreFiles.getNameWithoutExtension(root().resolve("tmp/blah.txt")));
    assertEquals("blah.tar", MoreFiles.getNameWithoutExtension(FS.getPath("blah.tar.gz")));
    assertEquals("", MoreFiles.getNameWithoutExtension(root()));
    assertEquals("", MoreFiles.getNameWithoutExtension(FS.getPath(".")));
    assertEquals(".", MoreFiles.getNameWithoutExtension(FS.getPath("..")));
    assertEquals("..", MoreFiles.getNameWithoutExtension(FS.getPath("...")));
    assertEquals("blah", MoreFiles.getNameWithoutExtension(FS.getPath("blah")));
    assertEquals("blah", MoreFiles.getNameWithoutExtension(FS.getPath("blah.")));
    assertEquals(".blah", MoreFiles.getNameWithoutExtension(FS.getPath(".blah.")));
    assertEquals("blah", MoreFiles.getNameWithoutExtension(root().resolve("foo.bar/blah")));
    assertEquals("blah", MoreFiles.getNameWithoutExtension(root().resolve("foo/.bar/blah")));
  }
  public void testPredicates() throws IOException {
    /*
     * We use a fake filesystem to sidestep the lack of support for symlinks in the default
     * filesystem under Android's desugared java.nio.file.
     */
    try (FileSystem fs = Jimfs.newFileSystem(Configuration.unix())) {
      Path file = fs.getPath("file");
      Files.createFile(file);
      Path dir = fs.getPath("dir");
      Files.createDirectory(dir);
      assertTrue(MoreFiles.isDirectory().apply(dir));
      assertFalse(MoreFiles.isRegularFile().apply(dir));
      assertFalse(MoreFiles.isDirectory().apply(file));
      assertTrue(MoreFiles.isRegularFile().apply(file));
      Path symlinkToDir = fs.getPath("symlinkToDir");
      Path symlinkToFile = fs.getPath("symlinkToFile");
      Files.createSymbolicLink(symlinkToDir, dir);
      Files.createSymbolicLink(symlinkToFile, file);
      assertTrue(MoreFiles.isDirectory().apply(symlinkToDir));
      assertFalse(MoreFiles.isRegularFile().apply(symlinkToDir));
      assertFalse(MoreFiles.isDirectory().apply(symlinkToFile));
      assertTrue(MoreFiles.isRegularFile().apply(symlinkToFile));
      assertFalse(MoreFiles.isDirectory(NOFOLLOW_LINKS).apply(symlinkToDir));
      assertFalse(MoreFiles.isRegularFile(NOFOLLOW_LINKS).apply(symlinkToFile));
    }
  }
  /**
   * Creates a new file system for testing that supports the given features in addition to
   * supporting symbolic links. The file system is created initially having the following file
   * structure:
   *
   * <pre>
   *   /
   *      work/
   *         dir/
   *            a
   *            b/
   *               g
   *               h -> ../a
   *               i/
   *                  j/
   *                     k
   *                     l/
   *            c
   *            d -> b/i
   *            e/
   *            f -> /dontdelete
   *      dontdelete/
   *         a
   *         b/
   *         c
   *      symlinktodir -> work/dir
   * </pre>
   */
  static FileSystem newTestFileSystem(Feature... supportedFeatures) throws IOException {
    FileSystem fs =
        Jimfs.newFileSystem(
            Configuration.unix().toBuilder()
                .setSupportedFeatures(ObjectArrays.concat(SYMBOLIC_LINKS, supportedFeatures))
                .build());
    Files.createDirectories(fs.getPath("dir/b/i/j/l"));
    Files.createFile(fs.getPath("dir/a"));
    Files.createFile(fs.getPath("dir/c"));
    Files.createSymbolicLink(fs.getPath("dir/d"), fs.getPath("b/i"));
    Files.createDirectory(fs.getPath("dir/e"));
    Files.createSymbolicLink(fs.getPath("dir/f"), fs.getPath("/dontdelete"));
    Files.createFile(fs.getPath("dir/b/g"));
    Files.createSymbolicLink(fs.getPath("dir/b/h"), fs.getPath("../a"));
    Files.createFile(fs.getPath("dir/b/i/j/k"));
    Files.createDirectory(fs.getPath("/dontdelete"));
    Files.createFile(fs.getPath("/dontdelete/a"));
    Files.createDirectory(fs.getPath("/dontdelete/b"));
    Files.createFile(fs.getPath("/dontdelete/c"));
    Files.createSymbolicLink(fs.getPath("/symlinktodir"), fs.getPath("work/dir"));
    return fs;
  }
  // "dir" initially has 6 direct children (a, b, c, d, e, f); /dontdelete has 3.
  public void testDirectoryDeletion_basic() throws IOException {
    for (DirectoryDeleteMethod method : EnumSet.allOf(DirectoryDeleteMethod.class)) {
      try (FileSystem fs = newTestFileSystem(SECURE_DIRECTORY_STREAM)) {
        Path dir = fs.getPath("dir");
        assertEquals(6, MoreFiles.listFiles(dir).size());
        method.delete(dir);
        method.assertDeleteSucceeded(dir);
        assertEquals(
            "contents of /dontdelete deleted by delete method " + method,
            3,
            MoreFiles.listFiles(fs.getPath("/dontdelete")).size());
      }
    }
  }
  public void testDirectoryDeletion_emptyDir() throws IOException {
    for (DirectoryDeleteMethod method : EnumSet.allOf(DirectoryDeleteMethod.class)) {
      try (FileSystem fs = newTestFileSystem(SECURE_DIRECTORY_STREAM)) {
        Path emptyDir = fs.getPath("dir/e");
        assertEquals(0, MoreFiles.listFiles(emptyDir).size());
        method.delete(emptyDir);
        method.assertDeleteSucceeded(emptyDir);
      }
    }
  }
  // Deleting a symlink to a directory removes only the link, not the target.
  public void testDeleteRecursively_symlinkToDir() throws IOException {
    try (FileSystem fs = newTestFileSystem(SECURE_DIRECTORY_STREAM)) {
      Path symlink = fs.getPath("/symlinktodir");
      Path dir = fs.getPath("dir");
      assertEquals(6, MoreFiles.listFiles(dir).size());
      MoreFiles.deleteRecursively(symlink);
      assertFalse(Files.exists(symlink));
      assertTrue(Files.exists(dir));
      assertEquals(6, MoreFiles.listFiles(dir).size());
    }
  }
  // deleteDirectoryContents through a symlink empties the target but keeps the link.
  public void testDeleteDirectoryContents_symlinkToDir() throws IOException {
    try (FileSystem fs = newTestFileSystem(SECURE_DIRECTORY_STREAM)) {
      Path symlink = fs.getPath("/symlinktodir");
      Path dir = fs.getPath("dir");
      assertEquals(6, MoreFiles.listFiles(symlink).size());
      MoreFiles.deleteDirectoryContents(symlink);
      assertTrue(Files.exists(symlink, NOFOLLOW_LINKS));
      assertTrue(Files.exists(symlink));
      assertTrue(Files.exists(dir));
      assertEquals(0, MoreFiles.listFiles(symlink).size());
    }
  }
  // Without SecureDirectoryStream support, recursive deletion is refused unless
  // the caller opts into the insecure mode.
  public void testDirectoryDeletion_sdsNotSupported_fails() throws IOException {
    for (DirectoryDeleteMethod method : EnumSet.allOf(DirectoryDeleteMethod.class)) {
      try (FileSystem fs = newTestFileSystem()) {
        Path dir = fs.getPath("dir");
        assertEquals(6, MoreFiles.listFiles(dir).size());
        assertThrows(InsecureRecursiveDeleteException.class, () -> method.delete(dir));
        assertTrue(Files.exists(dir));
        assertEquals(6, MoreFiles.listFiles(dir).size());
      }
    }
  }
  public void testDirectoryDeletion_sdsNotSupported_allowInsecure() throws IOException {
    for (DirectoryDeleteMethod method : EnumSet.allOf(DirectoryDeleteMethod.class)) {
      try (FileSystem fs = newTestFileSystem()) {
        Path dir = fs.getPath("dir");
        assertEquals(6, MoreFiles.listFiles(dir).size());
        method.delete(dir, ALLOW_INSECURE);
        method.assertDeleteSucceeded(dir);
        assertEquals(
            "contents of /dontdelete deleted by delete method " + method,
            3,
            MoreFiles.listFiles(fs.getPath("/dontdelete")).size());
      }
    }
  }
  public void testDeleteRecursively_symlinkToDir_sdsNotSupported_allowInsecure()
      throws IOException {
    try (FileSystem fs = newTestFileSystem()) {
      Path symlink = fs.getPath("/symlinktodir");
      Path dir = fs.getPath("dir");
      assertEquals(6, MoreFiles.listFiles(dir).size());
      MoreFiles.deleteRecursively(symlink, ALLOW_INSECURE);
      assertFalse(Files.exists(symlink));
      assertTrue(Files.exists(dir));
      assertEquals(6, MoreFiles.listFiles(dir).size());
    }
  }
  public void testDeleteRecursively_nonexistingFile_throwsNoSuchFileException() throws IOException {
    try (FileSystem fs = newTestFileSystem()) {
      NoSuchFileException expected =
          assertThrows(
              NoSuchFileException.class,
              () -> MoreFiles.deleteRecursively(fs.getPath("/work/nothere"), ALLOW_INSECURE));
      assertThat(expected.getFile()).isEqualTo("/work/nothere");
    }
  }
  public void testDeleteDirectoryContents_symlinkToDir_sdsNotSupported_allowInsecure()
      throws IOException {
    try (FileSystem fs = newTestFileSystem()) {
      Path symlink = fs.getPath("/symlinktodir");
      Path dir = fs.getPath("dir");
      assertEquals(6, MoreFiles.listFiles(dir).size());
      MoreFiles.deleteDirectoryContents(symlink, ALLOW_INSECURE);
      assertEquals(0, MoreFiles.listFiles(dir).size());
    }
  }
  /**
   * This test attempts to create a situation in which one thread is constantly changing a file from
   * being a real directory to being a symlink to another directory. It then calls
   * deleteDirectoryContents thousands of times on a directory whose subtree contains the file
   * that's switching between directory and symlink to try to ensure that under no circumstance does
   * deleteDirectoryContents follow the symlink to the other directory and delete that directory's
   * contents.
   *
   * <p>We can only test this with a file system that supports SecureDirectoryStream, because it's
   * not possible to protect against this if the file system doesn't.
   */
  @SuppressWarnings("ThreadPriorityCheck") // TODO: b/175898629 - Consider onSpinWait.
  public void testDirectoryDeletion_directorySymlinkRace() throws IOException {
    int iterations = isAndroid() ? 100 : 5000;
    for (DirectoryDeleteMethod method : EnumSet.allOf(DirectoryDeleteMethod.class)) {
      try (FileSystem fs = newTestFileSystem(SECURE_DIRECTORY_STREAM)) {
        Path dirToDelete = fs.getPath("dir/b/i");
        Path changingFile = dirToDelete.resolve("j/l");
        Path symlinkTarget = fs.getPath("/dontdelete");
        ExecutorService executor = newSingleThreadExecutor();
        startDirectorySymlinkSwitching(changingFile, symlinkTarget, executor);
        try {
          for (int i = 0; i < iterations; i++) {
            try {
              Files.createDirectories(changingFile);
              Files.createFile(dirToDelete.resolve("j/k"));
            } catch (FileAlreadyExistsException expected) {
              // if a file already exists, that's fine... just continue
            }
            try {
              method.delete(dirToDelete);
            } catch (FileSystemException expected) {
              // the delete method may or may not throw an exception, but if it does that's fine
              // and expected
            }
            // this test is mainly checking that the contents of /dontdelete aren't deleted under
            // any circumstances
            assertEquals(3, MoreFiles.listFiles(symlinkTarget).size());
            Thread.yield();
          }
        } finally {
          executor.shutdownNow();
        }
      }
    }
  }
  public void testDeleteRecursively_nonDirectoryFile() throws IOException {
    try (FileSystem fs = newTestFileSystem(SECURE_DIRECTORY_STREAM)) {
      Path file = fs.getPath("dir/a");
      assertTrue(Files.isRegularFile(file, NOFOLLOW_LINKS));
      MoreFiles.deleteRecursively(file);
      assertFalse(Files.exists(file, NOFOLLOW_LINKS));
      Path symlink = fs.getPath("/symlinktodir");
      assertTrue(Files.isSymbolicLink(symlink));
      Path realSymlinkTarget = symlink.toRealPath();
      assertTrue(Files.isDirectory(realSymlinkTarget, NOFOLLOW_LINKS));
      MoreFiles.deleteRecursively(symlink);
      assertFalse(Files.exists(symlink, NOFOLLOW_LINKS));
      assertTrue(Files.isDirectory(realSymlinkTarget, NOFOLLOW_LINKS));
    }
  }
  /**
   * Starts a new task on the given executor that switches (deletes and replaces) a file between
   * being a directory and being a symlink. The given {@code file} is the file that should switch
   * between being a directory and being a symlink, while the given {@code target} is the target the
   * symlink should have.
   */
  @SuppressWarnings("ThreadPriorityCheck") // TODO: b/175898629 - Consider onSpinWait.
  private static void startDirectorySymlinkSwitching(
      Path file, Path target, ExecutorService executor) {
    @SuppressWarnings("unused") // https://errorprone.info/bugpattern/FutureReturnValueIgnored
    Future<?> possiblyIgnoredError =
        executor.submit(
            new Runnable() {
              @Override
              public void run() {
                boolean createSymlink = false;
                while (!Thread.interrupted()) {
                  try {
                    // trying to switch between a real directory and a symlink (dir -> /a)
                    if (Files.deleteIfExists(file)) {
                      if (createSymlink) {
                        Files.createSymbolicLink(file, target);
                      } else {
                        Files.createDirectory(file);
                      }
                      createSymlink = !createSymlink;
                    }
                  } catch (IOException tolerated) {
                    // it's expected that some of these will fail
                  }
                  Thread.yield();
                }
              }
            });
  }
  /** Enum defining the two MoreFiles methods that delete directory contents. */
  private enum DirectoryDeleteMethod {
    DELETE_DIRECTORY_CONTENTS {
      @Override
      public void delete(Path path, RecursiveDeleteOption... options) throws IOException {
        MoreFiles.deleteDirectoryContents(path, options);
      }
      @Override
      public void assertDeleteSucceeded(Path path) throws IOException {
        assertEquals(
            "contents of directory " + path + " not deleted with delete method " + this,
            0,
            MoreFiles.listFiles(path).size());
      }
    },
    DELETE_RECURSIVELY {
      @Override
      public void delete(Path path, RecursiveDeleteOption... options) throws IOException {
        MoreFiles.deleteRecursively(path, options);
      }
      @Override
      public void assertDeleteSucceeded(Path path) throws IOException {
        assertFalse("file " + path + " not deleted with delete method " + this, Files.exists(path));
      }
    };
    abstract void delete(Path path, RecursiveDeleteOption... options) throws IOException;
    abstract void assertDeleteSucceeded(Path path) throws IOException;
  }
  // True when running on a Windows host (used to skip permission-dependent tests).
  private static boolean isWindows() {
    return OS_NAME.value().startsWith("Windows");
  }
  // True under Android's runtime; the race test uses fewer iterations there.
  private static boolean isAndroid() {
    return System.getProperty("java.runtime.name", "").contains("Android");
  }
}
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, # You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import absolute_import, print_function, unicode_literals
from mach.decorators import (
CommandProvider,
Command,
CommandArgument,
)
@CommandProvider
class BuiltinCommands(object):
    """Mach commands for introspecting the registered mach commands."""

    def __init__(self, context):
        # Keep the command context so handlers can be enumerated later.
        self.context = context

    @Command('mach-commands', category='misc',
             description='List all mach commands.')
    def commands(self):
        """Print the name of every registered mach command, one per line."""
        handlers = self.context.commands.command_handlers
        print("\n".join(handlers.keys()))

    @Command('mach-debug-commands', category='misc',
             description='Show info about available mach commands.')
    @CommandArgument('match', metavar='MATCH', default=None, nargs='?',
                     help='Only display commands containing given substring.')
    def debug_commands(self, match=None):
        """Print the implementing file/class/method of each command.

        If ``match`` is given, only commands whose name contains the
        substring are shown.
        """
        import inspect

        handlers = self.context.commands.command_handlers
        for name in sorted(handlers):
            if match and match not in name:
                continue
            handler = handlers[name]
            cls = handler.cls
            method = getattr(cls, getattr(handler, 'method'))
            print(name)
            print('=' * len(name))
            print('')
            print('File: %s' % inspect.getsourcefile(method))
            print('Class: %s' % cls.__name__)
            print('Method: %s' % handler.method)
            print('')
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
from telemetry.core import util
from catapult_base import util as catapult_util
# TODO(aiolos): Move these functions to catapult_base or here.
# Re-exported aliases: existing callers import these helpers from this module
# even though the implementations live in telemetry.core.util / catapult_base.
GetBaseDir = util.GetBaseDir
GetTelemetryDir = util.GetTelemetryDir
GetUnittestDataDir = util.GetUnittestDataDir
GetChromiumSrcDir = util.GetChromiumSrcDir
GetBuildDirectories = util.GetBuildDirectories
IsExecutable = catapult_util.IsExecutable
def FindInstalledWindowsApplication(application_path):
  """Search common Windows installation directories for an application.

  Checks %PROGRAMFILES(X86)%, %PROGRAMFILES%, %LOCALAPPDATA% and every entry
  of %PATH%, in that order.

  Args:
    application_path: Path to application relative from installation location.

  Returns:
    A string representing the full path, or None if not found.
  """
  candidate_roots = [
      os.getenv('PROGRAMFILES(X86)'),
      os.getenv('PROGRAMFILES'),
      os.getenv('LOCALAPPDATA'),
  ] + os.getenv('PATH', '').split(os.pathsep)
  for root in candidate_roots:
    # Unset environment variables yield None/empty entries; skip them.
    if not root:
      continue
    candidate = os.path.join(root, application_path)
    if IsExecutable(candidate):
      return candidate
  return None
def IsSubpath(subpath, superpath):
  """Returns True iff subpath is or is in superpath."""
  current = os.path.realpath(subpath)
  target = os.path.realpath(superpath)
  # Walk up from subpath one component at a time; if we ever land exactly on
  # superpath, subpath lies within (or equals) it.
  while len(current) >= len(target):
    if current == target:
      return True
    current = os.path.dirname(current)
  return False
def ListFiles(base_directory, should_include_dir=lambda _: True,
              should_include_file=lambda _: True):
  """Recursively collects file paths under base_directory.

  Directories for which should_include_dir(name) is falsy are pruned from the
  walk (their contents are never visited); files for which
  should_include_file(name) is falsy are skipped.

  Returns:
    A sorted list of full paths of the matching files.
  """
  results = []
  for root, subdirs, filenames in os.walk(base_directory):
    # Prune in place so os.walk does not descend into excluded directories.
    subdirs[:] = [d for d in subdirs if should_include_dir(d)]
    for filename in filenames:
      if should_include_file(filename):
        results.append(os.path.join(root, filename))
  return sorted(results)
#!/usr/bin/env node
/**
 * Analyze a CPU profile to identify hot modules.
 *
 * Reads a V8 .cpuprofile JSON file given as the first CLI argument,
 * aggregates per-node hit counts by source file (normalized to a short
 * module name), and prints the 40 modules with the most CPU samples.
 */
const fs = require('fs')

const profilePath = process.argv[2]
if (!profilePath) {
  console.error('Usage: node analyze-profile.js <profile.cpuprofile>')
  process.exit(1)
}

const profile = JSON.parse(fs.readFileSync(profilePath, 'utf-8'))

// Aggregate hit counts per normalized module name.
const moduleHits = {}
for (const node of profile.nodes || []) {
  const frame = node.callFrame
  if (!frame || !frame.url) continue
  const url = frame.url
  // Shorten well-known path prefixes so the report stays readable.
  let moduleName = url
  if (url.includes('next/dist/')) {
    moduleName = url.split('next/dist/')[1]
  } else if (url.includes('node_modules/')) {
    moduleName = 'node_modules/' + url.split('node_modules/').pop()
  }
  const entry = moduleHits[moduleName] || (moduleHits[moduleName] = { hits: 0 })
  entry.hits += node.hitCount || 0
}

// Keep only modules that actually accumulated samples, highest first.
const sorted = Object.entries(moduleHits)
  .filter(([, data]) => data.hits > 0)
  .sort((a, b) => b[1].hits - a[1].hits)
  .slice(0, 40)

console.log('Top 40 modules by CPU time:')
console.log('='.repeat(70))
sorted.forEach(([name, data], i) => {
  console.log(`${String(i + 1).padStart(2)}. ${name} (${data.hits} hits)`)
})
name: Index PyTorch Tests for Target Determination
on:
workflow_dispatch:
schedule:
- cron: '0 0 * * *'
permissions:
id-token: write
contents: read
jobs:
get-label-type:
if: github.repository_owner == 'pytorch'
name: get-label-type
uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@main
with:
triggering_actor: ${{ github.triggering_actor }}
issue_owner: ${{ github.event.pull_request.user.login || github.event.issue.user.login }}
curr_branch: ${{ github.head_ref || github.ref_name }}
curr_ref_type: ${{ github.ref_type }}
index:
needs: get-label-type
runs-on: "${{ needs.get-label-type.outputs.label-type }}linux.g5.4xlarge.nvidia.gpu" # 1 GPU A10G 24GB each
environment: target-determinator-env
steps:
- name: Clone PyTorch
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
path: pytorch
- name: Setup Linux
uses: ./pytorch/.github/actions/setup-linux
- name: Login to ECR
uses: ./pytorch/.github/actions/ecr-login
- name: Calculate docker image
id: calculate-docker-image
uses: pytorch/test-infra/.github/actions/calculate-docker-image@main
with:
docker-image-name: ci-image:pytorch-linux-jammy-cuda12.8-cudnn9-py3-gcc11
working-directory: pytorch
- name: Use following to pull public copy of the image
id: print-ghcr-mirror
env:
ECR_DOCKER_IMAGE: ${{ steps.calculate-docker-image.outputs.docker-image }}
shell: bash
run: |
tag=${ECR_DOCKER_IMAGE##*:}
echo "docker pull ghcr.io/pytorch/ci-image:${tag/:/-}"
- name: Pull docker image
uses: pytorch/test-infra/.github/actions/pull-docker-image@main
with:
docker-image: ${{ steps.calculate-docker-image.outputs.docker-image }}
- name: Install nvidia driver, nvidia-docker runtime, set GPU_FLAG
id: install-nvidia-driver
uses: pytorch/test-infra/.github/actions/setup-nvidia@main
- name: Clone CodeLlama
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
repository: osalpekar/codellama
ref: 1ec50e0cfc0fadc3b6ceb146617e2119ab26eb34
path: codellama
- name: Clone Target Determination Code
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
repository: osalpekar/llm-target-determinator
ref: v0.0.2
path: llm-target-determinator
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@ececac1a45f3b08a01d2dd070d28d111c5fe6722 # v4.1.0
with:
role-to-assume: arn:aws:iam::308535385114:role/gha_target_determinator_s3_read_write
aws-region: us-east-1
- name: Download checkpoint
shell: bash
env:
AWS_DEFAULT_REGION: us-east-1
run: |
# Do this outside of docker so I don't have to put env vars in
pip3 install awscli==1.29.40
cd codellama
mkdir "CodeLlama-7b-Python"
aws s3 cp \
"s3://target-determinator-assets/CodeLlama-7b-Python" \
"CodeLlama-7b-Python" \
--recursive
- name: Run indexer
shell: bash -l {0}
env:
DOCKER_IMAGE: ${{ steps.calculate-docker-image.outputs.docker-image }}
GITHUB_RUN_ID: ${{ github.run_id }}
AWS_DEFAULT_REGION: us-east-1
run: |
# detached container should get cleaned up by teardown_ec2_linux
# Disable shellcheck warning for GPU_FLAG
# shellcheck disable=SC2086
container_name=$(docker run \
${GPU_FLAG:-} \
-e MAX_JOBS="$(nproc --ignore=2)" \
-e AWS_DEFAULT_REGION \
--env-file="/tmp/github_env_${GITHUB_RUN_ID}" \
--security-opt seccomp=unconfined \
--cap-add=SYS_PTRACE \
--tty \
--detach \
--user jenkins \
-v "${GITHUB_WORKSPACE}:/var/lib/jenkins/workspace" \
-w /var/lib/jenkins/workspace \
"${DOCKER_IMAGE}"
)
chmod +x pytorch/.github/scripts/td_llm_indexer.sh
docker exec -t "${container_name}" sh -c 'pytorch/.github/scripts/td_llm_indexer.sh'
- name: Upload to s3
shell: bash -l {0}
env:
AWS_DEFAULT_REGION: us-east-1
run: |
cd llm-target-determinator/assets
TIMESTAMP=$(date -Iseconds)
ZIP_NAME="indexer-files-${TIMESTAMP}.zip"
# Create a zipfile with all the generated indices
zip -r "${ZIP_NAME}" indexer-files
# Note that because the below 2 operations are not atomic, there will
# be a period of a few seconds between these where there is no index
# present in the latest/ folder. To account for this, the retriever
# should have some retry logic with backoff to ensure fetching the
# index doesn't fail.
# Move the old index into the archived/ folder
aws s3 mv \
"s3://target-determinator-assets/indexes/latest" \
"s3://target-determinator-assets/indexes/archived" \
--recursive
          # Move the new index into the latest/ folder
aws s3 cp \
"${ZIP_NAME}" \
"s3://target-determinator-assets/indexes/latest/${ZIP_NAME}"
- name: Teardown Linux
uses: pytorch/test-infra/.github/actions/teardown-linux@main
if: always()
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }}-${{ github.event_name == 'workflow_dispatch' }}
cancel-in-progress: true | unknown | github | https://github.com/pytorch/pytorch | .github/workflows/target-determination-indexer.yml |
#!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
__author__ = 'j.s@google.com (Jeff Scudder)'
import base64
class BasicAuth(object):
    """Sets the Authorization header as defined in RFC1945 (HTTP Basic).

    The user id and password are joined with a colon and base64-encoded
    once at construction time; the resulting token is then attached to
    every request passed to modify_request.
    """

    def __init__(self, user_id, password):
        """Precomputes the base64 credential token.

        Args:
            user_id: account identifier; per RFC1945 it must not contain
                a colon.
            password: the account password.
        """
        # b64encode never inserts line breaks, unlike the legacy
        # base64.encodestring (removed in Python 3.9) used previously,
        # which wrapped its output every 76 characters and could leave
        # embedded newlines in the header for long credentials, since
        # strip() only removes leading/trailing whitespace.
        credentials = ('%s:%s' % (user_id, password)).encode('utf-8')
        self.basic_cookie = base64.b64encode(credentials).decode('ascii')

    def modify_request(self, http_request):
        """Attaches the precomputed Basic credentials to the request.

        Args:
            http_request: any object with a mutable mapping attribute
                named ``headers``.
        """
        http_request.headers['Authorization'] = 'Basic %s' % self.basic_cookie

    # CamelCase alias preserved for callers using the older gdata naming.
    ModifyRequest = modify_request
class NoAuth(object):
    """Auth strategy for anonymous access: leaves requests untouched."""

    def modify_request(self, http_request):
        """Intentionally a no-op; exists only to satisfy the auth interface."""
#!/usr/bin/env python3
# This tool helps building swift explicit module from the JSON output of the scan-dependencies command. It will build all the module dependencies from JSON and construct a response file for the common arguments for main module build.
# Usage:
# /path/to/bin/dir/swift-build-modules.py /path/to/swift-frontend /path/to/depscan.json /path/to/output-resp
#
import argparse
import json
import os
import subprocess
import sys
def writeOutputResponseFile(filename, cmd):
    """Writes each command-line argument, double-quoted, on its own line."""
    quoted_lines = ''.join('"%s"\n' % arg for arg in cmd)
    with open(filename, 'w') as response_file:
        response_file.write(quoted_lines)
def build_module(swift_frontend, mode, detail):
    """Invokes swift-frontend with the scanner-provided command line for the
    given dependency kind ('swift' or 'clang'). Raises CalledProcessError if
    the frontend exits non-zero."""
    frontend_args = detail['details'][mode]['commandLine']
    subprocess.check_call([swift_frontend] + frontend_args)
def main():
    """Builds every module dependency reported by swift-frontend's
    scan-dependencies JSON output, then emits a response file with the
    common arguments for the main module build (and, optionally, one for
    the bridging header).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('swift_frontend', help="path to swift-frontend")
    parser.add_argument('input', help="path to json output from scan-dependencies")
    parser.add_argument('-c', '--cas', metavar='<CAS directory>')
    parser.add_argument('--llvm-cas-tool', metavar='<path>', default="llvm-cas")
    parser.add_argument('-o', '--output', metavar="<output>",
                        help="output response file for building main module")
    parser.add_argument('-b', '--bridging-header-resp', metavar="<response file>",
                        help="output response file for building bridging header")
    args = parser.parse_args()
    with open(args.input, 'r') as file:
        # Read input json file.
        deps = json.load(file)
    modules = []
    # Traverse the module name and detail pair in reverse order assuming that is the order of dependencies.
    # Skip the first module since that is the main module.
    # NOTE(review): deps['modules'] is assumed to be a flat list alternating
    # [name, detail, name, detail, ...] with entries 0/1 being the main
    # module, so the even slice from index 2 yields dependency names and the
    # odd slice from index 3 their details -- confirm against the
    # scan-dependencies output format.
    module_names = reversed(deps['modules'][2::2])
    module_details = reversed(deps['modules'][3::2])
    for name, detail in zip(module_names, module_details):
        module = {}
        module["isFramework"] = False
        if 'clang' in name:
            # Clang module: build it, then record its path (and CAS cache
            # key when present) for the explicit module map.
            build_module(args.swift_frontend, 'clang', detail)
            module["moduleName"] = name['clang']
            module["clangModulePath"] = detail["modulePath"]
            if "moduleCacheKey" in detail["details"]['clang']:
                module["clangModuleCacheKey"] = detail["details"]['clang']["moduleCacheKey"]
        if 'swift' in name:
            # Swift textual interface module: build from the interface.
            build_module(args.swift_frontend, 'swift', detail)
            module["moduleName"] = name['swift']
            module["modulePath"] = detail["modulePath"]
            if "moduleCacheKey" in detail["details"]['swift']:
                module["moduleCacheKey"] = detail["details"]['swift']["moduleCacheKey"]
        if 'swiftPrebuiltExternal' in name:
            # Prebuilt binary swiftmodule: nothing to build, only record it.
            module["moduleName"] = name['swiftPrebuiltExternal']
            module["modulePath"] = detail["modulePath"]
            if "moduleCacheKey" in detail["details"]['swiftPrebuiltExternal']:
                module["moduleCacheKey"] = detail["details"]['swiftPrebuiltExternal']["moduleCacheKey"]
        modules.append(module)
    # Write output response file if requested.
    if args.output:
        cmd = deps['modules'][1]['details']['swift']['commandLine']
        # Add some helpful flags for explicit module build.
        cmd.extend(['-disable-implicit-swift-modules'])
        # Write explicit module map.
        module_map_out = args.output + ".map"
        with open(module_map_out, 'w') as mapfile:
            json.dump(modules, mapfile, indent=2)
        # If using caching, store the map as a blob in the CAS and reference
        # it by CAS id instead of by file path.
        if args.cas:
            casid = subprocess.check_output(
                [args.llvm_cas_tool, '--cas', args.cas, '--make-blob', '--data', module_map_out], text=True).strip()
            cmd.extend(['-explicit-swift-module-map-file', casid])
        else:
            cmd.extend(['-explicit-swift-module-map-file', module_map_out])
        writeOutputResponseFile(args.output, cmd)
    # Write bridging header response file if requested.
    if args.bridging_header_resp:
        info = deps['modules'][1]['details']['swift']
        # the first argument is `-frontend`
        cmd = info['bridgingHeader']['commandLine'][1:]
        # Append the chained bridging header path as an input when one was
        # produced by the scan.
        if "chainedBridgingHeaderPath" in info:
            cmd.append(info['chainedBridgingHeaderPath'])
        writeOutputResponseFile(args.bridging_header_resp, cmd)

if __name__ == '__main__':
    main()
# This code is so you can run the samples without installing the package
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
#
# NOTE(review): testinfo appears to script the sample test runner
# (s = screenshot, t <sec> = advance to that time, q = quit) -- confirm
# against the harness; tags labels the feature this sample demonstrates.
testinfo = "s, t 5, s, t 10.1, s, q"
tags = "RichLabel"
import summa
from summa.director import director
from summa.sprite import Sprite
from summa.actions import *
from summa.text import *
class TestLayer(summa.layer.Layer):
    """Layer that shows a RichLabel centered in the window, bolds the first
    word, italicizes the second, and animates it with a rotation and a
    scale-up."""

    def __init__(self):
        super(TestLayer, self).__init__()
        width, height = director.get_window_size()
        label = RichLabel(
            "Hello {color (255, 0, 0, 255)}World!",
            (width/2, height/2))
        # Style character ranges: [0, 5) covers "Hello", [6, 11) "World!".
        document = label.element.document
        document.set_style(0, 5, dict(bold=True))
        document.set_style(6, 11, dict(italic=True))
        label.do(Rotate(360, 10))
        label.do(ScaleTo(10, 10))
        self.text = label
        self.add(label)
def main():
    """Initializes the director and runs a scene containing TestLayer."""
    director.init()
    scene = summa.scene.Scene(TestLayer())
    director.run(scene)

if __name__ == '__main__':
    main()
from __future__ import print_function
from numpy import pi, sin, cos, linspace, tan
from bokeh.util.browser import view
from bokeh.document import Document
from bokeh.embed import file_html
from bokeh.layouts import gridplot
from bokeh.models.glyphs import Line
from bokeh.models import Plot, DataRange1d, LinearAxis, ColumnDataSource, PanTool, WheelZoomTool
from bokeh.resources import INLINE
# 1000 evenly spaced samples spanning two full periods either side of 0.
x = linspace(-2*pi, 2*pi, 1000)
# One shared data source holding four derived trig series; each plot below
# reads its y values from a different column of this source.
source = ColumnDataSource(data = dict(
    x = x,
    y1 = sin(x),
    y2 = cos(x),
    y3 = tan(x),
    y4 = sin(x) * cos(x),
    )
)
def make_plot(source, xname, yname, line_color, xdr=None, ydr=None):
    """Builds a Plot with bottom/left linear axes, a Line glyph drawn from
    the named columns of ``source``, and pan/wheel-zoom tools.

    Fresh DataRange1d ranges are created unless ``xdr``/``ydr`` are given
    (pass another plot's range to link panning and zooming). Returns the
    configured Plot; it can be added to a gridplot.
    """
    x_range = DataRange1d() if xdr is None else xdr
    y_range = DataRange1d() if ydr is None else ydr
    plot = Plot(x_range=x_range, y_range=y_range, min_border=50)
    for axis_position in ('below', 'left'):
        plot.add_layout(LinearAxis(), axis_position)
    line = Line(x=xname, y=yname, line_color=line_color)
    plot.add_glyph(source, line)
    plot.add_tools(PanTool(), WheelZoomTool())
    return plot
# Four plots over the shared source; plot2 reuses plot1's x range so the
# two stay linked when panned or zoomed along x.
plot1 = make_plot(source, "x", "y1", "blue")
plot2 = make_plot(source, "x", "y2", "red", xdr=plot1.x_range)
plot3 = make_plot(source, "x", "y3", "green")
plot4 = make_plot(source, "x", "y4", "black")
# Arrange the four plots in a 2x2 grid, each 300x300 pixels.
grid = gridplot([[plot1, plot2], [plot3, plot4]], plot_width=300, plot_height=300)
doc = Document()
doc.add_root(grid)
if __name__ == "__main__":
    doc.validate()
    filename = "grid.html"
    # Render a standalone HTML file (resources inlined) and open it in the
    # default browser.
    with open(filename, "w") as f:
        f.write(file_html(doc, INLINE, "Grid Plot Example"))
    print("Wrote %s" % filename)
    view(filename)
# -*- coding: utf-8 -*-
"""
pygments.lexers.graphics
~~~~~~~~~~~~~~~~~~~~~~~~
Lexers for computer graphics and plotting related languages.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, words, include, bygroups, using, \
this, default
from pygments.token import Text, Comment, Operator, Keyword, Name, \
Number, Punctuation, String
__all__ = ['GLShaderLexer', 'PostScriptLexer', 'AsymptoteLexer', 'GnuplotLexer',
'PovrayLexer']
class GLShaderLexer(RegexLexer):
    """
    GLSL (OpenGL Shader) lexer.

    .. versionadded:: 1.1
    """
    name = 'GLSL'
    aliases = ['glsl']
    filenames = ['*.vert', '*.frag', '*.geo']
    mimetypes = ['text/x-glslsrc']
    tokens = {
        'root': [
            (r'^#.*', Comment.Preproc),
            (r'//.*', Comment.Single),
            (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
            (r'\+|-|~|!=?|\*|/|%|<<|>>|<=?|>=?|==?|&&?|\^|\|\|?',
             Operator),
            (r'[?:]', Operator),  # quick hack for ternary
            (r'\bdefined\b', Operator),
            (r'[;{}(),\[\]]', Punctuation),
            # FIXME when e is present, no decimal point needed
            (r'[+-]?\d*\.\d+([eE][-+]?\d+)?', Number.Float),
            (r'[+-]?\d+\.\d*([eE][-+]?\d+)?', Number.Float),
            (r'0[xX][0-9a-fA-F]*', Number.Hex),
            (r'0[0-7]*', Number.Oct),
            (r'[1-9][0-9]*', Number.Integer),
            # GLSL keywords and built-in types.
            # BUG FIX: the commas after 'mat3' and 'sampler3D' were missing,
            # so Python's implicit string concatenation fused them into the
            # bogus keywords 'mat3mat4' and 'sampler3DsamplerCube', leaving
            # the real 'mat4' and 'samplerCube' types unhighlighted.
            (words((
                'attribute', 'const', 'uniform', 'varying', 'centroid', 'break',
                'continue', 'do', 'for', 'while', 'if', 'else', 'in', 'out',
                'inout', 'float', 'int', 'void', 'bool', 'true', 'false',
                'invariant', 'discard', 'return', 'mat2', 'mat3', 'mat4',
                'mat2x2', 'mat3x2', 'mat4x2', 'mat2x3', 'mat3x3', 'mat4x3',
                'mat2x4', 'mat3x4', 'mat4x4', 'vec2', 'vec3', 'vec4',
                'ivec2', 'ivec3', 'ivec4', 'bvec2', 'bvec3', 'bvec4',
                'sampler1D', 'sampler2D', 'sampler3D', 'samplerCube',
                'sampler1DShadow', 'sampler2DShadow', 'struct'),
                prefix=r'\b', suffix=r'\b'),
             Keyword),
            # Words reserved by the GLSL spec for future use.
            (words((
                'asm', 'class', 'union', 'enum', 'typedef', 'template', 'this',
                'packed', 'goto', 'switch', 'default', 'inline', 'noinline',
                'volatile', 'public', 'static', 'extern', 'external', 'interface',
                'long', 'short', 'double', 'half', 'fixed', 'unsigned', 'lowp',
                'mediump', 'highp', 'precision', 'input', 'output',
                'hvec2', 'hvec3', 'hvec4', 'dvec2', 'dvec3', 'dvec4',
                'fvec2', 'fvec3', 'fvec4', 'sampler2DRect', 'sampler3DRect',
                'sampler2DRectShadow', 'sizeof', 'cast', 'namespace', 'using'),
                prefix=r'\b', suffix=r'\b'),
             Keyword),  # future use
            (r'[a-zA-Z_]\w*', Name),
            (r'\.', Punctuation),
            (r'\s+', Text),
        ],
    }
class PostScriptLexer(RegexLexer):
    """
    Lexer for PostScript files.

    The PostScript Language Reference published by Adobe at
    <http://partners.adobe.com/public/developer/en/ps/PLRM.pdf>
    is the authority for this.

    .. versionadded:: 1.4
    """
    name = 'PostScript'
    aliases = ['postscript', 'postscr']
    filenames = ['*.ps', '*.eps']
    mimetypes = ['application/postscript']

    # PostScript tokens are terminated by any of these delimiter characters;
    # delimiter_end is a lookahead used to anchor the end of a token, and
    # valid_name matches any run of non-delimiter characters up to one.
    delimiter = r'()<>\[\]{}/%\s'
    delimiter_end = r'(?=[%s])' % delimiter
    valid_name_chars = r'[^%s]' % delimiter
    valid_name = r"%s+%s" % (valid_name_chars, delimiter_end)

    tokens = {
        'root': [
            # All comment types
            (r'^%!.+\n', Comment.Preproc),
            (r'%%.*\n', Comment.Special),
            (r'(^%.*\n){2,}', Comment.Multiline),
            (r'%.*\n', Comment.Single),
            # String literals are awkward; enter separate state.
            (r'\(', String, 'stringliteral'),
            (r'[{}<>\[\]]', Punctuation),
            # Numbers
            (r'<[0-9A-Fa-f]+>' + delimiter_end, Number.Hex),
            # Slight abuse: use Oct to signify any explicit base system
            (r'[0-9]+\#(\-|\+)?([0-9]+\.?|[0-9]*\.[0-9]+|[0-9]+\.[0-9]*)'
             r'((e|E)[0-9]+)?' + delimiter_end, Number.Oct),
            (r'(\-|\+)?([0-9]+\.?|[0-9]*\.[0-9]+|[0-9]+\.[0-9]*)((e|E)[0-9]+)?'
             + delimiter_end, Number.Float),
            (r'(\-|\+)?[0-9]+' + delimiter_end, Number.Integer),
            # References
            (r'\/%s' % valid_name, Name.Variable),
            # Names
            (valid_name, Name.Function),      # Anything else is executed
            # These keywords taken from
            # <http://www.math.ubc.ca/~cass/graphics/manual/pdf/a1.pdf>
            # Is there an authoritative list anywhere that doesn't involve
            # trawling documentation?
            (r'(false|true)' + delimiter_end, Keyword.Constant),
            # Conditionals / flow control
            (r'(eq|ne|g[et]|l[et]|and|or|not|if(?:else)?|for(?:all)?)'
             + delimiter_end, Keyword.Reserved),
            (words((
                'abs', 'add', 'aload', 'arc', 'arcn', 'array', 'atan', 'begin',
                'bind', 'ceiling', 'charpath', 'clip', 'closepath', 'concat',
                'concatmatrix', 'copy', 'cos', 'currentlinewidth', 'currentmatrix',
                'currentpoint', 'curveto', 'cvi', 'cvs', 'def', 'defaultmatrix',
                'dict', 'dictstackoverflow', 'div', 'dtransform', 'dup', 'end',
                'exch', 'exec', 'exit', 'exp', 'fill', 'findfont', 'floor', 'get',
                'getinterval', 'grestore', 'gsave', 'gt', 'identmatrix', 'idiv',
                'idtransform', 'index', 'invertmatrix', 'itransform', 'length',
                'lineto', 'ln', 'load', 'log', 'loop', 'matrix', 'mod', 'moveto',
                'mul', 'neg', 'newpath', 'pathforall', 'pathbbox', 'pop', 'print',
                'pstack', 'put', 'quit', 'rand', 'rangecheck', 'rcurveto', 'repeat',
                'restore', 'rlineto', 'rmoveto', 'roll', 'rotate', 'round', 'run',
                'save', 'scale', 'scalefont', 'setdash', 'setfont', 'setgray',
                'setlinecap', 'setlinejoin', 'setlinewidth', 'setmatrix',
                'setrgbcolor', 'shfill', 'show', 'showpage', 'sin', 'sqrt',
                'stack', 'stringwidth', 'stroke', 'strokepath', 'sub', 'syntaxerror',
                'transform', 'translate', 'truncate', 'typecheck', 'undefined',
                'undefinedfilename', 'undefinedresult'), suffix=delimiter_end),
             Name.Builtin),
            (r'\s+', Text),
        ],
        # Inside a (...) string; parentheses nest, so '(' pushes this state.
        'stringliteral': [
            (r'[^()\\]+', String),
            (r'\\', String.Escape, 'escape'),
            (r'\(', String, '#push'),
            (r'\)', String, '#pop'),
        ],
        # One escape sequence after a backslash inside a string literal.
        'escape': [
            (r'[0-8]{3}|n|r|t|b|f|\\|\(|\)', String.Escape, '#pop'),
            default('#pop'),
        ],
    }
class AsymptoteLexer(RegexLexer):
    """
    For `Asymptote <http://asymptote.sf.net/>`_ source code.

    .. versionadded:: 1.2
    """
    name = 'Asymptote'
    aliases = ['asy', 'asymptote']
    filenames = ['*.asy']
    mimetypes = ['text/x-asymptote']

    #: optional Comment or Whitespace
    _ws = r'(?:\s|//.*?\n|/\*.*?\*/)+'

    tokens = {
        'whitespace': [
            (r'\n', Text),
            (r'\s+', Text),
            (r'\\\n', Text),  # line continuation
            (r'//(\n|(.|\n)*?[^\\]\n)', Comment),
            (r'/(\\\n)?\*(.|\n)*?\*(\\\n)?/', Comment),
        ],
        'statements': [
            # simple string (TeX friendly)
            (r'"(\\\\|\\"|[^"])*"', String),
            # C style string (with character escapes)
            (r"'", String, 'string'),
            (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float),
            (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
            (r'0x[0-9a-fA-F]+[Ll]?', Number.Hex),
            (r'0[0-7]+[Ll]?', Number.Oct),
            (r'\d+[Ll]?', Number.Integer),
            (r'[~!%^&*+=|?:<>/-]', Operator),
            (r'[()\[\],.]', Punctuation),
            (r'\b(case)(.+?)(:)', bygroups(Keyword, using(this), Text)),
            (r'(and|controls|tension|atleast|curl|if|else|while|for|do|'
             r'return|break|continue|struct|typedef|new|access|import|'
             r'unravel|from|include|quote|static|public|private|restricted|'
             r'this|explicit|true|false|null|cycle|newframe|operator)\b', Keyword),
            # Since an asy-type-name can be also an asy-function-name,
            # in the following we test if the string " [a-zA-Z]" follows
            # the Keyword.Type.
            # Of course it is not perfect !
            (r'(Braid|FitResult|Label|Legend|TreeNode|abscissa|arc|arrowhead|'
             r'binarytree|binarytreeNode|block|bool|bool3|bounds|bqe|circle|'
             r'conic|coord|coordsys|cputime|ellipse|file|filltype|frame|grid3|'
             r'guide|horner|hsv|hyperbola|indexedTransform|int|inversion|key|'
             r'light|line|linefit|marginT|marker|mass|object|pair|parabola|path|'
             r'path3|pen|picture|point|position|projection|real|revolution|'
             r'scaleT|scientific|segment|side|slice|splitface|string|surface|'
             r'tensionSpecifier|ticklocate|ticksgridT|tickvalues|transform|'
             r'transformation|tree|triangle|trilinear|triple|vector|'
             r'vertex|void)(?=\s+[a-zA-Z])', Keyword.Type),
            # Now the asy-type-name which are not asy-function-name
            # except yours !
            # Perhaps useless
            (r'(Braid|FitResult|TreeNode|abscissa|arrowhead|block|bool|bool3|'
             r'bounds|coord|frame|guide|horner|int|linefit|marginT|pair|pen|'
             r'picture|position|real|revolution|slice|splitface|ticksgridT|'
             r'tickvalues|tree|triple|vertex|void)\b', Keyword.Type),
            ('[a-zA-Z_]\w*:(?!:)', Name.Label),
            ('[a-zA-Z_]\w*', Name),
        ],
        'root': [
            include('whitespace'),
            # functions
            (r'((?:[\w*\s])+?(?:\s|\*))'  # return arguments
             r'([a-zA-Z_]\w*)'            # method name
             r'(\s*\([^;]*?\))'           # signature
             r'(' + _ws + r')(\{)',
             bygroups(using(this), Name.Function, using(this), using(this),
                      Punctuation),
             'function'),
            # function declarations
            (r'((?:[\w*\s])+?(?:\s|\*))'  # return arguments
             r'([a-zA-Z_]\w*)'            # method name
             r'(\s*\([^;]*?\))'           # signature
             r'(' + _ws + r')(;)',
             bygroups(using(this), Name.Function, using(this), using(this),
                      Punctuation)),
            default('statement'),
        ],
        # A single top-level statement, ended by ';'.
        'statement': [
            include('whitespace'),
            include('statements'),
            ('[{}]', Punctuation),
            (';', Punctuation, '#pop'),
        ],
        # Inside a function body; nested braces push/pop this state.
        'function': [
            include('whitespace'),
            include('statements'),
            (';', Punctuation),
            (r'\{', Punctuation, '#push'),
            (r'\}', Punctuation, '#pop'),
        ],
        # C-style single-quoted string with escapes.
        'string': [
            (r"'", String, '#pop'),
            (r'\\([\\abfnrtv"\'?]|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
            (r'\n', String),
            (r"[^\\'\n]+", String),  # all other characters
            (r'\\\n', String),
            (r'\\n', String),  # line continuation
            (r'\\', String),  # stray backslash
        ],
    }

    def get_tokens_unprocessed(self, text):
        # Post-process plain Name tokens, promoting names that appear in the
        # generated Asymptote builtin tables to Name.Function/Name.Variable.
        from pygments.lexers._asy_builtins import ASYFUNCNAME, ASYVARNAME
        for index, token, value in \
                RegexLexer.get_tokens_unprocessed(self, text):
            if token is Name and value in ASYFUNCNAME:
                token = Name.Function
            elif token is Name and value in ASYVARNAME:
                token = Name.Variable
            yield index, token, value
def _shortened(word):
dpos = word.find('$')
return '|'.join(word[:dpos] + word[dpos+1:i] + r'\b'
for i in range(len(word), dpos, -1))
def _shortened_many(*words):
return '|'.join(map(_shortened, words))
class GnuplotLexer(RegexLexer):
    """
    For `Gnuplot <http://gnuplot.info/>`_ plotting scripts.

    .. versionadded:: 0.11
    """
    name = 'Gnuplot'
    aliases = ['gnuplot']
    filenames = ['*.plot', '*.plt']
    mimetypes = ['text/x-gnuplot']

    # Command words below use the '$' convention expanded by _shortened():
    # 'bi$nd' matches any prefix from 'bi' up to the full word 'bind',
    # mirroring gnuplot's own command-abbreviation rules. Most commands
    # transition into a command-specific argument state.
    tokens = {
        'root': [
            include('whitespace'),
            (_shortened('bi$nd'), Keyword, 'bind'),
            (_shortened_many('ex$it', 'q$uit'), Keyword, 'quit'),
            (_shortened('f$it'), Keyword, 'fit'),
            (r'(if)(\s*)(\()', bygroups(Keyword, Text, Punctuation), 'if'),
            (r'else\b', Keyword),
            (_shortened('pa$use'), Keyword, 'pause'),
            (_shortened_many('p$lot', 'rep$lot', 'sp$lot'), Keyword, 'plot'),
            (_shortened('sa$ve'), Keyword, 'save'),
            (_shortened('se$t'), Keyword, ('genericargs', 'optionarg')),
            (_shortened_many('sh$ow', 'uns$et'),
             Keyword, ('noargs', 'optionarg')),
            (_shortened_many('low$er', 'ra$ise', 'ca$ll', 'cd$', 'cl$ear',
                             'h$elp', '\\?$', 'hi$story', 'l$oad', 'pr$int',
                             'pwd$', 're$read', 'res$et', 'scr$eendump',
                             'she$ll', 'sy$stem', 'up$date'),
             Keyword, 'genericargs'),
            (_shortened_many('pwd$', 're$read', 'res$et', 'scr$eendump',
                             'she$ll', 'test$'),
             Keyword, 'noargs'),
            # Variable and function(...) definitions.
            ('([a-zA-Z_]\w*)(\s*)(=)',
             bygroups(Name.Variable, Text, Operator), 'genericargs'),
            ('([a-zA-Z_]\w*)(\s*\(.*?\)\s*)(=)',
             bygroups(Name.Function, Text, Operator), 'genericargs'),
            (r'@[a-zA-Z_]\w*', Name.Constant),  # macros
            (r';', Keyword),
        ],
        'comment': [
            (r'[^\\\n]', Comment),
            (r'\\\n', Comment),
            (r'\\', Comment),
            # don't add the newline to the Comment token
            default('#pop'),
        ],
        'whitespace': [
            ('#', Comment, 'comment'),
            (r'[ \t\v\f]+', Text),
        ],
        'noargs': [
            include('whitespace'),
            # semicolon and newline end the argument list
            (r';', Punctuation, '#pop'),
            (r'\n', Text, '#pop'),
        ],
        'dqstring': [
            (r'"', String, '#pop'),
            (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
            (r'[^\\"\n]+', String),  # all other characters
            (r'\\\n', String),  # line continuation
            (r'\\', String),  # stray backslash
            (r'\n', String, '#pop'),  # newline ends the string too
        ],
        'sqstring': [
            (r"''", String),  # escaped single quote
            (r"'", String, '#pop'),
            (r"[^\\'\n]+", String),  # all other characters
            (r'\\\n', String),  # line continuation
            (r'\\', String),  # normal backslash
            (r'\n', String, '#pop'),  # newline ends the string too
        ],
        # Default argument state: strings, numbers, operators, names.
        'genericargs': [
            include('noargs'),
            (r'"', String, 'dqstring'),
            (r"'", String, 'sqstring'),
            (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+', Number.Float),
            (r'(\d+\.\d*|\.\d+)', Number.Float),
            (r'-?\d+', Number.Integer),
            ('[,.~!%^&*+=|?:<>/-]', Operator),
            ('[{}()\[\]]', Punctuation),
            (r'(eq|ne)\b', Operator.Word),
            (r'([a-zA-Z_]\w*)(\s*)(\()',
             bygroups(Name.Function, Text, Punctuation)),
            (r'[a-zA-Z_]\w*', Name),
            (r'@[a-zA-Z_]\w*', Name.Constant),  # macros
            (r'\\\n', Text),
        ],
        # The option word that follows set/show/unset.
        'optionarg': [
            include('whitespace'),
            (_shortened_many(
                "a$ll", "an$gles", "ar$row", "au$toscale", "b$ars", "bor$der",
                "box$width", "cl$abel", "c$lip", "cn$trparam", "co$ntour", "da$ta",
                "data$file", "dg$rid3d", "du$mmy", "enc$oding", "dec$imalsign",
                "fit$", "font$path", "fo$rmat", "fu$nction", "fu$nctions", "g$rid",
                "hid$den3d", "his$torysize", "is$osamples", "k$ey", "keyt$itle",
                "la$bel", "li$nestyle", "ls$", "loa$dpath", "loc$ale", "log$scale",
                "mac$ros", "map$ping", "map$ping3d", "mar$gin", "lmar$gin",
                "rmar$gin", "tmar$gin", "bmar$gin", "mo$use", "multi$plot",
                "mxt$ics", "nomxt$ics", "mx2t$ics", "nomx2t$ics", "myt$ics",
                "nomyt$ics", "my2t$ics", "nomy2t$ics", "mzt$ics", "nomzt$ics",
                "mcbt$ics", "nomcbt$ics", "of$fsets", "or$igin", "o$utput",
                "pa$rametric", "pm$3d", "pal$ette", "colorb$ox", "p$lot",
                "poi$ntsize", "pol$ar", "pr$int", "obj$ect", "sa$mples", "si$ze",
                "st$yle", "su$rface", "table$", "t$erminal", "termo$ptions", "ti$cs",
                "ticsc$ale", "ticsl$evel", "timef$mt", "tim$estamp", "tit$le",
                "v$ariables", "ve$rsion", "vi$ew", "xyp$lane", "xda$ta", "x2da$ta",
                "yda$ta", "y2da$ta", "zda$ta", "cbda$ta", "xl$abel", "x2l$abel",
                "yl$abel", "y2l$abel", "zl$abel", "cbl$abel", "xti$cs", "noxti$cs",
                "x2ti$cs", "nox2ti$cs", "yti$cs", "noyti$cs", "y2ti$cs", "noy2ti$cs",
                "zti$cs", "nozti$cs", "cbti$cs", "nocbti$cs", "xdti$cs", "noxdti$cs",
                "x2dti$cs", "nox2dti$cs", "ydti$cs", "noydti$cs", "y2dti$cs",
                "noy2dti$cs", "zdti$cs", "nozdti$cs", "cbdti$cs", "nocbdti$cs",
                "xmti$cs", "noxmti$cs", "x2mti$cs", "nox2mti$cs", "ymti$cs",
                "noymti$cs", "y2mti$cs", "noy2mti$cs", "zmti$cs", "nozmti$cs",
                "cbmti$cs", "nocbmti$cs", "xr$ange", "x2r$ange", "yr$ange",
                "y2r$ange", "zr$ange", "cbr$ange", "rr$ange", "tr$ange", "ur$ange",
                "vr$ange", "xzeroa$xis", "x2zeroa$xis", "yzeroa$xis", "y2zeroa$xis",
                "zzeroa$xis", "zeroa$xis", "z$ero"), Name.Builtin, '#pop'),
        ],
        'bind': [
            ('!', Keyword, '#pop'),
            (_shortened('all$windows'), Name.Builtin),
            include('genericargs'),
        ],
        'quit': [
            (r'gnuplot\b', Keyword),
            include('noargs'),
        ],
        'fit': [
            (r'via\b', Name.Builtin),
            include('plot'),
        ],
        'if': [
            (r'\)', Punctuation, '#pop'),
            include('genericargs'),
        ],
        'pause': [
            (r'(mouse|any|button1|button2|button3)\b', Name.Builtin),
            (_shortened('key$press'), Name.Builtin),
            include('genericargs'),
        ],
        'plot': [
            (_shortened_many('ax$es', 'axi$s', 'bin$ary', 'ev$ery', 'i$ndex',
                             'mat$rix', 's$mooth', 'thru$', 't$itle',
                             'not$itle', 'u$sing', 'w$ith'),
             Name.Builtin),
            include('genericargs'),
        ],
        'save': [
            (_shortened_many('f$unctions', 's$et', 't$erminal', 'v$ariables'),
             Name.Builtin),
            include('genericargs'),
        ],
    }
class PovrayLexer(RegexLexer):
    """
    For `Persistence of Vision Raytracer <http://www.povray.org/>`_ files.

    .. versionadded:: 0.11
    """
    name = 'POVRay'
    aliases = ['pov']
    filenames = ['*.pov', '*.inc']
    mimetypes = ['text/x-povray']

    tokens = {
        'root': [
            (r'/\*[\w\W]*?\*/', Comment.Multiline),
            (r'//.*\n', Comment.Single),
            (r'(?s)"(?:\\.|[^"\\])+"', String.Double),
            # Language directives, all introduced by '#'.
            (words((
                'break', 'case', 'debug', 'declare', 'default', 'define', 'else',
                'elseif', 'end', 'error', 'fclose', 'fopen', 'for', 'if', 'ifdef',
                'ifndef', 'include', 'local', 'macro', 'range', 'read', 'render',
                'statistics', 'switch', 'undef', 'version', 'warning', 'while',
                'write'), prefix=r'#', suffix=r'\b'),
             Comment.Preproc),
            # The large catch-all list of POV-Ray keywords, modifiers and
            # built-in functions.
            (words((
                'aa_level', 'aa_threshold', 'abs', 'acos', 'acosh', 'adaptive', 'adc_bailout',
                'agate', 'agate_turb', 'all', 'alpha', 'ambient', 'ambient_light', 'angle',
                'aperture', 'arc_angle', 'area_light', 'asc', 'asin', 'asinh', 'assumed_gamma',
                'atan', 'atan2', 'atanh', 'atmosphere', 'atmospheric_attenuation',
                'attenuating', 'average', 'background', 'black_hole', 'blue', 'blur_samples',
                'bounded_by', 'box_mapping', 'bozo', 'break', 'brick', 'brick_size',
                'brightness', 'brilliance', 'bumps', 'bumpy1', 'bumpy2', 'bumpy3', 'bump_map',
                'bump_size', 'case', 'caustics', 'ceil', 'checker', 'chr', 'clipped_by', 'clock',
                'color', 'color_map', 'colour', 'colour_map', 'component', 'composite', 'concat',
                'confidence', 'conic_sweep', 'constant', 'control0', 'control1', 'cos', 'cosh',
                'count', 'crackle', 'crand', 'cube', 'cubic_spline', 'cylindrical_mapping',
                'debug', 'declare', 'default', 'degrees', 'dents', 'diffuse', 'direction',
                'distance', 'distance_maximum', 'div', 'dust', 'dust_type', 'eccentricity',
                'else', 'emitting', 'end', 'error', 'error_bound', 'exp', 'exponent',
                'fade_distance', 'fade_power', 'falloff', 'falloff_angle', 'false',
                'file_exists', 'filter', 'finish', 'fisheye', 'flatness', 'flip', 'floor',
                'focal_point', 'fog', 'fog_alt', 'fog_offset', 'fog_type', 'frequency', 'gif',
                'global_settings', 'glowing', 'gradient', 'granite', 'gray_threshold',
                'green', 'halo', 'hexagon', 'hf_gray_16', 'hierarchy', 'hollow', 'hypercomplex',
                'if', 'ifdef', 'iff', 'image_map', 'incidence', 'include', 'int', 'interpolate',
                'inverse', 'ior', 'irid', 'irid_wavelength', 'jitter', 'lambda', 'leopard',
                'linear', 'linear_spline', 'linear_sweep', 'location', 'log', 'looks_like',
                'look_at', 'low_error_factor', 'mandel', 'map_type', 'marble', 'material_map',
                'matrix', 'max', 'max_intersections', 'max_iteration', 'max_trace_level',
                'max_value', 'metallic', 'min', 'minimum_reuse', 'mod', 'mortar',
                'nearest_count', 'no', 'normal', 'normal_map', 'no_shadow', 'number_of_waves',
                'octaves', 'off', 'offset', 'omega', 'omnimax', 'on', 'once', 'onion', 'open',
                'orthographic', 'panoramic', 'pattern1', 'pattern2', 'pattern3',
                'perspective', 'pgm', 'phase', 'phong', 'phong_size', 'pi', 'pigment',
                'pigment_map', 'planar_mapping', 'png', 'point_at', 'pot', 'pow', 'ppm',
                'precision', 'pwr', 'quadratic_spline', 'quaternion', 'quick_color',
                'quick_colour', 'quilted', 'radial', 'radians', 'radiosity', 'radius', 'rainbow',
                'ramp_wave', 'rand', 'range', 'reciprocal', 'recursion_limit', 'red',
                'reflection', 'refraction', 'render', 'repeat', 'rgb', 'rgbf', 'rgbft', 'rgbt',
                'right', 'ripples', 'rotate', 'roughness', 'samples', 'scale', 'scallop_wave',
                'scattering', 'seed', 'shadowless', 'sin', 'sine_wave', 'sinh', 'sky', 'sky_sphere',
                'slice', 'slope_map', 'smooth', 'specular', 'spherical_mapping', 'spiral',
                'spiral1', 'spiral2', 'spotlight', 'spotted', 'sqr', 'sqrt', 'statistics', 'str',
                'strcmp', 'strength', 'strlen', 'strlwr', 'strupr', 'sturm', 'substr', 'switch', 'sys',
                't', 'tan', 'tanh', 'test_camera_1', 'test_camera_2', 'test_camera_3',
                'test_camera_4', 'texture', 'texture_map', 'tga', 'thickness', 'threshold',
                'tightness', 'tile2', 'tiles', 'track', 'transform', 'translate', 'transmit',
                'triangle_wave', 'true', 'ttf', 'turbulence', 'turb_depth', 'type',
                'ultra_wide_angle', 'up', 'use_color', 'use_colour', 'use_index', 'u_steps',
                'val', 'variance', 'vaxis_rotate', 'vcross', 'vdot', 'version', 'vlength',
                'vnormalize', 'volume_object', 'volume_rendered', 'vol_with_light',
                'vrotate', 'v_steps', 'warning', 'warp', 'water_level', 'waves', 'while', 'width',
                'wood', 'wrinkles', 'yes'), prefix=r'\b', suffix=r'\b'),
             Keyword),
            # Scene object types (primitives and CSG operations).
            (words((
                'bicubic_patch', 'blob', 'box', 'camera', 'cone', 'cubic', 'cylinder', 'difference',
                'disc', 'height_field', 'intersection', 'julia_fractal', 'lathe',
                'light_source', 'merge', 'mesh', 'object', 'plane', 'poly', 'polygon', 'prism',
                'quadric', 'quartic', 'smooth_triangle', 'sor', 'sphere', 'superellipsoid',
                'text', 'torus', 'triangle', 'union'), suffix=r'\b'),
             Name.Builtin),
            # TODO: <=, etc
            (r'[\[\](){}<>;,]', Punctuation),
            (r'[-+*/=]', Operator),
            (r'\b(x|y|z|u|v)\b', Name.Builtin.Pseudo),
            (r'[a-zA-Z_]\w*', Name),
            (r'[0-9]+\.[0-9]*', Number.Float),
            (r'\.[0-9]+', Number.Float),
            (r'[0-9]+', Number.Integer),
            (r'"(\\\\|\\"|[^"])*"', String),
            (r'\s+', Text),
        ]
    }
# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
%YAML 1.2
---
$id: http://devicetree.org/schemas/mtd/technologic,nand.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Technologic Systems NAND controller
maintainers:
- Nikita Shubin <nikita.shubin@maquefel.me>
allOf:
- $ref: nand-controller.yaml
properties:
compatible:
oneOf:
- const: technologic,ts7200-nand
- items:
- enum:
- technologic,ts7300-nand
- technologic,ts7260-nand
- technologic,ts7250-nand
- const: technologic,ts7200-nand
reg:
maxItems: 1
required:
- compatible
- reg
unevaluatedProperties: false
examples:
- |
nand-controller@60000000 {
compatible = "technologic,ts7200-nand";
reg = <0x60000000 0x8000000>;
#address-cells = <1>;
#size-cells = <0>;
nand@0 {
reg = <0>;
};
}; | unknown | github | https://github.com/torvalds/linux | Documentation/devicetree/bindings/mtd/technologic,nand.yaml |
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_COMPILER_JIT_PARTIALLY_DECLUSTER_PASS_H_
#define TENSORFLOW_COMPILER_JIT_PARTIALLY_DECLUSTER_PASS_H_
#include "tensorflow/core/common_runtime/optimization_registry.h"
namespace tensorflow {
// Clones or moves nodes from within a cluster to outside the cluster if
// profitable. There are two reasons why we do this:
//
// - Reducing device-to-host copies.
// - Reducing the number of XLA recompilations.
class PartiallyDeclusterPass : public GraphOptimizationPass {
 public:
  // Applies the partial-declustering rewrite to the graph held in `options`;
  // returns a non-OK status if the rewrite fails.
  absl::Status Run(const GraphOptimizationPassOptions& options) override;
};
} // namespace tensorflow
#endif // TENSORFLOW_COMPILER_JIT_PARTIALLY_DECLUSTER_PASS_H_ | c | github | https://github.com/tensorflow/tensorflow | tensorflow/compiler/jit/partially_decluster_pass.h |
# Copyright 2010-2011, Sikuli.org
# Released under the MIT License.
from org.sikuli.script import Screen as JScreen
import inspect
import __main__
import sys
from Region import *
from java.awt import Rectangle
DEBUG=False
class Screen(Region):
    """A Region covering one physical monitor.

    Wraps the Java-side org.sikuli.script.Screen (JScreen) and exposes it
    through the scripting-level Region API.
    """

    def __init__(self, id=None):
        # With an explicit monitor id use that monitor's bounds; otherwise
        # fall back to the default JScreen.
        if id != None:
            r = JScreen.getBounds(id)
        else:
            r = JScreen().getBounds()
        (x, y, w, h) = (int(r.getX()), int(r.getY()), \
                        int(r.getWidth()), int(r.getHeight()))
        Region.__init__(self, x, y, w, h)

    @classmethod
    def getNumberScreens(cls):
        # Number of attached monitors, as reported by the Java layer.
        return JScreen.getNumberScreens()

    def getBounds(self):
        # NOTE(review): getScreen() is presumably inherited from Region --
        # its definition is not visible in this chunk.
        return self.getScreen().getBounds()

    def selectRegion(self, msg=None):
        # Interactively let the user drag out a region, optionally showing a
        # prompt message. Returns a scripting-level Region, or None if the
        # selection produced nothing.
        if msg:
            r = self.getScreen().selectRegion(msg)
        else:
            r = self.getScreen().selectRegion()
        if r:
            return Region(r)
        else:
            return None

    def showRegion(self, region):
        # Visually highlight the given region on this screen.
        self.getScreen().showRegion(region)

    ##
    # Enters the screen-capture mode asking the user to capture a region of
    # the screen if no arguments are given.
    # If any arguments are specified, capture() automatically captures the given
    # region of the screen.
    # @param *args The args can be 4 integers: x, y, w, and h, a <a href="org/sikuli/script/Match.html">Match</a> object or a {@link #Region} object.
    # @return The path to the captured image.
    #
    def capture(self, *args):
        scr = self.getScreen()
        if len(args) == 0:
            # No arguments: interactive capture.
            simg = scr.userCapture()
            if simg:
                return simg.getFilename()
            else:
                return None
        elif len(args) == 1:
            # A string argument is used as the prompt for interactive
            # capture; any other single argument is captured directly.
            if __builtin__.type(args[0]) is types.StringType or __builtin__.type(args[0]) is types.UnicodeType:
                simg = scr.userCapture(args[0])
                if simg:
                    return simg.getFilename()
                else:
                    return None
            else:
                return scr.capture(args[0]).getFilename()
        elif len(args) == 4:
            # Four values: explicit x, y, w, h.
            return scr.capture(args[0], args[1], args[2], args[3]).getFilename()
        else:
            return None

    def toString(self):
        return self.getScreen().toString()

    def _exposeAllMethods(self, mod):
        # Injects this object's public methods into module `mod`'s globals so
        # scripts can call them unqualified. Names in exclude_list (Java
        # Object plumbing plus methods that have dedicated wrappers above)
        # are kept out of the module namespace.
        exclude_list = [ 'class', 'classDictInit', 'clone', 'equals', 'finalize',
                         'getClass', 'hashCode', 'notify', 'notifyAll',
                         'toGlobalCoord', 'toString',
                         'capture', 'selectRegion']
        dict = sys.modules[mod].__dict__
        for name in dir(self):
            if inspect.ismethod(getattr(self,name)) \
               and name[0] != '_' and name[:7] != 'super__' and \
               not name in exclude_list:
                if DEBUG: print "expose " + name
                dict[name] = eval("self."+name)
                #__main__.__dict__[name] = eval("self."+name)
package kotlinx.coroutines.reactive
import kotlinx.coroutines.InternalCoroutinesApi
import org.reactivestreams.Publisher
import kotlin.coroutines.CoroutineContext
/** @suppress */
@InternalCoroutinesApi
public interface ContextInjector {
    /**
     * Injects the `ReactorContext` element from the given [coroutineContext] into the subscriber
     * context of the [publisher], returning the resulting publisher.
     *
     * This API is used as an indirection layer between the `reactive` and `reactor` modules.
     */
    public fun <T> injectCoroutineContext(publisher: Publisher<T>, coroutineContext: CoroutineContext): Publisher<T>
}
/*
* Copyright (C) Igor Sysoev
* Copyright (C) Nginx, Inc.
*/
#ifndef _NGX_THREAD_H_INCLUDED_
#define _NGX_THREAD_H_INCLUDED_

#include <ngx_config.h>
#include <ngx_core.h>

/* Win32 thread abstraction: a thread is identified by a HANDLE and a
   thread routine returns a DWORD, matching CreateThread() semantics. */
typedef HANDLE ngx_tid_t;
typedef DWORD ngx_thread_value_t;

/* Starts a new thread running func(arg); errors are reported via the
   returned ngx_err_t and logged through `log`. */
ngx_err_t ngx_create_thread(ngx_tid_t *tid,
    ngx_thread_value_t (__stdcall *func)(void *arg), void *arg, ngx_log_t *log);

/* Current thread id, for logging; printed with NGX_TID_T_FMT. */
#define ngx_log_tid GetCurrentThreadId()
#define NGX_TID_T_FMT "%ud"

#endif /* _NGX_THREAD_H_INCLUDED_ */
"""
Classes to represent the default SQL aggregate functions
"""
class AggregateField(object):
    """Minimal stand-in for a model field.

    Carries only an internal type name, which the database backend's
    data-conversion layer uses to decide how to coerce values produced
    by aggregate functions.
    """

    def __init__(self, internal_type):
        self.internal_type = internal_type

    def get_internal_type(self):
        """Return the internal field type name this mockup represents."""
        return self.internal_type
# Shared field mockups: ordinal aggregates (e.g. COUNT) always coerce to
# integers, computed aggregates (e.g. AVG, STDDEV) always coerce to floats.
ordinal_aggregate_field = AggregateField('IntegerField')
computed_aggregate_field = AggregateField('FloatField')
class Aggregate(object):
    """
    Default SQL Aggregate.

    Subclasses set ``sql_function`` (and optionally override
    ``sql_template``, ``is_ordinal`` or ``is_computed``) to describe a
    concrete SQL aggregate such as COUNT or AVG.
    """
    is_ordinal = False
    is_computed = False
    sql_template = '%(function)s(%(field)s)'

    def __init__(self, col, source=None, is_summary=False, **extra):
        """Instantiate an SQL aggregate.

        * col: column reference for the aggregated field -- either an
          alias string or a (table, column) tuple.
        * source: the underlying field or aggregate definition for the
          column reference; unless the aggregate forces an ordinal or
          computed output type, it determines the coerced output type.
        * extra: additional substitutions made available to sql_template.
        """
        self.col = col
        self.source = source
        self.is_summary = is_summary
        self.extra = extra

        # Walk the chain of aggregate sources until an actual field (or a
        # forced output type) is found.  Values retrieved from the
        # database are coerced to that field's type.
        candidate = self
        while candidate and isinstance(candidate, Aggregate):
            if getattr(candidate, 'is_ordinal', False):
                candidate = ordinal_aggregate_field
            elif getattr(candidate, 'is_computed', False):
                candidate = computed_aggregate_field
            else:
                candidate = candidate.source
        self.field = candidate

    def relabel_aliases(self, change_map):
        """Rewrite the table alias of a (table, column) reference."""
        col = self.col
        if isinstance(col, (list, tuple)):
            self.col = (change_map.get(col[0], col[0]), col[1])

    def as_sql(self, qn, connection):
        "Return the aggregate, rendered as SQL."
        col = self.col
        if hasattr(col, 'as_sql'):
            # The column knows how to render itself (e.g. another query
            # expression).
            field_name = col.as_sql(qn, connection)
        elif isinstance(col, (list, tuple)):
            # (table, column) pair: quote each part with qn.
            field_name = '.'.join([qn(part) for part in col])
        else:
            field_name = col

        params = {
            'function': self.sql_function,
            'field': field_name,
        }
        params.update(self.extra)
        return self.sql_template % params
class Avg(Aggregate):
    # AVG always yields a float regardless of the input column type.
    is_computed = True
    sql_function = 'AVG'
class Count(Aggregate):
    """COUNT aggregate, optionally counting DISTINCT values."""
    # COUNT always yields an integer.
    is_ordinal = True
    sql_function = 'COUNT'
    sql_template = '%(function)s(%(distinct)s%(field)s)'

    def __init__(self, col, distinct=False, **extra):
        # Hand the template either the 'DISTINCT ' keyword prefix or an
        # empty string, depending on the flag.
        if distinct:
            marker = 'DISTINCT '
        else:
            marker = ''
        super(Count, self).__init__(col, distinct=marker, **extra)
# MAX/MIN keep the type of the underlying column (neither ordinal nor
# computed), so no output-type flags are set.
class Max(Aggregate):
    sql_function = 'MAX'
class Min(Aggregate):
    sql_function = 'MIN'
class StdDev(Aggregate):
    """Standard deviation aggregate; always yields a float."""
    is_computed = True

    def __init__(self, col, sample=False, **extra):
        # Choose the sample or population variant of the SQL function.
        super(StdDev, self).__init__(col, **extra)
        if sample:
            self.sql_function = 'STDDEV_SAMP'
        else:
            self.sql_function = 'STDDEV_POP'
class Sum(Aggregate):
    # SUM keeps the type of the underlying column.
    sql_function = 'SUM'
class Variance(Aggregate):
    """Variance aggregate; always yields a float."""
    is_computed = True

    def __init__(self, col, sample=False, **extra):
        # Choose the sample or population variant of the SQL function.
        super(Variance, self).__init__(col, **extra)
        if sample:
            self.sql_function = 'VAR_SAMP'
        else:
            self.sql_function = 'VAR_POP'
// C Extension module to test pycore_lock.h API
#include "parts.h"
#include "pycore_lock.h"
#include "pycore_pythread.h" // PyThread_get_thread_ident_ex()
#include "clinic/test_lock.c.h"
#ifdef MS_WINDOWS
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#else
#include <unistd.h> // usleep()
#endif
/*[clinic input]
module _testinternalcapi
[clinic start generated code]*/
/*[clinic end generated code: output=da39a3ee5e6b4b0d input=7bb583d8c9eb9a78]*/
/* Portable millisecond sleep: Sleep() takes milliseconds on Windows,
   usleep() takes microseconds elsewhere. */
static void
pysleep(int ms)
{
#ifdef MS_WINDOWS
    Sleep(ms);
#else
    usleep(ms * 1000);
#endif
}
/* Smoke test: a zero-initialized PyMutex can be locked and unlocked
   without contention.  Per the asserts, _bits is 1 while held and 0
   when free. */
static PyObject *
test_lock_basic(PyObject *self, PyObject *obj)
{
    PyMutex m = (PyMutex){0};

    // uncontended lock and unlock
    PyMutex_Lock(&m);
    assert(m._bits == 1);
    PyMutex_Unlock(&m);
    assert(m._bits == 0);
    Py_RETURN_NONE;
}
/* Shared state for test_lock_two_threads: the contended mutex, a
   completion event, and a flag showing the worker has started. */
struct test_lock2_data {
    PyMutex m;
    PyEvent done;
    int started;
};

/* Worker: announce startup, block until the mutex is acquired, then
   release it and signal completion. */
static void
lock_thread(void *arg)
{
    struct test_lock2_data *test_data = arg;
    PyMutex *m = &test_data->m;
    _Py_atomic_store_int(&test_data->started, 1);

    PyMutex_Lock(m);
    // gh-135641: in rare cases the lock may still have `_Py_HAS_PARKED` set
    // (m->_bits == 3) due to bucket collisions in the parking lot hash table
    // between this mutex and the `test_data.done` event.
    assert(m->_bits == 1 || m->_bits == 3);
    PyMutex_Unlock(m);
    assert(m->_bits == 0);

    _PyEvent_Notify(&test_data->done);
}
/* Contended case: the main thread holds the mutex while a second thread
   blocks on it, so the "has parked" bit (value 2) must eventually be set
   alongside the "locked" bit. */
static PyObject *
test_lock_two_threads(PyObject *self, PyObject *obj)
{
    // lock attempt by two threads
    struct test_lock2_data test_data;
    memset(&test_data, 0, sizeof(test_data));

    PyMutex_Lock(&test_data.m);
    assert(test_data.m._bits == 1);

    PyThread_start_new_thread(lock_thread, &test_data);

    // wait up to two seconds for the lock_thread to attempt to lock "m"
    int iters = 0;
    uint8_t v;
    do {
        pysleep(10);  // allow some time for the other thread to try to lock
        v = _Py_atomic_load_uint8_relaxed(&test_data.m._bits);
        assert(v == 1 || v == 3);
        iters++;
    } while (v != 3 && iters < 200);

    // both the "locked" and the "has parked" bits should be set
    v = _Py_atomic_load_uint8_relaxed(&test_data.m._bits);
    assert(v == 3);

    PyMutex_Unlock(&test_data.m);
    PyEvent_Wait(&test_data.done);
    assert(test_data.m._bits == 0);
    Py_RETURN_NONE;
}
#define COUNTER_THREADS 5
#define COUNTER_ITERS 10000

/* Mutex-protected shared counter for the rapid lock/unlock stress test. */
struct test_data_counter {
    PyMutex m;
    Py_ssize_t counter;
};

/* Per-thread view of the shared data plus a per-thread done event. */
struct thread_data_counter {
    struct test_data_counter *test_data;
    PyEvent done_event;
};

/* Worker: increment the shared counter COUNTER_ITERS times, taking the
   mutex around each increment, then signal completion. */
static void
counter_thread(void *arg)
{
    struct thread_data_counter *thread_data = arg;
    struct test_data_counter *test_data = thread_data->test_data;

    for (Py_ssize_t i = 0; i < COUNTER_ITERS; i++) {
        PyMutex_Lock(&test_data->m);
        test_data->counter++;
        PyMutex_Unlock(&test_data->m);
    }
    _PyEvent_Notify(&thread_data->done_event);
}
/* Stress test: COUNTER_THREADS threads each perform COUNTER_ITERS
   lock-protected increments; the final count must be exact, proving
   mutual exclusion. */
static PyObject *
test_lock_counter(PyObject *self, PyObject *obj)
{
    // Test with rapidly locking and unlocking mutex
    struct test_data_counter test_data;
    memset(&test_data, 0, sizeof(test_data));

    struct thread_data_counter thread_data[COUNTER_THREADS];
    memset(&thread_data, 0, sizeof(thread_data));

    for (Py_ssize_t i = 0; i < COUNTER_THREADS; i++) {
        thread_data[i].test_data = &test_data;
        PyThread_start_new_thread(counter_thread, &thread_data[i]);
    }

    for (Py_ssize_t i = 0; i < COUNTER_THREADS; i++) {
        PyEvent_Wait(&thread_data[i].done_event);
    }

    assert(test_data.counter == COUNTER_THREADS * COUNTER_ITERS);
    Py_RETURN_NONE;
}
#define SLOW_COUNTER_ITERS 100

/* Worker variant that occasionally sleeps while holding the mutex,
   creating "long" critical sections that exercise lock handoff. */
static void
slow_counter_thread(void *arg)
{
    struct thread_data_counter *thread_data = arg;
    struct test_data_counter *test_data = thread_data->test_data;

    for (Py_ssize_t i = 0; i < SLOW_COUNTER_ITERS; i++) {
        PyMutex_Lock(&test_data->m);
        if (i % 7 == 0) {
            pysleep(2);
        }
        test_data->counter++;
        PyMutex_Unlock(&test_data->m);
    }
    _PyEvent_Notify(&thread_data->done_event);
}
/* Like test_lock_counter but with occasional long critical sections,
   which trigger direct handoff of the lock to a waiting thread. */
static PyObject *
test_lock_counter_slow(PyObject *self, PyObject *obj)
{
    // Test lock/unlock with occasional "long" critical section, which will
    // trigger handoff of the lock.
    struct test_data_counter test_data;
    memset(&test_data, 0, sizeof(test_data));

    struct thread_data_counter thread_data[COUNTER_THREADS];
    memset(&thread_data, 0, sizeof(thread_data));

    for (Py_ssize_t i = 0; i < COUNTER_THREADS; i++) {
        thread_data[i].test_data = &test_data;
        PyThread_start_new_thread(slow_counter_thread, &thread_data[i]);
    }

    for (Py_ssize_t i = 0; i < COUNTER_THREADS; i++) {
        PyEvent_Wait(&thread_data[i].done_event);
    }

    assert(test_data.counter == COUNTER_THREADS * SLOW_COUNTER_ITERS);
    Py_RETURN_NONE;
}
/* Shared benchmark state.  The padding separates the control fields from
   the contended fields -- presumably to keep them on different cache
   lines (TODO confirm intent). */
struct bench_data_locks {
    int stop;
    int use_pymutex;
    int critical_section_length;
    char padding[200];
    PyThread_type_lock lock;
    PyMutex m;
    double value;
    Py_ssize_t total_iters;
};

/* Per-thread benchmark bookkeeping: iteration count and a done event. */
struct bench_thread_data {
    struct bench_data_locks *bench_data;
    Py_ssize_t iters;
    PyEvent done;
};
/* Benchmark worker: repeatedly acquire either the PyMutex or the legacy
   PyThread lock, run the configured critical section, and count
   iterations until the stop flag is set. */
static void
thread_benchmark_locks(void *arg)
{
    struct bench_thread_data *thread_data = arg;
    struct bench_data_locks *bench_data = thread_data->bench_data;
    int use_pymutex = bench_data->use_pymutex;
    int critical_section_length = bench_data->critical_section_length;

    double my_value = 1.0;
    Py_ssize_t iters = 0;
    while (!_Py_atomic_load_int_relaxed(&bench_data->stop)) {
        if (use_pymutex) {
            PyMutex_Lock(&bench_data->m);
            for (int i = 0; i < critical_section_length; i++) {
                bench_data->value += my_value;
                my_value = bench_data->value;
            }
            PyMutex_Unlock(&bench_data->m);
        }
        else {
            PyThread_acquire_lock(bench_data->lock, 1);
            for (int i = 0; i < critical_section_length; i++) {
                bench_data->value += my_value;
                my_value = bench_data->value;
            }
            PyThread_release_lock(bench_data->lock);
        }
        iters++;
    }
    thread_data->iters = iters;
    _Py_atomic_add_ssize(&bench_data->total_iters, iters);
    _PyEvent_Notify(&thread_data->done);
}
/*[clinic input]
_testinternalcapi.benchmark_locks

    num_threads: Py_ssize_t
    use_pymutex: bool = True
    critical_section_length: int = 1
    time_ms: int = 1000
    /

[clinic start generated code]*/

static PyObject *
_testinternalcapi_benchmark_locks_impl(PyObject *module,
                                       Py_ssize_t num_threads,
                                       int use_pymutex,
                                       int critical_section_length,
                                       int time_ms)
/*[clinic end generated code: output=381df8d7e9a74f18 input=f3aeaf688738c121]*/
{
    // Run from Tools/lockbench/lockbench.py
    // Based on the WebKit lock benchmarks:
    // https://github.com/WebKit/WebKit/blob/main/Source/WTF/benchmarks/LockSpeedTest.cpp
    // See also https://webkit.org/blog/6161/locking-in-webkit/
    PyObject *thread_iters = NULL;
    PyObject *res = NULL;

    struct bench_data_locks bench_data;
    memset(&bench_data, 0, sizeof(bench_data));
    bench_data.use_pymutex = use_pymutex;
    bench_data.critical_section_length = critical_section_length;

    // The PyThread lock is allocated even when benchmarking PyMutex so
    // cleanup at "exit" is uniform.
    bench_data.lock = PyThread_allocate_lock();
    if (bench_data.lock == NULL) {
        return PyErr_NoMemory();
    }

    struct bench_thread_data *thread_data = NULL;
    thread_data = PyMem_Calloc(num_threads, sizeof(*thread_data));
    if (thread_data == NULL) {
        PyErr_NoMemory();
        goto exit;
    }

    thread_iters = PyList_New(num_threads);
    if (thread_iters == NULL) {
        goto exit;
    }

    PyTime_t start, end;
    if (PyTime_PerfCounter(&start) < 0) {
        goto exit;
    }

    for (Py_ssize_t i = 0; i < num_threads; i++) {
        thread_data[i].bench_data = &bench_data;
        PyThread_start_new_thread(thread_benchmark_locks, &thread_data[i]);
    }

    // Let the threads run for `time_ms` milliseconds
    pysleep(time_ms);
    _Py_atomic_store_int(&bench_data.stop, 1);

    // Wait for the threads to finish
    for (Py_ssize_t i = 0; i < num_threads; i++) {
        PyEvent_Wait(&thread_data[i].done);
    }

    Py_ssize_t total_iters = bench_data.total_iters;
    if (PyTime_PerfCounter(&end) < 0) {
        goto exit;
    }

    // Return the total number of acquisitions and the number of acquisitions
    // for each thread.
    for (Py_ssize_t i = 0; i < num_threads; i++) {
        PyObject *iter = PyLong_FromSsize_t(thread_data[i].iters);
        if (iter == NULL) {
            goto exit;
        }
        PyList_SET_ITEM(thread_iters, i, iter);
    }

    // PyTime_t is in nanoseconds, so this rate is acquisitions/second.
    assert(end != start);
    double rate = total_iters * 1e9 / (end - start);
    res = Py_BuildValue("(dO)", rate, thread_iters);

exit:
    PyThread_free_lock(bench_data.lock);
    PyMem_Free(thread_data);
    Py_XDECREF(thread_iters);
    return res;
}
/* Sanity check only: run the benchmark briefly (1 thread, 100 ms) and
   verify it completes without error; the result value is discarded. */
static PyObject *
test_lock_benchmark(PyObject *module, PyObject *obj)
{
    // Just make sure the benchmark runs without crashing
    PyObject *res = _testinternalcapi_benchmark_locks_impl(
        module, 1, 1, 1, 100);
    if (res == NULL) {
        return NULL;
    }
    Py_DECREF(res);
    Py_RETURN_NONE;
}
/* One-time-init callback used by test_lock_once: fails (-1) on the first
   four invocations, succeeds (0) on the fifth.  `arg` points to the call
   counter, incremented on every invocation. */
static int
init_maybe_fail(void *arg)
{
    int *call_count = (int *)arg;
    *call_count += 1;
    if (*call_count >= 5) {
        assert(*call_count == 5);
        return 0;
    }
    /* simulated failure for the first four calls */
    return -1;
}
/* _PyOnceFlag must keep retrying a failing initializer and, once it
   succeeds, never call it again. */
static PyObject *
test_lock_once(PyObject *self, PyObject *obj)
{
    _PyOnceFlag once = {0};
    int counter = 0;
    for (int i = 0; i < 10; i++) {
        int res = _PyOnceFlag_CallOnce(&once, init_maybe_fail, &counter);
        if (i < 4) {
            // initializer fails on the first four attempts
            assert(res == -1);
        }
        else {
            // succeeds on the fifth attempt and is not invoked afterwards
            assert(res == 0);
            assert(counter == 5);
        }
    }
    Py_RETURN_NONE;
}
/* Shared state for the read/write lock test.  step1..step3 gate the
   reader/writer threads through the scripted acquire/release sequence;
   `done` fires when the last thread finishes. */
struct test_rwlock_data {
    Py_ssize_t nthreads;
    _PyRWMutex rw;
    PyEvent step1;
    PyEvent step2;
    PyEvent step3;
    PyEvent done;
};

/* Reader: hold the lock in read mode across step1, reacquire it across
   step3, then decrement nthreads and signal done if last. */
static void
rdlock_thread(void *arg)
{
    struct test_rwlock_data *test_data = arg;

    // Acquire the lock in read mode
    _PyRWMutex_RLock(&test_data->rw);
    PyEvent_Wait(&test_data->step1);
    _PyRWMutex_RUnlock(&test_data->rw);

    _PyRWMutex_RLock(&test_data->rw);
    PyEvent_Wait(&test_data->step3);
    _PyRWMutex_RUnlock(&test_data->rw);

    if (_Py_atomic_add_ssize(&test_data->nthreads, -1) == 1) {
        _PyEvent_Notify(&test_data->done);
    }
}
/* Writer: hold the lock in write mode across step2, then decrement
   nthreads and signal done if last. */
static void
wrlock_thread(void *arg)
{
    struct test_rwlock_data *test_data = arg;

    // First acquire the lock in write mode
    _PyRWMutex_Lock(&test_data->rw);
    PyEvent_Wait(&test_data->step2);
    _PyRWMutex_Unlock(&test_data->rw);

    if (_Py_atomic_add_ssize(&test_data->nthreads, -1) == 1) {
        _PyEvent_Notify(&test_data->done);
    }
}
/* Poll *ptr every 10 ms until it equals `value`, giving up after roughly
   two seconds (200 iterations); callers assert the final state. */
static void
wait_until(uintptr_t *ptr, uintptr_t value)
{
    // wait up to two seconds for *ptr == value
    int iters = 0;
    uintptr_t bits;
    do {
        pysleep(10);
        bits = _Py_atomic_load_uintptr(ptr);
        iters++;
    } while (bits != value && iters < 200);
}
/* Scripted reader/writer interleaving.  The expected rw.bits values are
   asserted at each step (e.g. 8 with two readers, 10 with two readers
   plus a queued writer, 3 with the writer holding the lock and readers
   parked); see _PyRWMutex for the exact bit encoding. */
static PyObject *
test_lock_rwlock(PyObject *self, PyObject *obj)
{
    struct test_rwlock_data test_data = {.nthreads = 3};

    _PyRWMutex_Lock(&test_data.rw);
    assert(test_data.rw.bits == 1);

    _PyRWMutex_Unlock(&test_data.rw);
    assert(test_data.rw.bits == 0);

    // Start two readers
    PyThread_start_new_thread(rdlock_thread, &test_data);
    PyThread_start_new_thread(rdlock_thread, &test_data);

    // wait up to two seconds for the threads to attempt to read-lock "rw"
    wait_until(&test_data.rw.bits, 8);
    assert(test_data.rw.bits == 8);

    // start writer (while readers hold lock)
    PyThread_start_new_thread(wrlock_thread, &test_data);
    wait_until(&test_data.rw.bits, 10);
    assert(test_data.rw.bits == 10);

    // readers release lock, writer should acquire it
    _PyEvent_Notify(&test_data.step1);
    wait_until(&test_data.rw.bits, 3);
    assert(test_data.rw.bits == 3);

    // writer releases lock, readers acquire it
    _PyEvent_Notify(&test_data.step2);
    wait_until(&test_data.rw.bits, 8);
    assert(test_data.rw.bits == 8);

    // readers release lock again
    _PyEvent_Notify(&test_data.step3);
    wait_until(&test_data.rw.bits, 0);
    assert(test_data.rw.bits == 0);

    PyEvent_Wait(&test_data.done);
    Py_RETURN_NONE;
}
/* A recursive mutex may be locked twice by the same thread; the nesting
   level counts extra acquisitions, and ownership (thread id) is cleared
   only after the matching number of unlocks. */
static PyObject *
test_lock_recursive(PyObject *self, PyObject *obj)
{
    _PyRecursiveMutex m = (_PyRecursiveMutex){0};
    assert(!_PyRecursiveMutex_IsLockedByCurrentThread(&m));

    _PyRecursiveMutex_Lock(&m);
    assert(m.thread == PyThread_get_thread_ident_ex());
    assert(PyMutex_IsLocked(&m.mutex));
    assert(m.level == 0);

    _PyRecursiveMutex_Lock(&m);
    assert(m.level == 1);
    _PyRecursiveMutex_Unlock(&m);

    _PyRecursiveMutex_Unlock(&m);
    assert(m.thread == 0);
    assert(!PyMutex_IsLocked(&m.mutex));
    assert(m.level == 0);

    Py_RETURN_NONE;
}
/* Method table exposing the lock tests to Python as module functions. */
static PyMethodDef test_methods[] = {
    {"test_lock_basic", test_lock_basic, METH_NOARGS},
    {"test_lock_two_threads", test_lock_two_threads, METH_NOARGS},
    {"test_lock_counter", test_lock_counter, METH_NOARGS},
    {"test_lock_counter_slow", test_lock_counter_slow, METH_NOARGS},
    _TESTINTERNALCAPI_BENCHMARK_LOCKS_METHODDEF
    {"test_lock_benchmark", test_lock_benchmark, METH_NOARGS},
    {"test_lock_once", test_lock_once, METH_NOARGS},
    {"test_lock_rwlock", test_lock_rwlock, METH_NOARGS},
    {"test_lock_recursive", test_lock_recursive, METH_NOARGS},
    {NULL, NULL} /* sentinel */
};

/* Registers the lock test functions on the _testinternalcapi module.
   Returns 0 on success, -1 with an exception set on failure. */
int
_PyTestInternalCapi_Init_Lock(PyObject *mod)
{
    if (PyModule_AddFunctions(mod, test_methods) < 0) {
        return -1;
    }
    return 0;
}
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from webob import exc
from senlin.api.common import util
from senlin.api.common import wsgi
from senlin.common import consts
from senlin.common.i18n import _
from senlin.common import utils
class ActionData(object):
    """All required data fields for an action."""

    # Keys that are mandatory (or reserved) in the request body.
    PARAMS = (consts.ACTION_NAME, consts.ACTION_TARGET, consts.ACTION_ACTION)

    def __init__(self, data):
        self.data = data

    def name(self):
        """Return the action name, or raise HTTPBadRequest if missing."""
        if consts.ACTION_NAME not in self.data:
            raise exc.HTTPBadRequest(_("No action name specified"))
        return self.data[consts.ACTION_NAME]

    def target(self):
        """Return the action target, or raise HTTPBadRequest if missing."""
        if consts.ACTION_TARGET not in self.data:
            raise exc.HTTPBadRequest(_("No target specified"))
        return self.data[consts.ACTION_TARGET]

    def action(self):
        """Return the action verb, or raise HTTPBadRequest if missing."""
        if consts.ACTION_ACTION not in self.data:
            raise exc.HTTPBadRequest(_("No action specified"))
        return self.data[consts.ACTION_ACTION]

    def params(self):
        """Return every entry that is not one of the required fields."""
        return dict((k, v) for k, v in self.data.items()
                    if k not in self.PARAMS)
class ActionController(wsgi.Controller):
    """WSGI controller for Actions in Senlin v1 API."""

    # Define request scope (must match what is in policy.json)
    REQUEST_SCOPE = 'actions'

    @util.policy_enforce
    def index(self, req):
        """List actions, honoring filter and paging query parameters."""
        filter_whitelist = {
            consts.ACTION_NAME: 'mixed',
            consts.ACTION_TARGET: 'mixed',
            consts.ACTION_ACTION: 'mixed',
            consts.ACTION_STATUS: 'mixed',
        }
        param_whitelist = {
            consts.PARAM_LIMIT: 'single',
            consts.PARAM_MARKER: 'single',
            consts.PARAM_SORT: 'single',
            consts.PARAM_GLOBAL_PROJECT: 'single',
        }
        # Reject any query key that is neither a paging/sorting parameter
        # nor a recognized filter.
        for key in req.params.keys():
            if key not in param_whitelist and key not in filter_whitelist:
                raise exc.HTTPBadRequest(_('Invalid parameter %s') % key)

        params = util.get_allowed_params(req.params, param_whitelist)
        filters = util.get_allowed_params(req.params, filter_whitelist)

        key = consts.PARAM_LIMIT
        if key in params:
            params[key] = utils.parse_int_param(key, params[key])

        key = consts.PARAM_GLOBAL_PROJECT
        if key in params:
            # Translate the user-facing 'global_project' flag into the
            # internal 'project_safe' flag expected by the RPC layer.
            global_project = utils.parse_bool_param(key, params[key])
            del params[key]
            params['project_safe'] = not global_project

        if not filters:
            filters = None

        actions = self.rpc_client.action_list(req.context, filters=filters,
                                              **params)
        return {'actions': actions}

    @util.policy_enforce
    def create(self, req, body):
        """Create a new action from the request body."""
        data = ActionData(body)
        return self.rpc_client.action_create(req.context,
                                             data.name(),
                                             data.target(),
                                             data.action(),
                                             data.params())

    @util.policy_enforce
    def get(self, req, action_id):
        """Return a single action, or 404 if it does not exist."""
        found = self.rpc_client.action_get(req.context, action_id)
        if not found:
            raise exc.HTTPNotFound()
        return {'action': found}
from rest_framework import serializers
from expdj.apps.experiments.models import (Battery, CognitiveAtlasTask,
ExperimentTemplate)
class BatterySerializer(serializers.ModelSerializer):
    """Full representation of a Battery, with related objects rendered as
    their string representations."""
    owner = serializers.StringRelatedField()
    contributors = serializers.StringRelatedField(many=True)
    experiments = serializers.StringRelatedField(many=True)
    required_batteries = serializers.StringRelatedField(many=True)
    restricted_batteries = serializers.StringRelatedField(many=True)
    class Meta:
        model = Battery
        fields = [
            'name', 'description', 'credentials', 'consent', 'advertisement',
            'instructions', 'owner', 'contributors', 'experiments', 'add_date',
            'modify_date', 'maximum_time', 'number_of_experiments', 'active',
            'presentation_order', 'blacklist_active', 'blacklist_threshold',
            'bonus_active', 'required_batteries', 'restricted_batteries'
        ]
class BatteryDescriptionSerializer(serializers.ModelSerializer):
    # Lightweight variant exposing only name and description.
    class Meta:
        model = Battery
        fields = ['name', 'description']
class CognitiveAtlasTaskSerializer(serializers.HyperlinkedModelSerializer):
    # Exposes the Cognitive Atlas task name and external identifier.
    class Meta:
        model = CognitiveAtlasTask
        fields = ('name', 'cog_atlas_id')
class ExperimentTemplateSerializer(serializers.HyperlinkedModelSerializer):
    # Nested serializer: the related task is rendered inline rather than
    # as a hyperlink.
    cognitive_atlas_task = CognitiveAtlasTaskSerializer()
    class Meta:
        model = ExperimentTemplate
        fields = [
            'exp_id', 'name', 'cognitive_atlas_task', 'reference', 'version',
            'template'
        ]
#! /usr/bin/env python
from Crypto.Cipher import AES
from binascii import a2b_base64
def pkcs_7_pad(data, final_len=None):
    """Apply PKCS#7 padding to `data`.

    If final_len is None, the data is padded up to the next multiple of
    16 (a full extra block is appended when the length is already a
    multiple of 16).  Returns data extended with padding_len bytes, each
    of value padding_len.
    """
    if final_len is None:
        # Floor division: under Python 3, '/' yields a float and would
        # break both the length computation and the string repetition.
        final_len = (len(data) // 16 + 1) * 16
    padding_len = final_len - len(data)
    return data + chr(padding_len) * padding_len
def pkcs_7_unpad(data):
    """Strip PKCS#7 padding; return `data` unchanged if padding is invalid.

    A valid pad consists of padding_len trailing bytes all equal to
    padding_len, with 1 <= padding_len <= len(data).
    """
    if not data:
        # Previously this crashed on empty input (data[-1]).
        return data
    padding_len = ord(data[-1])
    # padding_len == 0 is never produced by PKCS#7; previously this case
    # fell through to data[:-0], silently returning an empty string.
    if padding_len == 0 or padding_len > len(data):
        return data
    for ch in data[-padding_len:]:
        if ord(ch) != padding_len:
            # Mismatched byte in the pad run: treat as unpadded data.
            return data
    return data[:-padding_len]
def AES_128_ECB_encrypt(data, key, pad = False):
    """ECB-encrypt `data` under `key` (16 bytes); optionally PKCS#7-pad
    first.  Without padding, len(data) must be a multiple of 16."""
    cipher = AES.new(key, AES.MODE_ECB)
    if pad:
        data = pkcs_7_pad(data)
    return cipher.encrypt(data)
def AES_128_ECB_decrypt(data, key, unpad = False):
    """ECB-decrypt `data` under `key`; optionally strip PKCS#7 padding
    from the plaintext."""
    cipher = AES.new(key, AES.MODE_ECB)
    decr = cipher.decrypt(data)
    if unpad:
        decr = pkcs_7_unpad(decr)
    return decr
def xor_data(A, B):
    """XOR A against B byte-by-byte over the length of A.

    B must be at least as long as A; extra bytes of B are ignored.
    """
    out = []
    for i, ch in enumerate(A):
        out.append(chr(ord(ch) ^ ord(B[i])))
    return ''.join(out)
def AES_128_CBC_encrypt(data, key, iv):
    """Encrypt `data` with AES-128 in CBC mode (PKCS#7 padded).

    Each plaintext block is XORed with the previous ciphertext block
    (the IV for the first block) before being ECB-encrypted.
    """
    data = pkcs_7_pad(data)
    # Floor division keeps the block count an int under Python 3, where
    # '/' would produce a float and break range().
    block_count = len(data) // 16
    encrypted_data = ''
    prev_block = iv
    for b in range(block_count):
        cur_block = data[b*16:(b+1)*16]
        encrypted_block = AES_128_ECB_encrypt(xor_data(cur_block, prev_block), key)
        encrypted_data += encrypted_block
        prev_block = encrypted_block
    return encrypted_data
def AES_128_CBC_decrypt(data, key, iv):
    """Decrypt AES-128 CBC ciphertext and strip PKCS#7 padding.

    Each decrypted block is XORed with the previous ciphertext block
    (the IV for the first block) to recover the plaintext.
    """
    # Floor division keeps the block count an int under Python 3, where
    # '/' would produce a float and break range().
    block_count = len(data) // 16
    decrypted_data = ''
    prev_block = iv
    for b in range(block_count):
        cur_block = data[b*16:(b+1)*16]
        decrypted_block = AES_128_ECB_decrypt(cur_block, key)
        decrypted_data += xor_data(decrypted_block, prev_block)
        prev_block = cur_block
    return pkcs_7_unpad(decrypted_data)
if __name__ == '__main__':
    # Round-trip self-test: decrypt(encrypt(text)) must reproduce text.
    # print() call form works under both Python 2 and Python 3.
    text = 'abcdefghijklmnopqrstuvwxyz!'
    key = 'abcdef1234567890'
    iv = '128348347dhrughdf'
    if AES_128_CBC_decrypt(AES_128_CBC_encrypt(text, key, iv), key, iv) == text:
        print("[+] CBC decrypt(encrypt(text))==text test passed")
    else:
        print("[-] CBC test failed")
#
# Copyright (C) 2016
# The Board of Trustees of the Leland Stanford Junior University
# Written by Stephane Thiell <sthiell@stanford.edu>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sasutils.scsi import BlockDevice, SCSIDevice, SCSIHost
from sasutils.sysfs import SysfsDevice
#
# SAS topology components
#
class SASPhy(SysfsDevice):
    # Thin wrapper over a sas_phy sysfs object.
    def __init__(self, device, subsys='sas_phy'):
        SysfsDevice.__init__(self, device, subsys)
class SASPort(SysfsDevice):
    """A sas_port sysfs object: groups its phys together with the end
    devices and expanders attached through it."""

    def __init__(self, device, subsys='sas_port'):
        SysfsDevice.__init__(self, device, subsys)
        self.expanders = []
        self.end_devices = []
        self.phys = [SASPhy(phy) for phy in self.device.glob('phy-*')]
        for end_device in self.device.glob('end_device-*'):
            self.end_devices.append(SASEndDevice(end_device))
        for expander in self.device.glob('expander-*'):
            self.expanders.append(SASExpander(expander))
class SASNode(SysfsDevice):
    """Base class for SAS topology nodes (hosts and expanders) that own
    ports and phys."""

    def __init__(self, device, subsys=None):
        SysfsDevice.__init__(self, device, subsys)
        self.phys = []
        self.ports = []
        for port in self.device.glob('port-*'):
            self.ports.append(SASPort(port))
        for phy in self.device.glob('phy-*'):
            self.phys.append(SASPhy(phy))

    def __repr__(self):
        return '<%s.%s %s phys=%d ports=%d>' % (self.__module__,
                                                self.__class__.__name__,
                                                self.sysfsnode.path,
                                                len(self.phys), len(self.ports))

    __str__ = __repr__

    def end_devices_by_scsi_type(self, device_type):
        """
        Iterate over end_devices (direct children) matching the given
        SCSI device type.  SCSI types are defined in the scsi module.
        """
        wanted = int(device_type)
        for port in self.ports:
            for end_device in port.end_devices:
                if int(end_device.scsi_device.attrs.type) == wanted:
                    yield end_device
class SASHost(SASNode):
    # A SAS HBA: a topology node paired with its SCSI host object.
    def __init__(self, device, subsys='sas_host'):
        SASNode.__init__(self, device, subsys)
        self.scsi_host = SCSIHost(device)
    def __str__(self):
        # NOTE: overrides SASNode's `__str__ = __repr__` with a full
        # attribute dump of the instance.
        return '<%s.%s %s>' % (self.__module__, self.__class__.__name__,
                               self.__dict__)
class SASExpander(SASNode):
    # A SAS expander: a topology node paired with its sas_device object.
    def __init__(self, device, subsys='sas_expander'):
        SASNode.__init__(self, device, subsys)
        self.sas_device = SASDevice(device)
class SASDevice(SysfsDevice):
    # Thin wrapper over a sas_device sysfs object.
    def __init__(self, device, subsys='sas_device'):
        SysfsDevice.__init__(self, device, subsys)
class SASEndDevice(SysfsDevice):
    # A SAS end device and the SCSI targets reachable through it.
    def __init__(self, device, subsys='sas_end_device'):
        SysfsDevice.__init__(self, device, subsys)
        self.sas_device = SASDevice(device)

        # a single SAS end device can handle several SCSI targets
        self.targets = [SCSIDevice(dev) for dev in
                        device.glob('target*/*[0-9]')]
#
# Other useful SAS classes
#
class SASBlockDevice(BlockDevice):
    """
    SAS-aware block device class that allows direct access to SASEndDevice.
    """

    def __init__(self, device):
        BlockDevice.__init__(self, device)
        self._end_device = None

    @property
    def end_device(self):
        """Lazily resolved SASEndDevice this block device sits behind."""
        cached = self._end_device
        if not cached:
            # Walk four levels up in sysfs to reach the end_device node.
            cached = SASEndDevice(self.sysfsnode.node('../../../..'))
            self._end_device = cached
        return cached
- Feature Name: Lazy Transaction Record Creation (a.k.a Deprecate BeginTransaction)
- Status: completed
- Start Date: 2018-12-09
- Authors: Nathan VanBenschoten
- RFC PR: #32971
- Cockroach Issue: #25437
# Summary
Remove transaction record creation from serving as the first step of every non-1PC transaction. Defer transaction record creation as late as possible in the lifetime of a transaction, often skipping a transaction record with the `PENDING` state entirely.
This will provide a performance improvement, simplify the transaction model, and pave the way for the implementation of the more ambitious [parallel commits RFC](https://github.com/cockroachdb/cockroach/blob/master/docs/RFCS/20180324_parallel_commit.md).
# Motivation
There are a few motivations for this change with varying levels of urgency.
The most important reason to make this change is that the [parallel commits RFC](https://github.com/cockroachdb/cockroach/blob/master/docs/RFCS/20180324_parallel_commit.md) doesn't work without it. Eager creation of transaction records causes a latching stall that forces the commit of a transaction record to wait on the replication of its creation (i.e. `EndTransaction` waits on `BeginTransaction`). We could try to address this through some kind of below-latch manager pipelining (previously referred to as "below CommandQueue pipelining"), but we don't currently understand how to make that work in practice. Instead, this RFC proposes that we allow "fast-enough" transactions (those that never need a heartbeat) to skip the `PENDING` state entirely. This avoids the latching stall and allows parallel commits to achieve single-round trip transactions.
The other reason to make this change is that it avoids a sizable key-value write during the first writing operation in a transaction. Writing the transaction record is always piggy-backed on to another Raft consensus round, but the extra write does increase the size of the corresponding Raft entry and results in an extra RocksDB write. Eliminating this should provide a noticeable improvement in performance of write-heavy workloads that perform explicit transactions.
Finally, removing eager transaction record creation simplifies the transaction model and is in line with the goal to eventually have a fully idempotent transaction model.
# Guide-level explanation
## The Transaction State Machine
The current transaction state machine works as follows:
1. Create the transaction record
2. Perform writes by laying down intents, pointing them at the transaction record
3. Periodically heartbeat the transaction record
4. Repeat steps 2 & 3 as many times as necessary
5. Commit the transaction record
6. Resolve all intents (asynchronously)
The new transaction state machine will be as follows:
1. Perform writes by laying down intents, pointing them at where the transaction record will be written
2. Periodically heartbeat the transaction record, creating it if necessary
3. Repeat steps 1 & 2 as many times as necessary
4. Commit the transaction record, creating it if necessary
5. Resolve all intents (asynchronously)
### Properties Preserved
1. Only the transaction coordinator that owns a transaction can create its transaction record in the `PENDING` state or create/move its transaction record in/to the `COMMITTED` state. This is important to avoid an entire class of bugs that could result in concurrent transactions "reviving" a finalized transaction.
2. Any transaction can move a transaction record to the `ABORTED` state.
3. Outside of extreme priority mismatches, a contending transaction will wait until a transaction has been inactive for at least `TxnLivenessThreshold` before aborting it. As we'll see below, this RFC actually strengthens this property.
## The Role of Transaction Records
Transaction records serve three different roles in the CockroachDB transaction model:
1. they serve as the single linearization point of a transaction. Transactions are considered `COMMITTED` or `ABORTED` when the Raft entry that changes the transaction record to one of these two states is committed. This role interacts with the `EndTransaction` and `PushTxn` request types.
2. they serve as the source of truth for the liveness of a transaction. Transaction coordinators heartbeat their transaction record and concurrent transactions observe the state of a transaction's record to learn about its disposition. This role interacts with the `HeartbeatTxn`, `PushTxn`, and `QueryTxn` request types.
3. they perform bookkeeping on the resources held by finalized (`COMMITTED` or `ABORTED`) transactions. A `COMMITTED` and sometimes an `ABORTED` transaction record will point at each of the transaction's key spans, allowing both the original transaction coordinator and other transactions to resolve and later garbage-collect the intents. This role interacts with the `ResolveIntent`, `ResolveIntentRange`, and `GC` request types.
Interestingly, there are alternative means of achieving each of these goals that give transaction records a smaller or larger role in the transaction model. For instance, the [parallel commits RFC](https://github.com/cockroachdb/cockroach/blob/master/docs/RFCS/20180324_parallel_commit.md) proposes a new linearization point for transactions. Another example is that transaction liveness checks could circumvent the transaction record entirely by talking to transaction coordinators directly.
This RFC does not make any changes to the roles that a transaction record plays in the transaction model.
## Concurrent Transactions
As mentioned in the previous section, the transaction record serves as the source of truth for the liveness of a transaction. This means that contending transactions that need to coordinate with a certain transaction look at that transaction's record to determine what to do. This typically takes the form of a variant of the `PushTxn` request. The `PushTxn` request visits the transaction record of the "pushee" and makes decisions based on its state. If it is `COMMITTED` or `ABORTED`, the request returns that to the "pusher" immediately. If it is `PENDING`, priorities are taken into account. For all but the most extreme priorities, a push blocks and the `txnwait.Queue` gets involved if a transaction record is sufficiently "active". The transactions liveness is based on its starting timestamp and its last heartbeat timestamp. If a transaction goes too long without a heartbeat, it is considered "expired" and can be aborted by any pusher.
Lazily creating transaction records adds a complication here. Now that we're no longer writing a transaction record as early as possible [1] in the lifetime of a transaction, it's very easy for a concurrent transaction to observe an intent of an ongoing transaction, push its record, and find nothing there. In fact, we expect this to be the case for any push in the first `min(txn duration, DefaultHeartbeatInterval)` of a transaction's lifetime. Currently, a push that doesn't find a record writes it as `ABORTED` and moves on. That isn't workable if we want to increase the amount of time without a transaction record.
We get around this by using the existence of an intent at a given timestamp as evidence of client-instructed activity. Instead of just considering the transaction record's original and heartbeat timestamp to determine if it is expired, we also take into consideration the intent's timestamp. We know that the client had to have been alive at the timestamp that the intent was written to issue the write [2]. This means that it is now possible for a transaction to wait on another transaction without the pushee's transaction record existing yet. This works as expected - either the transaction record is eventually written by the transaction's first heartbeat and the concurrent transaction continues waiting or the transaction record is never written and the "activity" attributed to the transaction due to the intent becomes stale enough that the transaction can be considered expired.
[1] note that we currently don't ensure that a transaction record is written before writing other intents. Instead, we allow a transaction's first writing batch to be issued in parallel with its `BeginTransaction` request. This means that the referenced race was always possible, it was just unlikely. If the transaction record was delayed, a contending push, triggered by running into one of the parallel writes, could have aborted the transaction record before it was ever laid down. This RFC will ensure that we actually handle this race properly in the future.
[2] currently clients write intents at their OrigTimestamp, regardless of their current Timestamp. There is ongoing work to make clients write intents at their current Timestamp. Writing intents at Timestamp will improve the ability for intents to convey liveness information, but it's not critical for this approach described here to work.
# Reference-level explanation
## Detailed design
### TxnCoordSender
The `TxnCoordSender` no longer sends `BeginTransaction` requests if it observes that the cluster version setting can handle lazy transaction record creation.
The `TxnCoordSender` will also need to be more careful about never sending a committing `EndTransaction` request once it has sent an aborting `EndTransaction` request. This is because the aborting `EndTransaction` request could clean up all trace of the transaction, allowing the committing `EndTransaction` request to succeed. This might already be the case today.
### BeginTransaction
Other than being marked as deprecated, this request will behave the same.
### HeartbeatTxn
`batcheval.HeartbeatTxn` will be adjusted to create transaction records that are missing in the `PENDING` state instead of throwing an error.
It is critical that we don't allow `batcheval.HeartbeatTxn` to recreate an aborted and GCed transaction record. To ensure this, we compare the transaction's `OrigTimestamp` against the `TxnSpanGCThreshold` when creating a transaction record. This `OrigTimestamp` will always be the lower bound of the transaction's activity. If we compared the transaction's "last active" time against the `TxnSpanGCThreshold` then a later heartbeat could indicate more recent client activity and slip past the `TxnSpanGCThreshold` check.
### EndTransaction
`batcheval.EndTransaction` will be adjusted to create transaction records directly in the `COMMITTED` or `ABORTED` state without requiring that one already exists in the `PENDING` state.
A similar check to that discussed above will be performed on the `TxnSpanGCThreshold`.
### PushTxn
`batcheval.PushTxn` must undergo the largest transformation with this change. It must be adjusted to be able to handle missing transaction records without immediately marking the transaction as `ABORTED`. It will do so by using the provided intent `TxnMeta` to synthesize a `PENDING` pushee transaction if it can't find a transaction record. Critically, it will never actually persist this `PENDING` transaction record, but it will act as if the transaction record is what it pulled off disk. The synthetic record's last active timestamp will be set to the timestamp of the intent for purposes of transaction expiration.
A synthetic record will be considered `ABORTED` immediately if its timestamp is beneath the Replica's `TxnSpanGCThreshold`.
`batcheval.PushTxn` handling of extant transaction records will not change.
By synthesizing transaction records based on the last active timestamp of an intent, `batcheval.PushTxn` will allow transactions to continue executing without fear of being immediately aborted even without having written a transaction record. As long as the transaction commits or begins heartbeating its transaction record within `TxnLivenessThreshold`, it will be safe from rogue aborts. Since most transactions have a duration less than `TxnLivenessThreshold`, most transactions won't ever need to write a transaction record in the `PENDING` state at all.
For most cases, the push will now fail and the pusher will enter the `txnwait.Queue`. However, there are still cases where the push will succeed, specifically with extreme priorities or expired transactions.
#### Successful PUSH_ABORT
Nothing changes here. Regardless of whether the transaction record is synthetic or not, it can be persisted if being moved directly to the `ABORTED` state. This is similar to what `batcheval.PushTxn` already does for missing transaction records.
#### Successful PUSH_TIMESTAMP
This case is more tricky. An explicit design goal here is to avoid allowing other transactions from creating a `PENDING` transaction record for a transaction. This is an important property because it simplifies a number of other state transitions.
This complicates PUSH_TIMESTAMP requests, who want to ensure that a transaction can only commit above a certain timestamp. Currently, the request does this by modifying the existing transaction record and moving its timestamp. However, with lazily creating transaction records, a PUSH_TIMESTAMP request may find no transaction record through which to convey this information.
To facilitate this behavior without allowing other transactions from writing `PENDING` transaction records, this RFC proposes that PUSH_TIMESTAMP requests use the read timestamp cache to convey this information. A successful PUSH_TIMESTAMP request will bump the read timestamp cache for the local transaction key to the desired timestamp. `EndTransaction` requests will then check the read timestamp cache (in addition to the write timestamp cache, [maybe](#replica-side-writetooold-avoidance)) and will forward their txn timestamp accordingly. This draws clear parallels to the other uses of the read timestamp cache and is much less of an abuse than the current use of the write timestamp cache to store finalized transaction IDs.
This is a rare operation, so there is little concern of it adding strain to the read timestamp cache. Frequent page rotations in the cache will have the exact same effect as they do on any other transactional writes, so that is not a concern either.
#### Transaction Record GC
There are a number of interesting cases around transaction record cleanup that arise once we allow lazily creating transaction records. Before discussing them, let's first review the current mechanics of transaction record garbage collection:
- transaction records can be GCed by two sources: `EndTransaction` requests and the GC queue.
- transaction records can be GCed if they are `COMMITTED` or `ABORTED`. If a transaction record is `PENDING` and processed by the GC queue, it will first be aborted before being GCed.
- `EndTransaction` requests can only be issued by the transaction's own coordinator, not by concurrent transactions. This property is held [even in the parallel commits RFC](https://github.com/cockroachdb/cockroach/blob/master/docs/RFCS/20180324_parallel_commit.md#transaction-record-recreation). This means that any `EndTransaction` request that GCs a transaction record was known to the transaction's coordinator.
- `COMMITTED` transaction records are only GCed after resolving all intents.
- For committing `EndTransaction` requests, this takes two paths, depending on whether all intents are resolved synchronously or not.
- The GC queue atomically bumps the `TxnSpanGCThreshold` while garbage collecting transaction records equal to or beneath that timestamp.
What this means in practice is that a non-replayed request from the transaction coordinator itself should never stumble into an already GCed transaction record without also finding itself beneath the `TxnSpanGCThreshold`. Requests could be replayed, but that is [protected against](#Replay-Protection-/-Idempotency) as well.
Things are more interesting for concurrent transactions, which have no guarantee about the state that they observe a transaction in. It is completely valid that a concurrent transaction observes a missing transaction record that was either `COMMITTED` or `ABORTED` and later GCed without having any indication of what happened to it. This is already the case, and we opt for rewriting the transaction record as `ABORTED` in that case. This isn't great, but it also won't cause any issues.
The situation becomes a little worse now that `batcheval.PushTxn` will handle (often wait for) missing transaction records. In fact, a `PushTxn` request has little way of conclusively determining whether a transaction record has not yet been written or has already been finalized and GCed. This won't cause any correctness issues, but it means that naively a `PushTxn` may end up waiting for a transaction record that has already been GCed simply because it saw activity on one of the transaction's intents within the `TxnLivenessThreshold`.
It is unclear how to avoid this issue entirely. This RFC's current thinking is a pusher should query the intent that created the `WriteIntentError` immediately upon entering the `txnwait.Queue`. All intents must be resolved by a transaction coordinator before it is allowed to GC the transaction record. As such, by verifying that the intent still exists when entering the `txnwait.Queue`, we remove a race where pushers may wait on transaction records that have already been cleaned up eagerly by their coordinator. However, this detection can be fooled by replayed writes, which can result in intents even after a transaction has been `ABORTED` or `COMMITTED`. We expect that this is a sufficiently rare case that the effect of delaying concurrent transactions up to `TxnLivenessThreshold` will be a non-issue. However, if it becomes an issue, we may need to explore alternate txn liveness check methods, like talking to transaction coordinators directly.
### QueryTxn / txnwait.Queue
The `txnwait.Queue` queries both the pusher and the pushee transaction using `QueryTxn` requests while waiting for changes to either. While doing so, the `txnwait.Queue` has access to `TxnMeta` objects for both of these transactions. Either the `QueryTxn` request or the `txnwait.Queue` itself will need to be made aware that transaction records are now created lazily. `QueryTxn` requests will also need to be adjusted to return waiting dependent transactions even for missing transaction records.
Currently the `txnwait.Queue` works properly even if the pusher does not have a transaction record but does not behave properly when the pushee does not have a transaction record. There are a few alternatives to fixing this:
- `QueryTxn` could use its provided `TxnMeta` object to synthesize a transaction record when one does not exist. This is nice from the perspective of hiding complication behind an abstraction. It is also consistent with the change to return dependent transactions for missing transaction records. However, one concern is that this might undermine the purpose of `QueryTxn` in hypothetical situations where the abstraction is undesirable.
- `txnwait.Queue` could use its provided `TxnMeta` object to synthesize a transaction record for the pushee when one is not returned from `QueryTxn`.
- `txnwait.Queue` could have an explicit code path to deal with a missing transaction record for the pushee that takes the provided `TxnMeta` object into account without explicitly synthesizing a record.
This current thinking is that the first alternative should be pursued.
### Replay Protection / Idempotency
Transaction replays are a serious concern. If a batch or a series of batches gets duplicated or lost and later shows up, we can't allow it to have adverse effects. Currently, this is guarded against by updating the write timestamp cache on `EndTransaction` requests and consulting the write timestamp cache on `BeginTransaction` requests. Because the old transaction state machine required a transaction record to commit and that a `BeginTransaction` request would fail if it ran into the write timestamp cache, this prevented replays of transactions.
Now that we no longer have `BeginTransaction` requests, things are a little different. However, the same approach can be used. We can update the write timestamp cache on `EndTransaction` requests and consult it on committing `EndTransaction` requests. This will prevent more than a single `EndTransaction` request from executing successfully.
However, if we wanted to, we could do more. We can use MVCC to enforce this replay protection for us. If the EndTransaction request is replayed alone then it is harmless to let it write a new `COMMITTED` transaction record. If any writes are replayed after a successful commit then they will necessarily hit WriteTooOld errors and will be pushed to a new timestamp. If those writes are in the same batch as the EndTransaction then they will prevent it from committing. If those writes are in different batches from the EndTransaction then they will not be resolvable.
| Initial Action | Replay | Protection |
|------------------------------|---------------------------------------|--------------------------------------------------------------|
| txn aborted by coordinator | replay commit | not allowed by client, see [TxnCoordSender](#TxnCoordSender) |
| txn aborted by coordinator | replay abort | could happen, won't cause issues |
| txn committed by coordinator | replay abort | not allowed by client, see [TxnCoordSender](#TxnCoordSender) |
| txn committed by coordinator | replay commit with writes in batch    | protected by WriteTooOld errors                              |
| txn committed by coordinator | replay commit without writes in batch | could happen, won't cause issues |
| txn aborted by other txn | commit or replay commit | hit aborted txn record or TxnSpanGCThreshold |
| txn aborted by other txn | replay abort | could happen, won't cause issues |
#### Replica-Side WriteTooOld Avoidance
Unfortunately, this currently breaks down because we have an optimization that avoids blocking transaction commit on WriteTooOld errors in two places. The first is during [1PC transaction evaluation](https://github.com/cockroachdb/cockroach/blob/8f30db0f07e940667bc34314ec6a446491a29790/pkg/storage/replica.go#L6036) and the second is during [EndTransaction evaluation](https://github.com/cockroachdb/cockroach/blob/8f30db0f07e940667bc34314ec6a446491a29790/pkg/storage/batcheval/cmd_end_transaction.go#L401). These cases allow a transaction to commit through a WriteTooOld error even without client-side approval. This breaks our ability to use MVCC to provide replay protection. It's unclear how important this optimization is as it's only applicable to heavily contended transactions with no refresh spans (i.e. blind writes). Blind writes from SQL are not very common and the optimization only saves a single round-trip to the client, who will never need an actual transaction retry because it can be trivially refreshed.
#### Commit Triggers
Above we assumed that EndTransactions without corresponding writes could harmlessly be replayed because they are side-effect free. This doesn't take commit triggers into account. If we wanted to allow this in full, we would need to make commit triggers idempotent.
#### Proposed Strategy
Because of the added complication of using MVCC to handle replays, we're opting not to pursue this approach immediately. Instead, we will simply use the write timestamp cache to protect against repeat `EndTransaction` requests.
### 1PC Transactions
Without `BeginTransaction` requests, the detection of "one-phase commit transactions" gets a little more tricky. Previously, a 1PC transaction was recognizable simply by looking for batches with `BeginTransaction` and `EndTransaction` requests together. `DistSender` made sure to never send these in the same batch unless all writes in the batch ended up on the same Range.
This detection is moderately more difficult to make efficient without `BeginTransaction`. The straightforward but wrong approach would be to compare the key bounds of each of the `IntentSpans` in the batch's `EndTransaction` with the Range it is evaluating on. If all `IntentSpans` are in the Range's bounds, the batch can be evaluated as a 1PC transaction. This gets tripped up when the txn has written before but only to the range that its record is on.
A workable approach would be to only increment the transaction's sequence number on writes and to check whether the batch contains all of the sequence numbers up to the exclusive upper bound sequence on the `EndTransaction` request. This simplifies to checking the first write in the batch to see if it has sequence number 1, but that simplification will break down under parallel commits, so it will not be pursued.
### Intent Timestamps
Now that intent timestamps are being used to determine the last time that a client was definitely active, we need to make sure that it tracks that information. Luckily, `intent.Txn.Timestamp` gives us exactly this! For an intent to have a certain timestamp, the client must have been alive at that timestamp to instruct the intent to be written.
A concern here is that intents can be pushed by other transactions to higher timestamps. Conveniently, when an intent is pushed, only the `MVCCMetadata.Timestamp` and the provisional timestamped key-value are changed. The Timestamp in the TxnMeta struct (`MVCCMetadata.Txn.Timestamp`) itself is [left unchanged](https://github.com/cockroachdb/cockroach/blob/fd28ed991385b446028f870d0049122fcabc94e3/pkg/storage/engine/mvcc.go#L2142-L2147).
## Drawbacks
This change is invasive and has the potential to be destabilizing. We shouldn't do it if we aren't willing to address some fallout from it. That said, I have a working prototype with most of the design items listed here implemented and it doesn't seem to be hitting very many issues with unit tests.
The other drawback is that the migration for this could be a little tricky. I think everything will work if we leave the BeginTransaction code-paths in place and make the decision on whether to use the request type or not on the client based on the active cluster version. This works because the current proposal manages to avoid adding any new state to the transaction record or to intents. An earlier prototype did add new state to intents, which would have made the migration much more difficult.
## Rationale and Alternatives
The only viable alternative to this change that allows parallel commits to work is lifting the mutual exclusion between evaluation and replication of BeginTransaction and EndTransaction requests. We have thrown around the idea of "pipelining" beneath the replica latching level, but there are no clear ideas on how this would actually work.
## Unresolved questions
- Will removing the transaction record auto-GC mechanism cause a performance hit? Will delaying all other transaction record GC runs cause one? Or will that actually speed things up? See section [Transaction Record GC](#transaction-record-gc).
- Should we remove server-side WriteTooOld avoidance? See section [Replica-side WriteTooOld avoidance](#replica-side-writetooold-avoidance).
- Is there any reason to keep `BeginTransaction` around? It seems like any test that needs a transaction record can just use a `HeartbeatTxn` request. | unknown | github | https://github.com/cockroachdb/cockroach | docs/RFCS/20181209_lazy_txn_record_creation.md |
/*
* Copyright 2014-2024 JetBrains s.r.o and contributors. Use of this source code is governed by the Apache 2.0 license.
*/
package io.ktor.network.tls
import io.ktor.network.sockets.*
import io.ktor.utils.io.*
import kotlin.coroutines.*
/**
 * Establishes a TLS session over [socket], using [input]/[output] as the raw
 * byte channels for the underlying connection, and returns a [Socket] whose
 * read/write channels carry decrypted/encrypted traffic.
 *
 * Platform implementations are supplied via matching `actual` declarations
 * (NOTE(review): presumably the TLS handshake happens there — confirm in the
 * platform-specific sources).
 *
 * @param socket the underlying plain socket being secured
 * @param input channel reading raw bytes from the connection
 * @param output channel writing raw bytes to the connection
 * @param config TLS parameters for the session
 * @param context coroutine context the TLS machinery runs in
 */
internal expect suspend fun openTLSSession(
    socket: Socket,
    input: ByteReadChannel,
    output: ByteWriteChannel,
    config: TLSConfig,
    context: CoroutineContext
): Socket
// NOTE(review): this appears to be a compiler test fixture exercising an
// error case (mutating a global inside a JSX-returning render helper) —
// confirm before editing, since snapshot tests may depend on the exact text.
function Component() {
  const renderItem = item => {
    // Multiple returns so that the return type is a Phi (union)
    if (item == null) {
      return null;
    }
    // Normally we assume that it's safe to mutate globals in a function passed
    // as a prop, because the prop could be used as an event handler or effect.
    // But if the function returns JSX we can assume it's a render helper, ie
    // called during render, and thus it's unsafe to mutate globals or call
    // other impure code.
    global.property = true;
    return <Item item={item} value={rand} />;
  };
  return <ItemList renderItem={renderItem} />;
}
#!"C:\Users\hog\Documents\Visual Studio 2010\Projects\ArdupilotMega\ArdupilotMega\bin\Debug\ipy.exe"
from numpy.testing import *
from numpy.distutils.misc_util import appendpath, minrelpath, gpaths, rel_path
from os.path import join, sep, dirname
# Build an absolute path: prepend the OS separator to the joined components.
ajoin = lambda *paths: join(sep, *paths)
class TestAppendpath(TestCase):
    """Exercise numpy.distutils.misc_util.appendpath path-merging rules."""

    def test_1(self):
        # Single-component prefix/name, mixing absolute and relative forms.
        for args, expected in [
            (('prefix', 'name'), join('prefix', 'name')),
            (('/prefix', 'name'), ajoin('prefix', 'name')),
            (('/prefix', '/name'), ajoin('prefix', 'name')),
            (('prefix', '/name'), join('prefix', 'name')),
        ]:
            assert_equal(appendpath(*args), expected)

    def test_2(self):
        # Multi-component relative prefixes and shared absolute prefixes.
        for args, expected in [
            (('prefix/sub', 'name'), join('prefix', 'sub', 'name')),
            (('prefix/sub', 'sup/name'), join('prefix', 'sub', 'sup', 'name')),
            (('/prefix/sub', '/prefix/name'), ajoin('prefix', 'sub', 'name')),
        ]:
            assert_equal(appendpath(*args), expected)

    def test_3(self):
        # Deeper absolute paths with partially overlapping prefixes.
        for args, expected in [
            (('/prefix/sub', '/prefix/sup/name'),
             ajoin('prefix', 'sub', 'sup', 'name')),
            (('/prefix/sub/sub2', '/prefix/sup/sup2/name'),
             ajoin('prefix', 'sub', 'sub2', 'sup', 'sup2', 'name')),
            (('/prefix/sub/sub2', '/prefix/sub/sup/name'),
             ajoin('prefix', 'sub', 'sub2', 'sup', 'name')),
        ]:
            assert_equal(appendpath(*args), expected)
class TestMinrelpath(TestCase):
    """Exercise numpy.distutils.misc_util.minrelpath '..' reduction."""

    def test_1(self):
        def native(path):
            # Express the case table with '/' and convert to the OS separator.
            return path.replace('/', sep)

        for raw, reduced in [
            ('aa/bb', 'aa/bb'),
            ('..', '..'),
            ('aa/..', ''),
            ('aa/../bb', 'bb'),
            ('aa/bb/..', 'aa'),
            ('aa/bb/../..', ''),
            ('aa/bb/../cc/../dd', 'aa/dd'),
            ('.././..', '../..'),
            ('aa/bb/.././../dd', 'dd'),
        ]:
            assert_equal(minrelpath(native(raw)), native(reduced))
class TestGpaths(TestCase):
    """Exercise numpy.distutils.misc_util.gpaths glob expansion."""

    def test_gpaths(self):
        # Root the globs at the package directory one level up from the tests.
        local_path = minrelpath(join(dirname(__file__), '..'))
        ls = gpaths('command/*.py', local_path)
        # repr() instead of Python-2-only backtick syntax (`ls`), which is a
        # SyntaxError on Python 3; behavior of the message is unchanged.
        assert join(local_path, 'command', 'build_src.py') in ls, repr(ls)
        f = gpaths('system_info.py', local_path)
        assert join(local_path, 'system_info.py') == f[0], repr(f)
# Allow running this test module directly: ``python test_misc_util.py``.
if __name__ == "__main__":
    run_module_suite()
#!/usr/bin/env python
# ######################################################################
# Copyright (c) 2014, Brookhaven Science Associates, Brookhaven #
# National Laboratory. All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions #
# are met: #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# * Redistributions in binary form must reproduce the above copyright #
# notice this list of conditions and the following disclaimer in #
# the documentation and/or other materials provided with the #
# distribution. #
# #
# * Neither the name of the Brookhaven Science Associates, Brookhaven #
# National Laboratory nor the names of its contributors may be used #
# to endorse or promote products derived from this software without #
# specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE #
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, #
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES #
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR #
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) #
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, #
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OTHERWISE) ARISING #
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
########################################################################
"""
This is an example script utilizing dpc.py for Differential Phase Contrast
(DPC) imaging based on Fourier shift fitting.
This script requires a SOFC folder containing the test data in your home
directory. The default path for the results (texts and JPEGs) is also your home
directory. It will automatically download the data to your home directory if
you installed wget and unzip utilities. You can also manually download and
decompress the data at https://www.dropbox.com/s/963c4ymfmbjg5dm/SOFC.zip
Steps
-----
In this file:
1. Set parameters
2. Load the reference image
3. Save intermediate and final results
in skxray.dpc.dpc_runner:
1. Dimension reduction along x and y direction
2. 1-D IFFT
3. Same calculation on each diffraction pattern
3.1. Read a diffraction pattern
3.2. Dimension reduction along x and y direction
3.3. 1-D IFFT
3.4. Nonlinear fitting
4. Reconstruct the final phase image
"""
import os
from subprocess import call
import scipy
import numpy as np
import matplotlib.pyplot as plt
from pims import ImageSequence
import zipfile
import requests
from clint.textui import progress
import tempfile
from skxray.core import dpc
# dump log messages to screen
# Attach a DEBUG-level stream handler (stderr) to the skxray DPC logger so
# every intermediate step of the demo run below is traced on the console.
dpc.logger.setLevel(dpc.logging.DEBUG)
handler = dpc.logging.StreamHandler()
handler.setLevel(dpc.logging.DEBUG)
dpc.logger.addHandler(handler)
def load_image(filename):
    """Read one diffraction image from disk.

    Parameters
    ----------
    filename : str
        Path of the image file to read.

    Returns
    -------
    t : 2-D numpy array
        Pixel data of the image (as returned by ``plt.imread``).

    Raises
    ------
    Exception
        If ``filename`` does not exist.  Download instructions for the
        demo data set are printed to stdout before raising.
    """
    if not os.path.exists(filename):
        print('Please download and decompress the test data to your home directory\n'
              'Google drive link, https://drive.google.com/file/d/0B3v6W1bQwN_AVjdYdERHUDBsMmM/edit?usp=sharing\n'
              'Dropbox link, https://www.dropbox.com/s/963c4ymfmbjg5dm/SOFC.zip')
        raise Exception('File not found: %s' % filename)
    return plt.imread(filename)
def download_zip(url, path):
    """Download a zip archive from ``url`` and extract it under ``path``.

    The archive is streamed into a named temporary file (with a console
    progress bar) and both the archive handle and the temporary file are
    closed afterwards; closing the NamedTemporaryFile also deletes it.
    """
    r = requests.get(url, stream=True)
    temp = tempfile.NamedTemporaryFile(suffix='.zip')
    print('Downloading url --> %s\nto --> %s' % (url, temp.name))
    with open(temp.name, 'wb') as f:
        # A missing Content-Length header would make int(None) raise, so
        # fall back to 0 (the progress bar just cannot size itself then).
        total_length = int(r.headers.get('content-length') or 0)
        for chunk in progress.bar(r.iter_content(chunk_size=1024),
                                  expected_size=(total_length / 1024) + 1):
            if chunk:
                f.write(chunk)
                f.flush()
    z = zipfile.ZipFile(temp)
    print("extracting to --> %s" % path)
    try:
        z.extractall(path=path)
    finally:
        # Close the archive and the temp file even if extraction fails;
        # the original code leaked both handles.
        z.close()
        temp.close()
def run():
    """Run the whole DPC demo: fetch data, reconstruct, save results."""
    # download to this folder
    current_folder = os.sep.join(__file__.split(os.sep)[:-1])
    dpc_demo_data_path = os.path.join(current_folder, 'SOFC')
    # Fetch the SOFC demo data set next to this script if it is missing.
    if not os.path.exists(dpc_demo_data_path):
        zip_file_url = 'https://www.dropbox.com/s/963c4ymfmbjg5dm/SOFC.zip?dl=1'
        import download
        download.run()
        # download_zip(zip_file_url, current_folder)
    # 1. Set parameters
    start_point = [1, 0]
    first_image = 1
    # pixel_size: detector pixel pitch — presumably microns; confirm with
    # the beamline documentation.
    pixel_size = (55, 55)
    # focus_to_det: focus-to-detector distance — units not stated here,
    # must match what dpc.dpc_runner expects; verify.
    focus_to_det = 1.46e6
    scan_xstep = 0.1
    scan_ystep = 0.1
    scan_rows = 121
    scan_cols = 121
    # energy: beam energy — presumably keV; confirm.
    energy = 19.5
    roi = None
    padding = 0
    weighting = 1.
    bad_pixels = None
    # Nonlinear fit method name passed through to the solver.
    solver = 'Nelder-Mead'
    # Lazy sequence of all diffraction patterns in the scan.
    images = ImageSequence(dpc_demo_data_path + "/*.tif")
    img_size = images[0].shape
    # A flat (all-ones) reference image is used for this demo.
    ref_image = np.ones(img_size)
    scale = True
    negate = True
    print('running dpc')
    # 2. Use dpc.dpc_runner
    phase, amplitude = dpc.dpc_runner(
        ref_image, images, start_point, pixel_size, focus_to_det, scan_rows,
        scan_cols, scan_xstep, scan_ystep, energy, padding, weighting, solver,
        roi, bad_pixels, negate, scale)
    # 3. Save intermediate and final results
    # Each result is written both as a JPEG (quick look) and as a text
    # array (full precision) in the script's folder.
    print('saving dpc output to disk in --> %s' % current_folder)
    scipy.misc.imsave(os.path.join(current_folder, 'phase.jpg'), phase)
    np.savetxt(os.path.join(current_folder, 'phase.txt'), phase)
    scipy.misc.imsave(os.path.join(current_folder, 'amplitude.jpg'), amplitude)
    np.savetxt(os.path.join(current_folder, 'amplitude.txt'), amplitude)
# Run the demo when executed as a script.
if __name__ == '__main__':
    run()
import parser
import unittest
import sys
import struct
from test import test_support as support
from test.script_helper import assert_python_failure
#
# First, we test that we can generate trees from valid source fragments,
# and that these valid trees are indeed allowed by the tree-loading side
# of the parser module.
#
class RoundtripLegalSyntaxTestCase(unittest.TestCase):
    """Round-trip valid source fragments through the parser module.

    Each fragment is parsed to an ST, flattened with totuple(), rebuilt
    with parser.sequence2st(), and the two tuple trees must be equal.
    """
    def roundtrip(self, f, s):
        # f is parser.expr or parser.suite; s is the source fragment.
        st1 = f(s)
        t = st1.totuple()
        try:
            st2 = parser.sequence2st(t)
        except parser.ParserError, why:
            self.fail("could not roundtrip %r: %s" % (s, why))
        self.assertEqual(t, st2.totuple(),
                         "could not re-generate syntax tree")
    def check_expr(self, s):
        # Round-trip an expression fragment.
        self.roundtrip(parser.expr, s)
    def test_flags_passed(self):
        # The unicode_literals flag has to be passed from the parser to AST
        # generation.
        suite = parser.suite("from __future__ import unicode_literals; x = ''")
        code = suite.compile()
        scope = {}
        exec code in scope
        self.assertIsInstance(scope["x"], unicode)
    def check_suite(self, s):
        # Round-trip a statement-suite fragment.
        self.roundtrip(parser.suite, s)
    def test_yield_statement(self):
        # yield in statement and expression positions.
        self.check_suite("def f(): yield 1")
        self.check_suite("def f(): yield")
        self.check_suite("def f(): x += yield")
        self.check_suite("def f(): x = yield 1")
        self.check_suite("def f(): x = y = yield 1")
        self.check_suite("def f(): x = yield")
        self.check_suite("def f(): x = y = yield")
        self.check_suite("def f(): 1 + (yield)*2")
        self.check_suite("def f(): (yield 1)*2")
        self.check_suite("def f(): return; yield 1")
        self.check_suite("def f(): yield 1; return")
        self.check_suite("def f():\n"
                         "    for x in range(30):\n"
                         "        yield x\n")
        self.check_suite("def f():\n"
                         "    if (yield):\n"
                         "        yield x\n")
    def test_expressions(self):
        # Literals, displays, comprehensions, call syntax, lambdas.
        self.check_expr("foo(1)")
        self.check_expr("{1:1}")
        self.check_expr("{1:1, 2:2, 3:3}")
        self.check_expr("{1:1, 2:2, 3:3,}")
        self.check_expr("{1}")
        self.check_expr("{1, 2, 3}")
        self.check_expr("{1, 2, 3,}")
        self.check_expr("[]")
        self.check_expr("[1]")
        self.check_expr("[1, 2, 3]")
        self.check_expr("[1, 2, 3,]")
        self.check_expr("()")
        self.check_expr("(1,)")
        self.check_expr("(1, 2, 3)")
        self.check_expr("(1, 2, 3,)")
        self.check_expr("[x**3 for x in range(20)]")
        self.check_expr("[x**3 for x in range(20) if x % 3]")
        self.check_expr("[x**3 for x in range(20) if x % 2 if x % 3]")
        self.check_expr("[x+y for x in range(30) for y in range(20) if x % 2 if y % 3]")
        #self.check_expr("[x for x in lambda: True, lambda: False if x()]")
        self.check_expr("list(x**3 for x in range(20))")
        self.check_expr("list(x**3 for x in range(20) if x % 3)")
        self.check_expr("list(x**3 for x in range(20) if x % 2 if x % 3)")
        self.check_expr("list(x+y for x in range(30) for y in range(20) if x % 2 if y % 3)")
        self.check_expr("{x**3 for x in range(30)}")
        self.check_expr("{x**3 for x in range(30) if x % 3}")
        self.check_expr("{x**3 for x in range(30) if x % 2 if x % 3}")
        self.check_expr("{x+y for x in range(30) for y in range(20) if x % 2 if y % 3}")
        self.check_expr("{x**3: y**2 for x, y in zip(range(30), range(30))}")
        self.check_expr("{x**3: y**2 for x, y in zip(range(30), range(30)) if x % 3}")
        self.check_expr("{x**3: y**2 for x, y in zip(range(30), range(30)) if x % 3 if y % 3}")
        self.check_expr("{x:y for x in range(30) for y in range(20) if x % 2 if y % 3}")
        self.check_expr("foo(*args)")
        self.check_expr("foo(*args, **kw)")
        self.check_expr("foo(**kw)")
        self.check_expr("foo(key=value)")
        self.check_expr("foo(key=value, *args)")
        self.check_expr("foo(key=value, *args, **kw)")
        self.check_expr("foo(key=value, **kw)")
        self.check_expr("foo(a, b, c, *args)")
        self.check_expr("foo(a, b, c, *args, **kw)")
        self.check_expr("foo(a, b, c, **kw)")
        self.check_expr("foo(a, *args, keyword=23)")
        self.check_expr("foo + bar")
        self.check_expr("foo - bar")
        self.check_expr("foo * bar")
        self.check_expr("foo / bar")
        self.check_expr("foo // bar")
        self.check_expr("lambda: 0")
        self.check_expr("lambda x: 0")
        self.check_expr("lambda *y: 0")
        self.check_expr("lambda *y, **z: 0")
        self.check_expr("lambda **z: 0")
        self.check_expr("lambda x, y: 0")
        self.check_expr("lambda foo=bar: 0")
        self.check_expr("lambda foo=bar, spaz=nifty+spit: 0")
        self.check_expr("lambda foo=bar, **z: 0")
        self.check_expr("lambda foo=bar, blaz=blat+2, **z: 0")
        self.check_expr("lambda foo=bar, blaz=blat+2, *y, **z: 0")
        self.check_expr("lambda x, *y, **z: 0")
        self.check_expr("lambda x: 5 if x else 2")
        self.check_expr("(x for x in range(10))")
        self.check_expr("foo(x for x in range(10))")
    def test_print(self):
        # Python 2 print statement forms, including the >>file chevron.
        self.check_suite("print")
        self.check_suite("print 1")
        self.check_suite("print 1,")
        self.check_suite("print >>fp")
        self.check_suite("print >>fp, 1")
        self.check_suite("print >>fp, 1,")
    def test_simple_expression(self):
        # expr_stmt
        self.check_suite("a")
    def test_simple_assignments(self):
        self.check_suite("a = b")
        self.check_suite("a = b = c = d = e")
    def test_simple_augmented_assignments(self):
        # One case per augmented-assignment operator.
        self.check_suite("a += b")
        self.check_suite("a -= b")
        self.check_suite("a *= b")
        self.check_suite("a /= b")
        self.check_suite("a //= b")
        self.check_suite("a %= b")
        self.check_suite("a &= b")
        self.check_suite("a |= b")
        self.check_suite("a ^= b")
        self.check_suite("a <<= b")
        self.check_suite("a >>= b")
        self.check_suite("a **= b")
    def test_function_defs(self):
        # Positional, default, *args, **kw combinations plus decorators.
        self.check_suite("def f(): pass")
        self.check_suite("def f(*args): pass")
        self.check_suite("def f(*args, **kw): pass")
        self.check_suite("def f(**kw): pass")
        self.check_suite("def f(foo=bar): pass")
        self.check_suite("def f(foo=bar, *args): pass")
        self.check_suite("def f(foo=bar, *args, **kw): pass")
        self.check_suite("def f(foo=bar, **kw): pass")
        self.check_suite("def f(a, b): pass")
        self.check_suite("def f(a, b, *args): pass")
        self.check_suite("def f(a, b, *args, **kw): pass")
        self.check_suite("def f(a, b, **kw): pass")
        self.check_suite("def f(a, b, foo=bar): pass")
        self.check_suite("def f(a, b, foo=bar, *args): pass")
        self.check_suite("def f(a, b, foo=bar, *args, **kw): pass")
        self.check_suite("def f(a, b, foo=bar, **kw): pass")
        self.check_suite("@staticmethod\n"
                         "def f(): pass")
        self.check_suite("@staticmethod\n"
                         "@funcattrs(x, y)\n"
                         "def f(): pass")
        self.check_suite("@funcattrs()\n"
                         "def f(): pass")
    def test_class_defs(self):
        self.check_suite("class foo():pass")
        self.check_suite("@class_decorator\n"
                         "class foo():pass")
        self.check_suite("@class_decorator(arg)\n"
                         "class foo():pass")
        self.check_suite("@decorator1\n"
                         "@decorator2\n"
                         "class foo():pass")
    def test_import_from_statement(self):
        # Star, bare names, parenthesized lists, aliases, trailing commas.
        self.check_suite("from sys.path import *")
        self.check_suite("from sys.path import dirname")
        self.check_suite("from sys.path import (dirname)")
        self.check_suite("from sys.path import (dirname,)")
        self.check_suite("from sys.path import dirname as my_dirname")
        self.check_suite("from sys.path import (dirname as my_dirname)")
        self.check_suite("from sys.path import (dirname as my_dirname,)")
        self.check_suite("from sys.path import dirname, basename")
        self.check_suite("from sys.path import (dirname, basename)")
        self.check_suite("from sys.path import (dirname, basename,)")
        self.check_suite(
            "from sys.path import dirname as my_dirname, basename")
        self.check_suite(
            "from sys.path import (dirname as my_dirname, basename)")
        self.check_suite(
            "from sys.path import (dirname as my_dirname, basename,)")
        self.check_suite(
            "from sys.path import dirname, basename as my_basename")
        self.check_suite(
            "from sys.path import (dirname, basename as my_basename)")
        self.check_suite(
            "from sys.path import (dirname, basename as my_basename,)")
        self.check_suite("from .bogus import x")
    def test_basic_import_statement(self):
        self.check_suite("import sys")
        self.check_suite("import sys as system")
        self.check_suite("import sys, math")
        self.check_suite("import sys as system, math")
        self.check_suite("import sys, math as my_math")
    def test_relative_imports(self):
        self.check_suite("from . import name")
        self.check_suite("from .. import name")
        self.check_suite("from .pkg import name")
        self.check_suite("from ..pkg import name")
    def test_pep263(self):
        # Source files carrying a coding declaration must still round-trip.
        self.check_suite("# -*- coding: iso-8859-1 -*-\n"
                         "pass\n")
    def test_assert(self):
        self.check_suite("assert alo < ahi and blo < bhi\n")
    def test_with(self):
        self.check_suite("with open('x'): pass\n")
        self.check_suite("with open('x') as f: pass\n")
        self.check_suite("with open('x') as f, open('y') as g: pass\n")
    def test_try_stmt(self):
        # All legal combinations of except/else/finally clauses.
        self.check_suite("try: pass\nexcept: pass\n")
        self.check_suite("try: pass\nfinally: pass\n")
        self.check_suite("try: pass\nexcept A: pass\nfinally: pass\n")
        self.check_suite("try: pass\nexcept A: pass\nexcept: pass\n"
                         "finally: pass\n")
        self.check_suite("try: pass\nexcept: pass\nelse: pass\n")
        self.check_suite("try: pass\nexcept: pass\nelse: pass\n"
                         "finally: pass\n")
    def test_except_clause(self):
        # Both the Python 2 "except A, e" and the newer "except A as e".
        self.check_suite("try: pass\nexcept: pass\n")
        self.check_suite("try: pass\nexcept A: pass\n")
        self.check_suite("try: pass\nexcept A, e: pass\n")
        self.check_suite("try: pass\nexcept A as e: pass\n")
    def test_position(self):
        # An absolutely minimal test of position information.  Better
        # tests would be a big project.
        code = "def f(x):\n    return x + 1"
        st1 = parser.suite(code)
        st2 = st1.totuple(line_info=1, col_info=1)
        def walk(tree):
            # Yield every terminal node: (type, text, line, column).
            node_type = tree[0]
            next = tree[1]
            if isinstance(next, tuple):
                for elt in tree[1:]:
                    for x in walk(elt):
                        yield x
            else:
                yield tree
        terminals = list(walk(st2))
        self.assertEqual([
            (1, 'def', 1, 0),
            (1, 'f', 1, 4),
            (7, '(', 1, 5),
            (1, 'x', 1, 6),
            (8, ')', 1, 7),
            (11, ':', 1, 8),
            (4, '', 1, 9),
            (5, '', 2, -1),
            (1, 'return', 2, 4),
            (1, 'x', 2, 11),
            (14, '+', 2, 13),
            (2, '1', 2, 15),
            (4, '', 2, 16),
            (6, '', 2, -1),
            (4, '', 2, -1),
            (0, '', 2, -1)],
                         terminals)
#
# Second, we take *invalid* trees and make sure we get ParserError
# rejections for them.
#
class IllegalSyntaxTestCase(unittest.TestCase):
    """Feed malformed tuple trees to parser.sequence2st().

    Each hand-built tuple tree below encodes something the grammar
    forbids, so sequence2st() must raise parser.ParserError instead of
    building an ST.  The integer node numbers are grammar symbol and
    token ids from the CPython grammar tables.
    """
    def check_bad_tree(self, tree, label):
        # label only appears in the failure message.
        try:
            parser.sequence2st(tree)
        except parser.ParserError:
            pass
        else:
            self.fail("did not detect invalid tree for %r" % label)
    def test_junk(self):
        # not even remotely valid:
        self.check_bad_tree((1, 2, 3), "<junk>")
    def test_illegal_yield_1(self):
        # Illegal yield statement: def f(): return 1; yield 1
        tree = \
        (257,
         (264,
          (285,
           (259,
            (1, 'def'),
            (1, 'f'),
            (260, (7, '('), (8, ')')),
            (11, ':'),
            (291,
             (4, ''),
             (5, ''),
             (264,
              (265,
               (266,
                (272,
                 (275,
                  (1, 'return'),
                  (313,
                   (292,
                    (293,
                     (294,
                      (295,
                       (297,
                        (298,
                         (299,
                          (300,
                           (301,
                            (302, (303, (304, (305, (2, '1')))))))))))))))))),
              (264,
               (265,
                (266,
                 (272,
                  (276,
                   (1, 'yield'),
                   (313,
                    (292,
                     (293,
                      (294,
                       (295,
                        (297,
                         (298,
                          (299,
                           (300,
                            (301,
                             (302,
                              (303, (304, (305, (2, '1')))))))))))))))))),
               (4, ''))),
              (6, ''))))),
          (4, ''),
          (0, ''))))
        self.check_bad_tree(tree, "def f():\n  return 1\n  yield 1")
    def test_illegal_yield_2(self):
        # Illegal return in generator: def f(): return 1; yield 1
        tree = \
        (257,
         (264,
          (265,
           (266,
            (278,
             (1, 'from'),
             (281, (1, '__future__')),
             (1, 'import'),
             (279, (1, 'generators')))),
           (4, ''))),
         (264,
          (285,
           (259,
            (1, 'def'),
            (1, 'f'),
            (260, (7, '('), (8, ')')),
            (11, ':'),
            (291,
             (4, ''),
             (5, ''),
             (264,
              (265,
               (266,
                (272,
                 (275,
                  (1, 'return'),
                  (313,
                   (292,
                    (293,
                     (294,
                      (295,
                       (297,
                        (298,
                         (299,
                          (300,
                           (301,
                            (302, (303, (304, (305, (2, '1')))))))))))))))))),
              (264,
               (265,
                (266,
                 (272,
                  (276,
                   (1, 'yield'),
                   (313,
                    (292,
                     (293,
                      (294,
                       (295,
                        (297,
                         (298,
                          (299,
                           (300,
                            (301,
                             (302,
                              (303, (304, (305, (2, '1')))))))))))))))))),
               (4, ''))),
              (6, ''))))),
          (4, ''),
          (0, ''))))
        self.check_bad_tree(tree, "def f():\n  return 1\n  yield 1")
    def test_print_chevron_comma(self):
        # Illegal input: print >>fp,
        tree = \
        (257,
         (264,
          (265,
           (266,
            (268,
             (1, 'print'),
             (35, '>>'),
             (290,
              (291,
               (292,
                (293,
                 (295,
                  (296,
                   (297,
                    (298, (299, (300, (301, (302, (303, (1, 'fp')))))))))))))),
             (12, ','))),
           (4, ''))),
         (0, ''))
        self.check_bad_tree(tree, "print >>fp,")
    def test_a_comma_comma_c(self):
        # Illegal input: a,,c
        tree = \
        (258,
         (311,
          (290,
           (291,
            (292,
             (293,
              (295,
               (296,
                (297,
                 (298, (299, (300, (301, (302, (303, (1, 'a')))))))))))))),
          (12, ','),
          (12, ','),
          (290,
           (291,
            (292,
             (293,
              (295,
               (296,
                (297,
                 (298, (299, (300, (301, (302, (303, (1, 'c'))))))))))))))),
         (4, ''),
         (0, ''))
        self.check_bad_tree(tree, "a,,c")
    def test_illegal_operator(self):
        # Illegal input: a $= b
        tree = \
        (257,
         (264,
          (265,
           (266,
            (267,
             (312,
              (291,
               (292,
                (293,
                 (294,
                  (296,
                   (297,
                    (298,
                     (299,
                      (300, (301, (302, (303, (304, (1, 'a'))))))))))))))),
             (268, (37, '$=')),
             (312,
              (291,
               (292,
                (293,
                 (294,
                  (296,
                   (297,
                    (298,
                     (299,
                      (300, (301, (302, (303, (304, (1, 'b'))))))))))))))))),
           (4, ''))),
         (0, ''))
        self.check_bad_tree(tree, "a $= b")
    def test_malformed_global(self):
        #doesn't have global keyword in ast
        tree = (257,
                (264,
                 (265,
                  (266,
                   (282, (1, 'foo'))), (4, ''))),
                (4, ''),
                (0, ''))
        self.check_bad_tree(tree, "malformed global ast")
    def test_missing_import_source(self):
        # from import a
        tree = \
            (257,
             (267,
              (268,
               (269,
                (281,
                 (283, (1, 'from'), (1, 'import'),
                  (286, (284, (1, 'fred')))))),
               (4, ''))),
             (4, ''), (0, ''))
        self.check_bad_tree(tree, "from import a")
class CompileTestCase(unittest.TestCase):
    """Compile ST objects to code objects via parser.compilest()."""
    # These tests are very minimal. :-(
    def test_compile_expr(self):
        st = parser.expr('2 + 3')
        code = parser.compilest(st)
        self.assertEqual(eval(code), 5)
    def test_compile_suite(self):
        st = parser.suite('x = 2; y = x + 3')
        code = parser.compilest(st)
        globs = {}
        exec code in globs
        self.assertEqual(globs['y'], 5)
    def test_compile_error(self):
        # Assignment to a literal parses as a tree but must fail to compile.
        st = parser.suite('1 = 3 + 4')
        self.assertRaises(SyntaxError, parser.compilest, st)
    def test_compile_badunicode(self):
        # \U escape out of range and truncated \u escape.
        st = parser.suite('a = u"\U12345678"')
        self.assertRaises(SyntaxError, parser.compilest, st)
        st = parser.suite('a = u"\u1"')
        self.assertRaises(SyntaxError, parser.compilest, st)
    def test_issue_9011(self):
        # Issue 9011: compilation of an unary minus expression changed
        # the meaning of the ST, so that a second compilation produced
        # incorrect results.
        st = parser.expr('-3')
        code1 = parser.compilest(st)
        self.assertEqual(eval(code1), -3)
        code2 = parser.compilest(st)
        self.assertEqual(eval(code2), -3)
class ParserStackLimitTestCase(unittest.TestCase):
"""try to push the parser to/over its limits.
see http://bugs.python.org/issue1881 for a discussion
"""
def _nested_expression(self, level):
return "["*level+"]"*level
def test_deeply_nested_list(self):
e = self._nested_expression(99)
st = parser.expr(e)
st.compile()
def test_trigger_memory_error(self):
e = self._nested_expression(100)
rc, out, err = assert_python_failure('-c', e)
# parsing the expression will result in an error message
# followed by a MemoryError (see #11963)
self.assertIn(b's_push: parser stack overflow', err)
self.assertIn(b'MemoryError', err)
class STObjectTestCase(unittest.TestCase):
    """Test operations on ST objects themselves"""
    check_sizeof = support.check_sizeof
    @support.cpython_only
    def test_sizeof(self):
        # Verify sys.getsizeof() for ST objects by recomputing the size
        # of the node tree from its tuple representation.
        def XXXROUNDUP(n):
            # Child-array capacity rounding; presumably mirrors the
            # XXXROUNDUP macro in CPython's Parser/node.c — keep in sync.
            if n <= 1:
                return n
            if n <= 128:
                return (n + 3) & ~3
            return 1 << (n - 1).bit_length()
        basesize = support.calcobjsize('Pii')
        nodesize = struct.calcsize('hP3iP0h')
        def sizeofchildren(node):
            # Recursively sum the allocated size of a node's subtree;
            # a trailing str element is the terminal's text (+1 for NUL).
            if node is None:
                return 0
            res = 0
            hasstr = len(node) > 1 and isinstance(node[-1], str)
            if hasstr:
                res += len(node[-1]) + 1
            children = node[1:-1] if hasstr else node[1:]
            if children:
                res += XXXROUNDUP(len(children)) * nodesize
                for child in children:
                    res += sizeofchildren(child)
            return res
        def check_st_sizeof(st):
            self.check_sizeof(st, basesize + nodesize +
                                  sizeofchildren(st.totuple()))
        check_st_sizeof(parser.expr('2 + 3'))
        check_st_sizeof(parser.expr('2 + 3 + 4'))
        check_st_sizeof(parser.suite('x = 2 + 3'))
        check_st_sizeof(parser.suite(''))
        check_st_sizeof(parser.suite('# -*- coding: utf-8 -*-'))
        check_st_sizeof(parser.expr('[' + '2,' * 1000 + ']'))
    # XXX tests for pickling and unpickling of ST objects should go here
# XXX tests for pickling and unpickling of ST objects should go here
def test_main():
    # Run every test case class in this module under regrtest.
    support.run_unittest(
        RoundtripLegalSyntaxTestCase,
        IllegalSyntaxTestCase,
        CompileTestCase,
        ParserStackLimitTestCase,
        STObjectTestCase,
    )
# Allow running this test module directly.
if __name__ == "__main__":
    test_main()
# frozen_string_literal: true
require "cases/helper"
require "active_support/core_ext/hash/indifferent_access"
require "active_support/hash_with_indifferent_access"
# Tests for ActiveModel::AttributeAssignment: mass assignment via
# assign_attributes / attributes=, unknown/private attribute handling,
# and the strong-parameters (permitted?) protocol.
class AttributeAssignmentTest < ActiveModel::TestCase
  # Minimal model with two public accessors, a writer that always raises,
  # and a private writer (which must NOT be mass-assignable).
  class Model
    include ActiveModel::AttributeAssignment
    attr_accessor :name, :description
    def initialize(attributes = {})
      assign_attributes(attributes)
    end
    def broken_attribute=(value)
      raise ErrorFromAttributeWriter
    end
    private
      attr_writer :metadata
  end
  # Sentinel error used to prove writer exceptions are not swallowed.
  class ErrorFromAttributeWriter < StandardError
  end
  # Stand-in for ActionController::Parameters implementing just enough of
  # the strong-parameters protocol (permitted?, permit!, to_h, each_pair).
  class ProtectedParams
    attr_accessor :permitted
    alias :permitted? :permitted
    delegate :keys, :key?, :has_key?, :empty?, to: :@parameters
    def initialize(attributes)
      @parameters = attributes.with_indifferent_access
      @permitted = false
    end
    def permit!
      @permitted = true
      self
    end
    def [](key)
      @parameters[key]
    end
    def to_h
      @parameters
    end
    def each_pair(&block)
      @parameters.each_pair(&block)
    end
    def dup
      super.tap do |duplicate|
        duplicate.instance_variable_set :@permitted, permitted?
      end
    end
  end
  test "simple assignment" do
    model = Model.new
    model.assign_attributes(name: "hello", description: "world")
    assert_equal "hello", model.name
    assert_equal "world", model.description
  end
  # attributes= is an alias of assign_attributes.
  test "simple assignment alias" do
    model = Model.new
    model.attributes = { name: "hello", description: "world" }
    assert_equal "hello", model.name
    assert_equal "world", model.description
  end
  # Unknown keys raise UnknownAttributeError carrying record + attribute.
  test "assign non-existing attribute" do
    model = Model.new
    error = assert_raises(ActiveModel::UnknownAttributeError) do
      model.assign_attributes(hz: 1)
    end
    assert_equal model, error.record
    assert_equal "hz", error.attribute
  end
  # attribute_writer_missing is the hook for handling unknown keys.
  test "assign non-existing attribute by overriding #attribute_writer_missing" do
    model_class = Class.new(Model) do
      attr_accessor :assigned_attributes
      def attribute_writer_missing(name, value) = @assigned_attributes[name] = value
    end
    model = model_class.new(assigned_attributes: {})
    model.assign_attributes unknown: "attribute"
    assert_equal({ "unknown" => "attribute" }, model.assigned_attributes)
  end
  # Private writers are treated as unknown for mass assignment.
  test "assign private attribute" do
    model = Model.new
    assert_raises(ActiveModel::UnknownAttributeError) do
      model.assign_attributes(metadata: { a: 1 })
    end
  end
  test "does not swallow errors raised in an attribute writer" do
    assert_raises(ErrorFromAttributeWriter) do
      Model.new(broken_attribute: 1)
    end
  end
  test "an ArgumentError is raised if a non-hash-like object is passed" do
    err = assert_raises(ArgumentError) do
      Model.new(1)
    end
    assert_equal("When assigning attributes, you must pass a hash as an argument, Integer passed.", err.message)
  end
  # Un-permitted params must be rejected outright.
  test "forbidden attributes cannot be used for mass assignment" do
    params = ProtectedParams.new(name: "Guille", description: "m")
    assert_raises(ActiveModel::ForbiddenAttributesError) do
      Model.new(params)
    end
  end
  test "permitted attributes can be used for mass assignment" do
    params = ProtectedParams.new(name: "Guille", description: "desc")
    params.permit!
    model = Model.new(params)
    assert_equal "Guille", model.name
    assert_equal "desc", model.description
  end
  test "regular hash should still be used for mass assignment" do
    model = Model.new(name: "Guille", description: "m")
    assert_equal "Guille", model.name
    assert_equal "m", model.description
  end
  test "assigning no attributes should not raise, even if the hash is un-permitted" do
    model = Model.new
    assert_nil model.assign_attributes(ProtectedParams.new({}))
  end
  # assign_attributes must rely on each_pair, not each.
  test "passing an object with each_pair but without each" do
    model = Model.new
    h = { name: "hello", description: "world" }
    h.instance_eval { undef :each }
    model.assign_attributes(h)
    assert_equal "hello", model.name
    assert_equal "world", model.description
  end
end
/***************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
* are also available at https://curl.se/docs/copyright.html.
*
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
* copies of the Software, and permit persons to whom the Software is
* furnished to do so, under the terms of the COPYING file.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
* SPDX-License-Identifier: curl
*
***************************************************************************/
/* <DESC>
* Issue an HTTP POST and provide the data through the read callback.
* </DESC>
*/
#include <stdio.h>
#include <string.h>
#include <curl/curl.h>
/* silly test data to POST */
static const char data[] =
  "Lorem ipsum dolor sit amet, consectetur adipiscing elit. "
  "Sed vel urna neque. Ut quis leo metus. Quisque eleifend, ex at "
  "laoreet rhoncus, odio ipsum semper metus, at tempus ante urna in mauris. "
  "Suspendisse ornare tempor venenatis. Ut dui neque, pellentesque a ______ "
  "eget, mattis vitae ligula. Fusce ut pharetra est. Ut ullamcorper mi ac "
  "sollicitudin semper. Praesent sit amet tellus ______, posuere nulla non, "
  "rhoncus ipsum.";

/* Cursor over the POST payload: current position and bytes remaining. */
struct WriteThis {
  const char *readptr;
  size_t sizeleft;
};

/* libcurl read callback: hand libcurl up to size*nmemb bytes of the
   remaining payload and advance the cursor. Returns the number of bytes
   copied, or 0 once the payload is exhausted. */
static size_t read_cb(char *dest, size_t size, size_t nmemb, void *userp)
{
  struct WriteThis *upload = (struct WriteThis *)userp;
  size_t room = size * nmemb;
  size_t chunk;

  if(!upload->sizeleft)
    return 0; /* no more data left to deliver */

  /* never copy more than the destination buffer can hold */
  chunk = (upload->sizeleft < room) ? upload->sizeleft : room;
  memcpy(dest, upload->readptr, chunk);
  upload->readptr += chunk;
  upload->sizeleft -= chunk;
  return chunk;
}
/* Demo driver: POST the static payload to example.com, feeding the body
   to libcurl through read_cb(). Returns 0 on success, a CURLcode cast to
   int if global init fails. */
int main(void)
{
  CURL *curl;
  CURLcode result;
  struct WriteThis wt;
  /* point the upload cursor at the start of the whole payload */
  wt.readptr = data;
  wt.sizeleft = strlen(data);
  /* In Windows, this inits the Winsock stuff */
  result = curl_global_init(CURL_GLOBAL_DEFAULT);
  /* Check for errors */
  if(result != CURLE_OK) {
    fprintf(stderr, "curl_global_init() failed: %s\n",
            curl_easy_strerror(result));
    return (int)result;
  }
  /* get a curl handle */
  curl = curl_easy_init();
  if(curl) {
    /* First set the URL that is about to receive our POST. */
    curl_easy_setopt(curl, CURLOPT_URL, "https://example.com/index.cgi");
    /* Now specify we want to POST data */
    curl_easy_setopt(curl, CURLOPT_POST, 1L);
    /* we want to use our own read function */
    curl_easy_setopt(curl, CURLOPT_READFUNCTION, read_cb);
    /* pointer to pass to our read function */
    curl_easy_setopt(curl, CURLOPT_READDATA, &wt);
    /* get verbose debug output please */
    curl_easy_setopt(curl, CURLOPT_VERBOSE, 1L);
    /*
      If you use POST to an HTTP 1.1 server, you can send data without knowing
      the size before starting the POST if you use chunked encoding. You
      enable this by adding a header like "Transfer-Encoding: chunked" with
      CURLOPT_HTTPHEADER. With HTTP 1.0 or without chunked transfer, you must
      specify the size in the request.
    */
#ifdef USE_CHUNKED
    {
      struct curl_slist *chunk = NULL;
      chunk = curl_slist_append(chunk, "Transfer-Encoding: chunked");
      result = curl_easy_setopt(curl, CURLOPT_HTTPHEADER, chunk);
      /* use curl_slist_free_all() after the *perform() call to free this
         list again */
    }
#else
    /* Set the expected POST size. If you want to POST large amounts of data,
       consider CURLOPT_POSTFIELDSIZE_LARGE */
    curl_easy_setopt(curl, CURLOPT_POSTFIELDSIZE, (long)wt.sizeleft);
#endif
#ifdef DISABLE_EXPECT
    /*
      Using POST with HTTP 1.1 implies the use of a "Expect: 100-continue"
      header. You can disable this header with CURLOPT_HTTPHEADER as usual.
      NOTE: if you want chunked transfer too, you need to combine these two
      since you can only set one list of headers with CURLOPT_HTTPHEADER. */
    /* A less good option would be to enforce HTTP 1.0, but that might also
       have other implications. */
    {
      struct curl_slist *chunk = NULL;
      chunk = curl_slist_append(chunk, "Expect:");
      result = curl_easy_setopt(curl, CURLOPT_HTTPHEADER, chunk);
      /* use curl_slist_free_all() after the *perform() call to free this
         list again */
    }
#endif
    /* Perform the request, result gets the return code */
    result = curl_easy_perform(curl);
    /* Check for errors */
    if(result != CURLE_OK)
      fprintf(stderr, "curl_easy_perform() failed: %s\n",
              curl_easy_strerror(result));
    /* always cleanup */
    curl_easy_cleanup(curl);
  }
  curl_global_cleanup();
  return 0;
}
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: pingdom
short_description: Pause/unpause Pingdom alerts
description:
- This module will let you pause/unpause Pingdom alerts
version_added: "1.2"
author:
- "Dylan Silva (@thaumos)"
- "Justin Johns"
requirements:
- "This pingdom python library: https://github.com/mbabineau/pingdom-python"
options:
state:
description:
- Define whether or not the check should be running or paused.
required: true
choices: [ "running", "paused" ]
checkid:
description:
- Pingdom ID of the check.
required: true
uid:
description:
- Pingdom user ID.
required: true
passwd:
description:
- Pingdom user password.
required: true
key:
description:
- Pingdom API key.
required: true
notes:
- This module does not yet have support to add/remove checks.
'''
EXAMPLES = '''
# Pause the check with the ID of 12345.
- pingdom:
uid: example@example.com
passwd: password123
key: apipassword123
checkid: 12345
state: paused
# Unpause the check with the ID of 12345.
- pingdom:
uid: example@example.com
passwd: password123
key: apipassword123
checkid: 12345
state: running
'''
# Optional dependency: record availability instead of failing at import
# time; main() reports the missing requirement via fail_json().  The
# original bare ``except:`` also hid unrelated failures (e.g. a broken
# installation raising something other than ImportError).
try:
    import pingdom
    HAS_PINGDOM = True
except ImportError:
    HAS_PINGDOM = False
from ansible.module_utils.basic import AnsibleModule
def pause(checkid, uid, passwd, key):
    """Pause the given Pingdom check and report its name and status.

    Returns a ``(failed, name, status)`` triple.  ``failed`` is always
    False: the status reported by the API right after the change is
    unreliable, so raw exceptions are accepted instead of status checks.
    """
    conn = pingdom.PingdomConnection(uid, passwd, key)
    conn.modify_check(checkid, paused=True)
    check = conn.get_check(checkid)
    # if check.status != "paused":  # api output buggy - accept raw exception for now
    #     return (True, check.name, check.status)
    return (False, check.name, check.status)
def unpause(checkid, uid, passwd, key):
    """Resume (unpause) the given Pingdom check and report name and status.

    Returns a ``(failed, name, status)`` triple.  ``failed`` is always
    False: the status reported by the API right after the change is
    unreliable, so raw exceptions are accepted instead of status checks.
    """
    conn = pingdom.PingdomConnection(uid, passwd, key)
    conn.modify_check(checkid, paused=False)
    check = conn.get_check(checkid)
    # if check.status != "up":  # api output buggy - accept raw exception for now
    #     return (True, check.name, check.status)
    return (False, check.name, check.status)
def main():
    # Entry point: parse module arguments, pause/unpause the check, and
    # report the resulting check name and status back to Ansible.
    #
    # NOTE(review): 'started'/'stopped' are accepted as aliases of
    # 'running'/'paused' but are not advertised in DOCUMENTATION — confirm
    # whether they should be documented or dropped.
    module = AnsibleModule(
        argument_spec=dict(
            state=dict(required=True, choices=['running', 'paused', 'started', 'stopped']),
            checkid=dict(required=True),
            uid=dict(required=True),
            passwd=dict(required=True, no_log=True),
            key=dict(required=True)
        )
    )
    # Fail early if the third-party pingdom library could not be imported.
    if not HAS_PINGDOM:
        module.fail_json(msg="Missing required pingdom module (check docs)")
    checkid = module.params['checkid']
    state = module.params['state']
    uid = module.params['uid']
    passwd = module.params['passwd']
    key = module.params['key']
    # Exactly one of the two branches runs; `choices` above guarantees the
    # state is one of the four accepted values, so `rc` is always bound.
    if (state == "paused" or state == "stopped"):
        (rc, name, result) = pause(checkid, uid, passwd, key)
    if (state == "running" or state == "started"):
        (rc, name, result) = unpause(checkid, uid, passwd, key)
    # pause()/unpause() currently always return rc=False; a truthy rc would
    # indicate the check did not reach the requested state.
    if rc != 0:
        module.fail_json(checkid=checkid, name=name, status=result)
    module.exit_json(checkid=checkid, name=name, status=result)
if __name__ == '__main__':
main() | unknown | codeparrot/codeparrot-clean | ||
from sympy.core import (S, symbols, Eq, pi, Catalan, EulerGamma, Lambda,
Dummy, Function)
from sympy.core.compatibility import StringIO
from sympy import erf, Integral, Piecewise
from sympy import Equality
from sympy.matrices import Matrix, MatrixSymbol
from sympy.printing.codeprinter import Assignment
from sympy.utilities.codegen import OctaveCodeGen, codegen, make_routine
from sympy.utilities.pytest import raises
from sympy.utilities.lambdify import implemented_function
from sympy.utilities.pytest import XFAIL
import sympy
x, y, z = symbols('x,y,z')
def test_empty_m_code():
    """Dumping an empty routine list must produce an empty .m file."""
    generator = OctaveCodeGen()
    buffer = StringIO()
    generator.dump_m([], buffer, "file", header=False, empty=False)
    assert buffer.getvalue() == ""
def test_m_simple_code():
    """A product expression becomes a single-output Octave function."""
    name_expr = ("test", (x + y)*z)
    result, = codegen(name_expr, "Octave", header=False, empty=False)
    filename, source = result
    assert filename == "test.m"
    assert source == (
        "function out1 = test(x, y, z)\n"
        "  out1 = z.*(x + y);\n"
        "end\n"
    )
def test_m_simple_code_with_header():
name_expr = ("test", (x + y)*z)
result, = codegen(name_expr, "Octave", header=True, empty=False)
assert result[0] == "test.m"
source = result[1]
expected = (
"function out1 = test(x, y, z)\n"
" %TEST Autogenerated by sympy\n"
" % Code generated with sympy " + sympy.__version__ + "\n"
" %\n"
" % See http://www.sympy.org/ for more information.\n"
" %\n"
" % This file is part of 'project'\n"
" out1 = z.*(x + y);\n"
"end\n"
)
assert source == expected
def test_m_simple_code_nameout():
expr = Equality(z, (x + y))
name_expr = ("test", expr)
result, = codegen(name_expr, "Octave", header=False, empty=False)
source = result[1]
expected = (
"function z = test(x, y)\n"
" z = x + y;\n"
"end\n"
)
assert source == expected
def test_m_numbersymbol():
name_expr = ("test", pi**Catalan)
result, = codegen(name_expr, "Octave", header=False, empty=False)
source = result[1]
expected = (
"function out1 = test()\n"
" out1 = pi^0.915965594177219;\n"
"end\n"
)
assert source == expected
@XFAIL
def test_m_numbersymbol_no_inline():
# FIXME: how to pass inline=False to the OctaveCodePrinter?
name_expr = ("test", [pi**Catalan, EulerGamma])
result, = codegen(name_expr, "Octave", header=False,
empty=False, inline=False)
source = result[1]
expected = (
"function [out1, out2] = test()\n"
" Catalan = 0.915965594177219; % constant\n"
" EulerGamma = 0.5772156649015329; % constant\n"
" out1 = pi^Catalan;\n"
" out2 = EulerGamma;\n"
"end\n"
)
assert source == expected
def test_m_code_argument_order():
expr = x + y
routine = make_routine("test", expr, argument_sequence=[z, x, y], language="octave")
code_gen = OctaveCodeGen()
output = StringIO()
code_gen.dump_m([routine], output, "test", header=False, empty=False)
source = output.getvalue()
expected = (
"function out1 = test(z, x, y)\n"
" out1 = x + y;\n"
"end\n"
)
assert source == expected
def test_multiple_results_m():
# Here the output order is the input order
expr1 = (x + y)*z
expr2 = (x - y)*z
name_expr = ("test", [expr1, expr2])
result, = codegen(name_expr, "Octave", header=False, empty=False)
source = result[1]
expected = (
"function [out1, out2] = test(x, y, z)\n"
" out1 = z.*(x + y);\n"
" out2 = z.*(x - y);\n"
"end\n"
)
assert source == expected
def test_results_named_unordered():
# Here output order is based on name_expr
A, B, C = symbols('A,B,C')
expr1 = Equality(C, (x + y)*z)
expr2 = Equality(A, (x - y)*z)
expr3 = Equality(B, 2*x)
name_expr = ("test", [expr1, expr2, expr3])
result, = codegen(name_expr, "Octave", header=False, empty=False)
source = result[1]
expected = (
"function [C, A, B] = test(x, y, z)\n"
" C = z.*(x + y);\n"
" A = z.*(x - y);\n"
" B = 2*x;\n"
"end\n"
)
assert source == expected
def test_results_named_ordered():
A, B, C = symbols('A,B,C')
expr1 = Equality(C, (x + y)*z)
expr2 = Equality(A, (x - y)*z)
expr3 = Equality(B, 2*x)
name_expr = ("test", [expr1, expr2, expr3])
result = codegen(name_expr, "Octave", header=False, empty=False,
argument_sequence=(x, z, y))
assert result[0][0] == "test.m"
source = result[0][1]
expected = (
"function [C, A, B] = test(x, z, y)\n"
" C = z.*(x + y);\n"
" A = z.*(x - y);\n"
" B = 2*x;\n"
"end\n"
)
assert source == expected
def test_complicated_m_codegen():
from sympy import sin, cos, tan
name_expr = ("testlong",
[ ((sin(x) + cos(y) + tan(z))**3).expand(),
cos(cos(cos(cos(cos(cos(cos(cos(x + y + z))))))))
])
result = codegen(name_expr, "Octave", header=False, empty=False)
assert result[0][0] == "testlong.m"
source = result[0][1]
expected = (
"function [out1, out2] = testlong(x, y, z)\n"
" out1 = sin(x).^3 + 3*sin(x).^2.*cos(y) + 3*sin(x).^2.*tan(z)"
" + 3*sin(x).*cos(y).^2 + 6*sin(x).*cos(y).*tan(z) + 3*sin(x).*tan(z).^2"
" + cos(y).^3 + 3*cos(y).^2.*tan(z) + 3*cos(y).*tan(z).^2 + tan(z).^3;\n"
" out2 = cos(cos(cos(cos(cos(cos(cos(cos(x + y + z))))))));\n"
"end\n"
)
assert source == expected
def test_m_output_arg_mixed_unordered():
# named outputs are alphabetical, unnamed output appear in the given order
from sympy import sin, cos, tan
a = symbols("a")
name_expr = ("foo", [cos(2*x), Equality(y, sin(x)), cos(x), Equality(a, sin(2*x))])
result, = codegen(name_expr, "Octave", header=False, empty=False)
assert result[0] == "foo.m"
source = result[1];
expected = (
'function [out1, y, out3, a] = foo(x)\n'
' out1 = cos(2*x);\n'
' y = sin(x);\n'
' out3 = cos(x);\n'
' a = sin(2*x);\n'
'end\n'
)
assert source == expected
def test_m_piecewise_():
pw = Piecewise((0, x < -1), (x**2, x <= 1), (-x+2, x > 1), (1, True))
name_expr = ("pwtest", pw)
result, = codegen(name_expr, "Octave", header=False, empty=False)
source = result[1]
expected = (
"function out1 = pwtest(x)\n"
" out1 = ((x < -1).*(0) + (~(x < -1)).*( ...\n"
" (x <= 1).*(x.^2) + (~(x <= 1)).*( ...\n"
" (x > 1).*(-x + 2) + (~(x > 1)).*(1))));\n"
"end\n"
)
assert source == expected
@XFAIL
def test_m_piecewise_no_inline():
# FIXME: how to pass inline=False to the OctaveCodePrinter?
pw = Piecewise((0, x < -1), (x**2, x <= 1), (-x+2, x > 1), (1, True))
name_expr = ("pwtest", pw)
result, = codegen(name_expr, "Octave", header=False, empty=False,
inline=False)
source = result[1]
expected = (
"function out1 = pwtest(x)\n"
" if (x < -1)\n"
" out1 = 0;\n"
" elseif (x <= 1)\n"
" out1 = x.^2;\n"
" elseif (x > 1)\n"
" out1 = -x + 2;\n"
" else\n"
" out1 = 1;\n"
" end\n"
"end\n"
)
assert source == expected
def test_m_multifcns_per_file():
name_expr = [ ("foo", [2*x, 3*y]), ("bar", [y**2, 4*y]) ]
result = codegen(name_expr, "Octave", header=False, empty=False)
assert result[0][0] == "foo.m"
source = result[0][1];
expected = (
"function [out1, out2] = foo(x, y)\n"
" out1 = 2*x;\n"
" out2 = 3*y;\n"
"end\n"
"function [out1, out2] = bar(y)\n"
" out1 = y.^2;\n"
" out2 = 4*y;\n"
"end\n"
)
assert source == expected
def test_m_multifcns_per_file_w_header():
name_expr = [ ("foo", [2*x, 3*y]), ("bar", [y**2, 4*y]) ]
result = codegen(name_expr, "Octave", header=True, empty=False)
assert result[0][0] == "foo.m"
source = result[0][1];
expected = (
"function [out1, out2] = foo(x, y)\n"
" %FOO Autogenerated by sympy\n"
" % Code generated with sympy " + sympy.__version__ + "\n"
" %\n"
" % See http://www.sympy.org/ for more information.\n"
" %\n"
" % This file is part of 'project'\n"
" out1 = 2*x;\n"
" out2 = 3*y;\n"
"end\n"
"function [out1, out2] = bar(y)\n"
" out1 = y.^2;\n"
" out2 = 4*y;\n"
"end\n"
)
assert source == expected
def test_m_filename_match_first_fcn():
name_expr = [ ("foo", [2*x, 3*y]), ("bar", [y**2, 4*y]) ]
raises(ValueError, lambda: codegen(name_expr,
"Octave", prefix="bar", header=False, empty=False))
def test_m_matrix_named():
e2 = Matrix([[x, 2*y, pi*z]])
name_expr = ("test", Equality(MatrixSymbol('myout1', 1, 3), e2))
result = codegen(name_expr, "Octave", header=False, empty=False)
assert result[0][0] == "test.m"
source = result[0][1]
expected = (
"function myout1 = test(x, y, z)\n"
" myout1 = [x 2*y pi*z];\n"
"end\n"
)
assert source == expected
def test_m_matrix_named_matsym():
myout1 = MatrixSymbol('myout1', 1, 3)
e2 = Matrix([[x, 2*y, pi*z]])
name_expr = ("test", Equality(myout1, e2, evaluate=False))
result, = codegen(name_expr, "Octave", header=False, empty=False)
source = result[1]
expected = (
"function myout1 = test(x, y, z)\n"
" myout1 = [x 2*y pi*z];\n"
"end\n"
)
assert source == expected
def test_m_matrix_output_autoname():
expr = Matrix([[x, x+y, 3]])
name_expr = ("test", expr)
result, = codegen(name_expr, "Octave", header=False, empty=False)
source = result[1]
expected = (
"function out1 = test(x, y)\n"
" out1 = [x x + y 3];\n"
"end\n"
)
assert source == expected
def test_m_matrix_output_autoname_2():
e1 = (x + y)
e2 = Matrix([[2*x, 2*y, 2*z]])
e3 = Matrix([[x], [y], [z]])
e4 = Matrix([[x, y], [z, 16]])
name_expr = ("test", (e1, e2, e3, e4))
result, = codegen(name_expr, "Octave", header=False, empty=False)
source = result[1]
expected = (
"function [out1, out2, out3, out4] = test(x, y, z)\n"
" out1 = x + y;\n"
" out2 = [2*x 2*y 2*z];\n"
" out3 = [x; y; z];\n"
" out4 = [x y;\n"
" z 16];\n"
"end\n"
)
assert source == expected
def test_m_results_matrix_named_ordered():
B, C = symbols('B,C')
A = MatrixSymbol('A', 1, 3)
expr1 = Equality(C, (x + y)*z)
expr2 = Equality(A, Matrix([[1, 2, x]]))
expr3 = Equality(B, 2*x)
name_expr = ("test", [expr1, expr2, expr3])
result, = codegen(name_expr, "Octave", header=False, empty=False,
argument_sequence=(x, z, y))
source = result[1]
expected = (
"function [C, A, B] = test(x, z, y)\n"
" C = z.*(x + y);\n"
" A = [1 2 x];\n"
" B = 2*x;\n"
"end\n"
)
assert source == expected
def test_m_matrixsymbol_slice():
A = MatrixSymbol('A', 2, 3)
B = MatrixSymbol('B', 1, 3)
C = MatrixSymbol('C', 1, 3)
D = MatrixSymbol('D', 2, 1)
name_expr = ("test", [Equality(B, A[0, :]),
Equality(C, A[1, :]),
Equality(D, A[:, 2])])
result, = codegen(name_expr, "Octave", header=False, empty=False)
source = result[1]
expected = (
"function [B, C, D] = test(A)\n"
" B = A(1, :);\n"
" C = A(2, :);\n"
" D = A(:, 3);\n"
"end\n"
)
assert source == expected
def test_m_matrixsymbol_slice2():
A = MatrixSymbol('A', 3, 4)
B = MatrixSymbol('B', 2, 2)
C = MatrixSymbol('C', 2, 2)
name_expr = ("test", [Equality(B, A[0:2, 0:2]),
Equality(C, A[0:2, 1:3])])
result, = codegen(name_expr, "Octave", header=False, empty=False)
source = result[1]
expected = (
"function [B, C] = test(A)\n"
" B = A(1:2, 1:2);\n"
" C = A(1:2, 2:3);\n"
"end\n"
)
assert source == expected
def test_m_matrixsymbol_slice3():
A = MatrixSymbol('A', 8, 7)
B = MatrixSymbol('B', 2, 2)
C = MatrixSymbol('C', 4, 2)
name_expr = ("test", [Equality(B, A[6:, 1::3]),
Equality(C, A[::2, ::3])])
result, = codegen(name_expr, "Octave", header=False, empty=False)
source = result[1]
expected = (
"function [B, C] = test(A)\n"
" B = A(7:end, 2:3:end);\n"
" C = A(1:2:end, 1:3:end);\n"
"end\n"
)
assert source == expected
def test_m_matrixsymbol_slice_autoname():
A = MatrixSymbol('A', 2, 3)
B = MatrixSymbol('B', 1, 3)
name_expr = ("test", [Equality(B, A[0,:]), A[1,:], A[:,0], A[:,1]])
result, = codegen(name_expr, "Octave", header=False, empty=False)
source = result[1]
expected = (
"function [B, out2, out3, out4] = test(A)\n"
" B = A(1, :);\n"
" out2 = A(2, :);\n"
" out3 = A(:, 1);\n"
" out4 = A(:, 2);\n"
"end\n"
)
assert source == expected
def test_m_loops():
# Note: an Octave programmer would probably vectorize this across one or
# more dimensions. Also, size(A) would be used rather than passing in m
# and n. Perhaps users would expect us to vectorize automatically here?
# Or is it possible to represent such things using IndexedBase?
from sympy.tensor import IndexedBase, Idx
from sympy import symbols
n, m = symbols('n m', integer=True)
A = IndexedBase('A')
x = IndexedBase('x')
y = IndexedBase('y')
i = Idx('i', m)
j = Idx('j', n)
result, = codegen(('mat_vec_mult', Eq(y[i], A[i, j]*x[j])), "Octave",
header=False, empty=False)
source = result[1]
expected = (
'function y = mat_vec_mult(A, m, n, x)\n'
' for i = 1:m\n'
' y(i) = 0;\n'
' end\n'
' for i = 1:m\n'
' for j = 1:n\n'
' y(i) = %(rhs)s + y(i);\n'
' end\n'
' end\n'
'end\n'
)
assert (source == expected % {'rhs': 'A(%s, %s).*x(j)' % (i, j)} or
source == expected % {'rhs': 'x(j).*A(%s, %s)' % (i, j)})
def test_m_tensor_loops_multiple_contractions():
# see comments in previous test about vectorizing
from sympy.tensor import IndexedBase, Idx
from sympy import symbols
n, m, o, p = symbols('n m o p', integer=True)
A = IndexedBase('A')
B = IndexedBase('B')
y = IndexedBase('y')
i = Idx('i', m)
j = Idx('j', n)
k = Idx('k', o)
l = Idx('l', p)
result, = codegen(('tensorthing', Eq(y[i], B[j, k, l]*A[i, j, k, l])),
"Octave", header=False, empty=False)
source = result[1]
expected = (
'function y = tensorthing(A, B, m, n, o, p)\n'
' for i = 1:m\n'
' y(i) = 0;\n'
' end\n'
' for i = 1:m\n'
' for j = 1:n\n'
' for k = 1:o\n'
' for l = 1:p\n'
' y(i) = y(i) + B(j, k, l).*A(i, j, k, l);\n'
' end\n'
' end\n'
' end\n'
' end\n'
'end\n'
)
assert source == expected
def test_m_InOutArgument():
    """When the output symbol is also an input, Octave reuses the argument."""
    routine = ("mysqr", Equality(x, x**2))
    (_, source), = codegen(routine, "Octave", header=False, empty=False)
    assert source == (
        "function x = mysqr(x)\n"
        "  x = x.^2;\n"
        "end\n"
    )
def test_m_InOutArgument_order():
# can specify the order as (x, y)
expr = Equality(x, x**2 + y)
name_expr = ("test", expr)
result, = codegen(name_expr, "Octave", header=False,
empty=False, argument_sequence=(x,y))
source = result[1]
expected = (
"function x = test(x, y)\n"
" x = x.^2 + y;\n"
"end\n"
)
assert source == expected
# make sure it gives (x, y) not (y, x)
expr = Equality(x, x**2 + y)
name_expr = ("test", expr)
result, = codegen(name_expr, "Octave", header=False, empty=False)
source = result[1]
expected = (
"function x = test(x, y)\n"
" x = x.^2 + y;\n"
"end\n"
)
assert source == expected
def test_m_not_supported():
f = Function('f')
name_expr = ("test", [f(x).diff(x), S.ComplexInfinity])
result, = codegen(name_expr, "Octave", header=False, empty=False)
source = result[1]
expected = (
"function [out1, out2] = test(x)\n"
" % unsupported: Derivative(f(x), x)\n"
" % unsupported: zoo\n"
" out1 = Derivative(f(x), x);\n"
" out2 = zoo;\n"
"end\n"
)
assert source == expected
def test_global_vars_octave():
x, y, z, t = symbols("x y z t")
result = codegen(('f', x*y), "Octave", header=False, empty=False,
global_vars=(y,))
source = result[0][1]
expected = (
"function out1 = f(x)\n"
" global y\n"
" out1 = x.*y;\n"
"end\n"
)
assert source == expected
result = codegen(('f', x*y+z), "Octave", header=False, empty=False,
argument_sequence=(x, y), global_vars=(z, t))
source = result[0][1]
expected = (
"function out1 = f(x, y)\n"
" global t z\n"
" out1 = x.*y + z;\n"
"end\n"
)
assert source == expected | unknown | codeparrot/codeparrot-clean | ||
/*
* _codecs_kr.c: Codecs collection for Korean encodings
*
* Written by Hye-Shik Chang <perky@FreeBSD.org>
*/
#include "cjkcodecs.h"
#include "mappings_kr.h"
/*
* EUC-KR codec
*/
#define EUCKR_JAMO_FIRSTBYTE 0xA4
#define EUCKR_JAMO_FILLER 0xD4
static const unsigned char u2cgk_choseong[19] = {
0xa1, 0xa2, 0xa4, 0xa7, 0xa8, 0xa9, 0xb1, 0xb2,
0xb3, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xbb,
0xbc, 0xbd, 0xbe
};
static const unsigned char u2cgk_jungseong[21] = {
0xbf, 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6,
0xc7, 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce,
0xcf, 0xd0, 0xd1, 0xd2, 0xd3
};
static const unsigned char u2cgk_jongseong[28] = {
0xd4, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7,
0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, 0xb0,
0xb1, 0xb2, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xba,
0xbb, 0xbc, 0xbd, 0xbe
};
ENCODER(euc_kr)
{
while (*inpos < inlen) {
Py_UCS4 c = INCHAR1;
DBCHAR code;
if (c < 0x80) {
WRITEBYTE1((unsigned char)c);
NEXT(1, 1);
continue;
}
if (c > 0xFFFF)
return 1;
REQUIRE_OUTBUF(2);
if (TRYMAP_ENC(cp949, code, c))
;
else
return 1;
if ((code & 0x8000) == 0) {
/* KS X 1001 coded character */
OUTBYTE1((code >> 8) | 0x80);
OUTBYTE2((code & 0xFF) | 0x80);
NEXT(1, 2);
}
else {
/* Mapping is found in CP949 extension,
but we encode it in KS X 1001:1998,
make-up sequence for EUC-KR. */
REQUIRE_OUTBUF(8);
/* syllable composition precedence */
OUTBYTE1(EUCKR_JAMO_FIRSTBYTE);
OUTBYTE2(EUCKR_JAMO_FILLER);
/* All code points in CP949 extension are in unicode
* Hangul Syllable area. */
assert(0xac00 <= c && c <= 0xd7a3);
c -= 0xac00;
OUTBYTE3(EUCKR_JAMO_FIRSTBYTE);
OUTBYTE4(u2cgk_choseong[c / 588]);
NEXT_OUT(4);
OUTBYTE1(EUCKR_JAMO_FIRSTBYTE);
OUTBYTE2(u2cgk_jungseong[(c / 28) % 21]);
OUTBYTE3(EUCKR_JAMO_FIRSTBYTE);
OUTBYTE4(u2cgk_jongseong[c % 28]);
NEXT(1, 4);
}
}
return 0;
}
#define NONE 127
static const unsigned char cgk2u_choseong[] = { /* [A1, BE] */
0, 1, NONE, 2, NONE, NONE, 3, 4,
5, NONE, NONE, NONE, NONE, NONE, NONE, NONE,
6, 7, 8, NONE, 9, 10, 11, 12,
13, 14, 15, 16, 17, 18
};
static const unsigned char cgk2u_jongseong[] = { /* [A1, BE] */
1, 2, 3, 4, 5, 6, 7, NONE,
8, 9, 10, 11, 12, 13, 14, 15,
16, 17, NONE, 18, 19, 20, 21, 22,
NONE, 23, 24, 25, 26, 27
};
DECODER(euc_kr)
{
while (inleft > 0) {
unsigned char c = INBYTE1;
Py_UCS4 decoded;
if (c < 0x80) {
OUTCHAR(c);
NEXT_IN(1);
continue;
}
REQUIRE_INBUF(2);
if (c == EUCKR_JAMO_FIRSTBYTE &&
INBYTE2 == EUCKR_JAMO_FILLER) {
/* KS X 1001:1998 make-up sequence */
DBCHAR cho, jung, jong;
REQUIRE_INBUF(8);
if ((*inbuf)[2] != EUCKR_JAMO_FIRSTBYTE ||
(*inbuf)[4] != EUCKR_JAMO_FIRSTBYTE ||
(*inbuf)[6] != EUCKR_JAMO_FIRSTBYTE)
return 1;
c = (*inbuf)[3];
if (0xa1 <= c && c <= 0xbe)
cho = cgk2u_choseong[c - 0xa1];
else
cho = NONE;
c = (*inbuf)[5];
jung = (0xbf <= c && c <= 0xd3) ? c - 0xbf : NONE;
c = (*inbuf)[7];
if (c == EUCKR_JAMO_FILLER)
jong = 0;
else if (0xa1 <= c && c <= 0xbe)
jong = cgk2u_jongseong[c - 0xa1];
else
jong = NONE;
if (cho == NONE || jung == NONE || jong == NONE)
return 1;
OUTCHAR(0xac00 + cho*588 + jung*28 + jong);
NEXT_IN(8);
}
else if (TRYMAP_DEC(ksx1001, decoded, c ^ 0x80, INBYTE2 ^ 0x80)) {
OUTCHAR(decoded);
NEXT_IN(2);
}
else
return 1;
}
return 0;
}
#undef NONE
/*
* CP949 codec
*/
/* Encode a Unicode buffer to CP949 (Unified Hangul Code).  Returns 0 on
 * success, or 1 when the current character cannot be mapped. */
ENCODER(cp949)
{
    while (*inpos < inlen) {
        Py_UCS4 c = INCHAR1;
        DBCHAR code;
        /* ASCII passes through as a single byte. */
        if (c < 0x80) {
            WRITEBYTE1((unsigned char)c);
            NEXT(1, 1);
            continue;
        }
        /* The cp949 mapping table only covers the BMP. */
        if (c > 0xFFFF)
            return 1;
        REQUIRE_OUTBUF(2);
        if (TRYMAP_ENC(cp949, code, c))
            ;
        else
            return 1;
        /* Lead byte is stored in the table without its high bit; set it. */
        OUTBYTE1((code >> 8) | 0x80);
        if (code & 0x8000)
            OUTBYTE2(code & 0xFF); /* MSB set: CP949 */
        else
            OUTBYTE2((code & 0xFF) | 0x80); /* MSB unset: ks x 1001 */
        NEXT(1, 2);
    }
    return 0;
}
/* Decode CP949 bytes to Unicode.  Returns 0 on success, or 1 on an
 * unmappable or truncated two-byte sequence. */
DECODER(cp949)
{
    while (inleft > 0) {
        unsigned char c = INBYTE1;
        Py_UCS4 decoded;
        /* ASCII bytes map to themselves. */
        if (c < 0x80) {
            OUTCHAR(c);
            NEXT_IN(1);
            continue;
        }
        REQUIRE_INBUF(2);
        /* Try the KS X 1001 plane first (table is indexed with the high
         * bits stripped, hence the ^ 0x80), then fall back to the CP949
         * extension table. */
        if (TRYMAP_DEC(ksx1001, decoded, c ^ 0x80, INBYTE2 ^ 0x80))
            OUTCHAR(decoded);
        else if (TRYMAP_DEC(cp949ext, decoded, c, INBYTE2))
            OUTCHAR(decoded);
        else
            return 1;
        NEXT_IN(2);
    }
    return 0;
}
/*
* JOHAB codec
*/
static const unsigned char u2johabidx_choseong[32] = {
0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
0x10, 0x11, 0x12, 0x13, 0x14,
};
static const unsigned char u2johabidx_jungseong[32] = {
0x03, 0x04, 0x05, 0x06, 0x07,
0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
0x1a, 0x1b, 0x1c, 0x1d,
};
static const unsigned char u2johabidx_jongseong[32] = {
0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
0x10, 0x11, 0x13, 0x14, 0x15, 0x16, 0x17,
0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d,
};
static const DBCHAR u2johabjamo[] = {
0x8841, 0x8c41, 0x8444, 0x9041, 0x8446, 0x8447, 0x9441,
0x9841, 0x9c41, 0x844a, 0x844b, 0x844c, 0x844d, 0x844e, 0x844f,
0x8450, 0xa041, 0xa441, 0xa841, 0x8454, 0xac41, 0xb041, 0xb441,
0xb841, 0xbc41, 0xc041, 0xc441, 0xc841, 0xcc41, 0xd041, 0x8461,
0x8481, 0x84a1, 0x84c1, 0x84e1, 0x8541, 0x8561, 0x8581, 0x85a1,
0x85c1, 0x85e1, 0x8641, 0x8661, 0x8681, 0x86a1, 0x86c1, 0x86e1,
0x8741, 0x8761, 0x8781, 0x87a1,
};
ENCODER(johab)
{
while (*inpos < inlen) {
Py_UCS4 c = INCHAR1;
DBCHAR code;
if (c < 0x80) {
WRITEBYTE1((unsigned char)c);
NEXT(1, 1);
continue;
}
if (c > 0xFFFF)
return 1;
REQUIRE_OUTBUF(2);
if (c >= 0xac00 && c <= 0xd7a3) {
c -= 0xac00;
code = 0x8000 |
(u2johabidx_choseong[c / 588] << 10) |
(u2johabidx_jungseong[(c / 28) % 21] << 5) |
u2johabidx_jongseong[c % 28];
}
else if (c >= 0x3131 && c <= 0x3163)
code = u2johabjamo[c - 0x3131];
else if (TRYMAP_ENC(cp949, code, c)) {
unsigned char c1, c2, t2;
unsigned short t1;
assert((code & 0x8000) == 0);
c1 = code >> 8;
c2 = code & 0xff;
if (((c1 >= 0x21 && c1 <= 0x2c) ||
(c1 >= 0x4a && c1 <= 0x7d)) &&
(c2 >= 0x21 && c2 <= 0x7e)) {
t1 = (c1 < 0x4a ? (c1 - 0x21 + 0x1b2) :
(c1 - 0x21 + 0x197));
t2 = ((t1 & 1) ? 0x5e : 0) + (c2 - 0x21);
OUTBYTE1(t1 >> 1);
OUTBYTE2(t2 < 0x4e ? t2 + 0x31 : t2 + 0x43);
NEXT(1, 2);
continue;
}
else
return 1;
}
else
return 1;
OUTBYTE1(code >> 8);
OUTBYTE2(code & 0xff);
NEXT(1, 2);
}
return 0;
}
#define FILL 0xfd
#define NONE 0xff
static const unsigned char johabidx_choseong[32] = {
NONE, FILL, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d,
0x0e, 0x0f, 0x10, 0x11, 0x12, NONE, NONE, NONE,
NONE, NONE, NONE, NONE, NONE, NONE, NONE, NONE,
};
static const unsigned char johabidx_jungseong[32] = {
NONE, NONE, FILL, 0x00, 0x01, 0x02, 0x03, 0x04,
NONE, NONE, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a,
NONE, NONE, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10,
NONE, NONE, 0x11, 0x12, 0x13, 0x14, NONE, NONE,
};
static const unsigned char johabidx_jongseong[32] = {
NONE, FILL, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06,
0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e,
0x0f, 0x10, NONE, 0x11, 0x12, 0x13, 0x14, 0x15,
0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, NONE, NONE,
};
static const unsigned char johabjamo_choseong[32] = {
NONE, FILL, 0x31, 0x32, 0x34, 0x37, 0x38, 0x39,
0x41, 0x42, 0x43, 0x45, 0x46, 0x47, 0x48, 0x49,
0x4a, 0x4b, 0x4c, 0x4d, 0x4e, NONE, NONE, NONE,
NONE, NONE, NONE, NONE, NONE, NONE, NONE, NONE,
};
static const unsigned char johabjamo_jungseong[32] = {
NONE, NONE, FILL, 0x4f, 0x50, 0x51, 0x52, 0x53,
NONE, NONE, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59,
NONE, NONE, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
NONE, NONE, 0x60, 0x61, 0x62, 0x63, NONE, NONE,
};
static const unsigned char johabjamo_jongseong[32] = {
NONE, FILL, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36,
0x37, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
0x40, 0x41, NONE, 0x42, 0x44, 0x45, 0x46, 0x47,
0x48, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, NONE, NONE,
};
DECODER(johab)
{
while (inleft > 0) {
unsigned char c = INBYTE1, c2;
Py_UCS4 decoded;
if (c < 0x80) {
OUTCHAR(c);
NEXT_IN(1);
continue;
}
REQUIRE_INBUF(2);
c2 = INBYTE2;
if (c < 0xd8) {
/* johab hangul */
unsigned char c_cho, c_jung, c_jong;
unsigned char i_cho, i_jung, i_jong;
c_cho = (c >> 2) & 0x1f;
c_jung = ((c << 3) | c2 >> 5) & 0x1f;
c_jong = c2 & 0x1f;
i_cho = johabidx_choseong[c_cho];
i_jung = johabidx_jungseong[c_jung];
i_jong = johabidx_jongseong[c_jong];
if (i_cho == NONE || i_jung == NONE || i_jong == NONE)
return 1;
/* we don't use U+1100 hangul jamo yet. */
if (i_cho == FILL) {
if (i_jung == FILL) {
if (i_jong == FILL)
OUTCHAR(0x3000);
else
OUTCHAR(0x3100 |
johabjamo_jongseong[c_jong]);
}
else {
if (i_jong == FILL)
OUTCHAR(0x3100 |
johabjamo_jungseong[c_jung]);
else
return 1;
}
} else {
if (i_jung == FILL) {
if (i_jong == FILL)
OUTCHAR(0x3100 |
johabjamo_choseong[c_cho]);
else
return 1;
}
else
OUTCHAR(0xac00 +
i_cho * 588 +
i_jung * 28 +
(i_jong == FILL ? 0 : i_jong));
}
NEXT_IN(2);
} else {
/* KS X 1001 except hangul jamos and syllables */
if (c == 0xdf || c > 0xf9 ||
c2 < 0x31 || (c2 >= 0x80 && c2 < 0x91) ||
(c2 & 0x7f) == 0x7f ||
(c == 0xda && (c2 >= 0xa1 && c2 <= 0xd3)))
return 1;
else {
unsigned char t1, t2;
t1 = (c < 0xe0 ? 2 * (c - 0xd9) :
2 * c - 0x197);
t2 = (c2 < 0x91 ? c2 - 0x31 : c2 - 0x43);
t1 = t1 + (t2 < 0x5e ? 0 : 1) + 0x21;
t2 = (t2 < 0x5e ? t2 : t2 - 0x5e) + 0x21;
if (TRYMAP_DEC(ksx1001, decoded, t1, t2)) {
OUTCHAR(decoded);
NEXT_IN(2);
}
else {
return 1;
}
}
}
}
return 0;
}
#undef NONE
#undef FILL
BEGIN_MAPPINGS_LIST(3)
MAPPING_DECONLY(ksx1001)
MAPPING_ENCONLY(cp949)
MAPPING_DECONLY(cp949ext)
END_MAPPINGS_LIST
BEGIN_CODECS_LIST(3)
CODEC_STATELESS(euc_kr)
CODEC_STATELESS(cp949)
CODEC_STATELESS(johab)
END_CODECS_LIST
I_AM_A_MODULE_FOR(kr) | c | github | https://github.com/python/cpython | Modules/cjkcodecs/_codecs_kr.c |
# -*- coding: utf-8 -*-
#
# This file is part of Harvesting Kit.
# Copyright (C) 2015 CERN.
#
# Harvesting Kit is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Harvesting Kit is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Harvesting Kit; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Common data for tests."""
# Journal-name abbreviation fixtures shared across the test suite.
#
# NOTE(review): the original literal listed 'A&A' and 'ARA&A' twice each with
# identical values; duplicate keys in a dict literal silently collapse, so the
# duplicates are dropped here with no behavior change.  Presumably the pairs
# were once distinct Unicode/encoding variants that got normalised — confirm
# against upstream history.
journal_mappings = {'A&A': 'Astron.Astrophys.',
                    'ApJ': 'Astrophys.J.',
                    'ARA&A': 'Ann.Rev.Astron.Astrophys.',
                    'Applied Optics': 'Appl.Opt.',
                    'ApJS': 'Astrophys.J.Suppl.',
                    'BAAS': 'Bull.Am.Astron.Soc.',
                    'Physics Letters': 'Phys.Lett.'}
# Paths (relative to the test module) of sample publisher records and the
# corresponding expected conversion outputs.
edp_test_record = 'data/sample_edpsciences_record.xml'
aps_test_record = 'data/sample_aps_record.xml'
ws_test_record = 'data/sample_ws_record.xml'
ws_erratum_test_record = 'data/sample_ws_erratum_record.xml'
aps_output = 'data/sample_aps_output.xml'
edp_output = 'data/sample_edp_output.xml'
ws_output = 'data/sample_ws_output.xml'
ws_erratum_output = 'data/sample_ws_erratum_output.xml'
# -*- coding: utf-8 -*-
#
# Copyright (C) 2009, 2010, 2012 Google Inc. All rights reserved.
# Copyright (C) 2009 Torch Mobile Inc.
# Copyright (C) 2009 Apple Inc. All rights reserved.
# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org)
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# This is the modified version of Google's cpplint. The original code is
# http://google-styleguide.googlecode.com/svn/trunk/cpplint/cpplint.py
"""Support for check-webkit-style."""
import codecs
import math # for log
import os
import os.path
import re
import sre_compile
import string
import sys
import unicodedata
from webkitpy.common.memoized import memoized
# The key to use to provide a class to fake loading a header file.
INCLUDE_IO_INJECTION_KEY = 'include_header_io'
# Headers that we consider STL headers.
_STL_HEADERS = frozenset([
'algobase.h', 'algorithm', 'alloc.h', 'bitset', 'deque', 'exception',
'function.h', 'functional', 'hash_map', 'hash_map.h', 'hash_set',
'hash_set.h', 'iterator', 'list', 'list.h', 'map', 'memory', 'pair.h',
'pthread_alloc', 'queue', 'set', 'set.h', 'sstream', 'stack',
'stl_alloc.h', 'stl_relops.h', 'type_traits.h',
'utility', 'vector', 'vector.h',
])
# Non-STL C++ system headers.
_CPP_HEADERS = frozenset([
'algo.h', 'builtinbuf.h', 'bvector.h', 'cassert', 'cctype',
'cerrno', 'cfloat', 'ciso646', 'climits', 'clocale', 'cmath',
'complex', 'complex.h', 'csetjmp', 'csignal', 'cstdarg', 'cstddef',
'cstdio', 'cstdlib', 'cstring', 'ctime', 'cwchar', 'cwctype',
'defalloc.h', 'deque.h', 'editbuf.h', 'exception', 'fstream',
'fstream.h', 'hashtable.h', 'heap.h', 'indstream.h', 'iomanip',
'iomanip.h', 'ios', 'iosfwd', 'iostream', 'iostream.h', 'istream.h',
'iterator.h', 'limits', 'map.h', 'multimap.h', 'multiset.h',
'numeric', 'ostream.h', 'parsestream.h', 'pfstream.h', 'PlotFile.h',
'procbuf.h', 'pthread_alloc.h', 'rope', 'rope.h', 'ropeimpl.h',
'SFile.h', 'slist', 'slist.h', 'stack.h', 'stdexcept',
'stdiostream.h', 'streambuf.h', 'stream.h', 'strfile.h', 'string',
'strstream', 'strstream.h', 'tempbuf.h', 'tree.h', 'typeinfo', 'valarray',
])
# Assertion macros. These are defined in base/logging.h and
# testing/base/gunit.h. Note that the _M versions need to come first
# for substring matching to work.
_CHECK_MACROS = [
'DCHECK', 'CHECK',
'EXPECT_TRUE_M', 'EXPECT_TRUE',
'ASSERT_TRUE_M', 'ASSERT_TRUE',
'EXPECT_FALSE_M', 'EXPECT_FALSE',
'ASSERT_FALSE_M', 'ASSERT_FALSE',
]
# Replacement macros for CHECK/DCHECK/EXPECT_TRUE/EXPECT_FALSE
_CHECK_REPLACEMENT = dict([(m, {}) for m in _CHECK_MACROS])
for op, replacement in [('==', 'EQ'), ('!=', 'NE'),
('>=', 'GE'), ('>', 'GT'),
('<=', 'LE'), ('<', 'LT')]:
_CHECK_REPLACEMENT['DCHECK'][op] = 'DCHECK_%s' % replacement
_CHECK_REPLACEMENT['CHECK'][op] = 'CHECK_%s' % replacement
_CHECK_REPLACEMENT['EXPECT_TRUE'][op] = 'EXPECT_%s' % replacement
_CHECK_REPLACEMENT['ASSERT_TRUE'][op] = 'ASSERT_%s' % replacement
_CHECK_REPLACEMENT['EXPECT_TRUE_M'][op] = 'EXPECT_%s_M' % replacement
_CHECK_REPLACEMENT['ASSERT_TRUE_M'][op] = 'ASSERT_%s_M' % replacement
for op, inv_replacement in [('==', 'NE'), ('!=', 'EQ'),
('>=', 'LT'), ('>', 'LE'),
('<=', 'GT'), ('<', 'GE')]:
_CHECK_REPLACEMENT['EXPECT_FALSE'][op] = 'EXPECT_%s' % inv_replacement
_CHECK_REPLACEMENT['ASSERT_FALSE'][op] = 'ASSERT_%s' % inv_replacement
_CHECK_REPLACEMENT['EXPECT_FALSE_M'][op] = 'EXPECT_%s_M' % inv_replacement
_CHECK_REPLACEMENT['ASSERT_FALSE_M'][op] = 'ASSERT_%s_M' % inv_replacement
# These constants define types of headers for use with
# _IncludeState.check_next_include_order().
_CONFIG_HEADER = 0
_PRIMARY_HEADER = 1
_OTHER_HEADER = 2
_MOC_HEADER = 3
# A dictionary of items customize behavior for unit test. For example,
# INCLUDE_IO_INJECTION_KEY allows providing a custom io class which allows
# for faking a header file.
_unit_test_config = {}
# The regexp compilation caching is inlined in all regexp functions for
# performance reasons; factoring it out into a separate function turns out
# to be noticeably expensive.
_regexp_compile_cache = {}
def match(pattern, s):
    """Matches s against pattern from the start, caching the compiled regexp.

    Args:
      pattern: The regexp source string.
      s: The string to match.

    Returns:
      A match object, or None when s does not match at its beginning.
    """
    # re.compile (the public API) builds the identical pattern object that
    # the private, deprecated sre_compile module did.
    if pattern not in _regexp_compile_cache:
        _regexp_compile_cache[pattern] = re.compile(pattern)
    return _regexp_compile_cache[pattern].match(s)
def search(pattern, s):
    """Searches s for pattern, caching the compiled regexp.

    Args:
      pattern: The regexp source string.
      s: The string to search.

    Returns:
      A match object for the first occurrence, or None when absent.
    """
    # re.compile (the public API) builds the identical pattern object that
    # the private, deprecated sre_compile module did.
    if pattern not in _regexp_compile_cache:
        _regexp_compile_cache[pattern] = re.compile(pattern)
    return _regexp_compile_cache[pattern].search(s)
def sub(pattern, replacement, s):
    """Substitutes occurrences of pattern in s, caching the compiled regexp.

    Args:
      pattern: The regexp source string.
      replacement: The replacement text (may use backreferences).
      s: The string to rewrite.

    Returns:
      s with every match of pattern replaced.
    """
    # re.compile (the public API) builds the identical pattern object that
    # the private, deprecated sre_compile module did.
    if pattern not in _regexp_compile_cache:
        _regexp_compile_cache[pattern] = re.compile(pattern)
    return _regexp_compile_cache[pattern].sub(replacement, s)
def subn(pattern, replacement, s):
    """Substitutes occurrences of pattern in s, caching the compiled regexp.

    Args:
      pattern: The regexp source string.
      replacement: The replacement text (may use backreferences).
      s: The string to rewrite.

    Returns:
      A (new_string, number_of_substitutions) pair, as re.subn does.
    """
    # re.compile (the public API) builds the identical pattern object that
    # the private, deprecated sre_compile module did.
    if pattern not in _regexp_compile_cache:
        _regexp_compile_cache[pattern] = re.compile(pattern)
    return _regexp_compile_cache[pattern].subn(replacement, s)
def iteratively_replace_matches_with_char(pattern, char_replacement, s):
    """Replaces every character of each match of pattern with char_replacement.

    The search restarts after every replacement, so patterns that only match
    innermost constructs (e.g. non-nested template argument lists) are
    eventually applied to the enclosing constructs as well.  Because of that
    iteration, pattern must never match char_replacement or this function
    will loop forever.

    Example:
      pattern = r'<[^<>]*>'   # template parameters
      char_replacement = '_'
      s = 'A<B<C, D>>'
      Returns 'A_________'

    Args:
      pattern: The regex to match.
      char_replacement: The character to put in place of every character
                        of the match.
      s: The string on which to do the replacements.

    Returns:
      The string with every matched character replaced.
    """
    # Compile once up front instead of re-resolving the pattern through the
    # module-level cache on every loop iteration.
    compiled_pattern = re.compile(pattern)
    while True:
        matched = compiled_pattern.search(s)
        if not matched:
            return s
        start_match_index = matched.start(0)
        end_match_index = matched.end(0)
        match_length = end_match_index - start_match_index
        s = s[:start_match_index] + char_replacement * match_length + s[end_match_index:]
def _rfind_in_lines(regex, lines, start_position, not_found_position):
    """Finds the last match of regex at or before start_position.

    Scans the line containing start_position (truncated at its column) and
    then each earlier line in turn, taking the right-most match on the
    closest line.

    Returns:
      A Position just past the end of that match, or not_found_position
      when no line matches.
    """
    # Prefixing the regex with a greedy '.*' forces the engine to settle on
    # the right-most occurrence within a line.
    rightmost_regex = r'.*(' + regex + ')'
    row = start_position.row
    # On the first line, ignore everything at or beyond the start column.
    line = lines[row][:start_position.column]
    while True:
        found = match(rightmost_regex, line)
        if found:
            return Position(row, found.end(1))
        # No match here; continue with the previous full line.
        row -= 1
        if row < 0:
            return not_found_position
        line = lines[row]
def _convert_to_lower_with_underscores(text):
"""Converts all text strings in camelCase or PascalCase to lowers with underscores."""
# First add underscores before any capital letter followed by a lower case letter
# as long as it is in a word.
# (This put an underscore before Password but not P and A in WPAPassword).
text = sub(r'(?<=[A-Za-z0-9])([A-Z])(?=[a-z])', r'_\1', text)
# Next add underscores before capitals at the end of words if it was
# preceeded by lower case letter or number.
# (This puts an underscore before A in isA but not A in CBA).
text = sub(r'(?<=[a-z0-9])([A-Z])(?=\b)', r'_\1', text)
# Next add underscores when you have a captial letter which is followed by a capital letter
# but is not proceeded by one. (This puts an underscore before A in 'WordADay').
text = sub(r'(?<=[a-z0-9])([A-Z][A-Z_])', r'_\1', text)
return text.lower()
def _create_acronym(text):
"""Creates an acronym for the given text."""
# Removes all lower case letters except those starting words.
text = sub(r'(?<!\b)[a-z]', '', text)
return text.upper()
def up_to_unmatched_closing_paren(s):
    """Splits a string at the first unmatched ')'.

    Args:
      s: a string which is a substring of line after '('
         (e.g., "a == (b + c))").

    Returns:
      A (prefix, remainder) pair: everything before the first unmatched
      ')' and everything after it, e.g.
      up_to_unmatched_closing_paren("a == (b + c)) { ") returns
      ("a == (b + c)", " { ").  Returns (None, None) when every ')' in s
      is matched.
    """
    # Depth starts at 1 because s is the text following an already-open '('.
    depth = 1
    for index, character in enumerate(s):
        if character == '(':
            depth += 1
        elif character == ')':
            depth -= 1
            if depth == 0:
                return s[:index], s[index + 1:]
    return None, None
class _IncludeState(dict):
    """Tracks line numbers for includes, and the order in which includes appear.

    As a dict, an _IncludeState object serves as a mapping between include
    filename and line number on which that file was included.

    Call check_next_include_order() once for each header in the file, passing
    in the type constants defined above. Calls in an illegal order will
    raise an _IncludeError with an appropriate error message.
    """
    # self._section will move monotonically through this set. If it ever
    # needs to move backwards, check_next_include_order will raise an error.
    _INITIAL_SECTION = 0
    _CONFIG_SECTION = 1
    _PRIMARY_SECTION = 2
    _OTHER_SECTION = 3

    # Human-readable names for the _XXX_HEADER type constants, used when
    # building error messages.
    _TYPE_NAMES = {
        _CONFIG_HEADER: 'WebCore config.h',
        _PRIMARY_HEADER: 'header this file implements',
        _OTHER_HEADER: 'other header',
        _MOC_HEADER: 'moc file',
        }
    # Human-readable names for the _XXX_SECTION constants, also used when
    # building error messages.
    _SECTION_NAMES = {
        _INITIAL_SECTION: "... nothing.",
        _CONFIG_SECTION: "WebCore config.h.",
        _PRIMARY_SECTION: 'a header this file implements.',
        _OTHER_SECTION: 'other header.',
        }

    def __init__(self):
        dict.__init__(self)
        # Current section of the include-order state machine.
        self._section = self._INITIAL_SECTION
        # True once the primary header (the one this file implements)
        # has been seen.
        self._visited_primary_section = False
        # NOTE(review): populated by callers elsewhere in this file;
        # appears to record the type of each seen header -- confirm usage.
        self.header_types = dict();

    def visited_primary_section(self):
        # Accessor for the flag set by check_next_include_order().
        return self._visited_primary_section

    def check_next_include_order(self, header_type, file_is_header, primary_header_exists):
        """Returns a non-empty error message if the next header is out of order.

        This function also updates the internal state to be ready to check
        the next include.

        Args:
          header_type: One of the _XXX_HEADER constants defined above.
          file_is_header: Whether the file that owns this _IncludeState is itself a header
          primary_header_exists: Whether a primary header for this file exists

        Returns:
          The empty string if the header is in the right order, or an
          error message describing what's wrong.
        """
        if header_type == _CONFIG_HEADER and file_is_header:
            return 'Header file should not contain WebCore config.h.'
        if header_type == _PRIMARY_HEADER and file_is_header:
            return 'Header file should not contain itself.'
        if header_type == _MOC_HEADER:
            # moc files are exempt from the ordering rules.
            return ''

        error_message = ''
        if self._section != self._OTHER_SECTION:
            # _section + 1 would be out of range for _OTHER_SECTION, hence
            # the guard above.
            before_error_message = ('Found %s before %s' %
                                    (self._TYPE_NAMES[header_type],
                                     self._SECTION_NAMES[self._section + 1]))
        after_error_message = ('Found %s after %s' %
                               (self._TYPE_NAMES[header_type],
                                self._SECTION_NAMES[self._section]))

        if header_type == _CONFIG_HEADER:
            # config.h must come before everything else.
            if self._section >= self._CONFIG_SECTION:
                error_message = after_error_message
            self._section = self._CONFIG_SECTION
        elif header_type == _PRIMARY_HEADER:
            # The primary header belongs after config.h and before the rest.
            if self._section >= self._PRIMARY_SECTION:
                error_message = after_error_message
            elif self._section < self._CONFIG_SECTION:
                error_message = before_error_message
            self._section = self._PRIMARY_SECTION
            self._visited_primary_section = True
        else:
            assert header_type == _OTHER_HEADER
            if not file_is_header and self._section < self._PRIMARY_SECTION:
                # Only complain when a primary header actually exists to be
                # placed ahead of this one.
                if primary_header_exists:
                    error_message = before_error_message
            self._section = self._OTHER_SECTION

        return error_message
class Position(object):
    """A (row, column) location in a list of source lines.

    Orders lexicographically: by row first, then by column.
    """

    def __init__(self, row, column):
        self.row = row        # 0-based line index.
        self.column = column  # 0-based character offset within the line.

    def __str__(self):
        return '(%s, %s)' % (self.row, self.column)

    def __cmp__(self, other):
        # Python 2 comparison hook, kept for the file's Python 2 callers.
        return cmp(self.row, other.row) or cmp(self.column, other.column)  # noqa: F821

    # Rich comparisons so ordering also works under Python 3, where __cmp__
    # (and the int.__cmp__ the old implementation relied on) no longer exists.
    def __eq__(self, other):
        return (self.row, self.column) == (other.row, other.column)

    def __ne__(self, other):
        return (self.row, self.column) != (other.row, other.column)

    def __lt__(self, other):
        return (self.row, self.column) < (other.row, other.column)

    def __le__(self, other):
        return (self.row, self.column) <= (other.row, other.column)

    def __gt__(self, other):
        return (self.row, self.column) > (other.row, other.column)

    def __ge__(self, other):
        return (self.row, self.column) >= (other.row, other.column)

    # Defining __eq__ would otherwise disable hashing on Python 3; keep the
    # original identity-based hashing behavior.
    __hash__ = object.__hash__
class Parameter(object):
    """Information about one function parameter."""

    def __init__(self, parameter, parameter_name_index, row):
        type_text = parameter[:parameter_name_index]
        name_text = parameter[parameter_name_index:]
        self.type = type_text.strip()
        # Drop any initializer so "int i = 5" yields the name "i".
        self.name = sub(r'=.*', '', name_text).strip()
        # Row in the source where this parameter appears.
        self.row = row

    @memoized
    def lower_with_underscores_name(self):
        """Returns the parameter name in the lower_with_underscores format."""
        return _convert_to_lower_with_underscores(self.name)
class SingleLineView(object):
    """Flattens a span of lines into one space-joined line to ease searching."""

    def __init__(self, lines, start_position, end_position):
        """Create a SingleLineView instance.

        Args:
          lines: a list of multiple lines to combine into a single line.
          start_position: offset within lines of where to start the single line.
          end_position: just after where to end (like a slice operation).
        """
        # Copy the rows of interest, then trim the partial last and first
        # rows down to the requested columns.
        span = lines[start_position.row:end_position.row + 1]
        span[-1] = span[-1][:end_position.column]
        span[0] = span[0][start_position.column:]
        self.single_line = ' '.join(span)
        # Row lengths (plus one for the joining space) let us map a column
        # in the flattened line back to its original row.
        self._row_lengths = [len(row_text) + 1 for row_text in span]
        self._starting_row = start_position.row

    def convert_column_to_row(self, single_line_column_number):
        """Maps a column in the flattened line back to its original row.

        Columns falling on a joining space belong to the preceding row;
        columns beyond the end of the text belong to the last row in the
        view.
        """
        columns_consumed = 0
        row_offset = 0
        while (row_offset < len(self._row_lengths) - 1
               and single_line_column_number >= columns_consumed + self._row_lengths[row_offset]):
            columns_consumed += self._row_lengths[row_offset]
            row_offset += 1
        return self._starting_row + row_offset
def create_skeleton_parameters(all_parameters):
    """Reduces a parameter list to a skeleton version.

    The skeleton keeps only one word for each parameter type and one word
    for each parameter name, with a comma after every parameter (and
    nowhere else).  All surviving text stays in its original columns.
    """
    simplifications = (
        # Remove template parameters, function declaration parameters, etc.
        r'(<[^<>]*?>)|(\([^\(\)]*?\))|(\{[^\{\}]*?\})',
        # Remove all initializers.
        r'=[^,]*',
        # Remove :: and everything before it.
        r'[^,]*::',
        # Remove modifiers like &, *.
        r'[&*]',
        # Remove const modifiers.
        r'\bconst\s+(?=[A-Za-z])',
        # Remove numerical modifiers like long.
        r'\b(unsigned|long|short)\s+(?=unsigned|long|short|int|char|double|float)')

    skeleton = all_parameters
    for simplification in simplifications:
        skeleton = iteratively_replace_matches_with_char(simplification, ' ', skeleton)

    # Append a trailing comma when any parameter survived, so that every
    # parameter is uniformly followed by a comma.
    if skeleton.strip():
        skeleton += ','
    return skeleton
def find_parameter_name_index(skeleton_parameter):
    """Determines where the parameter name starts in a skeleton parameter.

    Args:
      skeleton_parameter: one parameter as produced by
          create_skeleton_parameters (one word of type, one word of name).

    Returns:
      The index at which the parameter name starts, or the length of the
      string when no name can be identified.
    """
    # The name starts after the last space -- unless everything before that
    # space is blank (no type word), in which case there is no name.
    before_name_index = skeleton_parameter.rstrip().rfind(' ')
    if before_name_index != -1 and skeleton_parameter[:before_name_index].strip():
        return before_name_index + 1
    return len(skeleton_parameter)
def parameter_list(elided_lines, start_position, end_position):
    """Yields a Parameter for each parameter between the given positions."""
    # Shrink the range by one column on each side to drop the enclosing
    # parentheses.
    start_position = Position(row=start_position.row, column=start_position.column + 1)
    end_position = Position(row=end_position.row, column=end_position.column - 1)

    single_line_view = SingleLineView(elided_lines, start_position, end_position)
    skeleton_parameters = create_skeleton_parameters(single_line_view.single_line)

    end_index = -1
    while True:
        start_index = end_index + 1
        # Every parameter in the skeleton is followed by a comma, so no
        # comma means everything has been parsed.
        end_index = skeleton_parameters.find(',', start_index)
        if end_index == -1:
            return

        row = single_line_view.convert_column_to_row(end_index)
        # Split the skeleton slice into type and name, then carry the
        # offsets back to the real (unsimplified) text.
        name_offset = find_parameter_name_index(skeleton_parameters[start_index:end_index])
        yield Parameter(single_line_view.single_line[start_index:end_index], name_offset, row)
class _FunctionState(object):
    """Tracks current function name and the number of lines in its body.

    Attributes:
      min_confidence: The minimum confidence level to use while checking style.
    """

    _NORMAL_TRIGGER = 250  # for --v=0, 500 for --v=1, etc.
    _TEST_TRIGGER = 400    # about 50% more than _NORMAL_TRIGGER.

    def __init__(self, min_confidence):
        self.min_confidence = min_confidence
        self.current_function = ''
        self.in_a_function = False
        self.lines_in_function = 0
        # Make sure these will not be mistaken for real positions (even when a
        # small amount is added to them).
        self.body_start_position = Position(-1000, 0)
        self.end_position = Position(-1000, 0)

    def begin(self, function_name, function_name_start_position, body_start_position, end_position,
              parameter_start_position, parameter_end_position, clean_lines):
        """Start analyzing function body.

        Args:
            function_name: The name of the function being tracked.
            function_name_start_position: Position in elided where the function name starts.
            body_start_position: Position in elided of the { or the ; for a prototype.
            end_position: Position in elided just after the final } (or the ;).
            parameter_start_position: Position in elided of the '(' for the parameters.
            parameter_end_position: Position in elided just after the ')' for the parameters.
            clean_lines: A CleansedLines instance containing the file.
        """
        self.in_a_function = True
        self.lines_in_function = -1  # Don't count the open brace line.
        self.current_function = function_name
        self.function_name_start_position = function_name_start_position
        self.body_start_position = body_start_position
        self.end_position = end_position
        # A declaration (prototype) ends in ';' where a definition has '{'.
        self.is_declaration = clean_lines.elided[body_start_position.row][body_start_position.column] == ';'
        self.parameter_start_position = parameter_start_position
        self.parameter_end_position = parameter_end_position
        self.is_pure = False
        if self.is_declaration:
            # "= 0" between the ')' and the ';' marks a pure virtual.
            characters_after_parameters = SingleLineView(clean_lines.elided, parameter_end_position, body_start_position).single_line
            self.is_pure = bool(match(r'\s*=\s*0\s*', characters_after_parameters))
        self._clean_lines = clean_lines
        self._parameter_list = None

    def modifiers_and_return_type(self):
        """Returns the modifiers and the return type."""
        # Go backwards from where the function name is until we encounter one of several things:
        #   ';' or '{' or '}' or 'private:', etc. or '#' or return Position(0, 0)
        elided = self._clean_lines.elided
        start_modifiers = _rfind_in_lines(r';|\{|\}|((private|public|protected):)|(#.*)',
                                          elided, self.parameter_start_position, Position(0, 0))
        return SingleLineView(elided, start_modifiers, self.function_name_start_position).single_line.strip()

    def parameter_list(self):
        """Returns the function's parameters, computing and caching them lazily."""
        if not self._parameter_list:
            # Store the final result as a tuple since that is immutable.
            self._parameter_list = tuple(parameter_list(self._clean_lines.elided, self.parameter_start_position, self.parameter_end_position))

        return self._parameter_list

    def count(self, line_number):
        """Count line in current function body."""
        if self.in_a_function and line_number >= self.body_start_position.row:
            self.lines_in_function += 1

    def check(self, error, line_number):
        """Report if too many lines in function body.

        Args:
          error: The function to call with any errors found.
          line_number: The number of the line to check.
        """
        # Test functions are allowed to be longer than regular functions.
        if match(r'T(EST|est)', self.current_function):
            base_trigger = self._TEST_TRIGGER
        else:
            base_trigger = self._NORMAL_TRIGGER
        # Each confidence level doubles the allowed length.
        trigger = base_trigger * 2 ** self.min_confidence

        if self.lines_in_function > trigger:
            error_level = int(math.log(self.lines_in_function / base_trigger, 2))
            # 50 => 0, 100 => 1, 200 => 2, 400 => 3, 800 => 4, 1600 => 5, ...
            if error_level > 5:
                error_level = 5
            error(line_number, 'readability/fn_size', error_level,
                  'Small and focused functions are preferred:'
                  ' %s has %d non-comment lines'
                  ' (error triggered by exceeding %d lines).' % (
                      self.current_function, self.lines_in_function, trigger))

    def end(self):
        """Stop analyzing function body."""
        self.in_a_function = False
class _IncludeError(Exception):
    """Indicates a problem with the include order in a file.

    See _IncludeState.check_next_include_order for the ordering rules.
    """
    pass
class FileInfo:
    """Provides utility functions for filenames.

    FileInfo provides easy access to the components of a file's path
    relative to the project root.
    """

    def __init__(self, filename):
        self._filename = filename

    def full_name(self):
        """Returns the absolute path, with Windows separators made Unix-like."""
        return os.path.abspath(self._filename).replace('\\', '/')

    def repository_name(self):
        """Returns full_name() with the path to the checkout root removed.

        When the file exists on disk we try to detect the top of the
        checkout (an SVN <= 1.6 tree of .svn directories, or a directory
        containing .git or .svn) and truncate everything up to it, so that
        header guards do not embed machine-specific prefixes like
        "C:\\Documents and Settings\\..." or "/home/username/..." and thus
        differ between developers.  Falls back to the absolute name when no
        checkout root can be found.
        """
        fullname = self.full_name()

        if os.path.exists(fullname):
            project_dir = os.path.dirname(fullname)

            if os.path.exists(os.path.join(project_dir, ".svn")):
                # SVN <= 1.6 keeps a .svn entry at every level; climb until
                # the parent no longer has one to find the checkout top.
                root_dir = project_dir
                parent_dir = os.path.dirname(root_dir)
                while os.path.exists(os.path.join(parent_dir, ".svn")):
                    root_dir = os.path.dirname(root_dir)
                    parent_dir = os.path.dirname(parent_dir)

                prefix = os.path.commonprefix([root_dir, project_dir])
                return fullname[len(prefix) + 1:]

            # Not SVN <= 1.6?  Search upward for a directory holding .git
            # or .svn, stopping at the filesystem root.
            root_dir = os.path.dirname(fullname)
            while (root_dir != os.path.dirname(root_dir)
                   and not os.path.exists(os.path.join(root_dir, ".git"))
                   and not os.path.exists(os.path.join(root_dir, ".svn"))):
                root_dir = os.path.dirname(root_dir)

            if (os.path.exists(os.path.join(root_dir, ".git")) or
                    os.path.exists(os.path.join(root_dir, ".svn"))):
                prefix = os.path.commonprefix([root_dir, project_dir])
                return fullname[len(prefix) + 1:]

        # Don't know what to do; header guard warnings may be wrong...
        return fullname

    def split(self):
        """Splits the file into (directory, basename, extension).

        For 'chrome/browser/browser.cpp' this returns
        ('chrome/browser', 'browser', '.cpp').
        """
        directory, base = os.path.split(self.repository_name())
        root, extension = os.path.splitext(base)
        return (directory, root, extension)

    def base_name(self):
        """File base name - text after the final slash, before the final period."""
        return self.split()[1]

    def extension(self):
        """File extension - text following the final period."""
        return self.split()[2]

    def no_extension(self):
        """File path with the extension removed."""
        return '/'.join(self.split()[0:2])

    def is_source(self):
        """Returns True when the file has a C/C++ source-file extension."""
        return self.extension()[1:] in ('c', 'cc', 'cpp', 'cxx')
# Matches standard C++ escape esequences per 2.13.2.3 of the C++ standard.
_RE_PATTERN_CLEANSE_LINE_ESCAPES = re.compile(
r'\\([abfnrtv?"\\\']|\d+|x[0-9a-fA-F]+)')
# Matches strings. Escape codes should already be removed by ESCAPES.
_RE_PATTERN_CLEANSE_LINE_DOUBLE_QUOTES = re.compile(r'"[^"]*"')
# Matches characters. Escape codes should already be removed by ESCAPES.
_RE_PATTERN_CLEANSE_LINE_SINGLE_QUOTES = re.compile(r"'.'")
# Matches multi-line C++ comments.
# This RE is a little bit more complicated than one might expect, because we
# have to take care of space removals tools so we can handle comments inside
# statements better.
# The current rule is: We only clear spaces from both sides when we're at the
# end of the line. Otherwise, we try to remove spaces from the right side,
# if this doesn't work we try on left side but only if there's a non-character
# on the right.
_RE_PATTERN_CLEANSE_LINE_C_COMMENTS = re.compile(
r"""(\s*/\*.*\*/\s*$|
/\*.*\*/\s+|
\s+/\*.*\*/(?=\W)|
/\*.*\*/)""", re.VERBOSE)
def is_cpp_string(line):
    """Reports whether the end of line sits inside a C++ string constant.

    This function does not consider single-line nor multi-line comments.

    Args:
      line: is a partial line of code starting from the 0..n.

    Returns:
      True, if the next character appended to 'line' would land inside a
      string constant.
    """
    # Neutralize escaped backslashes so that \\" is not mistaken for \".
    line = line.replace(r'\\', 'XX')
    # Count quotes that actually open/close strings: total double quotes
    # minus escaped ones and minus character literals like '"'.
    unescaped_quotes = line.count('"') - line.count(r'\"') - line.count("'\"'")
    return (unescaped_quotes & 1) == 1
def find_next_multi_line_comment_start(lines, line_index):
    """Finds the next line that opens a multi-line comment.

    Returns the index of the first line at or after line_index whose
    (stripped) text starts a '/*' comment that does not close on the same
    line, or len(lines) when there is none.
    """
    for index in range(line_index, len(lines)):
        stripped = lines[index].strip()
        # Only report the opener when the comment continues past this line.
        if stripped.startswith('/*') and stripped.find('*/', 2) < 0:
            return index
    return len(lines)
def find_next_multi_line_comment_end(lines, line_index):
    """Finds the line that closes the current multi-line comment.

    Assumes line_index is inside a comment; returns the index of the first
    line at or after it whose stripped text ends with '*/', or len(lines)
    when the comment never closes.
    """
    for index in range(line_index, len(lines)):
        if lines[index].strip().endswith('*/'):
            return index
    return len(lines)
def remove_multi_line_comments_from_range(lines, begin, end):
    """Replaces lines[begin:end] with '// dummy' placeholders, in place.

    The placeholders keep the lines non-empty so later checks do not emit
    spurious blank-line warnings.
    """
    for index in range(begin, end):
        lines[index] = '// dummy'
def remove_multi_line_comments(lines, error):
    """Blanks out all multi-line (C-style) comments in lines, in place."""
    line_index = 0
    while line_index < len(lines):
        start = find_next_multi_line_comment_start(lines, line_index)
        if start >= len(lines):
            # No further comment openers; nothing left to do.
            return
        end = find_next_multi_line_comment_end(lines, start)
        if end >= len(lines):
            error(start + 1, 'readability/multiline_comment', 5,
                  'Could not find end of multi-line comment')
            return
        remove_multi_line_comments_from_range(lines, start, end + 1)
        line_index = end + 1
def cleanse_comments(line):
    """Removes //-comments and single-line C-style /* */ comments.

    Args:
      line: A line of C++ source.

    Returns:
      The line with single-line comments removed.
    """
    slash_slash_index = line.find('//')
    # Only treat '//' as a comment when it is not inside a string literal.
    if slash_slash_index != -1 and not is_cpp_string(line[:slash_slash_index]):
        line = line[:slash_slash_index]
    # Also strip any single-line /* ... */ comments.
    return _RE_PATTERN_CLEANSE_LINE_C_COMMENTS.sub('', line)
class CleansedLines(object):
    """Holds three differently pre-processed views of the same source lines.

    1) elided: lines with both strings and comments removed,
    2) lines: lines with comments removed, and
    3) raw_lines: all the lines without processing.
    All three members are lists of the same length.
    """

    def __init__(self, lines):
        self.elided = []
        self.lines = []
        self.raw_lines = lines
        self._num_lines = len(lines)
        for raw_line in lines:
            self.lines.append(cleanse_comments(raw_line))
            # Collapse string/char literals first, then drop comments.
            self.elided.append(cleanse_comments(self.collapse_strings(raw_line)))

    def num_lines(self):
        """Returns the number of lines represented."""
        return self._num_lines

    @staticmethod
    def collapse_strings(elided):
        """Collapses string and char literals on a line to "" or '' blocks.

        Strings are nixed first so later checks are not fooled by text like
        '"http://"'.

        Args:
          elided: The line being processed.

        Returns:
          The line with collapsed strings.
        """
        if _RE_PATTERN_INCLUDE.match(elided):
            # #include lines keep their text untouched.
            return elided
        # Strip escape sequences first so things that merely look like
        # quote characters (e.g. \") cannot confuse the collapsing below.
        elided = _RE_PATTERN_CLEANSE_LINE_ESCAPES.sub('', elided)
        elided = _RE_PATTERN_CLEANSE_LINE_SINGLE_QUOTES.sub("''", elided)
        return _RE_PATTERN_CLEANSE_LINE_DOUBLE_QUOTES.sub('""', elided)
def close_expression(elided, position):
    """If input points to ( or { or [, finds the position that closes it.

    If elided[position.row][position.column] points to a '(' or '{' or '[',
    finds the line_number/pos that correspond to the closing of the expression.

    Args:
      elided: A CleansedLines.elided instance containing the file.
      position: The position of the opening item.

    Returns:
      The Position *past* the closing brace, or Position(len(elided), -1)
      if we never find a close. Note we ignore strings and comments when matching.
    """
    line = elided[position.row]
    start_character = line[position.column]
    # Pick the character class that matches both the opener and its closer.
    if start_character == '(':
        enclosing_character_regex = r'[\(\)]'
    elif start_character == '[':
        enclosing_character_regex = r'[\[\]]'
    elif start_character == '{':
        enclosing_character_regex = r'[\{\}]'
    else:
        # Not pointing at an opening bracket at all.
        return Position(len(elided), -1)

    current_column = position.column + 1
    line_number = position.row
    # One unmatched opener so far: the one at 'position'.
    net_open = 1
    for line in elided[position.row:]:
        line = line[current_column:]

        # Search the current line for opening and closing characters.
        while True:
            next_enclosing_character = search(enclosing_character_regex, line)
            # No more on this line.
            if not next_enclosing_character:
                break
            # current_column tracks the absolute column just past the match.
            current_column += next_enclosing_character.end(0)
            line = line[next_enclosing_character.end(0):]
            if next_enclosing_character.group(0) == start_character:
                net_open += 1
            else:
                net_open -= 1
            if not net_open:
                return Position(line_number, current_column)

        # Proceed to the next line.
        line_number += 1
        current_column = 0

    # The given item was not closed.
    return Position(len(elided), -1)
def check_for_copyright(lines, error):
    """Logs an error if no Copyright message appears at the top of the file.

    Args:
      lines: An array of strings; index 0 is a dummy entry, so the real
             file content starts at index 1.
      error: The function to call with any errors found.
    """
    # The copyright notice should occur within the first 10 real lines
    # (indices 1..10; index 0 is a dummy line).  range() rather than the
    # Python-2-only xrange() keeps this working on Python 3 as well; the
    # range holds at most 10 elements, so the difference is negligible.
    for line in range(1, min(len(lines), 11)):
        if re.search(r'Copyright', lines[line], re.I):
            break
    else:                       # means no copyright line was found
        error(0, 'legal/copyright', 5,
              'No copyright message found. '
              'You should have a line: "Copyright [year] <Copyright Owner>"')
def get_header_guard_cpp_variable(filename):
    """Returns the CPP variable that should be used as a header guard.

    Args:
      filename: The name of a C++ header file.

    Returns:
      A (special_name, standard_name) pair of acceptable guard names;
      special_name carries a WTF_ prefix for files under a wtf/ directory
      and equals standard_name otherwise.
    """
    # Restore the original filename in case the style checker was invoked
    # from Emacs's flymake (which appends _flymake to the basename).
    filename = re.sub(r'_flymake\.h$', '.h', filename)

    # Consistently use re.sub (which caches compiled patterns internally)
    # instead of mixing it with the module-local caching helper.
    standard_name = re.sub(r'[-.\s]', '_', os.path.basename(filename))

    # Files under WTF typically have header guards that start with WTF_.
    if '/wtf/' in filename:
        special_name = "WTF_" + standard_name
    else:
        special_name = standard_name
    return (special_name, standard_name)
def check_for_header_guard(filename, lines, error):
    """Checks that the file contains a header guard.

    Logs an error if no #ifndef header guard is present, or when the guard
    does not use one of the acceptable names for this file.

    Args:
      filename: The name of the C++ header file.
      lines: An array of strings, each representing a line of the file.
      error: The function to call with any errors found.
    """
    legal_guards = get_header_guard_cpp_variable(filename)

    ifndef = None
    ifndef_line_number = 0
    define = None
    for line_number, line in enumerate(lines):
        tokens = line.split()
        if len(tokens) < 2:
            continue
        # Remember the argument of the first #ifndef and first #define.
        if not ifndef and tokens[0] == '#ifndef':
            ifndef = tokens[1]
            ifndef_line_number = line_number
        if not define and tokens[0] == '#define':
            define = tokens[1]
        if define and ifndef:
            break

    if not ifndef or not define or ifndef != define:
        error(0, 'build/header_guard', 5,
              'No #ifndef header guard found, suggested CPP variable is: %s' %
              legal_guards[0])
        return

    # The guard should be one of the acceptable names (e.g. File_h).
    if ifndef not in legal_guards:
        error(ifndef_line_number, 'build/header_guard', 5,
              '#ifndef header guard has wrong style, please use: %s' % legal_guards[0])
def check_for_unicode_replacement_characters(lines, error):
    """Logs an error for each line containing Unicode replacement characters.

    These indicate that either the file contained invalid UTF-8 (likely)
    or Unicode replacement characters (which it shouldn't).  Note that
    it's possible for this to throw off line numbering if the invalid
    UTF-8 occurred adjacent to a newline.

    Args:
      lines: An array of strings, each representing a line of the file.
      error: The function to call with any errors found.
    """
    replacement_character = u'\ufffd'
    for line_number in range(len(lines)):
        if replacement_character in lines[line_number]:
            error(line_number, 'readability/utf8', 5,
                  'Line contains invalid UTF-8 (or Unicode replacement character).')
def check_for_new_line_at_eof(lines, error):
    """Logs an error if there is no newline char at the end of the file.

    Args:
      lines: An array of strings, each representing a line of the file.
      error: The function to call with any errors found.
    """
    # The lines array was built by appending two newlines to the original
    # file text and splitting on '\n', so a file that ends in a newline
    # produces an empty second-to-last entry.  Anything else means the
    # final line was unterminated.
    ends_with_newline = len(lines) >= 3 and not lines[-2]
    if not ends_with_newline:
        error(len(lines) - 2, 'whitespace/ending_newline', 5,
              'Could not find a newline character at the end of the file.')
def check_for_multiline_comments_and_strings(clean_lines, line_number, error):
    """Logs an error if we see /* ... */ or "..." that extend past one line.

    /* ... */ comments are legit inside macros, for one line.  Otherwise we
    prefer // comments, so it's ok to warn about the other.  Likewise,
    strings may legally span lines when each line ends in a backslash, but
    that is ugly and unnecessary, and this checker handles neither case
    well, so both are warned about.

    Args:
      clean_lines: A CleansedLines instance containing the file.
      line_number: The number of the line to check.
      error: The function to call with any errors found.
    """
    line = clean_lines.elided[line_number]

    # Escaped backslashes are harmless, but the second (escaped) slash
    # could make the \" detection below misfire, so drop them first.
    line = line.replace('\\\\', '')

    if line.count('/*') > line.count('*/'):
        error(line_number, 'readability/multiline_comment', 5,
              'Complex multi-line /*...*/-style comment found. '
              'Lint may give bogus warnings. '
              'Consider replacing these with //-style comments, '
              'with #if 0...#endif, '
              'or with more clearly structured multi-line comments.')

    if (line.count('"') - line.count('\\"')) % 2:
        error(line_number, 'readability/multiline_string', 5,
              'Multi-line string ("...") found. This lint script doesn\'t '
              'do well with such strings, and may give bogus warnings. They\'re '
              'ugly and unnecessary, and you should use concatenation instead".')
# Pairs of (thread-unsafe posix call prefix, reentrant thread-safe variant),
# both including the opening parenthesis so only call sites match.
_THREADING_LIST = (
    ('asctime(', 'asctime_r('),
    ('ctime(', 'ctime_r('),
    ('getgrgid(', 'getgrgid_r('),
    ('getgrnam(', 'getgrnam_r('),
    ('getlogin(', 'getlogin_r('),
    ('getpwnam(', 'getpwnam_r('),
    ('getpwuid(', 'getpwuid_r('),
    ('gmtime(', 'gmtime_r('),
    ('localtime(', 'localtime_r('),
    ('rand(', 'rand_r('),
    ('readdir(', 'readdir_r('),
    ('strtok(', 'strtok_r('),
    ('ttyname(', 'ttyname_r('),
)


def check_posix_threading(clean_lines, line_number, error):
    """Checks for calls to thread-unsafe functions.

    Much code has been originally written without consideration of
    multi-threading.  Also, engineers are relying on their old experience;
    they have learned posix before threading extensions were added.  These
    tests guide the engineers to use thread-safe functions (when using
    posix directly).

    Args:
      clean_lines: A CleansedLines instance containing the file.
      line_number: The number of the line to check.
      error: The function to call with any errors found.
    """
    line = clean_lines.elided[line_number]
    for unsafe_call, safe_call in _THREADING_LIST:
        index = line.find(unsafe_call)
        if index < 0:
            continue
        # Only flag a genuine call: the name must start the line or follow
        # a character that cannot end an identifier or a member access.
        is_function_start = (index == 0 or
                             (not line[index - 1].isalnum() and
                              line[index - 1] not in ('_', '.', '>')))
        if is_function_start:
            error(line_number, 'runtime/threadsafe_fn', 2,
                  'Consider using ' + safe_call +
                  '...) instead of ' + unsafe_call +
                  '...) for improved thread safety.')
# Matches invalid increment: *count++, which moves pointer instead of
# incrementing a value.
_RE_PATTERN_INVALID_INCREMENT = re.compile(
r'^\s*\*\w+(\+\+|--);')
def check_invalid_increment(clean_lines, line_number, error):
    """Flags statements of the form *count++ (or *count--).

    Such a statement parses as *(count++): it advances the pointer and
    discards the dereferenced value instead of changing the pointee, e.g.:

        void increment_counter(int* count) {
            *count++;
        }

    The correct forms are ++*count, (*count)++ or *count += 1.

    Args:
      clean_lines: A CleansedLines instance containing the file.
      line_number: The number of the line to check.
      error: The function to call with any errors found.
    """
    current_line = clean_lines.elided[line_number]
    if _RE_PATTERN_INVALID_INCREMENT.match(current_line):
        error(line_number, 'runtime/invalid_increment', 5,
              'Changing pointer instead of value (or unused value of operator*).')
class _ClassInfo(object):
"""Stores information about a class."""
def __init__(self, name, line_number):
self.name = name
self.line_number = line_number
self.seen_open_brace = False
self.is_derived = False
self.virtual_method_line_number = None
self.has_virtual_destructor = False
self.brace_depth = 0
class _ClassState(object):
"""Holds the current state of the parse relating to class declarations.
It maintains a stack of _ClassInfos representing the parser's guess
as to the current nesting of class declarations. The innermost class
is at the top (back) of the stack. Typically, the stack will either
be empty or have exactly one entry.
"""
def __init__(self):
self.classinfo_stack = []
def check_finished(self, error):
"""Checks that all classes have been completely parsed.
Call this when all lines in a file have been processed.
Args:
error: The function to call with any errors found.
"""
if self.classinfo_stack:
# Note: This test can result in false positives if #ifdef constructs
# get in the way of brace matching. See the testBuildClass test in
# cpp_style_unittest.py for an example of this.
error(self.classinfo_stack[0].line_number, 'build/class', 5,
'Failed to find complete declaration of class %s' %
self.classinfo_stack[0].name)
class _FileState(object):
def __init__(self, clean_lines, file_extension):
self._did_inside_namespace_indent_warning = False
self._clean_lines = clean_lines
if file_extension in ['m', 'mm']:
self._is_objective_c = True
self._is_c = False
elif file_extension == 'h':
# In the case of header files, it is unknown if the file
# is c / objective c or not, so set this value to None and then
# if it is requested, use heuristics to guess the value.
self._is_objective_c = None
self._is_c = None
elif file_extension == 'c':
self._is_c = True
self._is_objective_c = False
else:
self._is_objective_c = False
self._is_c = False
def set_did_inside_namespace_indent_warning(self):
self._did_inside_namespace_indent_warning = True
def did_inside_namespace_indent_warning(self):
return self._did_inside_namespace_indent_warning
def is_objective_c(self):
if self._is_objective_c is None:
for line in self._clean_lines.elided:
# Starting with @ or #import seem like the best indications
# that we have an Objective C file.
if line.startswith("@") or line.startswith("#import"):
self._is_objective_c = True
break
else:
self._is_objective_c = False
return self._is_objective_c
def is_c(self):
if self._is_c is None:
for line in self._clean_lines.lines:
# if extern "C" is found, then it is a good indication
# that we have a C header file.
if line.startswith('extern "C"'):
self._is_c = True
break
else:
self._is_c = False
return self._is_c
def is_c_or_objective_c(self):
"""Return whether the file extension corresponds to C or Objective-C."""
return self.is_c() or self.is_objective_c()
class _EnumState(object):
    """Maintains whether currently in an enum declaration, and checks whether
    enum declarations follow the style guide.
    """

    def __init__(self):
        # True while the parser is inside a multi-line enum declaration.
        self.in_enum_decl = False
        # True when the current enum is presumed to be a WebIDL-backed enum,
        # whose members are conventionally ALL_UPPERCASE.
        self.is_webidl_enum = False

    def process_clean_line(self, line):
        """Feeds one comment/string-stripped line to the enum state machine.

        Returns False if the line contains an enum member whose naming
        violates the style guide, True otherwise.
        """
        # FIXME: The regular expressions for expr_all_uppercase and expr_enum_end only accept integers
        # and identifiers for the value of the enumerator, but do not accept any other constant
        # expressions. However, this is sufficient for now (11/27/2012).
        expr_all_uppercase = r'\s*[A-Z0-9_]+\s*(?:=\s*[a-zA-Z0-9]+\s*)?,?\s*$'
        expr_starts_lowercase = r'\s*[a-z]'
        expr_enum_end = r'}\s*(?:[a-zA-Z0-9]+\s*(?:=\s*[a-zA-Z0-9]+)?)?\s*;\s*'
        expr_enum_start = r'\s*enum(?:\s+[a-zA-Z0-9]+)?\s*\{?\s*'
        if self.in_enum_decl:
            if match(r'\s*' + expr_enum_end + r'$', line):
                # Closing line of the enum: reset the state machine.
                self.in_enum_decl = False
                self.is_webidl_enum = False
            elif match(expr_all_uppercase, line):
                # ALL_UPPERCASE members are acceptable only in WebIDL enums.
                return self.is_webidl_enum
            elif match(expr_starts_lowercase, line):
                # Members starting with a lowercase letter violate the guide.
                return False
        else:
            matched = match(expr_enum_start + r'$', line)
            if matched:
                # A multi-line enum declaration begins here.
                self.in_enum_decl = True
            else:
                # Possibly a complete single-line enum declaration.
                matched = match(expr_enum_start + r'(?P<members>.*)' + expr_enum_end + r'$', line)
                if matched:
                    members = matched.group('members').split(',')
                    found_invalid_member = False
                    for member in members:
                        if match(expr_all_uppercase, member):
                            # ALL_UPPERCASE is only valid for WebIDL enums.
                            found_invalid_member = not self.is_webidl_enum
                        if match(expr_starts_lowercase, member):
                            found_invalid_member = True
                    if found_invalid_member:
                        self.is_webidl_enum = False
                        return False
                    return True
        return True
def check_for_non_standard_constructs(clean_lines, line_number,
                                      class_state, error):
    """Logs an error if we see certain non-ANSI constructs ignored by gcc-2.

    Complain about several constructs which gcc-2 accepts, but which are
    not standard C++. Warning about these in lint is one way to ease the
    transition to new compilers.
    - put storage class first (e.g. "static const" instead of "const static").
    - "%lld" instead of %qd" in printf-type functions.
    - "%1$d" is non-standard in printf-type functions.
    - "\%" is an undefined character escape sequence.
    - text after #endif is not allowed.
    - invalid inner-style forward declaration.
    - >? and <? operators, and their >?= and <?= cousins.
    - classes with virtual methods need virtual destructors (compiler warning
      available, but not turned on yet.)

    Additionally, check for constructor/destructor style violations as it
    is very convenient to do so while checking for gcc-2 compliance.

    Args:
      clean_lines: A CleansedLines instance containing the file.
      line_number: The number of the line to check.
      class_state: A _ClassState instance which maintains information about
                   the current stack of nested class declarations being parsed.
      error: A callable to which errors are reported, which takes parameters:
             line number, error level, and message
    """
    # Remove comments from the line, but leave in strings for now.
    # (The printf checks below need the format strings intact.)
    line = clean_lines.lines[line_number]

    if search(r'printf\s*\(.*".*%[-+ ]?\d*q', line):
        error(line_number, 'runtime/printf_format', 3,
              '%q in format strings is deprecated. Use %ll instead.')

    if search(r'printf\s*\(.*".*%\d+\$', line):
        error(line_number, 'runtime/printf_format', 2,
              '%N$ formats are unconventional. Try rewriting to avoid them.')

    # Remove escaped backslashes before looking for undefined escapes.
    line = line.replace('\\\\', '')

    if search(r'("|\').*\\(%|\[|\(|{)', line):
        error(line_number, 'build/printf_format', 3,
              '%, [, (, and { are undefined character escapes. Unescape them.')

    # For the rest, work with both comments and strings removed.
    line = clean_lines.elided[line_number]

    if search(r'\b(const|volatile|void|char|short|int|long'
              r'|float|double|signed|unsigned'
              r'|schar|u?int8|u?int16|u?int32|u?int64)'
              r'\s+(auto|register|static|extern|typedef)\b',
              line):
        error(line_number, 'build/storage_class', 5,
              'Storage class (static, extern, typedef, etc) should be first.')

    if match(r'\s*#\s*endif\s*[^/\s]+', line):
        error(line_number, 'build/endif_comment', 5,
              'Uncommented text after #endif is non-standard. Use a comment.')

    if match(r'\s*class\s+(\w+\s*::\s*)+\w+\s*;', line):
        error(line_number, 'build/forward_decl', 5,
              'Inner-style forward declarations are invalid. Remove this line.')

    if search(r'(\w+|[+-]?\d+(\.\d*)?)\s*(<|>)\?=?\s*(\w+|[+-]?\d+)(\.\d*)?', line):
        error(line_number, 'build/deprecated', 3,
              '>? and <? (max and min) operators are non-standard and deprecated.')

    # Track class entry and exit, and attempt to find cases within the
    # class declaration that don't meet the C++ style
    # guidelines. Tracking is very dependent on the code matching Google
    # style guidelines, but it seems to perform well enough in testing
    # to be a worthwhile addition to the checks.
    classinfo_stack = class_state.classinfo_stack
    # Look for a class declaration
    class_decl_match = match(
        r'\s*(template\s*<[\w\s<>,:]*>\s*)?(class|struct)\s+(\w+(::\w+)*)', line)
    if class_decl_match:
        classinfo_stack.append(_ClassInfo(class_decl_match.group(3), line_number))

    # Everything else in this function uses the top of the stack if it's
    # not empty.
    if not classinfo_stack:
        return
    classinfo = classinfo_stack[-1]

    # If the opening brace hasn't been seen look for it and also
    # parent class declarations.
    if not classinfo.seen_open_brace:
        # If the line has a ';' in it, assume it's a forward declaration or
        # a single-line class declaration, which we won't process.
        if line.find(';') != -1:
            classinfo_stack.pop()
            return
        classinfo.seen_open_brace = (line.find('{') != -1)
        # Look for a bare ':' (i.e. not '::'), which introduces base classes.
        if search('(^|[^:]):($|[^:])', line):
            classinfo.is_derived = True
        if not classinfo.seen_open_brace:
            return  # Everything else in this function is for after open brace

    # The class may have been declared with namespace or classname qualifiers.
    # The constructor and destructor will not have those qualifiers.
    base_classname = classinfo.name.split('::')[-1]

    # Look for single-argument constructors that aren't marked explicit.
    # Technically a valid construct, but against style.
    args = match(r'(?<!explicit)\s+%s\s*\(([^,()]+)\)'
                 % re.escape(base_classname),
                 line)
    if (args
        and args.group(1) != 'void'
        and not match(r'(const\s+)?%s\s*&' % re.escape(base_classname),
                      args.group(1).strip())):
        error(line_number, 'runtime/explicit', 5,
              'Single-argument constructors should be marked explicit.')

    # Look for methods declared virtual.
    if search(r'\bvirtual\b', line):
        classinfo.virtual_method_line_number = line_number
        # Only look for a destructor declaration on the same line. It would
        # be extremely unlikely for the destructor declaration to occupy
        # more than one line.
        if search(r'~%s\s*\(' % base_classname, line):
            classinfo.has_virtual_destructor = True

    # Look for class end.
    brace_depth = classinfo.brace_depth
    brace_depth = brace_depth + line.count('{') - line.count('}')
    if brace_depth <= 0:
        classinfo = classinfo_stack.pop()
        # Try to detect missing virtual destructor declarations.
        # For now, only warn if a non-derived class with virtual methods lacks
        # a virtual destructor. This is to make it less likely that people will
        # declare derived virtual destructors without declaring the base
        # destructor virtual.
        if ((classinfo.virtual_method_line_number is not None)
            and (not classinfo.has_virtual_destructor)
            and (not classinfo.is_derived)):  # Only warn for base classes
            error(classinfo.line_number, 'runtime/virtual', 4,
                  'The class %s probably needs a virtual destructor due to '
                  'having virtual method(s), one declared at line %d.'
                  % (classinfo.name, classinfo.virtual_method_line_number))
    else:
        classinfo.brace_depth = brace_depth
def check_spacing_for_function_call(line, line_number, error):
    """Checks for the correctness of various spacing around function calls.

    Args:
      line: The text of the line to check.
      line_number: The number of the line to check.
      error: The function to call with any errors found.
    """
    # Function calls often occur inside if/for/foreach/while/switch headers,
    # which follow their own, more liberal spacing conventions. When such a
    # header is present, restrict the stricter checks below to the expression
    # inside its parentheses; otherwise look at the whole line.
    function_call = line
    control_flow_patterns = (r'\bif\s*\((.*)\)\s*{',
                             r'\bfor\s*\((.*)\)\s*{',
                             r'\bforeach\s*\((.*)\)\s*{',
                             r'\bwhile\s*\((.*)\)\s*[{;]',
                             r'\bswitch\s*\((.*)\)\s*{')
    for pattern in control_flow_patterns:
        control_match = search(pattern, line)
        if control_match:
            function_call = control_match.group(1)
            break

    # Except in if/for/foreach/while/switch, there should never be space
    # immediately inside parens (eg "f( 3, 4 )"), nor a space before a '('
    # that starts a function argument list. Nested parens ( (a+b) + c ) are
    # exempt, and pointers/references to arrays and functions are skipped
    # because they are too tricky; they are recognized very simply as
    #   " (something)(maybe-something)", " (something)(maybe-something,",
    #   or " (something)[something]"
    # assuming the contents of [] never need to wrap.
    is_control_structure = search(
        r'\b(if|for|foreach|while|switch|return|new|delete)\b', function_call)
    is_function_pointer = search(r' \([^)]+\)\([^)]*(\)|,$)', function_call)
    is_array_pointer = search(r' \([^)]+\)\[[^\]]+\]', function_call)
    if not (is_control_structure or is_function_pointer or is_array_pointer):
        if search(r'\w\s*\([ \t](?!\s*\\$)', function_call):  # a ( used for a fn call
            error(line_number, 'whitespace/parens', 4,
                  'Extra space after ( in function call')
        elif search(r'\([ \t]+(?!(\s*\\)|\()', function_call):
            error(line_number, 'whitespace/parens', 2,
                  'Extra space after (')
        if (search(r'\w\s+\(', function_call)
            and not match(r'\s*(#|typedef)', function_call)):
            error(line_number, 'whitespace/parens', 4,
                  'Extra space before ( in function call')

    # A ')' followed only by a newline, or by '{' + newline, is presumed to
    # close a control statement (if/while/etc), so no complaint there.
    if search(r'[^)\s]\s+\)(?!\s*$|{\s*$)', function_call):
        error(line_number, 'whitespace/parens', 2,
              'Extra space before )')
def is_blank_line(line):
    """Returns true if the given line is blank.

    A line counts as blank when it is empty or consists solely of
    whitespace characters.

    Args:
      line: A line of a string.

    Returns:
      True, if the given line is blank.
    """
    return len(line.strip()) == 0
def detect_functions(clean_lines, line_number, function_state, error):
    """Finds where functions start and end.

    Uses a simplistic algorithm assuming other style guidelines
    (especially spacing) are followed.
    Trivial bodies are unchecked, so constructors with huge initializer lists
    may be missed.

    Args:
      clean_lines: A CleansedLines instance containing the file.
      line_number: The number of the line to check.
      function_state: Current function name and lines in body so far.
      error: The function to call with any errors found.
    """
    # Are we now past the end of a function?
    if function_state.end_position.row + 1 == line_number:
        function_state.end()

    # If we're in a function, don't try to detect a new one.
    if function_state.in_a_function:
        return

    lines = clean_lines.lines
    line = lines[line_number]
    raw = clean_lines.raw_lines
    raw_line = raw[line_number]

    # Lines ending with a \ indicate a macro. Don't try to check them.
    if raw_line.endswith('\\'):
        return

    regexp = r'\s*(\w(\w|::|\*|\&|\s|<|>|,|~|(operator\s*(/|-|=|!|\+)+))*)\('  # decls * & space::name( ...
    match_result = match(regexp, line)
    if not match_result:
        return

    # If the name is all caps and underscores, figure it's a macro and
    # ignore it, unless it's TEST or TEST_F.
    function_name = match_result.group(1).split()[-1]
    if function_name != 'TEST' and function_name != 'TEST_F' and match(r'[A-Z_]+$', function_name):
        return

    # Scan forward, accumulating lines, until a '{' or ';' reveals where the
    # body starts (or that this is only a declaration).
    joined_line = ''
    for start_line_number in xrange(line_number, clean_lines.num_lines()):
        start_line = clean_lines.elided[start_line_number]
        joined_line += ' ' + start_line.lstrip()
        body_match = search(r'{|;', start_line)
        if body_match:
            body_start_position = Position(start_line_number, body_match.start(0))

            # Replace template constructs with _ so that no spaces remain in the function name,
            # while keeping the column numbers of other characters the same as "line".
            line_with_no_templates = iteratively_replace_matches_with_char(r'<[^<>]*>', '_', line)
            match_function = search(r'((\w|:|<|>|,|~|(operator\s*(/|-|=|!|\+)+))*)\(', line_with_no_templates)
            if not match_function:
                return  # The '(' must have been inside of a template.

            # Use the column numbers from the modified line to find the
            # function name in the original line.
            function = line[match_function.start(1):match_function.end(1)]
            function_name_start_position = Position(line_number, match_function.start(1))

            if match(r'TEST', function):    # Handle TEST... macros
                parameter_regexp = search(r'(\(.*\))', joined_line)
                if parameter_regexp:         # Ignore bad syntax
                    function += parameter_regexp.group(1)
            else:
                function += '()'

            parameter_start_position = Position(line_number, match_function.end(1))
            parameter_end_position = close_expression(clean_lines.elided, parameter_start_position)
            if parameter_end_position.row == len(clean_lines.elided):
                # No end was found.
                return

            if start_line[body_start_position.column] == ';':
                # Declaration only: the "body" is just the semicolon itself.
                end_position = Position(body_start_position.row, body_start_position.column + 1)
            else:
                end_position = close_expression(clean_lines.elided, body_start_position)

            # Check for nonsensical positions. (This happens in test cases which check code snippets.)
            if parameter_end_position > body_start_position:
                return

            function_state.begin(function, function_name_start_position, body_start_position, end_position,
                                 parameter_start_position, parameter_end_position, clean_lines)
            return

    # No body for the function (or evidence of a non-function) was found.
    error(line_number, 'readability/fn_size', 5,
          'Lint failed to find start of function body.')
def check_for_function_lengths(clean_lines, line_number, function_state, error):
    """Reports for long function bodies.

    For an overview why this is done, see:
    http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Write_Short_Functions

    Blank/comment lines are not counted so as to avoid encouraging the removal
    of vertical space and commments just to get through a lint check.
    NOLINT *on the last line of a function* disables this check.

    Args:
      clean_lines: A CleansedLines instance containing the file.
      line_number: The number of the line to check.
      function_state: Current function name and lines in body so far.
      error: The function to call with any errors found.
    """
    line = clean_lines.lines[line_number]
    raw_line = clean_lines.raw_lines[line_number]

    if function_state.end_position.row == line_number:
        # Last line of the function: run the length check unless the raw line
        # carries a NOLINT suppression.
        if not search(r'\bNOLINT\b', raw_line):
            function_state.check(error, line_number)
    elif not match(r'^\s*$', line):
        # Interior non-blank line: count it toward the body length.
        function_state.count(line_number)
def _check_parameter_name_against_text(parameter, text, error):
    """Checks to see if the parameter name is contained within the text.

    Returns False if the check failed (i.e. an error was produced).
    """
    # Compare in 'lower_with_underscores' form: it is case insensitive while
    # still retaining word breaks, so 'elate' is not mistaken for a piece of
    # 'NateLate'.
    canonical_parameter_name = parameter.lower_with_underscores_name()

    # Append "Object" to each word of the text to also catch parameter names
    # that did the same -- but only for names longer than a single character,
    # so that e.g. 'b' in an rgba function is not flagged.
    if len(canonical_parameter_name) > 1:
        text = sub(r'(\w)\b', r'\1Object', text)
    canonical_text = _convert_to_lower_with_underscores(text)

    # Also detect acronym-style names, such as 'ec' for ExceptionCode.
    acronym = _create_acronym(text).lower()

    if canonical_parameter_name not in canonical_text and canonical_parameter_name not in acronym:
        return True
    error(parameter.row, 'readability/parameter_name', 5,
          'The parameter name "%s" adds no information, so it should be removed.' % parameter.name)
    return False
def check_function_definition_and_pass_ptr(type_text, row, location_description, error):
    """Check that function definitions use Pass*Ptr instead of *Ptr.

    Args:
      type_text: A string containing the type. (For return values, it may contain more than the type.)
      row: The row number of the type.
      location_description: Used to indicate where the type is. This is either 'parameter' or 'return'.
      error: The function to call with any errors found.
    """
    # Match RefPtr/OwnPtr when it starts an identifier (\b) and is followed
    # by a non-word character such as '<'. The previous pattern,
    # '(?=\W|^)(Ref|Own)Ptr(?=\W)', used a look-AHEAD where a look-behind was
    # intended, so it could only ever match at the very start of type_text
    # and missed qualified types such as 'const RefPtr<Node>'. It was also a
    # non-raw string, making '\W' an invalid escape sequence under newer
    # Python versions. '\b' still excludes PassRefPtr/PassOwnPtr because
    # there is no word boundary between 'Pass' and 'Ref'/'Own'.
    match_ref_or_own_ptr = r'\b(Ref|Own)Ptr(?=\W)'
    bad_type_usage = search(match_ref_or_own_ptr, type_text)
    # References and pointers to *Ptr objects are acceptable; only by-value
    # usage should become Pass*Ptr.
    if not bad_type_usage or type_text.endswith('&') or type_text.endswith('*'):
        return
    type_name = bad_type_usage.group(0)
    error(row, 'readability/pass_ptr', 5,
          'The %s type should use Pass%s instead of %s.' % (location_description, type_name, type_name))
def check_function_definition(filename, file_extension, clean_lines, line_number, function_state, error):
    """Check that function definitions for style issues.

    Specifically, check that parameter names in declarations add information.

    Args:
      filename: Filename of the file that is being processed.
      file_extension: The current file extension, without the leading dot.
      clean_lines: A CleansedLines instance containing the file.
      line_number: The number of the line to check.
      function_state: Current function name and lines in body so far.
      error: The function to call with any errors found.
    """
    # Run exactly once per function, on the line where the body starts.
    if line_number != function_state.body_start_position.row:
        return

    check_function_definition_and_pass_ptr(function_state.modifiers_and_return_type(),
                                           function_state.function_name_start_position.row,
                                           'return', error)

    parameter_list = function_state.parameter_list()
    for parameter in parameter_list:
        check_function_definition_and_pass_ptr(parameter.type, parameter.row, 'parameter', error)

        # The name checks below only apply to declarations with named parameters.
        if not function_state.is_declaration or not parameter.name:
            continue

        # For a single-parameter setter, the parameter name must add
        # information beyond what the setter's own name already conveys.
        if len(parameter_list) == 1 and match('set[A-Z]', function_state.current_function):
            setter_subject = function_state.current_function[len('set'):]
            if not _check_parameter_name_against_text(parameter, setter_subject, error):
                continue  # An error was already reported for this name.

        # The parameter name must also add information beyond its type.
        if not _check_parameter_name_against_text(parameter, parameter.type, error):
            continue  # An error was already reported for this name.
def check_pass_ptr_usage(clean_lines, line_number, function_state, error):
    """Check for proper usage of Pass*Ptr.

    Currently this is limited to detecting declarations of Pass*Ptr
    variables inside of functions.

    Args:
      clean_lines: A CleansedLines instance containing the file.
      line_number: The number of the line to check.
      function_state: Current function name and lines in body so far.
      error: The function to call with any errors found.
    """
    if not function_state.in_a_function:
        return
    # Only lines past the opening of the function body can declare locals.
    if line_number <= function_state.body_start_position.row:
        return
    declaration = match(r'^\s*Pass([A-Z][A-Za-z]*)Ptr<', clean_lines.lines[line_number])
    if declaration:
        type_name = 'Pass%sPtr' % declaration.group(1)
        error(line_number, 'readability/pass_ptr', 5,
              'Local variables should never be %s (see '
              'http://webkit.org/coding/RefPtr.html).' % type_name)
def check_for_leaky_patterns(clean_lines, line_number, function_state, error):
    """Check for constructs known to be leak prone.

    Args:
      clean_lines: A CleansedLines instance containing the file.
      line_number: The number of the line to check.
      function_state: Current function name and lines in body so far.
      error: The function to call with any errors found.
    """
    line = clean_lines.lines[line_number]

    # A DC obtained from GetDC/GetDCEx is easy to leak; HWndDC releases it
    # automatically.
    get_dc_call = search(r'\b(?P<function_name>GetDC(Ex)?)\s*\(', line)
    if get_dc_call:
        error(line_number, 'runtime/leaky_pattern', 5,
              'Use the class HWndDC instead of calling %s to avoid potential '
              'memory leaks.' % get_dc_call.group('function_name'))

    # A DC created with CreateDC/CreateCompatibleDC should be owned through
    # adoptPtr/OwnPtr so it is deleted deterministically.
    create_dc_call = search(r'\b(?P<function_name>Create(Compatible)?DC)\s*\(', line)
    if create_dc_call and not search(r'\badoptPtr\b', line):
        error(line_number, 'runtime/leaky_pattern', 5,
              'Use adoptPtr and OwnPtr<HDC> when calling %s to avoid potential '
              'memory leaks.' % create_dc_call.group('function_name'))
def check_spacing(file_extension, clean_lines, line_number, error):
    """Checks for the correctness of various spacing issues in the code.

    Things we check for: spaces around operators, spaces after
    if/for/while/switch, no spaces around parens in function calls, two
    spaces between code and comment, don't start a block with a blank
    line, don't end a function with a blank line, don't have too many
    blank lines in a row.

    Args:
      file_extension: The current file extension, without the leading dot.
      clean_lines: A CleansedLines instance containing the file.
      line_number: The number of the line to check.
      error: The function to call with any errors found.
    """
    raw = clean_lines.raw_lines
    line = raw[line_number]

    # Before nixing comments, check if the line is blank for no good
    # reason. This includes the first line after a block is opened, and
    # blank lines at the end of a function (ie, right before a line like '}').
    if is_blank_line(line):
        elided = clean_lines.elided
        previous_line = elided[line_number - 1]
        previous_brace = previous_line.rfind('{')
        # FIXME: Don't complain if line before blank line, and line after,
        #        both start with alnums and are indented the same amount.
        #        This ignores whitespace at the start of a namespace block
        #        because those are not usually indented.
        if (previous_brace != -1 and previous_line[previous_brace:].find('}') == -1
            and previous_line[:previous_brace].find('namespace') == -1):
            # OK, we have a blank line at the start of a code block. Before we
            # complain, we check if it is an exception to the rule: The previous
            # non-empty line has the parameters of a function header that are indented
            # 4 spaces (because they did not fit in a 80 column line when placed on
            # the same line as the function name). We also check for the case where
            # the previous line is indented 6 spaces, which may happen when the
            # initializers of a constructor do not fit into a 80 column line.
            exception = False
            if match(r' {6}\w', previous_line):  # Initializer list?
                # We are looking for the opening column of initializer list, which
                # should be indented 4 spaces to cause 6 space indentation afterwards.
                search_position = line_number - 2
                while (search_position >= 0
                       and match(r' {6}\w', elided[search_position])):
                    search_position -= 1
                # NOTE(review): the '    :' literal (4 spaces + colon) appears
                # whitespace-collapsed in this copy; restored here so the
                # 5-character slice comparison is meaningful -- confirm
                # against upstream cpp_style.py.
                exception = (search_position >= 0
                             and elided[search_position][:5] == '    :')
            else:
                # Search for the function arguments or an initializer list. We use a
                # simple heuristic here: If the line is indented 4 spaces; and we have a
                # closing paren, without the opening paren, followed by an opening brace
                # or colon (for initializer lists) we assume that it is the last line of
                # a function header. If we have a colon indented 4 spaces, it is an
                # initializer list.
                exception = (match(r' {4}\w[^\(]*\)\s*(const\s*)?(\{\s*$|:)',
                                   previous_line)
                             or match(r' {4}:', previous_line))

            if not exception:
                error(line_number, 'whitespace/blank_line', 2,
                      'Blank line at the start of a code block. Is this needed?')

        # This doesn't ignore whitespace at the end of a namespace block
        # because that is too hard without pairing open/close braces;
        # however, a special exception is made for namespace closing
        # brackets which have a comment containing "namespace".
        #
        # Also, ignore blank lines at the end of a block in a long if-else
        # chain, like this:
        #   if (condition1) {
        #       // Something followed by a blank line
        #
        #   } else if (condition2) {
        #       // Something else
        #   }
        if line_number + 1 < clean_lines.num_lines():
            next_line = raw[line_number + 1]
            if (next_line
                and match(r'\s*}', next_line)
                and next_line.find('namespace') == -1
                and next_line.find('} else ') == -1):
                error(line_number, 'whitespace/blank_line', 3,
                      'Blank line at the end of a code block. Is this needed?')

    # Next, we check for proper spacing with respect to comments.
    comment_position = line.find('//')
    if comment_position != -1:
        # Check if the // may be in quotes. If so, ignore it
        # Comparisons made explicit for clarity
        if (line.count('"', 0, comment_position) - line.count('\\"', 0, comment_position)) % 2 == 0:  # not in quotes
            # Allow one space before end of line comment.
            if (not match(r'^\s*$', line[:comment_position])
                and (comment_position >= 1
                     and ((line[comment_position - 1] not in string.whitespace)
                          or (comment_position >= 2
                              and line[comment_position - 2] in string.whitespace)))):
                error(line_number, 'whitespace/comments', 5,
                      'One space before end of line comments')
            # There should always be a space between the // and the comment
            commentend = comment_position + 2
            if commentend < len(line) and not line[commentend] == ' ':
                # but some lines are exceptions -- e.g. if they're big
                # comment delimiters like:
                # //----------------------------------------------------------
                # or they begin with multiple slashes followed by a space:
                # //////// Header comment
                matched = (search(r'[=/-]{4,}\s*$', line[commentend:])
                           or search(r'^/+ ', line[commentend:]))
                if not matched:
                    error(line_number, 'whitespace/comments', 4,
                          'Should have a space between // and comment')

            # There should only be one space after punctuation in a comment.
            if search(r'[.!?,;:]\s\s+\w', line[comment_position:]):
                error(line_number, 'whitespace/comments', 5,
                      'Should have only a single space after a punctuation in a comment.')

    line = clean_lines.elided[line_number]  # get rid of comments and strings

    # Don't try to do spacing checks for operator methods
    line = sub(r'operator(==|!=|<|<<|<=|>=|>>|>|\+=|-=|\*=|/=|%=|&=|\|=|^=|<<=|>>=|/)\(', 'operator\(', line)
    # Don't try to do spacing checks for #include or #import statements at
    # minimum because it messes up checks for spacing around /
    if match(r'\s*#\s*(?:include|import)', line):
        return
    if search(r'[\w.]=[\w.]', line):
        error(line_number, 'whitespace/operators', 4,
              'Missing spaces around =')

    # FIXME: It's not ok to have spaces around binary operators like .

    # You should always have whitespace around binary operators.
    # Alas, we can't test < or > because they're legitimately used sans spaces
    # (a->b, vector<int> a). The only time we can tell is a < with no >, and
    # only if it's not template params list spilling into the next line.
    matched = search(r'[^<>=!\s](==|!=|\+=|-=|\*=|/=|/|\|=|&=|<<=|>>=|<=|>=|\|\||\||&&|>>|<<)[^<>=!\s]', line)
    if not matched:
        # Note that while it seems that the '<[^<]*' term in the following
        # regexp could be simplified to '<.*', which would indeed match
        # the same class of strings, the [^<] means that searching for the
        # regexp takes linear rather than quadratic time.
        if not search(r'<[^<]*,\s*$', line):  # template params spill
            matched = search(r'[^<>=!\s](<)[^<>=!\s]([^>]|->)*$', line)
    if matched:
        error(line_number, 'whitespace/operators', 3,
              'Missing spaces around %s' % matched.group(1))

    # There shouldn't be space around unary operators
    matched = search(r'(!\s|~\s|[\s]--[\s;]|[\s]\+\+[\s;])', line)
    if matched:
        error(line_number, 'whitespace/operators', 4,
              'Extra space for operator %s' % matched.group(1))

    # A pet peeve of mine: no spaces after an if, while, switch, or for
    matched = search(r' (if\(|for\(|foreach\(|while\(|switch\()', line)
    if matched:
        error(line_number, 'whitespace/parens', 5,
              'Missing space before ( in %s' % matched.group(1))

    # For if/for/foreach/while/switch, the left and right parens should be
    # consistent about how many spaces are inside the parens, and
    # there should either be zero or one spaces inside the parens.
    # We don't want: "if ( foo)" or "if ( foo )".
    # Exception: "for ( ; foo; bar)" and "for (foo; bar; )" are allowed.
    matched = search(r'\b(?P<statement>if|for|foreach|while|switch)\s*\((?P<remainder>.*)$', line)
    if matched:
        statement = matched.group('statement')
        condition, rest = up_to_unmatched_closing_paren(matched.group('remainder'))
        if condition is not None:
            condition_match = search(r'(?P<leading>[ ]*)(?P<separator>.).*[^ ]+(?P<trailing>[ ]*)', condition)
            if condition_match:
                n_leading = len(condition_match.group('leading'))
                n_trailing = len(condition_match.group('trailing'))
                if n_leading != 0:
                    for_exception = statement == 'for' and condition.startswith(' ;')
                    if not for_exception:
                        error(line_number, 'whitespace/parens', 5,
                              'Extra space after ( in %s' % statement)
                if n_trailing != 0:
                    for_exception = statement == 'for' and condition.endswith('; ')
                    if not for_exception:
                        error(line_number, 'whitespace/parens', 5,
                              'Extra space before ) in %s' % statement)

            # Do not check for more than one command in macros
            in_preprocessor_directive = match(r'\s*#', line)
            if not in_preprocessor_directive and not match(r'((\s*{\s*}?)|(\s*;?))\s*\\?$', rest):
                error(line_number, 'whitespace/parens', 4,
                      'More than one command on the same line in %s' % statement)

    # You should always have a space after a comma (either as fn arg or operator)
    if search(r',[^\s]', line):
        error(line_number, 'whitespace/comma', 3,
              'Missing space after ,')

    matched = search(r'^\s*(?P<token1>[a-zA-Z0-9_\*&]+)\s\s+(?P<token2>[a-zA-Z0-9_\*&]+)', line)
    if matched:
        error(line_number, 'whitespace/declaration', 3,
              'Extra space between %s and %s' % (matched.group('token1'), matched.group('token2')))

    if file_extension == 'cpp':
        # C++ should have the & or * beside the type not the variable name.
        matched = match(r'\s*\w+(?<!\breturn|\bdelete)\s+(?P<pointer_operator>\*|\&)\w+', line)
        if matched:
            error(line_number, 'whitespace/declaration', 3,
                  'Declaration has space between type name and %s in %s' % (matched.group('pointer_operator'), matched.group(0).strip()))
    elif file_extension == 'c':
        # C Pointer declaration should have the * beside the variable not the type name.
        matched = search(r'^\s*\w+\*\s+\w+', line)
        if matched:
            error(line_number, 'whitespace/declaration', 3,
                  'Declaration has space between * and variable name in %s' % matched.group(0).strip())

    # Next we will look for issues with function calls.
    check_spacing_for_function_call(line, line_number, error)

    # Except after an opening paren, you should have spaces before your braces.
    # And since you should never have braces at the beginning of a line, this is
    # an easy test.
    if search(r'[^ ({]{', line):
        error(line_number, 'whitespace/braces', 5,
              'Missing space before {')

    # Make sure '} else {' has spaces.
    if search(r'}else', line):
        error(line_number, 'whitespace/braces', 5,
              'Missing space before else')

    # You shouldn't have spaces before your brackets, except maybe after
    # 'delete []' or 'new char * []'.
    if search(r'\w\s+\[', line) and not search(r'delete\s+\[', line):
        error(line_number, 'whitespace/braces', 5,
              'Extra space before [')

    # There should always be a single space in between braces on the same line.
    if search(r'\{\}', line):
        error(line_number, 'whitespace/braces', 5, 'Missing space inside { }.')
    if search(r'\{\s\s+\}', line):
        error(line_number, 'whitespace/braces', 5, 'Too many spaces inside { }.')

    # You shouldn't have a space before a semicolon at the end of the line.
    # There's a special case for "for" since the style guide allows space before
    # the semicolon there.
    if search(r':\s*;\s*$', line):
        error(line_number, 'whitespace/semicolon', 5,
              'Semicolon defining empty statement. Use { } instead.')
    elif search(r'^\s*;\s*$', line):
        error(line_number, 'whitespace/semicolon', 5,
              'Line contains only semicolon. If this should be an empty statement, '
              'use { } instead.')
    elif (search(r'\s+;\s*$', line) and not search(r'\bfor\b', line)):
        error(line_number, 'whitespace/semicolon', 5,
              'Extra space before last semicolon. If this should be an empty '
              'statement, use { } instead.')
    elif (search(r'\b(for|while)\s*\(.*\)\s*;\s*$', line)
          and line.count('(') == line.count(')')
          # Allow do {} while();
          and not search(r'}\s*while', line)):
        error(line_number, 'whitespace/semicolon', 5,
              'Semicolon defining empty statement for this loop. Use { } instead.')
def get_previous_non_blank_line(clean_lines, line_number):
    """Return the closest preceding non-blank line and its line number.

    Args:
      clean_lines: A CleansedLines instance containing the file contents.
      line_number: The number of the line to check.

    Returns:
      A (contents, number) tuple for the most recent non-blank line before
      line_number, or ('', -1) when every earlier line is blank (i.e. this
      is the first non-blank line of the file).
    """
    # Walk backwards from the line just above line_number.
    for candidate_number in range(line_number - 1, -1, -1):
        candidate = clean_lines.elided[candidate_number]
        if not is_blank_line(candidate):
            return (candidate, candidate_number)
    return ('', -1)
def check_namespace_indentation(clean_lines, line_number, file_extension, file_state, error):
"""Looks for indentation errors inside of namespaces.
Args:
clean_lines: A CleansedLines instance containing the file.
line_number: The number of the line to check.
file_extension: The extension (dot not included) of the file.
file_state: A _FileState instance which maintains information about
the state of things in the file.
error: The function to call with any errors found.
"""
line = clean_lines.elided[line_number] # Get rid of comments and strings.
namespace_match = match(r'(?P<namespace_indentation>\s*)namespace\s+\S+\s*{\s*$', line)
if not namespace_match:
return
current_indentation_level = len(namespace_match.group('namespace_indentation'))
if current_indentation_level > 0:
# Don't warn about an indented namespace if we already warned about indented code.
if not file_state.did_inside_namespace_indent_warning():
error(line_number, 'whitespace/indent', 4,
'namespace should never be indented.')
return
looking_for_semicolon = False;
line_offset = 0
in_preprocessor_directive = False;
for current_line in clean_lines.elided[line_number + 1:]:
line_offset += 1
if not current_line.strip():
continue
if not current_indentation_level:
if not (in_preprocessor_directive or looking_for_semicolon):
if not match(r'\S', current_line) and not file_state.did_inside_namespace_indent_warning():
file_state.set_did_inside_namespace_indent_warning()
error(line_number + line_offset, 'whitespace/indent', 4,
'Code inside a namespace should not be indented.')
if in_preprocessor_directive or (current_line.strip()[0] == '#'): # This takes care of preprocessor directive syntax.
in_preprocessor_directive = current_line[-1] == '\\'
else:
looking_for_semicolon = ((current_line.find(';') == -1) and (current_line.strip()[-1] != '}')) or (current_line[-1] == '\\')
else:
looking_for_semicolon = False; # If we have a brace we may not need a semicolon.
current_indentation_level += current_line.count('{') - current_line.count('}')
if current_indentation_level < 0:
break;
def check_enum_casing(clean_lines, line_number, enum_state, error):
    """Looks for incorrectly named enum values.

    Enum members are expected to use InterCaps with an initial capital
    letter; a "// WebIDL enum" / "// WebKitIDL enum" marker comment toggles
    WebIDL-enum mode on the shared enum_state.

    Args:
      clean_lines: A CleansedLines instance containing the file.
      line_number: The number of the line to check.
      enum_state: A _EnumState instance which maintains enum declaration state.
      error: The function to call with any errors found.
    """
    # The marker lives in a comment, so it must be read from the raw line.
    raw_line = clean_lines.raw_lines[line_number]
    enum_state.is_webidl_enum |= bool(match(r'\s*// Web(?:Kit)?IDL enum\s*$', raw_line))

    cleaned_line = clean_lines.elided[line_number]  # Comments and strings removed.
    if enum_state.process_clean_line(cleaned_line):
        return
    error(line_number, 'readability/enum_casing', 4,
          'enum members should use InterCaps with an initial capital letter.')
def check_directive_indentation(clean_lines, line_number, file_state, error):
    """Looks for indentation of preprocessor directives.

    Args:
      clean_lines: A CleansedLines instance containing the file.
      line_number: The number of the line to check.
      file_state: A _FileState instance which maintains information about
                  the state of things in the file.
      error: The function to call with any errors found.
    """
    line = clean_lines.elided[line_number]  # Comments and strings removed.
    # A '#' preceded by any whitespace means the directive is indented.
    if match(r'\s+#', line):
        error(line_number, 'whitespace/indent', 4, 'preprocessor directives (e.g., #ifdef, #define, #import) should never be indented.')
def get_initial_spaces_for_line(clean_line):
    """Return the number of leading space characters on clean_line.

    Only ' ' characters count; a leading tab (or any other character)
    terminates the count, matching the original scan-from-the-left loop.
    """
    return len(clean_line) - len(clean_line.lstrip(' '))
def check_indentation_amount(clean_lines, line_number, error):
    """Check that indentation is a multiple of 4 and wraps add one level.

    Args:
      clean_lines: A CleansedLines instance containing the file.
      line_number: The number of the line to check.
      error: The function to call with any errors found.
    """
    line = clean_lines.elided[line_number]
    leading_spaces = get_initial_spaces_for_line(line)
    if leading_spaces % 4:
        error(line_number, 'whitespace/indent', 3,
              'Weird number of spaces at line-start. Are you using a 4-space indent?')
        return

    previous_line = get_previous_non_blank_line(clean_lines, line_number)[0]
    # Skip when there is no previous content, or the previous line is a
    # label ("foo:") or a preprocessor directive — those don't constrain
    # the current line's indentation.
    if (not previous_line.strip()
            or match(r'\s*\w+\s*:\s*$', previous_line)
            or previous_line[0] == '#'):
        return

    if leading_spaces > get_initial_spaces_for_line(previous_line) + 4:
        error(line_number, 'whitespace/indent', 3, 'When wrapping a line, only indent 4 spaces.')
def check_using_std(clean_lines, line_number, file_state, error):
    """Flag 'using std::foo;' statements in favor of 'using namespace std;'.

    Args:
      clean_lines: A CleansedLines instance containing the file.
      line_number: The number of the line to check.
      file_state: A _FileState instance which maintains information about
                  the state of things in the file.
      error: The function to call with any errors found.
    """
    # The rule does not apply to C or Objective-C implementation files.
    if file_state.is_c_or_objective_c():
        return

    line = clean_lines.elided[line_number]  # Comments and strings removed.
    matched = match(r'\s*using\s+std::(?P<method_name>\S+)\s*;\s*$', line)
    if not matched:
        return
    error(line_number, 'build/using_std', 4,
          "Use 'using namespace std;' instead of 'using std::%s;'." % matched.group('method_name'))
def check_using_namespace(clean_lines, line_number, file_extension, error):
    """Flag 'using namespace foo;' statements, which are forbidden in headers.

    Args:
      clean_lines: A CleansedLines instance containing the file.
      line_number: The number of the line to check.
      file_extension: The extension (dot not included) of the file.
      error: The function to call with any errors found.
    """
    # Only header files are subject to this rule.
    if file_extension != 'h':
        return

    line = clean_lines.elided[line_number]  # Comments and strings removed.
    matched = match(r'\s*using\s+namespace\s+(?P<method_name>\S+)\s*;\s*$', line)
    if not matched:
        return
    error(line_number, 'build/using_namespace', 4,
          "Do not use 'using namespace %s;'." % matched.group('method_name'))
def check_max_min_macros(clean_lines, line_number, file_state, error):
    """Flag MAX()/MIN() macro calls that should be std::max()/std::min().

    Args:
      clean_lines: A CleansedLines instance containing the file.
      line_number: The number of the line to check.
      file_state: A _FileState instance which maintains information about
                  the state of things in the file.
      error: The function to call with any errors found.
    """
    # The C / Objective-C macros are acceptable in those files.
    if file_state.is_c_or_objective_c():
        return

    line = clean_lines.elided[line_number]  # Comments and strings removed.
    found = search(r'\b(?P<max_min_macro>(MAX|MIN))\s*\(', line)
    if not found:
        return

    macro_name = found.group('max_min_macro')
    lowered = macro_name.lower()
    error(line_number, 'runtime/max_min_macros', 4,
          'Use std::%s() or std::%s<type>() instead of the %s() macro.'
          % (lowered, lowered, macro_name))
def check_ctype_functions(clean_lines, line_number, file_state, error):
    """Looks for use of the standard functions in ctype.h and suggests they be
    replaced by use of the equivalent ones in <wtf/ASCIICType.h>.

    Args:
      clean_lines: A CleansedLines instance containing the file.
      line_number: The number of the line to check.
      file_state: A _FileState instance which maintains information about
                  the state of things in the file.
      error: The function to call with any errors found.
    """
    line = clean_lines.elided[line_number]  # Get rid of comments and strings.
    ctype_function_search = search(
        r'\b(?P<ctype_function>(isalnum|isalpha|isascii|isblank|iscntrl|isdigit|isgraph|'
        r'islower|isprint|ispunct|isspace|isupper|isxdigit|toascii|tolower|toupper))\s*\(', line)
    if not ctype_function_search:
        return
    ctype_function = ctype_function_search.group('ctype_function')
    # Fixed misspelling: the message previously read "equivelent".
    error(line_number, 'runtime/ctype_function', 4,
          'Use equivalent function in <wtf/ASCIICType.h> instead of the %s() function.'
          % (ctype_function))
def check_switch_indentation(clean_lines, line_number, error):
    """Looks for indentation errors inside of switch statements.

    Fires only on lines that open a switch ("switch (...) {"). It then scans
    forward: case/default labels must line up with the switch itself, and all
    other statements must be indented one level deeper. Reports at most one
    error per switch and stops at the switch's closing brace.

    Args:
      clean_lines: A CleansedLines instance containing the file.
      line_number: The number of the line to check.
      error: The function to call with any errors found.
    """
    line = clean_lines.elided[line_number]  # Get rid of comments and strings.
    switch_match = match(r'(?P<switch_indentation>\s*)switch\s*\(.+\)\s*{\s*$', line)
    if not switch_match:
        return
    switch_indentation = switch_match.group('switch_indentation')
    inner_indentation = switch_indentation + ' ' * 4
    line_offset = 0
    encountered_nested_switch = False
    for current_line in clean_lines.elided[line_number + 1:]:
        line_offset += 1
        # Skip not only empty lines but also those with preprocessor directives.
        if current_line.strip() == '' or current_line.startswith('#'):
            continue
        if match(r'\s*switch\s*\(.+\)\s*{\s*$', current_line):
            # Complexity alarm - another switch statement nested inside the one
            # that we're currently testing. We'll need to track the extent of
            # that inner switch if the upcoming label tests are still supposed
            # to work correctly. Let's not do that; instead, we'll finish
            # checking this line, and then leave it like that. Assuming the
            # indentation is done consistently (even if incorrectly), this will
            # still catch all indentation issues in practice.
            encountered_nested_switch = True
        # Split the line into its indentation and the rest.
        current_indentation_match = match(r'(?P<indentation>\s*)(?P<remaining_line>.*)$', current_line);
        current_indentation = current_indentation_match.group('indentation')
        remaining_line = current_indentation_match.group('remaining_line')
        # End the check at the end of the switch statement.
        if remaining_line.startswith('}') and current_indentation == switch_indentation:
            break
        # Case and default branches should not be indented. The regexp also
        # catches single-line cases like "default: break;" but does not trigger
        # on stuff like "Document::Foo();".
        elif match(r'(default|case\s+.*)\s*:([^:].*)?$', remaining_line):
            if current_indentation != switch_indentation:
                error(line_number + line_offset, 'whitespace/indent', 4,
                      'A case label should not be indented, but line up with its switch statement.')
                # Don't throw an error for multiple badly indented labels,
                # one should be enough to figure out the problem.
                break
        # We ignore goto labels at the very beginning of a line.
        elif match(r'\w+\s*:\s*$', remaining_line):
            continue
        # It's not a goto label, so check if it's indented at least as far as
        # the switch statement plus one more level of indentation.
        elif not current_indentation.startswith(inner_indentation):
            error(line_number + line_offset, 'whitespace/indent', 4,
                  'Non-label code inside switch statements should be indented.')
            # Don't throw an error for multiple badly indented statements,
            # one should be enough to figure out the problem.
            break
        if encountered_nested_switch:
            break
def check_braces(clean_lines, line_number, error):
    """Looks for misplaced braces (e.g. at the end of line).

    Covers several independent heuristics: an opening brace starting a line,
    a function-definition brace not on its own line, braces around a one-line
    control clause, else/do-while placement, and a stray ';' after '}'.

    Args:
      clean_lines: A CleansedLines instance containing the file.
      line_number: The number of the line to check.
      error: The function to call with any errors found.
    """
    line = clean_lines.elided[line_number]  # Get rid of comments and strings.
    if match(r'\s*{\s*$', line):
        # We allow an open brace to start a line in the case where someone
        # is using braces for function definition or in a block to
        # explicitly create a new scope, which is commonly used to control
        # the lifetime of stack-allocated variables. We don't detect this
        # perfectly: we just don't complain if the last non-whitespace
        # character on the previous non-blank line is ';', ':', '{', '}',
        # ')', or ') const' and doesn't begin with 'if|for|while|switch|else'.
        # We also allow '#' for #endif and '=' for array initialization.
        previous_line = get_previous_non_blank_line(clean_lines, line_number)[0]
        if ((not search(r'[;:}{)=]\s*$|\)\s*((const|OVERRIDE)\s*)*\s*$', previous_line)
             or search(r'\b(if|for|foreach|while|switch|else)\b', previous_line))
                and previous_line.find('#') < 0):
            error(line_number, 'whitespace/braces', 4,
                  'This { should be at the end of the previous line')
    elif (search(r'\)\s*(((const|OVERRIDE)\s*)*\s*)?{\s*$', line)
          and line.count('(') == line.count(')')
          and not search(r'\b(if|for|foreach|while|switch)\b', line)
          and not match(r'\s+[A-Z_][A-Z_0-9]+\b', line)):
        # ") {" at end of line with balanced parens and no control keyword:
        # this looks like a function definition header, which should put the
        # brace on its own line. The ALL_CAPS exclusion skips macro calls.
        error(line_number, 'whitespace/braces', 4,
              'Place brace on its own line for function definitions.')
    if (match(r'\s*}\s*(else\s*({\s*)?)?$', line) and line_number > 1):
        # We check if a closed brace has started a line to see if a
        # one line control statement was previous.
        previous_line = clean_lines.elided[line_number - 2]
        last_open_brace = previous_line.rfind('{')
        if (last_open_brace != -1 and previous_line.find('}', last_open_brace) == -1
                and search(r'\b(if|for|foreach|while|else)\b', previous_line)):
            error(line_number, 'whitespace/braces', 4,
                  'One line control clauses should not use braces.')
    # An else clause should be on the same line as the preceding closing brace.
    if match(r'\s*else\s*', line):
        previous_line = get_previous_non_blank_line(clean_lines, line_number)[0]
        if match(r'\s*}\s*$', previous_line):
            error(line_number, 'whitespace/newline', 4,
                  'An else should appear on the same line as the preceding }')
    # Likewise, an else should never have the else clause on the same line
    if search(r'\belse [^\s{]', line) and not search(r'\belse if\b', line):
        error(line_number, 'whitespace/newline', 4,
              'Else clause should never be on same line as else (use 2 lines)')
    # In the same way, a do/while should never be on one line
    if match(r'\s*do [^\s{]', line):
        error(line_number, 'whitespace/newline', 4,
              'do/while clauses should not be on a single line')
    # Braces shouldn't be followed by a ; unless they're defining a struct
    # or initializing an array.
    # We can't tell in general, but we can for some common cases.
    # Prepend preceding lines (one per iteration) until the combined text
    # contains a ';' before the '{...};' pattern, so multi-line initializers
    # can be classified.
    previous_line_number = line_number
    while True:
        (previous_line, previous_line_number) = get_previous_non_blank_line(clean_lines, previous_line_number)
        if match(r'\s+{.*}\s*;', line) and not previous_line.count(';'):
            line = previous_line + line
        else:
            break
    if (search(r'{.*}\s*;', line)
            and line.count('{') == line.count('}')
            and not search(r'struct|class|enum|\s*=\s*{', line)):
        error(line_number, 'readability/braces', 4,
              "You don't need a ; after a }")
def check_exit_statement_simplifications(clean_lines, line_number, error):
    """Looks for else or else-if statements that should be written as an
    if statement when the prior if concludes with a return, break, continue or
    goto statement.

    Fires on lines containing "else" / "else if"; scans backwards through the
    preceding lines to decide whether the prior if-block ends in an exit
    statement, and if so reports the redundancy.

    Args:
      clean_lines: A CleansedLines instance containing the file.
      line_number: The number of the line to check.
      error: The function to call with any errors found.
    """
    line = clean_lines.elided[line_number]  # Get rid of comments and strings.
    else_match = match(r'(?P<else_indentation>\s*)(\}\s*)?else(\s+if\s*\(|(?P<else>\s*(\{\s*)?\Z))', line)
    if not else_match:
        return
    else_indentation = else_match.group('else_indentation')
    inner_indentation = else_indentation + ' ' * 4
    # Walk the preceding lines in reverse order; line_offset tracks the
    # (negative) distance from the else line for error reporting.
    previous_lines = clean_lines.elided[:line_number]
    previous_lines.reverse()
    line_offset = 0
    encountered_exit_statement = False
    for current_line in previous_lines:
        line_offset -= 1
        # Skip not only empty lines but also those with preprocessor directives
        # and goto labels.
        if current_line.strip() == '' or current_line.startswith('#') or match(r'\w+\s*:\s*$', current_line):
            continue
        # Skip lines with closing braces on the original indentation level.
        # Even though the styleguide says they should be on the same line as
        # the "else if" statement, we also want to check for instances where
        # the current code does not comply with the coding style. Thus, ignore
        # these lines and proceed to the line before that.
        if current_line == else_indentation + '}':
            continue
        current_indentation_match = match(r'(?P<indentation>\s*)(?P<remaining_line>.*)$', current_line);
        current_indentation = current_indentation_match.group('indentation')
        remaining_line = current_indentation_match.group('remaining_line')
        # As we're going up the lines, the first real statement to encounter
        # has to be an exit statement (return, break, continue or goto) -
        # otherwise, this check doesn't apply.
        if not encountered_exit_statement:
            # We only want to find exit statements if they are on exactly
            # the same level of indentation as expected from the code inside
            # the block. If the indentation doesn't strictly match then we
            # might have a nested if or something, which must be ignored.
            if current_indentation != inner_indentation:
                break
            if match(r'(return(\W+.*)|(break|continue)\s*;|goto\s*\w+;)$', remaining_line):
                encountered_exit_statement = True
                continue
            break
        # When code execution reaches this point, we've found an exit statement
        # as last statement of the previous block. Now we only need to make
        # sure that the block belongs to an "if", then we can throw an error.
        # Skip lines with opening braces on the original indentation level,
        # similar to the closing braces check above. ("if (condition)\n{")
        if current_line == else_indentation + '{':
            continue
        # Skip everything that's further indented than our "else" or "else if".
        if current_indentation.startswith(else_indentation) and current_indentation != else_indentation:
            continue
        # So we've got a line with same (or less) indentation. Is it an "if"?
        # If yes: throw an error. If no: don't throw an error.
        # Whatever the outcome, this is the end of our loop.
        if match(r'if\s*\(', remaining_line):
            # else_match.start('else') distinguishes a plain "else" (group
            # matched) from an "else if" (group did not participate, -1).
            if else_match.start('else') != -1:
                error(line_number + line_offset, 'readability/control_flow', 4,
                      'An else statement can be removed when the prior "if" '
                      'concludes with a return, break, continue or goto statement.')
            else:
                error(line_number + line_offset, 'readability/control_flow', 4,
                      'An else if statement should be written as an if statement '
                      'when the prior "if" concludes with a return, break, '
                      'continue or goto statement.')
        break
def replaceable_check(operator, macro, line):
    """Determine whether a basic CHECK can be replaced with a more specific one.

    For example suggest using CHECK_EQ instead of CHECK(a == b) and
    similarly for CHECK_GE, CHECK_GT, CHECK_LE, CHECK_LT, CHECK_NE.

    Args:
      operator: The C++ operator used in the CHECK.
      macro: The CHECK or EXPECT macro being called.
      line: The current source line.

    Returns:
      True if the CHECK can be replaced with a more specific one.
    """
    # This matches decimal and hex integers, strings, and chars (in that order).
    literal = r'([-+]?(\d+|0[xX][0-9a-fA-F]+)[lLuU]{0,3}|".*"|\'.*\')'

    # Require a literal on at least one side of the operator, since
    # CHECK(x == iterator) won't compile as CHECK_EQ(x, iterator). This misses
    # some replaceable CHECKs but avoids a flood of false positives. The
    # [^<>] guards keep template arguments and shifts from matching.
    pattern = (r'\s*%s\((\s*%s\s*%s[^<>].*|.*[^<>]%s\s*%s\s*\))'
               % (macro, literal, operator, operator, literal))

    # CHECK(x == NULL) stays as-is (CHECK_EQ(x, NULL) needs a cast), and
    # compound boolean expressions with && / || are left alone too.
    return match(pattern, line) and not search(r'NULL|&&|\|\|', line)
def check_check(clean_lines, line_number, error):
    """Checks the use of CHECK and EXPECT macros.

    Suggests the operator-specific variant (e.g. CHECK_EQ) when a plain
    CHECK/EXPECT compares against a literal.

    Args:
      clean_lines: A CleansedLines instance containing the file.
      line_number: The number of the line to check.
      error: The function to call with any errors found.
    """
    # Cheap bail-out: find which macro (if any) appears on the raw line.
    raw_line = clean_lines.raw_lines[line_number]
    current_macro = ''
    for candidate in _CHECK_MACROS:
        if candidate in raw_line:
            current_macro = candidate
            break
    if not current_macro:
        return

    line = clean_lines.elided[line_number]  # Comments and strings removed.
    # Encourage replacing plain CHECKs with CHECK_EQ/CHECK_NE/etc.
    # Operator order matters: '>=' must be tried before '>', '<=' before '<'.
    for operator in ('==', '!=', '>=', '>', '<=', '<'):
        if not replaceable_check(operator, current_macro, line):
            continue
        error(line_number, 'readability/check', 2,
              'Consider using %s instead of %s(a %s b)' % (
                  _CHECK_REPLACEMENT[current_macro][operator],
                  current_macro, operator))
        break
def check_for_comparisons_to_zero(clean_lines, line_number, error):
    """Flag explicit equality comparisons against 0, NULL, true or false."""
    # Get the line without comments and strings.
    line = clean_lines.elided[line_number]
    # Include NULL here so that users don't have to convert NULL to 0 first and then get this error.
    literal_on_right = search(r'[=!]=\s*(NULL|0|true|false)[^\w.]', line)
    literal_on_left = search(r'[^\w.](NULL|0|true|false)\s*[=!]=', line)
    if not (literal_on_right or literal_on_left):
        return
    # LIKELY()/UNLIKELY() annotations are exempt.
    if search('LIKELY', line) or search('UNLIKELY', line):
        return
    error(line_number, 'readability/comparison_to_zero', 5,
          'Tests for true/false, null/non-null, and zero/non-zero should all be done without equality comparisons.')
def check_for_null(clean_lines, line_number, file_state, error):
    """Flag uses of NULL; 0 (or "null" in comments) is preferred.

    Args:
      clean_lines: A CleansedLines instance containing the file.
      line_number: The number of the line to check.
      file_state: A _FileState instance which maintains information about
                  the state of things in the file.
      error: The function to call with any errors found.
    """
    # This check doesn't apply to C or Objective-C implementation files.
    if file_state.is_c_or_objective_c():
        return

    line = clean_lines.elided[line_number]

    # APIs whose varargs/out-parameter conventions legitimately need NULL.
    # See Bugs 32858, 39372, 70498, 43090, 51758 and 77890.
    exempt_patterns = (
        r'\bg(_[a-z]+)+\b',                      # g_*()
        r'\bgst(_[a-z]+)+\b',                    # gst_*()
        r'\bgdk_pixbuf_save_to\w+\b',            # gdk_pixbuf_save_to_*()
        r'\bgtk_widget_style_get\(\w+\b',        # gtk_widget_style_get()
        r'\bgtk_style_context_get_style\(\w+\b', # gtk_style_context_get_style()
        r'\bgtk_style_context_get\(\w+\b',       # gtk_style_context_get()
        r'\bsoup_server_new\(\w+\b',             # soup_server_new()
    )
    for pattern in exempt_patterns:
        if search(pattern, line):
            return

    if search(r'\bNULL\b', line):
        error(line_number, 'readability/null', 5, 'Use 0 instead of NULL.')
        return

    # NULL found only in the raw line must be inside a comment or a string.
    # Collapse strings first so that NULL inside a string literal does not
    # trigger the comment-specific warning.
    raw_line = clean_lines.raw_lines[line_number]
    if search(r'\bNULL\b', raw_line) and search(r'\bNULL\b', CleansedLines.collapse_strings(raw_line)):
        error(line_number, 'readability/null', 4, 'Use 0 or null instead of NULL (even in *comments*).')
def get_line_width(line):
    """Determines the width of the line in column positions.

    Args:
      line: A string, which may be a Unicode string.

    Returns:
      The width of the line in column positions: wide/fullwidth East Asian
      characters count as two columns and combining characters as zero.
    """
    # Byte strings are measured by length; only unicode gets per-character
    # width treatment. (This is Python 2 code: `unicode` is the py2 builtin.)
    if not isinstance(line, unicode):
        return len(line)
    width = 0
    for character in unicodedata.normalize('NFC', line):
        if unicodedata.east_asian_width(character) in ('W', 'F'):
            width += 2
        elif not unicodedata.combining(character):
            width += 1
    return width
def check_style(clean_lines, line_number, file_extension, class_state, file_state, enum_state, error):
    """Checks rules from the 'C++ style rules' section of cppguide.html.

    Most of these rules are hard to test (naming, comment style), but we
    do what we can. In particular we check for 4-space indents, line lengths,
    tab usage, spaces inside code, etc.

    Args:
      clean_lines: A CleansedLines instance containing the file.
      line_number: The number of the line to check.
      file_extension: The extension (without the dot) of the filename.
      class_state: A _ClassState instance which maintains information about
                   the current stack of nested class declarations being parsed.
      file_state: A _FileState instance which maintains information about
                  the state of things in the file.
      enum_state: A _EnumState instance which maintains the current enum state.
      error: The function to call with any errors found.
    """
    # Tabs and trailing whitespace are checked on the raw line, since the
    # elided version may have stripped the offending characters.
    raw_lines = clean_lines.raw_lines
    line = raw_lines[line_number]
    if line.find('\t') != -1:
        error(line_number, 'whitespace/tab', 1,
              'Tab found; better to use spaces')
    cleansed_line = clean_lines.elided[line_number]
    if line and line[-1].isspace():
        error(line_number, 'whitespace/end_of_line', 4,
              'Line ends in whitespace. Consider deleting these extra spaces.')
    # More than one ';' usually means multiple statements on one line.
    # Each 'and not' clause below exempts a known-legitimate pattern.
    if (cleansed_line.count(';') > 1
            # for loops are allowed two ;'s (and may run over two lines).
            and cleansed_line.find('for') == -1
            and (get_previous_non_blank_line(clean_lines, line_number)[0].find('for') == -1
                 or get_previous_non_blank_line(clean_lines, line_number)[0].find(';') != -1)
            # It's ok to have many commands in a switch case that fits in 1 line
            and not ((cleansed_line.find('case ') != -1
                      or cleansed_line.find('default:') != -1)
                     and cleansed_line.find('break;') != -1)
            # Also it's ok to have many commands in trivial single-line accessors in class definitions.
            and not (match(r'.*\(.*\).*{.*.}', line)
                     and class_state.classinfo_stack
                     and line.count('{') == line.count('}'))
            and not cleansed_line.startswith('#define ')
            # It's ok to use use WTF_MAKE_NONCOPYABLE and WTF_MAKE_FAST_ALLOCATED macros in 1 line
            and not (cleansed_line.find("WTF_MAKE_NONCOPYABLE") != -1
                     and cleansed_line.find("WTF_MAKE_FAST_ALLOCATED") != -1)):
        error(line_number, 'whitespace/newline', 4,
              'More than one command on the same line')
    # Multi-line boolean expressions should put && / || at the start of the
    # continuation line, not the end of the current one.
    if cleansed_line.strip().endswith('||') or cleansed_line.strip().endswith('&&'):
        error(line_number, 'whitespace/operators', 4,
              'Boolean expressions that span multiple lines should have their '
              'operators on the left side of the line instead of the right side.')
    # Some more style checks
    check_namespace_indentation(clean_lines, line_number, file_extension, file_state, error)
    check_directive_indentation(clean_lines, line_number, file_state, error)
    check_using_std(clean_lines, line_number, file_state, error)
    check_using_namespace(clean_lines, line_number, file_extension, error)
    check_max_min_macros(clean_lines, line_number, file_state, error)
    check_ctype_functions(clean_lines, line_number, file_state, error)
    check_switch_indentation(clean_lines, line_number, error)
    check_braces(clean_lines, line_number, error)
    check_exit_statement_simplifications(clean_lines, line_number, error)
    check_spacing(file_extension, clean_lines, line_number, error)
    check_check(clean_lines, line_number, error)
    check_for_comparisons_to_zero(clean_lines, line_number, error)
    check_for_null(clean_lines, line_number, file_state, error)
    check_indentation_amount(clean_lines, line_number, error)
    check_enum_casing(clean_lines, line_number, enum_state, error)
# Matches an include of a local header in quotes with no directory
# component, e.g. #include "foo.h".
_RE_PATTERN_INCLUDE_NEW_STYLE = re.compile(r'#include +"[^/]+\.h"')
# Matches any #include line, capturing the opening delimiter (< or ") as
# group 1 and the header path as group 2.
_RE_PATTERN_INCLUDE = re.compile(r'^\s*#\s*include\s*([<"])([^>"]*)[>"].*$')
# Matches the first component of a filename delimited by -s and _s. That is:
#  _RE_FIRST_COMPONENT.match('foo').group(0) == 'foo'
#  _RE_FIRST_COMPONENT.match('foo.cpp').group(0) == 'foo'
#  _RE_FIRST_COMPONENT.match('foo-bar_baz.cpp').group(0) == 'foo'
#  _RE_FIRST_COMPONENT.match('foo_bar-baz.cpp').group(0) == 'foo'
_RE_FIRST_COMPONENT = re.compile(r'^[^-_.]+')
def _drop_common_suffixes(filename):
"""Drops common suffixes like _test.cpp or -inl.h from filename.
For example:
>>> _drop_common_suffixes('foo/foo-inl.h')
'foo/foo'
>>> _drop_common_suffixes('foo/bar/foo.cpp')
'foo/bar/foo'
>>> _drop_common_suffixes('foo/foo_internal.h')
'foo/foo'
>>> _drop_common_suffixes('foo/foo_unusualinternal.h')
'foo/foo_unusualinternal'
Args:
filename: The input filename.
Returns:
The filename with the common suffix removed.
"""
for suffix in ('test.cpp', 'regtest.cpp', 'unittest.cpp',
'inl.h', 'impl.h', 'internal.h'):
if (filename.endswith(suffix) and len(filename) > len(suffix)
and filename[-len(suffix) - 1] in ('-', '_')):
return filename[:-len(suffix) - 1]
return os.path.splitext(filename)[0]
def _classify_include(filename, include, is_system, include_state):
    """Figures out what kind of header 'include' is.

    Args:
      filename: The current file cpp_style is running over.
      include: The path to a #included file.
      is_system: True if the #include used <> rather than "".
      include_state: An _IncludeState instance in which the headers are inserted.

    Returns:
      One of the _XXX_HEADER constants.

    For example (the include_state argument is elided here for brevity —
    these calls illustrate the classification only, not the exact signature):
      >>> _classify_include('foo.cpp', 'config.h', False, ...)
      _CONFIG_HEADER
      >>> _classify_include('foo.cpp', 'foo.h', False, ...)
      _PRIMARY_HEADER
      >>> _classify_include('foo.cpp', 'bar.h', False, ...)
      _OTHER_HEADER
    """
    # If it is a system header we know it is classified as _OTHER_HEADER.
    # ('public/' headers are exempt from this shortcut.)
    if is_system and not include.startswith('public/'):
        return _OTHER_HEADER

    # If the include is named config.h then this is WebCore/config.h.
    if include == "config.h":
        return _CONFIG_HEADER

    # There cannot be primary includes in header files themselves. Only an
    # include exactly matching the header filename will be flagged as
    # primary, so that it triggers the "don't include yourself" check.
    if filename.endswith('.h') and filename != include:
        return _OTHER_HEADER;

    # Qt's moc files do not follow the naming and ordering rules, so they should be skipped
    if include.startswith('moc_') and include.endswith('.cpp'):
        return _MOC_HEADER

    if include.endswith('.moc'):
        return _MOC_HEADER

    # If the target file basename starts with the include we're checking
    # then we consider it the primary header.
    target_base = FileInfo(filename).base_name()
    include_base = FileInfo(include).base_name()

    # If we haven't encountered a primary header, then be lenient in checking.
    if not include_state.visited_primary_section():
        if target_base.find(include_base) != -1:
            return _PRIMARY_HEADER
        # Qt private APIs use _p.h suffix.
        if include_base.find(target_base) != -1 and include_base.endswith('_p'):
            return _PRIMARY_HEADER
    # If we already encountered a primary header, perform a strict comparison.
    # In case the two filename bases are the same then the above lenient check
    # probably was a false positive.
    elif include_state.visited_primary_section() and target_base == include_base:
        if include == "ResourceHandleWin.h":
            # FIXME: Thus far, we've only seen one example of these, but if we
            # start to see more, please consider generalizing this check
            # somehow.
            return _OTHER_HEADER
        return _PRIMARY_HEADER

    return _OTHER_HEADER
def _does_primary_header_exist(filename):
    """Return True if a primary header file exists for the given source file.

    Returns False when filename is not a source file or when no sibling
    header with the same base name exists on disk.
    """
    fileinfo = FileInfo(filename)
    if not fileinfo.is_source():
        return False
    # The primary header sits next to the source with a '.h' extension.
    return os.path.isfile(fileinfo.no_extension() + '.h')
def check_include_line(filename, file_extension, clean_lines, line_number, include_state, error):
    """Check rules that are applicable to #include lines.

    Strings on #include lines are NOT removed from elided line, to make
    certain tasks easier. However, to prevent false positives, checks
    applicable to #include lines in CheckLanguage must be put here.

    Args:
      filename: The name of the current file.
      file_extension: The current file extension, without the leading dot.
      clean_lines: A CleansedLines instance containing the file.
      line_number: The number of the line to check.
      include_state: An _IncludeState instance in which the headers are inserted.
      error: The function to call with any errors found.
    """
    # FIXME: For readability or as a possible optimization, consider
    #        exiting early here by checking whether the "build/include"
    #        category should be checked for the given filename. This
    #        may involve having the error handler classes expose a
    #        should_check() method, in addition to the usual __call__
    #        method.
    line = clean_lines.lines[line_number]

    matched = _RE_PATTERN_INCLUDE.search(line)
    if not matched:
        return

    include = matched.group(2)
    is_system = (matched.group(1) == '<')

    # Look for any of the stream classes that are part of standard C++.
    if match(r'(f|ind|io|i|o|parse|pf|stdio|str|)?stream$', include):
        error(line_number, 'readability/streams', 3,
              'Streams are highly discouraged.')

    # Look for specific includes to fix.
    if include.startswith('wtf/') and not is_system:
        error(line_number, 'build/include', 4,
              'wtf includes should be <wtf/file.h> instead of "wtf/file.h".')

    # A header must not be included twice in the same file; include_state
    # remembers the line each header first appeared on so the message can
    # point back to it.
    duplicate_header = include in include_state
    if duplicate_header:
        error(line_number, 'build/include', 4,
              '"%s" already included at %s:%s' %
              (include, filename, include_state[include]))
    else:
        include_state[include] = line_number

    header_type = _classify_include(filename, include, is_system, include_state)
    primary_header_exists = _does_primary_header_exist(filename)
    include_state.header_types[line_number] = header_type

    # Only proceed if this isn't a duplicate header.
    if duplicate_header:
        return

    # We want to ensure that headers appear in the right order:
    # 1) for implementation files: config.h, primary header, blank line, alphabetically sorted
    # 2) for header files: alphabetically sorted
    # The include_state object keeps track of the last type seen
    # and complains if the header types are out of order or missing.
    error_message = include_state.check_next_include_order(header_type,
                                                           file_extension == "h",
                                                           primary_header_exists)

    # Check to make sure we have a blank line after primary header.
    if not error_message and header_type == _PRIMARY_HEADER:
        next_line = clean_lines.raw_lines[line_number + 1]
        if not is_blank_line(next_line):
            error(line_number, 'build/include_order', 4,
                  'You should add a blank line after implementation file\'s own header.')

    # Check to make sure all headers besides config.h and the primary header are
    # alphabetically sorted. Skip Qt's moc files.
    if not error_message and header_type == _OTHER_HEADER:
        previous_line_number = line_number - 1;
        previous_line = clean_lines.lines[previous_line_number]
        previous_match = _RE_PATTERN_INCLUDE.search(previous_line)
        # Walk backwards over non-include lines, stopping at preprocessor
        # conditionals so includes in different #if branches are not compared
        # against each other.
        while (not previous_match and previous_line_number > 0
               and not search(r'\A(#if|#ifdef|#ifndef|#else|#elif|#endif)', previous_line)):
            previous_line_number -= 1;
            previous_line = clean_lines.lines[previous_line_number]
            previous_match = _RE_PATTERN_INCLUDE.search(previous_line)
        if previous_match:
            previous_header_type = include_state.header_types[previous_line_number]
            if previous_header_type == _OTHER_HEADER and previous_line.strip() > line.strip():
                # This type of error is potentially a problem with this line or the previous one,
                # so if the error is filtered for one line, report it for the next. This is so that
                # we properly handle patches, for which only modified lines produce errors.
                if not error(line_number - 1, 'build/include_order', 4, 'Alphabetical sorting problem.'):
                    error(line_number, 'build/include_order', 4, 'Alphabetical sorting problem.')

    if error_message:
        if file_extension == 'h':
            error(line_number, 'build/include_order', 4,
                  '%s Should be: alphabetically sorted.' %
                  error_message)
        else:
            error(line_number, 'build/include_order', 4,
                  '%s Should be: config.h, primary header, blank line, and then alphabetically sorted.' %
                  error_message)
def check_language(filename, clean_lines, line_number, file_extension, include_state,
                   file_state, error):
    """Checks rules from the 'C++ language rules' section of cppguide.html.

    Some of these rules are hard to test (function overloading, using
    uint32 inappropriately), but we do the best we can.

    Args:
      filename: The name of the current file.
      clean_lines: A CleansedLines instance containing the file.
      line_number: The number of the line to check.
      file_extension: The extension (without the dot) of the filename.
      include_state: An _IncludeState instance in which the headers are inserted.
      file_state: A _FileState instance which maintains information about
                  the state of things in the file.
      error: The function to call with any errors found.
    """
    # If the line is empty or consists of entirely a comment, no need to
    # check it.
    line = clean_lines.elided[line_number]
    if not line:
        return

    # #include lines get their own dedicated set of checks; nothing below
    # applies to them.
    matched = _RE_PATTERN_INCLUDE.search(line)
    if matched:
        check_include_line(filename, file_extension, clean_lines, line_number, include_state, error)
        return

    # FIXME: figure out if they're using default arguments in fn proto.

    # Check to see if they're using a conversion function cast.
    # I just try to capture the most common basic types, though there are more.
    # Parameterless conversion functions, such as bool(), are allowed as they are
    # probably a member operator declaration or default constructor.
    matched = search(
        r'\b(int|float|double|bool|char|int32|uint32|int64|uint64)\([^)]', line)
    if matched:
        # gMock methods are defined using some variant of MOCK_METHODx(name, type)
        # where type may be float(), int(string), etc. Without context they are
        # virtually indistinguishable from int(x) casts.
        if not match(r'^\s*MOCK_(CONST_)?METHOD\d+(_T)?\(', line):
            error(line_number, 'readability/casting', 4,
                  'Using deprecated casting style. '
                  'Use static_cast<%s>(...) instead' %
                  matched.group(1))

    check_c_style_cast(line_number, line, clean_lines.raw_lines[line_number],
                       'static_cast',
                       r'\((int|float|double|bool|char|u?int(16|32|64))\)',
                       error)
    # This doesn't catch all cases. Consider (const char * const)"hello".
    check_c_style_cast(line_number, line, clean_lines.raw_lines[line_number],
                       'reinterpret_cast', r'\((\w+\s?\*+\s?)\)', error)

    # In addition, we look for people taking the address of a cast. This
    # is dangerous -- casts can assign to temporaries, so the pointer doesn't
    # point where you think.
    if search(
        r'(&\([^)]+\)[\w(])|(&(static|dynamic|reinterpret)_cast\b)', line):
        error(line_number, 'runtime/casting', 4,
              ('Are you taking an address of a cast? '
               'This is dangerous: could be a temp var. '
               'Take the address before doing the cast, rather than after'))

    # Check for people declaring static/global STL strings at the top level.
    # This is dangerous because the C++ language does not guarantee that
    # globals with constructors are initialized before the first access.
    matched = match(
        r'((?:|static +)(?:|const +))string +([a-zA-Z0-9_:]+)\b(.*)',
        line)
    # Make sure it's not a function.
    # Function template specialization looks like: "string foo<Type>(...".
    # Class template definitions look like: "string Foo<Type>::Method(...".
    if matched and not match(r'\s*(<.*>)?(::[a-zA-Z0-9_]+)?\s*\(([^"]|$)',
                             matched.group(3)):
        error(line_number, 'runtime/string', 4,
              'For a static/global string constant, use a C style string instead: '
              '"%schar %s[]".' %
              (matched.group(1), matched.group(2)))

    # Check that we're not using RTTI outside of testing code.
    if search(r'\bdynamic_cast<', line):
        error(line_number, 'runtime/rtti', 5,
              'Do not use dynamic_cast<>. If you need to cast within a class '
              "hierarchy, use static_cast<> to upcast. Google doesn't support "
              'RTTI.')

    # "member_(member_)" in an initializer list initializes a member with
    # itself -- almost certainly a typo for the constructor parameter.
    if search(r'\b([A-Za-z0-9_]*_)\(\1\)', line):
        error(line_number, 'runtime/init', 4,
              'You seem to be initializing a member variable with itself.')

    if file_extension == 'h':
        # FIXME: check that 1-arg constructors are explicit.
        #        How to tell it's a constructor?
        #        (handled in check_for_non_standard_constructs for now)
        pass

    # Check if people are using the verboten C basic types. The only exception
    # we regularly allow is "unsigned short port" for port.
    if search(r'\bshort port\b', line):
        if not search(r'\bunsigned short port\b', line):
            error(line_number, 'runtime/int', 4,
                  'Use "unsigned short" for ports, not "short"')

    # When snprintf is used, the second argument shouldn't be a literal.
    matched = search(r'snprintf\s*\(([^,]*),\s*([0-9]*)\s*,', line)
    if matched:
        error(line_number, 'runtime/printf', 3,
              'If you can, use sizeof(%s) instead of %s as the 2nd arg '
              'to snprintf.' % (matched.group(1), matched.group(2)))

    # Check if some verboten C functions are being used.
    if search(r'\bsprintf\b', line):
        error(line_number, 'runtime/printf', 5,
              'Never use sprintf. Use snprintf instead.')
    matched = search(r'\b(strcpy|strcat)\b', line)
    if matched:
        error(line_number, 'runtime/printf', 4,
              'Almost always, snprintf is better than %s' % matched.group(1))

    if search(r'\bsscanf\b', line):
        error(line_number, 'runtime/printf', 1,
              'sscanf can be ok, but is slow and can overflow buffers.')

    # Check for suspicious usage of "if" like
    # } if (a == b) {
    if search(r'\}\s*if\s*\(', line):
        error(line_number, 'readability/braces', 4,
              'Did you mean "else if"? If not, start a new line for "if".')

    # Check for potential format string bugs like printf(foo).
    # We constrain the pattern not to pick things like DocidForPrintf(foo).
    # Not perfect but it can catch printf(foo.c_str()) and printf(foo->c_str())
    matched = re.search(r'\b((?:string)?printf)\s*\(([\w.\->()]+)\)', line, re.I)
    if matched:
        error(line_number, 'runtime/printf', 4,
              'Potential format string bug. Do %s("%%s", %s) instead.'
              % (matched.group(1), matched.group(2)))

    # Check for potential memset bugs like memset(buf, sizeof(buf), 0).
    matched = search(r'memset\s*\(([^,]*),\s*([^,]*),\s*0\s*\)', line)
    if matched and not match(r"^''|-?[0-9]+|0x[0-9A-Fa-f]$", matched.group(2)):
        error(line_number, 'runtime/memset', 4,
              'Did you mean "memset(%s, 0, %s)"?'
              % (matched.group(1), matched.group(2)))

    # Detect variable-length arrays.
    matched = match(r'\s*(.+::)?(\w+) [a-z]\w*\[(.+)];', line)
    if (matched and matched.group(2) != 'return' and matched.group(2) != 'delete' and
        matched.group(3).find(']') == -1):
        # Split the size using space and arithmetic operators as delimiters.
        # If any of the resulting tokens are not compile time constants then
        # report the error.
        # NOTE(review): the trailing ']' in this split pattern looks
        # unintentional (']' is only special inside a character class) --
        # confirm before changing, since it affects how sizes are tokenized.
        tokens = re.split(r'\s|\+|\-|\*|\/|<<|>>]', matched.group(3))
        is_const = True
        skip_next = False
        for tok in tokens:
            if skip_next:
                skip_next = False
                continue

            if search(r'sizeof\(.+\)', tok):
                continue
            if search(r'arraysize\(\w+\)', tok):
                continue

            tok = tok.lstrip('(')
            tok = tok.rstrip(')')
            if not tok:
                continue
            if match(r'\d+', tok):
                continue
            if match(r'0[xX][0-9a-fA-F]+', tok):
                continue
            if match(r'k[A-Z0-9]\w*', tok):
                continue
            if match(r'(.+::)?k[A-Z0-9]\w*', tok):
                continue
            if match(r'(.+::)?[A-Z][A-Z0-9_]*', tok):
                continue
            # A catch all for tricky sizeof cases, including 'sizeof expression',
            # 'sizeof(*type)', 'sizeof(const type)', 'sizeof(struct StructName)'
            # requires skipping the next token because we split on ' ' and '*'.
            if tok.startswith('sizeof'):
                skip_next = True
                continue
            is_const = False
            break
        if not is_const:
            error(line_number, 'runtime/arrays', 1,
                  'Do not use variable-length arrays. Use an appropriately named '
                  "('k' followed by CamelCase) compile-time constant for the size.")

    # Check for use of unnamed namespaces in header files. Registration
    # macros are typically OK, so we allow use of "namespace {" on lines
    # that end with backslashes.
    if (file_extension == 'h'
        and search(r'\bnamespace\s*{', line)
        and line[-1] != '\\'):
        error(line_number, 'build/namespaces', 4,
              'Do not use unnamed namespaces in header files. See '
              'http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Namespaces'
              ' for more information.')

    # Check for plain bitfields declared without either "signed" or "unsigned".
    # Most compilers treat such bitfields as signed, but there are still compilers like
    # RVCT 4.0 that use unsigned by default.
    matched = re.match(r'\s*((const|mutable)\s+)?(char|(short(\s+int)?)|int|long(\s+(long|int))?)\s+[a-zA-Z_][a-zA-Z0-9_]*\s*:\s*\d+\s*;', line)
    if matched:
        error(line_number, 'runtime/bitfields', 5,
              'Please declare integral type bitfields with either signed or unsigned.')

    check_identifier_name_in_declaration(filename, line_number, line, file_state, error)

    # Check for unsigned int (should be just 'unsigned')
    if search(r'\bunsigned int\b', line):
        error(line_number, 'runtime/unsigned', 1,
              'Omit int when using unsigned')

    # Check that we're not using static_cast<Text*>.
    if search(r'\bstatic_cast<Text\*>', line):
        error(line_number, 'readability/check', 4,
              'Consider using toText helper function in WebCore/dom/Text.h '
              'instead of static_cast<Text*>')
def check_identifier_name_in_declaration(filename, line_number, line, file_state, error):
    """Checks if identifier names contain any underscores.

    As identifiers in libraries we are using have a bunch of
    underscores, we only warn about the declarations of identifiers
    and don't check use of identifiers.

    Args:
      filename: The name of the current file.
      line_number: The number of the line to check.
      line: The line of code to check.
      file_state: A _FileState instance which maintains information about
                  the state of things in the file.
      error: The function to call with any errors found.
    """
    # We don't check a return statement.
    if match(r'\s*(return|delete)\b', line):
        return

    # Basically, a declaration is a type name followed by whitespaces
    # followed by an identifier. The type name can be complicated
    # due to type adjectives and templates. We remove them first to
    # simplify the process to find declarations of identifiers.

    # Convert "long long", "long double", and "long long int" to
    # simple types, but don't remove simple "long".
    line = sub(r'long (long )?(?=long|double|int)', '', line)
    # Convert unsigned/signed types to simple types, too.
    line = sub(r'(unsigned|signed) (?=char|short|int|long)', '', line)
    line = sub(r'\b(inline|using|static|const|volatile|auto|register|extern|typedef|restrict|struct|class|virtual)(?=\W)', '', line)

    # Remove "new" and "new (expr)" to simplify, too.
    line = sub(r'new\s*(\([^)]*\))?', '', line)

    # Remove all template parameters by removing matching < and >.
    # Loop until no templates are removed to remove nested templates.
    while True:
        line, number_of_replacements = subn(r'<([\w\s:]|::)+\s*[*&]*\s*>', '', line)
        if not number_of_replacements:
            break

    # Declarations of local variables can be in condition expressions
    # of control flow statements (e.g., "if (RenderObject* p = o->parent())").
    # We remove the keywords and the first parenthesis.
    #
    # Declarations in "while", "if", and "switch" are different from
    # other declarations in two aspects:
    #
    # - There can be only one declaration between the parentheses.
    #   (i.e., you cannot write "if (int i = 0, j = 1) {}")
    # - The variable must be initialized.
    #   (i.e., you cannot write "if (int i) {}")
    #
    # and we will need different treatments for them.
    line = sub(r'^\s*for\s*\(', '', line)
    line, control_statement = subn(r'^\s*(while|else if|if|switch)\s*\(', '', line)

    # Detect variable and functions.
    type_regexp = r'\w([\w]|\s*[*&]\s*|::)+'
    identifier_regexp = r'(?P<identifier>[\w:]+)'
    maybe_bitfield_regexp = r'(:\s*\d+\s*)?'
    character_after_identifier_regexp = r'(?P<character_after_identifier>[[;()=,])(?!=)'
    declaration_without_type_regexp = r'\s*' + identifier_regexp + r'\s*' + maybe_bitfield_regexp + character_after_identifier_regexp
    declaration_with_type_regexp = r'\s*' + type_regexp + r'\s' + declaration_without_type_regexp
    is_function_arguments = False
    number_of_identifiers = 0
    while True:
        # If we are seeing the first identifier or arguments of a
        # function, there should be a type name before an identifier.
        if not number_of_identifiers or is_function_arguments:
            declaration_regexp = declaration_with_type_regexp
        else:
            declaration_regexp = declaration_without_type_regexp

        matched = match(declaration_regexp, line)
        if not matched:
            return
        identifier = matched.group('identifier')
        character_after_identifier = matched.group('character_after_identifier')

        # If we removed a non-for-control statement, the character after
        # the identifier should be '='. With this rule, we can avoid
        # warning for cases like "if (val & INT_MAX) {".
        if control_statement and character_after_identifier != '=':
            return

        is_function_arguments = is_function_arguments or character_after_identifier == '('

        # Remove "m_" and "s_" to allow them.
        modified_identifier = sub(r'(^|(?<=::))[ms]_', '', identifier)
        if not file_state.is_objective_c() and modified_identifier.find('_') >= 0:
            # Various exceptions to the rule: JavaScript op codes functions, const_iterator.
            # BUG FIX: the gtk clause used to read
            #     modified_identifier.startswith('webkit_') >= 0
            # which compares a bool against 0 and is therefore always true
            # (False >= 0 is True in Python), exempting EVERY underscored
            # identifier in files whose path contains 'gtk'.  It now actually
            # tests the 'webkit_' prefix.
            if (not (filename.find('JavaScriptCore') >= 0 and modified_identifier.find('op_') >= 0)
                and not (filename.find('gtk') >= 0 and modified_identifier.startswith('webkit_'))
                and not modified_identifier.startswith('tst_')
                and not modified_identifier.startswith('webkit_dom_object_')
                and not modified_identifier.startswith('webkit_soup')
                and not modified_identifier.startswith('NPN_')
                and not modified_identifier.startswith('NPP_')
                and not modified_identifier.startswith('NP_')
                and not modified_identifier.startswith('qt_')
                and not modified_identifier.startswith('_q_')
                and not modified_identifier.startswith('cairo_')
                and not modified_identifier.startswith('Ecore_')
                and not modified_identifier.startswith('Eina_')
                and not modified_identifier.startswith('Evas_')
                and not modified_identifier.startswith('Ewk_')
                and not modified_identifier.startswith('cti_')
                and not modified_identifier.find('::qt_') >= 0
                and not modified_identifier.find('::_q_') >= 0
                and not modified_identifier == "const_iterator"
                and not modified_identifier == "vm_throw"
                and not modified_identifier == "DFG_OPERATION"):
                error(line_number, 'readability/naming/underscores', 4, identifier + " is incorrectly named. Don't use underscores in your identifier names.")

        # Check for variables named 'l', these are too easy to confuse with '1' in some fonts
        if modified_identifier == 'l':
            error(line_number, 'readability/naming', 4, identifier + " is incorrectly named. Don't use the single letter 'l' as an identifier name.")

        # There can be only one declaration in non-for-control statements.
        if control_statement:
            return

        # We should continue checking if this is a function
        # declaration because we need to check its arguments.
        # Also, we need to check multiple declarations.
        if character_after_identifier != '(' and character_after_identifier != ',':
            return

        number_of_identifiers += 1
        line = line[matched.end():]
def check_c_style_cast(line_number, line, raw_line, cast_type, pattern,
                       error):
    """Flag C-style casts matching |pattern| and recommend |cast_type|.

    Because the cast pattern also matches sizeof(type) and unnamed function
    parameters, those two constructs are detected and reported here as well.

    Args:
      line_number: The number of the line to check.
      line: The line of code to check.
      raw_line: The raw line of code to check, with comments.
      cast_type: The string for the C++ cast to recommend. This is either
                 reinterpret_cast or static_cast, depending.
      pattern: The regular expression used to find C-style casts.
      error: The function to call with any errors found.
    """
    cast_match = search(pattern, line)
    if cast_match is None:
        return

    # A parenthesized type preceded by 'sizeof' is sizeof(type), not a cast.
    if match(r'.*sizeof\s*$', line[0:cast_match.start(1) - 1]):
        error(line_number, 'runtime/sizeof', 1,
              'Using sizeof(type). Use sizeof(varname) instead if possible')
        return

    rest = line[cast_match.end(0):]

    # What follows the parenthesized type tells function declarations apart
    # from casts:
    #   ')'          -- function pointer as a function argument,
    #                   e.g. void foo(void (*bar)(int));
    #   ';' or '{'   -- plain declaration / possibly a typedef,
    #                   e.g. void foo(int); or void foo(int) const;
    #   '='          -- function pointer assignment,
    #                   e.g. void *(*foo)(int) = ...
    #
    # Right now, this only catches declarations with a single, unnamed
    # argument; it should probably be expanded to handle multiple arguments
    # with some unnamed.
    decl_match = match(r'\s*(\)|=|(const)?\s*(;|\{|throw\(\)))', rest)
    if decl_match is None:
        # Nothing else matched, so this is an actual C-style cast.
        error(line_number, 'readability/casting', 4,
              'Using C-style cast. Use %s<%s>(...) instead' %
              (cast_type, cast_match.group(1)))
        return

    terminator = decl_match.group(3)
    if not terminator or terminator == ';' or raw_line.find('/*') < 0:
        error(line_number, 'readability/function', 3,
              'All parameters should be named in a function')
# Maps an STL header to the template names it declares.  Used by the
# include-what-you-use check below to suggest the missing #include when
# one of these templates is referenced.
_HEADERS_CONTAINING_TEMPLATES = (
    ('<deque>', ('deque',)),
    ('<functional>', ('unary_function', 'binary_function',
                      'plus', 'minus', 'multiplies', 'divides', 'modulus',
                      'negate',
                      'equal_to', 'not_equal_to', 'greater', 'less',
                      'greater_equal', 'less_equal',
                      'logical_and', 'logical_or', 'logical_not',
                      'unary_negate', 'not1', 'binary_negate', 'not2',
                      'bind1st', 'bind2nd',
                      'pointer_to_unary_function',
                      'pointer_to_binary_function',
                      'ptr_fun',
                      'mem_fun_t', 'mem_fun', 'mem_fun1_t', 'mem_fun1_ref_t',
                      'mem_fun_ref_t',
                      'const_mem_fun_t', 'const_mem_fun1_t',
                      'const_mem_fun_ref_t', 'const_mem_fun1_ref_t',
                      'mem_fun_ref',
                      )),
    ('<limits>', ('numeric_limits',)),
    ('<list>', ('list',)),
    ('<map>', ('map', 'multimap',)),
    ('<memory>', ('allocator',)),
    ('<queue>', ('queue', 'priority_queue',)),
    ('<set>', ('set', 'multiset',)),
    ('<stack>', ('stack',)),
    ('<string>', ('char_traits', 'basic_string',)),
    ('<utility>', ('pair',)),
    ('<vector>', ('vector',)),

    # gcc extensions.
    # Note: std::hash is their hash, ::hash is our hash
    ('<hash_map>', ('hash_map', 'hash_multimap',)),
    ('<hash_set>', ('hash_set', 'hash_multiset',)),
    ('<slist>', ('slist',)),
    )

# Templates whose presence is implied by other headers already included,
# so we do not insist on the "canonical" header for them.
_HEADERS_ACCEPTED_BUT_NOT_PROMOTED = {
    # We can trust with reasonable confidence that map gives us pair<>, too.
    'pair<>': ('map', 'multimap', 'hash_map', 'hash_multimap')
}

# Matches any use of 'string' -- the one non-templatized STL type we track.
_RE_PATTERN_STRING = re.compile(r'\bstring\b')
# Patterns for <algorithm> functions.  Each entry is a tuple of
# (compiled pattern, template name, required header).
_re_pattern_algorithm_header = []
for _template in ('copy', 'max', 'min', 'min_element', 'sort', 'swap',
                  'transform'):
    # Match max<type>(..., ...), max(..., ...), but not foo->max, foo.max or
    # type::max().
    _re_pattern_algorithm_header.append(
        (re.compile(r'[^>.]\b' + _template + r'(<.*?>)?\([^\)]'),
         _template,
         '<algorithm>'))

# Patterns derived from _HEADERS_CONTAINING_TEMPLATES above.  Each entry
# is a tuple of (compiled pattern, display name like 'vector<>', header).
_re_pattern_templates = []
for _header, _templates in _HEADERS_CONTAINING_TEMPLATES:
    for _template in _templates:
        _re_pattern_templates.append(
            (re.compile(r'(\<|\b)' + _template + r'\s*\<'),
             _template + '<>',
             _header))
def files_belong_to_same_module(filename_cpp, filename_h):
    """Decide whether a .cpp file and a header belong to one 'module'.

    The concept of a 'module' here is a as follows:
    foo.h, foo-inl.h, foo.cpp, foo_test.cpp and foo_unittest.cpp belong to the
    same 'module' if they are in the same directory.
    some/path/public/xyzzy and some/path/internal/xyzzy are also considered
    to belong to the same module here.

    If the filename_cpp contains a longer path than the filename_h, for example,
    '/absolute/path/to/base/sysinfo.cpp', and this file would include
    'base/sysinfo.h', this function also produces the prefix needed to open the
    header. This is used by the caller of this function to more robustly open the
    header file. We don't have access to the real include paths in this context,
    so we need this guesswork here.

    Known bugs: tools/base/bar.cpp and base/bar.h belong to the same module
    according to this implementation. Because of this, this function gives
    some false positives. This should be sufficiently rare in practice.

    Args:
      filename_cpp: is the path for the .cpp file
      filename_h: is the path for the header path

    Returns:
      Tuple with a bool and a string:
      bool: True if filename_cpp and filename_h belong to the same module.
      string: the additional prefix needed to open the header file.
    """
    if not filename_cpp.endswith('.cpp'):
        return (False, '')
    cpp_base = filename_cpp[:-len('.cpp')]
    # Strip a test suffix so foo_unittest.cpp / foo_test.cpp pair with foo.h.
    for test_suffix in ('_unittest', '_test'):
        if cpp_base.endswith(test_suffix):
            cpp_base = cpp_base[:-len(test_suffix)]
            break
    cpp_base = cpp_base.replace('/public/', '/').replace('/internal/', '/')

    if not filename_h.endswith('.h'):
        return (False, '')
    header_base = filename_h[:-len('.h')]
    if header_base.endswith('-inl'):
        header_base = header_base[:-len('-inl')]
    header_base = header_base.replace('/public/', '/').replace('/internal/', '/')

    if not cpp_base.endswith(header_base):
        return (False, '')
    # Whatever precedes the header path in the .cpp path is the extra
    # prefix the caller must prepend to open the header file.
    return (True, cpp_base[:-len(header_base)])
def update_include_state(filename, include_state, io=codecs):
    """Fill up the include_state with new includes found from the file.

    Args:
      filename: the name of the header to read.
      include_state: an _IncludeState instance in which the headers are inserted.
      io: The io factory to use to read the file. Provided for testability.

    Returns:
      True if a header was successfully added. False otherwise.
    """
    # Allow unit tests to inject an io factory; otherwise honor the caller's
    # |io| argument.  (Previously the fallback was hardcoded to codecs, which
    # silently ignored any io= argument passed in.)
    io = _unit_test_config.get(INCLUDE_IO_INJECTION_KEY, io)
    header_file = None
    try:
        header_file = io.open(filename, 'r', 'utf8', 'replace')
    except IOError:
        # Header could not be opened; report that nothing was added.
        return False
    line_number = 0
    for line in header_file:
        line_number += 1
        clean_line = cleanse_comments(line)
        matched = _RE_PATTERN_INCLUDE.search(clean_line)
        if matched:
            include = matched.group(2)
            # The value formatting is cute, but not really used right now.
            # What matters here is that the key is in include_state.
            include_state.setdefault(include, '%s:%d' % (filename, line_number))
    return True
def check_for_include_what_you_use(filename, clean_lines, include_state, error):
    """Reports for missing stl includes.

    This function will output warnings to make sure you are including the headers
    necessary for the stl containers and functions that you use. We only give one
    reason to include a header. For example, if you use both equal_to<> and
    less<> in a .h file, only one (the latter in the file) of these will be
    reported as a reason to include the <functional>.

    Args:
      filename: The name of the current file.
      clean_lines: A CleansedLines instance containing the file.
      include_state: An _IncludeState instance.
      error: The function to call with any errors found.
    """
    required = {}  # A map of header name to line_number and the template entity.
                   # Example of required: { '<functional>': (1219, 'less<>') }

    for line_number in xrange(clean_lines.num_lines()):
        line = clean_lines.elided[line_number]
        if not line or line[0] == '#':
            continue

        # String is special -- it is a non-templatized type in STL.
        if _RE_PATTERN_STRING.search(line):
            required['<string>'] = (line_number, 'string')

        for pattern, template, header in _re_pattern_algorithm_header:
            if pattern.search(line):
                required[header] = (line_number, template)

        # The following function is just a speed up, no semantics are changed.
        if not '<' in line:  # Reduces the cpu time usage by skipping lines.
            continue

        for pattern, template, header in _re_pattern_templates:
            if pattern.search(line):
                required[header] = (line_number, template)

    # The policy is that if you #include something in foo.h you don't need to
    # include it again in foo.cpp. Here, we will look at possible includes.
    # Let's copy the include_state so it is only messed up within this function.
    include_state = include_state.copy()

    # Did we find the header for this file (if any) and successfully load it?
    header_found = False

    # Use the absolute path so that matching works properly.
    abs_filename = os.path.abspath(filename)

    # For Emacs's flymake.
    # If cpp_style is invoked from Emacs's flymake, a temporary file is generated
    # by flymake and that file name might end with '_flymake.cpp'. In that case,
    # restore original file name here so that the corresponding header file can be
    # found.
    # e.g. If the file name is 'foo_flymake.cpp', we should search for 'foo.h'
    # instead of 'foo_flymake.h'
    abs_filename = re.sub(r'_flymake\.cpp$', '.cpp', abs_filename)

    # include_state is modified during iteration, so we iterate over a copy of
    # the keys.
    for header in include_state.keys():  #NOLINT
        (same_module, common_path) = files_belong_to_same_module(abs_filename, header)
        fullpath = common_path + header
        if same_module and update_include_state(fullpath, include_state):
            header_found = True

    # If we can't find the header file for a .cpp, assume it's because we don't
    # know where to look. In that case we'll give up as we're not sure they
    # didn't include it in the .h file.
    # FIXME: Do a better job of finding .h files so we are confident that
    #        not having the .h file means there isn't one.
    if filename.endswith('.cpp') and not header_found:
        return

    # All the lines have been processed, report the errors found.
    for required_header_unstripped in required:
        template = required[required_header_unstripped][1]
        if template in _HEADERS_ACCEPTED_BUT_NOT_PROMOTED:
            headers = _HEADERS_ACCEPTED_BUT_NOT_PROMOTED[template]
            # Any one of the accepted headers satisfies the requirement.
            if [True for header in headers if header in include_state]:
                continue
        if required_header_unstripped.strip('<>"') not in include_state:
            error(required[required_header_unstripped][0],
                  'build/include_what_you_use', 4,
                  'Add #include ' + required_header_unstripped + ' for ' + template)
def process_line(filename, file_extension,
                 clean_lines, line, include_state, function_state,
                 class_state, file_state, enum_state, error):
    """Processes a single line in the file.

    Runs every per-line checker over the given line, unless the raw line
    carries a NOLINT marker or is an __asm line (both are skipped entirely).

    Args:
      filename: Filename of the file that is being processed.
      file_extension: The extension (dot not included) of the file.
      clean_lines: An array of strings, each representing a line of the file,
                   with comments stripped.
      line: Number of line being processed.
      include_state: An _IncludeState instance in which the headers are inserted.
      function_state: A _FunctionState instance which counts function lines, etc.
      class_state: A _ClassState instance which maintains information about
                   the current stack of nested class declarations being parsed.
      file_state: A _FileState instance which maintains information about
                  the state of things in the file.
      enum_state: A _EnumState instance which maintains an enum declaration
                  state.
      error: A callable to which errors are reported, which takes arguments:
             line number, category, confidence level, and message
    """
    raw_lines = clean_lines.raw_lines
    # Function detection must run even for skipped lines so function_state
    # stays consistent across the file.
    detect_functions(clean_lines, line, function_state, error)
    check_for_function_lengths(clean_lines, line, function_state, error)
    if search(r'\bNOLINT\b', raw_lines[line]):  # ignore nolint lines
        return
    if match(r'\s*\b__asm\b', raw_lines[line]):  # Ignore asm lines as they format differently.
        return
    check_function_definition(filename, file_extension, clean_lines, line, function_state, error)
    check_pass_ptr_usage(clean_lines, line, function_state, error)
    check_for_leaky_patterns(clean_lines, line, function_state, error)
    check_for_multiline_comments_and_strings(clean_lines, line, error)
    check_style(clean_lines, line, file_extension, class_state, file_state, enum_state, error)
    check_language(filename, clean_lines, line, file_extension, include_state,
                   file_state, error)
    check_for_non_standard_constructs(clean_lines, line, class_state, error)
    check_posix_threading(clean_lines, line, error)
    check_invalid_increment(clean_lines, line, error)
def _process_lines(filename, file_extension, lines, error, min_confidence):
    """Perform all lint checks on a whole file, reporting through `error`.

    Args:
      filename: Filename of the file that is being processed.
      file_extension: The extension (dot not included) of the file.
      lines: An array of strings, each representing a line of the file, with the
             last element being empty if the file is termined with a newline.
      error: A callable to which errors are reported, which takes 4 arguments:
    """
    # Pad both ends so line numbers and list indices agree (both 1-based)
    # and so end-of-file handling is uniform.
    padded = (['// marker so line numbers and indices both start at 1'] + lines +
              ['// marker so line numbers end in a known way'])

    include_state = _IncludeState()
    function_state = _FunctionState(min_confidence)
    class_state = _ClassState()

    check_for_copyright(padded, error)

    if file_extension == 'h':
        check_for_header_guard(filename, padded, error)

    remove_multi_line_comments(padded, error)
    clean_lines = CleansedLines(padded)
    file_state = _FileState(clean_lines, file_extension)
    enum_state = _EnumState()

    for line_number in xrange(clean_lines.num_lines()):
        process_line(filename, file_extension, clean_lines, line_number,
                     include_state, function_state, class_state, file_state,
                     enum_state, error)

    class_state.check_finished(error)
    check_for_include_what_you_use(filename, clean_lines, include_state, error)

    # These two checks look at the raw (uncleaned) lines on purpose:
    # encoding and EOF problems are invisible after comment stripping.
    check_for_unicode_replacement_characters(padded, error)
    check_for_new_line_at_eof(padded, error)
class CppChecker(object):

    """Processes C++ lines for checking style."""

    # This list is used to--
    #
    # (1) generate an explicit list of all possible categories,
    # (2) unit test that all checked categories have valid names, and
    # (3) unit test that all categories are getting unit tested.
    #
    categories = set([
        'build/class',
        'build/deprecated',
        'build/endif_comment',
        'build/forward_decl',
        'build/header_guard',
        'build/include',
        'build/include_order',
        'build/include_what_you_use',
        'build/namespaces',
        'build/printf_format',
        'build/storage_class',
        'build/using_std',
        'build/using_namespace',
        'legal/copyright',
        'readability/braces',
        'readability/casting',
        'readability/check',
        'readability/comparison_to_zero',
        'readability/constructors',
        'readability/control_flow',
        'readability/enum_casing',
        'readability/fn_size',
        'readability/function',
        'readability/multiline_comment',
        'readability/multiline_string',
        'readability/parameter_name',
        'readability/naming',
        'readability/naming/underscores',
        'readability/null',
        'readability/pass_ptr',
        'readability/streams',
        'readability/todo',
        'readability/utf8',
        'readability/webkit_export',
        'runtime/arrays',
        'runtime/bitfields',
        'runtime/casting',
        'runtime/ctype_function',
        'runtime/explicit',
        'runtime/init',
        'runtime/int',
        'runtime/invalid_increment',
        'runtime/leaky_pattern',
        'runtime/max_min_macros',
        'runtime/memset',
        'runtime/printf',
        'runtime/printf_format',
        'runtime/references',
        'runtime/rtti',
        'runtime/sizeof',
        'runtime/string',
        'runtime/threadsafe_fn',
        'runtime/unsigned',
        'runtime/virtual',
        'whitespace/blank_line',
        'whitespace/braces',
        'whitespace/comma',
        'whitespace/comments',
        'whitespace/declaration',
        'whitespace/end_of_line',
        'whitespace/ending_newline',
        'whitespace/indent',
        'whitespace/line_length',
        'whitespace/newline',
        'whitespace/operators',
        'whitespace/parens',
        'whitespace/semicolon',
        'whitespace/tab',
        'whitespace/todo',
        ])

    def __init__(self, file_path, file_extension, handle_style_error,
                 min_confidence):
        """Create a CppChecker instance.

        Args:
          file_path: Path of the file to check.
          file_extension: A string that is the file extension, without
                          the leading dot.
          handle_style_error: Callable invoked for each reported style error.
          min_confidence: Minimum confidence level at which to report errors.
        """
        self.file_extension = file_extension
        self.file_path = file_path
        self.handle_style_error = handle_style_error
        self.min_confidence = min_confidence

    # Useful for unit testing.
    def __eq__(self, other):
        """Return whether this CppChecker instance is equal to another."""
        # Bug fix: comparing against a non-CppChecker object used to raise
        # AttributeError.  Returning NotImplemented lets Python fall back to
        # its default comparison (identity), so `checker == "foo"` is False.
        if not isinstance(other, CppChecker):
            return NotImplemented
        return (self.file_extension == other.file_extension and
                self.file_path == other.file_path and
                self.handle_style_error == other.handle_style_error and
                self.min_confidence == other.min_confidence)

    # Useful for unit testing.
    def __ne__(self, other):
        # Python does not automatically deduce __ne__() from __eq__().
        # Propagate NotImplemented so foreign types get the default fallback.
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result

    def check(self, lines):
        """Lint `lines` and report errors through handle_style_error."""
        _process_lines(self.file_path, self.file_extension, lines,
                       self.handle_style_error, self.min_confidence)
# FIXME: Remove this function (requires refactoring unit tests).
def process_file_data(filename, file_extension, lines, error, min_confidence, unit_test_config):
    """Check one file's lines with a temporary unit-test configuration.

    Installs `unit_test_config` into the module-level `_unit_test_config`
    for the duration of the check, then restores it to an empty dict.

    Args:
      filename: Name of the file being checked.
      file_extension: File extension without the leading dot.
      lines: List of the file's lines.
      error: Callable receiving reported style errors.
      min_confidence: Minimum confidence level at which to report errors.
      unit_test_config: Configuration dict consulted by the checks.
    """
    global _unit_test_config
    _unit_test_config = unit_test_config
    # Robustness fix: restore the global even if check() raises, so a
    # failing test cannot leak its configuration into later tests.
    try:
        checker = CppChecker(filename, file_extension, error, min_confidence)
        checker.check(lines)
    finally:
        _unit_test_config = {}
#!/usr/bin/python
import settestpath
# lots of useful util methods for building/tearing down
# test enviroments...
import testutils
from up2date_client import config
import unittest
test_up2date = "etc-sysconfig-rhn/up2date"
class TestConfig(unittest.TestCase):
    """Unit tests for up2date_client.config (Config and NetworkConfig)."""

    def setUp(self):
        # in this stuff, we get weird stuff existing, so restore
        # a config first, then change anything test specifc
        testutils.restoreConfig()
        self.__setupData()

    def __setupData(self):
        pass

    def tearDown(self):
        # Bug fix: this line used to read ``config.cfg == None`` -- a no-op
        # comparison.  The intent (matching setUp's comment about stale
        # state) is to drop the cached module-level config so the next test
        # starts from a freshly initialized one.
        config.cfg = None
        testutils.restoreConfig()

    def testEmptyInit(self):
        "Verify that the class can be created with no arguments"
        cfg = config.initUp2dateConfig(test_up2date)

    def testConfigString(self):
        "Verify that Config loads a string as a string"
        cfg = config.initUp2dateConfig(test_up2date)
        assert isinstance(cfg['systemIdPath'], basestring)

    def testConfigListSingleItem(self):
        "Verify that Config loads a list of one as a list"
        cfg = config.initUp2dateConfig(test_up2date)
        assert type(cfg['pkgSkipList']) == type([])

    def testConfigList(self):
        "Verify that Config loads a list as a list"
        cfg = config.initUp2dateConfig(test_up2date)
        assert type(cfg['disallowConfChanges']) == type([])

    def testConfigBool(self):
        "Verify that Config loads a bool int as a bool"
        cfg = config.initUp2dateConfig(test_up2date)
        assert type(cfg['enableProxy']) == type(1)

    def testConfigSave(self):
        "Verify that Config saves a file without error"
        cfg = config.initUp2dateConfig(test_up2date)
        cfg.save()

    def testConfigSetItem(self):
        "Verify that Config.__setitem__ works"
        cfg = config.initUp2dateConfig(test_up2date)
        cfg['blippyfoobarbazblargh'] = 1
        assert cfg['blippyfoobarbazblargh'] == 1

    def testConfigInfo(self):
        "Verify that Config.into() runs without error"
        cfg = config.initUp2dateConfig(test_up2date)
        blargh = cfg.info('enableProxy')

    def testConfigRuntimeStore(self):
        "Verify that values Config['value'] are set for runtime only and not saved"
        cfg = config.initUp2dateConfig(test_up2date)
        cfg['blippy12345'] = "wantafreehat?"
        cfg.save()
        # cfg is a fairly persistent singleton, blow it awy to get a new referece
        del config.cfg
        cfg2 = config.initUp2dateConfig(test_up2date)
        # if this returns a value, it means we saved the config file...
        assert cfg2['blippy12345'] == None

    def testConfigRuntimeStoreNoDir(self):
        "Verify that saving a file into a non existent dir works"
        # bugzilla: 125179
        cfg = config.initUp2dateConfig(test_up2date)
        cfg['blippy321'] = "blumblim"
        cfg.save()

    def testConfigKeysReturnsAList(self):
        "Verify that Config.keys() returns a list"
        cfg = config.initUp2dateConfig(test_up2date)
        blip = cfg.keys()
        assert type(blip) == type([])

    def testConfigKeys(self):
        "Verify that Config.keys() returns a list with the right stuff"
        cfg = config.initUp2dateConfig(test_up2date)
        blip = cfg.keys()
        assert "enableProxy" in blip

    def testConfigHasKeyDoesntExist(self):
        "Verify that Config.has_key() is correct on non existent keys"
        cfg = config.initUp2dateConfig(test_up2date)
        assert cfg.has_key("234wfj34ruafho34rhkfe") == 0

    def testConfigHasKeyDoesExist(self):
        "Verify that Config.has_key() is correct on existing keys"
        cfg = config.initUp2dateConfig(test_up2date)
        assert cfg.has_key("enableProxy") == 1

    def testConfigHasKeyRuntime(self):
        "Verify that Config.has_key() is correct for runtime keys"
        cfg = config.initUp2dateConfig(test_up2date)
        cfg['runtimekey'] = "blippy"
        assert cfg.has_key('runtimekey') == 1

    def testConfigValues(self):
        "Verify that Config.values() runs without error"
        cfg = config.initUp2dateConfig(test_up2date)
        ret = cfg.values()
        assert type(ret) == type([])

    def testConfigItems(self):
        "Verify that Config.items() runs without error"
        cfg = config.initUp2dateConfig(test_up2date)
        ret = cfg.items()
        assert type(ret) == type([])

    def testConfigSet(self):
        "Verify that Config.set() sets items into the persistent layer"
        cfg = config.initUp2dateConfig(test_up2date)
        cfg.set("permItem", 1)
        assert cfg.stored["permItem"] == 1

    def testConfigSetOverride(self):
        "Verify that Config.set() sets items in the persitent layer, overriding runtime"
        cfg = config.initUp2dateConfig(test_up2date)
        cfg['semiPermItem'] = 1
        cfg.set('semiPermItem', 0)
        assert cfg.stored['semiPermItem'] == 0

    def testConfigLoad(self):
        "Verify that Config.load() works without exception"
        cfg = config.initUp2dateConfig(test_up2date)
        cfg.load("/etc/sysconfig/rhn/up2date")

    def testNetworkConfig(self):
        "Verify that the NetworkConfig class can be created"
        nc = config.NetworkConfig()

    def testNetworkConfigLoad(self):
        "Verify that NetworkConfig.load() runs without error"
        nc = config.NetworkConfig()
        nc.load()

    def testNetworkConfigLoadCorrectness(self):
        "Verify that NetworkConfig.load() runs and gets the right info"
        testutils.setupConfig("fc2-rpmmd-sources-1")
        nc = config.NetworkConfig()
        nc.load()
        assert nc['blargh'] == "blippyfoo"

    def testNetworkConfigLoadCorrectnessOverrides(self):
        "Verify that NetworkConfig.load() runs and overrides the default value"
        testutils.setupConfig("fc2-rpmmd-sources-1")
        nc = config.NetworkConfig()
        nc.load()
        assert nc['serverURL'] == "http://www.hokeypokeyland.com/XMLRPC"
class TestGetProxySetting(unittest.TestCase):
    # Tests for up2dateUtils.getProxySetting().
    #
    # NOTE(review): `up2dateUtils` is referenced below but is never imported
    # anywhere in this module, so these tests would fail with a NameError
    # when run.  Confirm the intended import (presumably something like
    # `from up2date_client import up2dateUtils`).

    def setUp(self):
        # A loaded config plus two proxy spellings: with and without scheme.
        self.cfg = config.initUp2dateConfig(test_up2date)
        self.proxy1 = "http://proxy.company.com:8080"
        self.proxy2 = "proxy.company.com:8080"

    def testHttpSpecified(self):
        "Verify that http:// gets stripped from proxy settings"
        self.cfg['httpProxy'] = self.proxy1
        res = up2dateUtils.getProxySetting()
        assert res == "proxy.company.com:8080"

    def testHttpUnSpecified(self):
        "Verify that proxies with no http:// work correctly"
        self.cfg['httpProxy'] = self.proxy2
        res = up2dateUtils.getProxySetting()
        assert res == "proxy.company.com:8080"
def suite():
    """Assemble and return this module's full test suite."""
    result = unittest.TestSuite()
    # Order matters for reproducible output: config tests first.
    for case in (TestConfig, TestGetProxySetting):
        result.addTest(unittest.makeSuite(case))
    return result
if __name__ == "__main__":
    # Run the whole suite when this module is executed directly.
    unittest.main(defaultTest="suite")
# -*- coding: utf-8 -*-
"""
/***************************************************************************
Name : DB Manager
Description : Database manager plugin for QGIS (Oracle)
Date : Aug 27, 2014
copyright : (C) 2014 by Médéric RIBREUX
email : mederic.ribreux@gmail.com
The content of this file is based on
- PG_Manager by Martin Dobias <wonder.sk@gmail.com> (GPLv2 license)
- DB Manager by Giuseppe Sucameli <brush.tyler@gmail.com> (GPLv2 license)
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from qgis.PyQt.QtWidgets import QApplication
from qgis.core import QgsWkbTypes
from ..info_model import TableInfo, VectorTableInfo, DatabaseInfo
from ..html_elems import HtmlContent, HtmlSection, HtmlParagraph, \
HtmlTable, HtmlTableHeader, HtmlTableCol
# Syntax Highlight for VIEWS/MVIEWS
from pygments import highlight
from pygments.lexers import get_lexer_by_name
from pygments.formatters import HtmlFormatter
class ORDatabaseInfo(DatabaseInfo):
    """Renders connection-level information for an Oracle database."""

    def __init__(self, db):
        self.db = db

    def connectionDetails(self):
        """Return an HtmlTable describing the current connection."""
        tbl = []

        # Host is omitted for local/TNS-style connections with no host set.
        if self.db.connector.host != u"":
            tbl.append((QApplication.translate("DBManagerPlugin", "Host:"),
                        self.db.connector.host))
        tbl.append((QApplication.translate("DBManagerPlugin", "Database:"),
                    self.db.connector.dbname))
        tbl.append((QApplication.translate("DBManagerPlugin", "User:"),
                    self.db.connector.user))
        tbl.append((QApplication.translate("DBManagerPlugin",
                                           "SQLite list tables cache:"),
                    "Enabled" if self.db.connector.hasCache else
                    "Unavailable"))

        return HtmlTable(tbl)

    def spatialInfo(self):
        # Returns a list of HTML fragments, or None (implicitly) when the
        # connector reports no spatial information.
        ret = []

        info = self.db.connector.getSpatialInfo()
        if not info:
            return

        # NOTE(review): the backslash-continued string below makes the line
        # break (and any following indentation) part of the translation
        # source string; kept as-is since changing it would break existing
        # translations.
        tbl = [
            (QApplication.translate("DBManagerPlugin", "Oracle\
Spatial:"),
             info[0])
        ]
        ret.append(HtmlTable(tbl))

        if not self.db.connector.has_geometry_columns:
            ret.append(
                HtmlParagraph(
                    QApplication.translate(
                        "DBManagerPlugin",
                        (u"<warning> ALL_SDO_GEOM_METADATA"
                         u" view doesn't exist!\n"
                         u"This view is essential for many"
                         u"GIS applications for enumeration of tables."))))

        return ret

    def privilegesDetails(self):
        """ find if user can create schemas (CREATE ANY TABLE or something)"""
        # TODO
        return None
class ORTableInfo(TableInfo):
    """Builds the HTML "info" panel for an Oracle table, view or
    materialized view."""

    def __init__(self, table):
        self.table = table
        # Lazily fetch any metadata the table object has not loaded yet.
        if not self.table.objectType:
            self.table.getType()
        if not self.table.comment:
            self.table.getComment()
        if not self.table.estimatedRowCount and not self.table.isView:
            self.table.refreshRowEstimation()
        if not self.table.creationDate:
            self.table.getDates()

    def generalInfo(self):
        """Return HTML fragments with type, owner, row counts, dates,
        privileges and primary-key warnings."""
        ret = []

        # if the estimation is less than 100 rows, try to count them - it
        # shouldn't take long time
        if (not self.table.isView and
                not self.table.rowCount and
                self.table.estimatedRowCount < 100):
            # row count information is not displayed yet, so just block
            # table signals to avoid double refreshing
            # (infoViewer->refreshRowCount->tableChanged->infoViewer)
            self.table.blockSignals(True)
            self.table.refreshRowCount()
            self.table.blockSignals(False)

        relation_type = QApplication.translate(
            "DBManagerPlugin", self.table.objectType)

        tbl = [
            (QApplication.translate("DBManagerPlugin", "Object type:"),
             relation_type),
            (QApplication.translate("DBManagerPlugin", "Owner:"),
             self.table.owner)
        ]
        if self.table.comment:
            tbl.append(
                (QApplication.translate(
                    "DBManagerPlugin",
                    "Comment:"),
                 self.table.comment))

        # Estimated rows
        if not self.table.isView:
            tbl.append(
                (QApplication.translate(
                    "DBManagerPlugin", "Rows (estimation):"),
                 self.table.estimatedRowCount)
            )

        if self.table.rowCount is not None and self.table.rowCount >= 0:
            # Add a real count of rows
            tbl.append(
                (QApplication.translate("DBManagerPlugin", "Rows (counted):"),
                 self.table.rowCount)
            )
        else:
            # Counting may be expensive; offer an action link instead.
            tbl.append(
                (QApplication.translate("DBManagerPlugin", "Rows (counted):"),
                 'Unknown (<a href="action:rows/recount">find out</a>)')
            )

        # Add creation and modification dates
        if self.table.creationDate:
            tbl.append(
                (QApplication.translate("DBManagerPlugin", "Creation Date:"),
                 self.table.creationDate))
        if self.table.modificationDate:
            tbl.append(
                (QApplication.translate(
                    "DBManagerPlugin", "Last Modification Date:"),
                 self.table.modificationDate))

        # privileges
        # has the user access to this schema?
        schema_priv = self.table.database().connector.getSchemaPrivileges(
            self.table.schemaName()) if self.table.schema() else None
        if not schema_priv:
            pass
        elif schema_priv[1] is False:  # no usage privileges on the schema
            # NOTE(review): the adjacent literals below join with no space
            # ("privilegesfor"); kept byte-identical because this is a
            # translation source string.
            tbl.append((QApplication.translate(
                "DBManagerPlugin", "Privileges:"),
                QApplication.translate(
                    "DBManagerPlugin",
                    (u"<warning> This user doesn't have usage privileges"
                     u"for this schema!"))))
        else:
            table_priv = self.table.database().connector.getTablePrivileges(
                (self.table.schemaName(), self.table.name))
            privileges = []
            if table_priv[0]:
                privileges.append("select")
            if table_priv[1]:
                privileges.append("insert")
            if table_priv[2]:
                privileges.append("update")
            if table_priv[3]:
                privileges.append("delete")

            if len(privileges) > 0:
                priv_string = u", ".join(privileges)
            else:
                priv_string = QApplication.translate(
                    "DBManagerPlugin",
                    '<warning> This user has no privileges!')

            tbl.append(
                (QApplication.translate(
                    "DBManagerPlugin", "Privileges:"),
                 priv_string))

        ret.append(HtmlTable(tbl))

        # table_priv is guaranteed to be bound here: it is only read when
        # schema_priv[1] is truthy, i.e. when the else branch above ran.
        if schema_priv and schema_priv[1]:
            if (table_priv[0] and
                    not table_priv[1] and
                    not table_priv[2] and
                    not table_priv[3]):
                ret.append(
                    HtmlParagraph(QApplication.translate(
                        "DBManagerPlugin",
                        "<warning> This user has read-only privileges.")))

        # primary key defined?
        if (not self.table.isView and
                self.table.objectType != u"MATERIALIZED VIEW"):
            pk = [fld for fld in self.table.fields() if fld.primaryKey]
            if len(pk) <= 0:
                ret.append(
                    HtmlParagraph(QApplication.translate(
                        "DBManagerPlugin",
                        "<warning> No primary key defined for this table!")))

        return ret

    def getSpatialInfo(self):
        """Return HTML fragments describing database-wide spatial support.

        Bug fix: this method used ``self.db``, an attribute ORTableInfo
        never defines (``__init__`` stores ``self.table``), so any call
        raised AttributeError.  It now reaches the connector through
        ``self.table.database()``, as every other method of this class does.
        """
        ret = []

        info = self.table.database().connector.getSpatialInfo()
        if not info:
            return

        tbl = [
            (QApplication.translate(
                "DBManagerPlugin", "Library:"), info[0])  # ,
        ]
        ret.append(HtmlTable(tbl))

        if not self.table.database().connector.has_geometry_columns:
            ret.append(HtmlParagraph(
                QApplication.translate(
                    "DBManagerPlugin",
                    (u"<warning> ALL_SDO_GEOM_METADATA table doesn't exist!\n"
                     u"This table is essential for many GIS"
                     u"applications for enumeration of tables."))))

        return ret

    def fieldsDetails(self):
        """Return an HtmlTable listing every column of the table."""
        tbl = []

        # define the table header
        header = (
            "#",
            QApplication.translate("DBManagerPlugin", "Name"),
            QApplication.translate("DBManagerPlugin", "Type"),
            QApplication.translate("DBManagerPlugin", "Length"),
            QApplication.translate("DBManagerPlugin", "Null"),
            QApplication.translate("DBManagerPlugin", "Default"),
            QApplication.translate("DBManagerPlugin", "Comment"))
        tbl.append(HtmlTableHeader(header))

        # add table contents
        for fld in self.table.fields():
            char_max_len = fld.charMaxLen if fld.charMaxLen else ""
            if fld.modifier:
                char_max_len = u"{},{}".format(char_max_len, fld.modifier)
            is_null_txt = "N" if fld.notNull else "Y"

            # make primary key field underlined
            attrs = {"class": "underline"} if fld.primaryKey else None
            name = HtmlTableCol(fld.name, attrs)

            tbl.append(
                (fld.num, name, fld.type2String(), char_max_len,
                 is_null_txt, fld.default2String(), fld.comment))

        return HtmlTable(tbl, {"class": "header"})

    def constraintsDetails(self):
        """Return an HtmlTable of the table's constraints, or None."""
        if not self.table.constraints():
            return None

        tbl = []

        # define the table header
        header = (QApplication.translate("DBManagerPlugin", "Name"),
                  QApplication.translate("DBManagerPlugin", "Type"),
                  QApplication.translate("DBManagerPlugin", "Column"),
                  QApplication.translate("DBManagerPlugin", "Status"),
                  QApplication.translate("DBManagerPlugin", "Validated"),
                  QApplication.translate("DBManagerPlugin", "Generated"),
                  QApplication.translate("DBManagerPlugin", "Check condition"),
                  QApplication.translate("DBManagerPlugin", "Foreign Table"),
                  QApplication.translate("DBManagerPlugin", "Foreign column"),
                  QApplication.translate("DBManagerPlugin", "On Delete"))
        tbl.append(HtmlTableHeader(header))

        # add table contents
        for con in self.table.constraints():
            tbl.append((con.name, con.type2String(), con.column,
                        con.status, con.validated, con.generated,
                        con.checkSource, con.foreignTable,
                        con.foreignKey, con.foreignOnDelete))

        return HtmlTable(tbl, {"class": "header"})

    def indexesDetails(self):
        """Return an HtmlTable of the table's indexes, or None."""
        if not self.table.indexes():
            return None

        tbl = []

        # define the table header
        header = (QApplication.translate("DBManagerPlugin", "Name"),
                  QApplication.translate("DBManagerPlugin", "Column(s)"),
                  QApplication.translate("DBManagerPlugin", "Index Type"),
                  QApplication.translate("DBManagerPlugin", "Status"),
                  QApplication.translate("DBManagerPlugin", "Last analyzed"),
                  QApplication.translate("DBManagerPlugin", "Compression"),
                  QApplication.translate("DBManagerPlugin", "Uniqueness"),
                  QApplication.translate("DBManagerPlugin", "Action"))
        tbl.append(HtmlTableHeader(header))

        # add table contents
        for idx in self.table.indexes():
            # get the fields the index is defined on
            tbl.append((idx.name, idx.column, idx.indexType,
                        idx.status, idx.analyzed, idx.compression,
                        idx.isUnique,
                        (u'<a href="action:index/{}/rebuild">Rebuild'
                         u"""</a>""".format(idx.name))))

        return HtmlTable(tbl, {"class": "header"})

    def triggersDetails(self):
        """Return HTML fragments describing the table's triggers, or None."""
        if not self.table.triggers():
            return None

        ret = []
        tbl = []

        # define the table header
        header = (
            QApplication.translate("DBManagerPlugin", "Name"),
            QApplication.translate("DBManagerPlugin", "Event"),
            QApplication.translate("DBManagerPlugin", "Type"),
            QApplication.translate("DBManagerPlugin", "Enabled"))
        tbl.append(HtmlTableHeader(header))

        # add table contents
        for trig in self.table.triggers():
            # Each trigger row carries "delete" and enable/disable action
            # links handled by the info viewer.
            name = (u"""{0} (<a href="action:trigger/"""
                    u"""{0}/{1}">{1}</a>)""".format(trig.name, "delete"))

            if trig.enabled == u"ENABLED":
                enabled, action = (
                    QApplication.translate("DBManagerPlugin", "Yes"),
                    u"disable")
            else:
                enabled, action = (
                    QApplication.translate("DBManagerPlugin", "No"),
                    "enable")

            txt_enabled = (u"""{0} (<a href="action:trigger/"""
                           u"""{1}/{2}">{2}</a>)""".format(
                               enabled, trig.name, action))

            tbl.append((name, trig.event, trig.type, txt_enabled))

        ret.append(HtmlTable(tbl, {"class": "header"}))

        ret.append(
            HtmlParagraph(
                QApplication.translate(
                    "DBManagerPlugin",
                    (u'<a href="action:triggers/enable">'
                     u'Enable all triggers</a> / '
                     u'<a href="action:triggers/disable">'
                     u'Disable all triggers</a>'))))

        return ret

    def getTableInfo(self):
        """Assemble the ordered list of HTML sections for the info panel."""
        ret = []

        general_info = self.generalInfo()
        if not general_info:
            pass
        else:
            ret.append(
                HtmlSection(
                    QApplication.translate(
                        "DBManagerPlugin", 'General info'),
                    general_info))

        # spatial info
        # NOTE: spatialInfo() is provided by a subclass or base class
        # (e.g. ORVectorTableInfo); ORTableInfo itself defines only
        # getSpatialInfo().
        spatial_info = self.spatialInfo()
        if not spatial_info:
            pass
        else:
            spatial_info = HtmlContent(spatial_info)
            if not spatial_info.hasContents():
                spatial_info = QApplication.translate(
                    "DBManagerPlugin",
                    '<warning> This is not a spatial table.')
            ret.append(
                HtmlSection(
                    self.table.database().connection().typeNameString(),
                    spatial_info))

        # fields
        fields_details = self.fieldsDetails()
        if not fields_details:
            pass
        else:
            ret.append(
                HtmlSection(
                    QApplication.translate(
                        "DBManagerPlugin",
                        'Fields'),
                    fields_details))

        # constraints
        constraints_details = self.constraintsDetails()
        if not constraints_details:
            pass
        else:
            ret.append(
                HtmlSection(
                    QApplication.translate(
                        "DBManagerPlugin",
                        'Constraints'),
                    constraints_details))

        # indexes
        indexes_details = self.indexesDetails()
        if not indexes_details:
            pass
        else:
            ret.append(
                HtmlSection(
                    QApplication.translate(
                        "DBManagerPlugin",
                        'Indexes'),
                    indexes_details))

        # triggers
        triggers_details = self.triggersDetails()
        if not triggers_details:
            pass
        else:
            ret.append(
                HtmlSection(
                    QApplication.translate(
                        "DBManagerPlugin",
                        'Triggers'),
                    triggers_details))

        if self.table.objectType == u"MATERIALIZED VIEW":
            mview_info = self.getMViewInfo()
            ret.append(
                HtmlSection(
                    QApplication.translate(
                        "DBManagerPlugin",
                        'Materialized View information'),
                    mview_info))

        return ret

    def getMViewInfo(self):
        """If the table is a materialized view, grab more
        information...
        """
        ret = []
        tbl = []
        # values is the positional tuple returned by the connector; indices
        # below follow its layout (refresh mode/method, build mode, dates...).
        values = self.table.getMViewInfo()
        tbl.append((QApplication.translate("DBManagerPlugin",
                                           "Refresh Mode:"),
                    values[0]))
        tbl.append((QApplication.translate("DBManagerPlugin",
                                           "Refresh Method:"),
                    values[1]))
        tbl.append((QApplication.translate("DBManagerPlugin",
                                           "Build Mode:"),
                    values[2]))
        tbl.append((QApplication.translate("DBManagerPlugin",
                                           "Last Refresh Date:"),
                    values[5]))
        tbl.append((QApplication.translate("DBManagerPlugin",
                                           "Last Refresh Type:"),
                    values[4]))
        tbl.append((QApplication.translate("DBManagerPlugin",
                                           "Fast Refreshable:"),
                    values[3]))
        tbl.append((QApplication.translate("DBManagerPlugin",
                                           "Staleness:"),
                    values[6]))
        tbl.append((QApplication.translate("DBManagerPlugin",
                                           "Stale since:"),
                    values[7]))
        tbl.append((QApplication.translate("DBManagerPlugin",
                                           "Compile State:"),
                    values[8]))
        tbl.append((QApplication.translate("DBManagerPlugin",
                                           "Use no index:"),
                    values[9]))
        tbl.append((QApplication.translate(
            "DBManagerPlugin",
            (u'<a href="action:mview/refresh">Refresh the materializ'
             u'ed view</a>')),
            u""))
        ret.append(HtmlTable(tbl))
        return ret

    def getViewInfo(self):
        """If the table is a view or a materialized view, add the
        definition of the view.
        """
        if self.table.objectType not in [u"VIEW", u"MATERIALIZED VIEW"]:
            return []

        ret = self.getTableInfo()

        # view definition
        view_def = self.table.getDefinition()

        # Syntax highlight
        lexer = get_lexer_by_name("sql")
        formatter = HtmlFormatter(
            linenos=True, cssclass="source", noclasses=True)
        result = highlight(view_def, lexer, formatter)

        if view_def:
            if self.table.objectType == u"VIEW":
                title = u"View Definition"
            else:
                title = u"Materialized View Definition"
            ret.append(
                HtmlSection(
                    QApplication.translate("DBManagerPlugin", title),
                    result))

        return ret

    def toHtml(self):
        """Render the complete info panel as an HTML string."""
        if self.table.objectType in [u"VIEW", u"MATERIALIZED VIEW"]:
            ret = self.getViewInfo()
        else:
            ret = self.getTableInfo()
        return HtmlContent(ret).toHtml()
class ORVectorTableInfo(ORTableInfo, VectorTableInfo):
    """Info panel for Oracle tables that carry a geometry column."""

    def __init__(self, table):
        VectorTableInfo.__init__(self, table)
        ORTableInfo.__init__(self, table)

    def spatialInfo(self):
        """Return HTML fragments describing the geometry column, SRID,
        extents and spatial index of the layer."""
        ret = []
        if not self.table.geomType:
            # Not a spatial table: nothing to report.
            return ret

        tbl = [
            (QApplication.translate("DBManagerPlugin", "Column:"),
             self.table.geomColumn),
            (QApplication.translate("DBManagerPlugin", "Geometry:"),
             self.table.geomType),
            (QApplication.translate("DBManagerPlugin",
                                    "QGIS Geometry type:"),
             QgsWkbTypes.displayString(self.table.wkbType))
        ]

        # only if we have info from geometry_columns
        if self.table.geomDim:
            tbl.append(
                (QApplication.translate(
                    "DBManagerPlugin",
                    "Dimension:"),
                 self.table.geomDim))

        # -1 is the Oracle convention for an unknown/undefined SRID.
        srid = self.table.srid if self.table.srid else -1
        if srid != -1:
            sr_info = (
                self.table.database().connector.getSpatialRefInfo(srid))
        else:
            sr_info = QApplication.translate("DBManagerPlugin",
                                             "Undefined")
        if sr_info:
            tbl.append(
                (QApplication.translate(
                    "DBManagerPlugin", "Spatial ref:"),
                 u"{0} ({1})".format(sr_info, srid)))

        # estimated extent
        if not self.table.estimatedExtent:
            # estimated extent information is not displayed yet, so just block
            # table signals to avoid double refreshing
            # (infoViewer->refreshEstimatedExtent->tableChanged->infoViewer)
            self.table.blockSignals(True)
            self.table.refreshTableEstimatedExtent()
            self.table.blockSignals(False)

        if self.table.estimatedExtent:
            estimated_extent_str = (u"{:.9f}, {:.9f} - {:.9f}, "
                                    u"{:.9f}".format(
                                        *self.table.estimatedExtent))
            tbl.append(
                (QApplication.translate(
                    "DBManagerPlugin", "Estimated extent:"),
                 estimated_extent_str))

        # extent
        extent_str = None
        if self.table.extent and len(self.table.extent) == 4:
            extent_str = (u"{:.9f}, {:.9f} - {:.9f}, "
                          u"{:.9f}".format(*self.table.extent))
        elif (self.table.rowCount is not None and self.table.rowCount > 0) or (self.table.estimatedRowCount is not None and self.table.estimatedRowCount > 0):
            # Can't calculate an extent on empty layer
            extent_str = QApplication.translate(
                "DBManagerPlugin",
                '(unknown) (<a href="action:extent/get">find out</a>)')

        if extent_str:
            tbl.append(
                (QApplication.translate(
                    "DBManagerPlugin", "Extent:"),
                 extent_str))

        ret.append(HtmlTable(tbl))

        # Handle extent update metadata
        if (self.table.extent and
                self.table.extent != self.table.estimatedExtent and
                self.table.canUpdateMetadata()):
            ret.append(
                HtmlParagraph(
                    QApplication.translate(
                        "DBManagerPlugin",
                        (u'<warning> Metadata extent is different from'
                         u'real extent. You should <a href="action:extent'
                         u'/update">update it</a>!'))))

        # is there an entry in geometry_columns?
        if self.table.geomType.lower() == 'geometry':
            ret.append(
                HtmlParagraph(
                    QApplication.translate(
                        "DBManagerPlugin",
                        "<warning> There is no entry in geometry_columns!")))

        # find out whether the geometry column has spatial index on it
        if not self.table.isView:
            if not self.table.hasSpatialIndex():
                ret.append(
                    HtmlParagraph(
                        QApplication.translate(
                            "DBManagerPlugin",
                            (u'<warning> No spatial index defined (<a href='
                             u'"action:spatialindex/create">'
                             u'create it</a>).'))))

        return ret
/*
* Copyright (c) 2007 Mockito contributors
* This program is made available under the terms of the MIT License.
*/
/**
* Mockito plugins configuration machinery.
*/
package org.mockito.internal.configuration.plugins; | java | github | https://github.com/mockito/mockito | mockito-core/src/main/java/org/mockito/internal/configuration/plugins/package-info.java |
# -*- encoding: utf-8 -*-
from __future__ import unicode_literals
import unittest
from django.conf import settings
from django.core.checks import Error
from django.db import connections, models
from django.test.utils import override_settings
from .base import IsolatedModelsTestCase
def get_max_column_name_length():
    """Return (limit, alias) for the configured database connection with the
    smallest usable column-name length limit.

    Connections that report no limit, or that silently truncate long names,
    are ignored.  Returns (None, None) when no connection enforces a limit.
    """
    shortest = None
    shortest_alias = None

    for db_name in settings.DATABASES:
        conn = connections[db_name]
        name_limit = conn.ops.max_name_length()
        # Skip backends with no limit or ones that truncate instead of erroring.
        if name_limit is None or conn.features.truncates_names:
            continue
        if shortest is None or name_limit < shortest:
            shortest = name_limit
            shortest_alias = db_name

    return (shortest, shortest_alias)
class IndexTogetherTests(IsolatedModelsTestCase):
    # System checks for Meta.index_together (models.E008/E009/E012/E013/E016).

    def test_non_iterable(self):
        class Model(models.Model):
            class Meta:
                index_together = 42

        errors = Model.check()
        expected = [
            Error(
                "'index_together' must be a list or tuple.",
                hint=None,
                obj=Model,
                id='models.E008',
            ),
        ]
        self.assertEqual(errors, expected)

    def test_non_list(self):
        # A plain string is iterable but still rejected as non-list/tuple.
        class Model(models.Model):
            class Meta:
                index_together = 'not-a-list'

        errors = Model.check()
        expected = [
            Error(
                "'index_together' must be a list or tuple.",
                hint=None,
                obj=Model,
                id='models.E008',
            ),
        ]
        self.assertEqual(errors, expected)

    def test_list_containing_non_iterable(self):
        class Model(models.Model):
            class Meta:
                index_together = [('a', 'b'), 42]

        errors = Model.check()
        expected = [
            Error(
                "All 'index_together' elements must be lists or tuples.",
                hint=None,
                obj=Model,
                id='models.E009',
            ),
        ]
        self.assertEqual(errors, expected)

    def test_pointing_to_missing_field(self):
        class Model(models.Model):
            class Meta:
                index_together = [
                    ["missing_field"],
                ]

        errors = Model.check()
        expected = [
            Error(
                "'index_together' refers to the non-existent field 'missing_field'.",
                hint=None,
                obj=Model,
                id='models.E012',
            ),
        ]
        self.assertEqual(errors, expected)

    def test_pointing_to_non_local_field(self):
        # Fields inherited via multi-table inheritance live on the parent
        # table, so they may not appear in the child's index_together.
        class Foo(models.Model):
            field1 = models.IntegerField()

        class Bar(Foo):
            field2 = models.IntegerField()

            class Meta:
                index_together = [
                    ["field2", "field1"],
                ]

        errors = Bar.check()
        expected = [
            Error(
                "'index_together' refers to field 'field1' which is not "
                "local to model 'Bar'.",
                hint=("This issue may be caused by multi-table inheritance."),
                obj=Bar,
                id='models.E016',
            ),
        ]
        self.assertEqual(errors, expected)

    def test_pointing_to_m2m_field(self):
        class Model(models.Model):
            m2m = models.ManyToManyField('self')

            class Meta:
                index_together = [
                    ["m2m"],
                ]

        errors = Model.check()
        expected = [
            Error(
                "'index_together' refers to a ManyToManyField 'm2m', but "
                "ManyToManyFields are not permitted in 'index_together'.",
                hint=None,
                obj=Model,
                id='models.E013',
            ),
        ]
        self.assertEqual(errors, expected)
# unique_together tests are very similar to index_together tests.
class UniqueTogetherTests(IsolatedModelsTestCase):
    # System checks for Meta.unique_together (models.E010/E011/E012/E013).

    def test_non_iterable(self):
        class Model(models.Model):
            class Meta:
                unique_together = 42

        errors = Model.check()
        expected = [
            Error(
                "'unique_together' must be a list or tuple.",
                hint=None,
                obj=Model,
                id='models.E010',
            ),
        ]
        self.assertEqual(errors, expected)

    def test_list_containing_non_iterable(self):
        class Model(models.Model):
            one = models.IntegerField()
            two = models.IntegerField()

            class Meta:
                unique_together = [('a', 'b'), 42]

        errors = Model.check()
        expected = [
            Error(
                "All 'unique_together' elements must be lists or tuples.",
                hint=None,
                obj=Model,
                id='models.E011',
            ),
        ]
        self.assertEqual(errors, expected)

    def test_non_list(self):
        # A plain string is iterable but still rejected as non-list/tuple.
        class Model(models.Model):
            class Meta:
                unique_together = 'not-a-list'

        errors = Model.check()
        expected = [
            Error(
                "'unique_together' must be a list or tuple.",
                hint=None,
                obj=Model,
                id='models.E010',
            ),
        ]
        self.assertEqual(errors, expected)

    def test_valid_model(self):
        class Model(models.Model):
            one = models.IntegerField()
            two = models.IntegerField()

            class Meta:
                # unique_together can be a simple tuple
                unique_together = ('one', 'two')

        errors = Model.check()
        self.assertEqual(errors, [])

    def test_pointing_to_missing_field(self):
        class Model(models.Model):
            class Meta:
                unique_together = [
                    ["missing_field"],
                ]

        errors = Model.check()
        expected = [
            Error(
                "'unique_together' refers to the non-existent field 'missing_field'.",
                hint=None,
                obj=Model,
                id='models.E012',
            ),
        ]
        self.assertEqual(errors, expected)

    def test_pointing_to_m2m(self):
        class Model(models.Model):
            m2m = models.ManyToManyField('self')

            class Meta:
                unique_together = [
                    ["m2m"],
                ]

        errors = Model.check()
        expected = [
            Error(
                "'unique_together' refers to a ManyToManyField 'm2m', but "
                "ManyToManyFields are not permitted in 'unique_together'.",
                hint=None,
                obj=Model,
                id='models.E013',
            ),
        ]
        self.assertEqual(errors, expected)
class FieldNamesTests(IsolatedModelsTestCase):
    """Checks on field names (fields.E001-E003) and on autogenerated
    database column names exceeding the backend's length limit
    (models.E018/E019)."""

    def test_ending_with_underscore(self):
        # Trailing underscores are forbidden for all field kinds; each
        # offending field gets its own fields.E001.
        class Model(models.Model):
            field_ = models.CharField(max_length=10)
            m2m_ = models.ManyToManyField('self')

        errors = Model.check()
        expected = [
            Error(
                'Field names must not end with an underscore.',
                hint=None,
                obj=Model._meta.get_field('field_'),
                id='fields.E001',
            ),
            Error(
                'Field names must not end with an underscore.',
                hint=None,
                obj=Model._meta.get_field('m2m_'),
                id='fields.E001',
            ),
        ]
        self.assertEqual(errors, expected)

    # Evaluated once at class-definition time so the values can parameterize
    # the skipIf decorators below; None means the backend imposes no column
    # name length limit.
    max_column_name_length, column_limit_db_alias = get_max_column_name_length()

    @unittest.skipIf(max_column_name_length is None,
                     "The database doesn't have a column name length limit.")
    def test_M2M_long_column_name(self):
        """
        #13711 -- Model check for long M2M column names when database has
        column name length limits.
        """
        # NOTE(review): allowed_len/db_alias are unused here; the class-level
        # max_column_name_length/column_limit_db_alias are used instead.
        allowed_len, db_alias = get_max_column_name_length()

        # A model with very long name which will be used to set relations to.
        class VeryLongModelNamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz(models.Model):
            title = models.CharField(max_length=11)

        # Main model for which checks will be performed.
        class ModelWithLongField(models.Model):
            m2m_field = models.ManyToManyField(
                VeryLongModelNamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz,
                related_name="rn1"
            )
            m2m_field2 = models.ManyToManyField(
                VeryLongModelNamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz,
                related_name="rn2", through='m2msimple'
            )
            m2m_field3 = models.ManyToManyField(
                VeryLongModelNamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz,
                related_name="rn3",
                through='m2mcomplex'
            )
            fk = models.ForeignKey(
                VeryLongModelNamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz,
                models.CASCADE,
                related_name="rn4",
            )

        # Models used for setting `through` in M2M field.
        class m2msimple(models.Model):
            id2 = models.ForeignKey(ModelWithLongField, models.CASCADE)

        class m2mcomplex(models.Model):
            id2 = models.ForeignKey(ModelWithLongField, models.CASCADE)

        # Attach FKs with names one character beyond the backend limit; the
        # second one supplies db_column explicitly and so should not error.
        long_field_name = 'a' * (self.max_column_name_length + 1)
        models.ForeignKey(
            VeryLongModelNamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz,
            models.CASCADE,
        ).contribute_to_class(m2msimple, long_field_name)

        models.ForeignKey(
            VeryLongModelNamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz,
            models.CASCADE,
            db_column=long_field_name
        ).contribute_to_class(m2mcomplex, long_field_name)

        errors = ModelWithLongField.check()

        # First error because of M2M field set on the model with long name.
        m2m_long_name = "verylongmodelnamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz_id"
        if self.max_column_name_length > len(m2m_long_name):
            # Some databases support names longer than the test name.
            expected = []
        else:
            expected = [
                Error(
                    'Autogenerated column name too long for M2M field "%s". '
                    'Maximum length is "%s" for database "%s".'
                    % (m2m_long_name, self.max_column_name_length, self.column_limit_db_alias),
                    hint=("Use 'through' to create a separate model for "
                          "M2M and then set column_name using 'db_column'."),
                    obj=ModelWithLongField,
                    id='models.E019',
                )
            ]

        # Second error because the FK specified in the `through` model
        # `m2msimple` has an auto-generated name longer than allowed.
        # There will be no check errors in the other M2M because it
        # specifies db_column for the FK in `through` model even if the actual
        # name is longer than the limits of the database.
        expected.append(
            Error(
                'Autogenerated column name too long for M2M field "%s_id". '
                'Maximum length is "%s" for database "%s".'
                % (long_field_name, self.max_column_name_length, self.column_limit_db_alias),
                hint=("Use 'through' to create a separate model for "
                      "M2M and then set column_name using 'db_column'."),
                obj=ModelWithLongField,
                id='models.E019',
            )
        )
        self.assertEqual(errors, expected)

    @unittest.skipIf(max_column_name_length is None,
                     "The database doesn't have a column name length limit.")
    def test_local_field_long_column_name(self):
        """
        #13711 -- Model check for long column names
        when database does not support long names.
        """
        # NOTE(review): allowed_len/db_alias are unused here as well.
        allowed_len, db_alias = get_max_column_name_length()

        class ModelWithLongField(models.Model):
            title = models.CharField(max_length=11)

        long_field_name = 'a' * (self.max_column_name_length + 1)
        long_field_name2 = 'b' * (self.max_column_name_length + 1)
        # The first field relies on the (too long) autogenerated column name;
        # the second overrides it with a short explicit db_column.
        models.CharField(max_length=11).contribute_to_class(ModelWithLongField, long_field_name)
        models.CharField(max_length=11, db_column='vlmn').contribute_to_class(ModelWithLongField, long_field_name2)
        errors = ModelWithLongField.check()
        # Error because of the field with long name added to the model
        # without specifying db_column
        expected = [
            Error(
                'Autogenerated column name too long for field "%s". '
                'Maximum length is "%s" for database "%s".'
                % (long_field_name, self.max_column_name_length, self.column_limit_db_alias),
                hint="Set the column name manually using 'db_column'.",
                obj=ModelWithLongField,
                id='models.E018',
            )
        ]
        self.assertEqual(errors, expected)

    def test_including_separator(self):
        # Field names may not contain the "__" sequence (fields.E002).
        class Model(models.Model):
            some__field = models.IntegerField()

        errors = Model.check()
        expected = [
            Error(
                'Field names must not contain "__".',
                hint=None,
                obj=Model._meta.get_field('some__field'),
                id='fields.E002',
            )
        ]
        self.assertEqual(errors, expected)

    def test_pk(self):
        # 'pk' is reserved (fields.E003).
        class Model(models.Model):
            pk = models.IntegerField()

        errors = Model.check()
        expected = [
            Error(
                "'pk' is a reserved word that cannot be used as a field name.",
                hint=None,
                obj=Model._meta.get_field('pk'),
                id='fields.E003',
            )
        ]
        self.assertEqual(errors, expected)
class ShadowingFieldsTests(IsolatedModelsTestCase):
    """Field name clashes within and across model inheritance
    (models.E005/E006)."""

    def test_field_name_clash_with_child_accessor(self):
        # The explicit `child` field shadows the field of the same name
        # that the check framework attributes to the parent model.
        class Parent(models.Model):
            pass

        class Child(Parent):
            child = models.CharField(max_length=100)

        errors = Child.check()
        expected = [
            Error(
                "The field 'child' clashes with the field "
                "'child' from model 'invalid_models_tests.parent'.",
                hint=None,
                obj=Child._meta.get_field('child'),
                id='models.E006',
            )
        ]
        self.assertEqual(errors, expected)

    def test_multiinheritance_clash(self):
        class Mother(models.Model):
            clash = models.IntegerField()

        class Father(models.Model):
            clash = models.IntegerField()

        class Child(Mother, Father):
            # Here we have two clashes: id (automatic field) and clash,
            # because both parents define these fields.
            pass

        errors = Child.check()
        expected = [
            Error(
                "The field 'id' from parent model "
                "'invalid_models_tests.mother' clashes with the field 'id' "
                "from parent model 'invalid_models_tests.father'.",
                hint=None,
                obj=Child,
                id='models.E005',
            ),
            Error(
                "The field 'clash' from parent model "
                "'invalid_models_tests.mother' clashes with the field 'clash' "
                "from parent model 'invalid_models_tests.father'.",
                hint=None,
                obj=Child,
                id='models.E005',
            )
        ]
        self.assertEqual(errors, expected)

    def test_inheritance_clash(self):
        class Parent(models.Model):
            f_id = models.IntegerField()

        class Target(models.Model):
            # This field doesn't result in a clash.
            f_id = models.IntegerField()

        class Child(Parent):
            # This field clashes with parent "f_id" field.
            f = models.ForeignKey(Target, models.CASCADE)

        errors = Child.check()
        expected = [
            Error(
                "The field 'f' clashes with the field 'f_id' "
                "from model 'invalid_models_tests.parent'.",
                hint=None,
                obj=Child._meta.get_field('f'),
                id='models.E006',
            )
        ]
        self.assertEqual(errors, expected)

    def test_multigeneration_inheritance(self):
        # The clash is detected across several inheritance levels.
        class GrandParent(models.Model):
            clash = models.IntegerField()

        class Parent(GrandParent):
            pass

        class Child(Parent):
            pass

        class GrandChild(Child):
            clash = models.IntegerField()

        errors = GrandChild.check()
        expected = [
            Error(
                "The field 'clash' clashes with the field 'clash' "
                "from model 'invalid_models_tests.grandparent'.",
                hint=None,
                obj=GrandChild._meta.get_field('clash'),
                id='models.E006',
            )
        ]
        self.assertEqual(errors, expected)

    def test_id_clash(self):
        # The FK `fk` already uses the `fk_id` name, so an explicit `fk_id`
        # field on the same model clashes with it.
        class Target(models.Model):
            pass

        class Model(models.Model):
            fk = models.ForeignKey(Target, models.CASCADE)
            fk_id = models.IntegerField()

        errors = Model.check()
        expected = [
            Error(
                "The field 'fk_id' clashes with the field 'fk' from model "
                "'invalid_models_tests.model'.",
                hint=None,
                obj=Model._meta.get_field('fk_id'),
                id='models.E006',
            )
        ]
        self.assertEqual(errors, expected)
class OtherModelTests(IsolatedModelsTestCase):
    """Miscellaneous model checks: reserved 'id' usage (models.E004),
    Meta.ordering (models.E014/E015/E021), Meta.swappable
    (models.E001/E002), and duplicate M2M 'through' relations
    (models.E003)."""

    def test_unique_primary_key(self):
        # An explicit `id` field must set primary_key=True (models.E004).
        invalid_id = models.IntegerField(primary_key=False)

        class Model(models.Model):
            id = invalid_id

        errors = Model.check()
        expected = [
            Error(
                "'id' can only be used as a field name if the field also sets "
                "'primary_key=True'.",
                hint=None,
                obj=Model,
                id='models.E004',
            ),
        ]
        self.assertEqual(errors, expected)

    def test_ordering_non_iterable(self):
        # A bare string is not accepted for Meta.ordering (models.E014).
        class Model(models.Model):
            class Meta:
                ordering = "missing_field"

        errors = Model.check()
        expected = [
            Error(
                "'ordering' must be a tuple or list "
                "(even if you want to order by only one field).",
                hint=None,
                obj=Model,
                id='models.E014',
            ),
        ]
        self.assertEqual(errors, expected)

    def test_just_ordering_no_errors(self):
        class Model(models.Model):
            order = models.PositiveIntegerField()

            class Meta:
                ordering = ['order']

        self.assertEqual(Model.check(), [])

    def test_just_order_with_respect_to_no_errors(self):
        class Question(models.Model):
            pass

        class Answer(models.Model):
            question = models.ForeignKey(Question, models.CASCADE)

            class Meta:
                order_with_respect_to = 'question'

        self.assertEqual(Answer.check(), [])

    def test_ordering_with_order_with_respect_to(self):
        # The two options are mutually exclusive (models.E021).
        class Question(models.Model):
            pass

        class Answer(models.Model):
            question = models.ForeignKey(Question, models.CASCADE)
            order = models.IntegerField()

            class Meta:
                order_with_respect_to = 'question'
                ordering = ['order']

        errors = Answer.check()
        expected = [
            Error(
                "'ordering' and 'order_with_respect_to' cannot be used together.",
                hint=None,
                obj=Answer,
                id='models.E021',
            ),
        ]
        self.assertEqual(errors, expected)

    def test_non_valid(self):
        # Ordering by a ManyToManyField is reported as ordering by a
        # non-existent field (models.E015).
        class RelationModel(models.Model):
            pass

        class Model(models.Model):
            relation = models.ManyToManyField(RelationModel)

            class Meta:
                ordering = ['relation']

        errors = Model.check()
        expected = [
            Error(
                "'ordering' refers to the non-existent field 'relation'.",
                hint=None,
                obj=Model,
                id='models.E015',
            ),
        ]
        self.assertEqual(errors, expected)

    def test_ordering_pointing_to_missing_field(self):
        class Model(models.Model):
            class Meta:
                ordering = ("missing_field",)

        errors = Model.check()
        expected = [
            Error(
                "'ordering' refers to the non-existent field 'missing_field'.",
                hint=None,
                obj=Model,
                id='models.E015',
            )
        ]
        self.assertEqual(errors, expected)

    def test_ordering_pointing_to_missing_foreignkey_field(self):
        # refs #22711 -- an `_id` name that doesn't belong to a ForeignKey
        # must still be reported as non-existent.
        class Model(models.Model):
            missing_fk_field = models.IntegerField()

            class Meta:
                ordering = ("missing_fk_field_id",)

        errors = Model.check()
        expected = [
            Error(
                "'ordering' refers to the non-existent field 'missing_fk_field_id'.",
                hint=None,
                obj=Model,
                id='models.E015',
            )
        ]
        self.assertEqual(errors, expected)

    def test_ordering_pointing_to_existing_foreignkey_field(self):
        # refs #22711 -- ordering by a real FK's attname is allowed.
        class Parent(models.Model):
            pass

        class Child(models.Model):
            parent = models.ForeignKey(Parent, models.CASCADE)

            class Meta:
                ordering = ("parent_id",)

        self.assertFalse(Child.check())

    @override_settings(TEST_SWAPPED_MODEL_BAD_VALUE='not-a-model')
    def test_swappable_missing_app_name(self):
        # A swappable setting value without an app_label raises models.E001.
        class Model(models.Model):
            class Meta:
                swappable = 'TEST_SWAPPED_MODEL_BAD_VALUE'

        errors = Model.check()
        expected = [
            Error(
                "'TEST_SWAPPED_MODEL_BAD_VALUE' is not of the form 'app_label.app_name'.",
                hint=None,
                obj=None,
                id='models.E001',
            ),
        ]
        self.assertEqual(errors, expected)

    @override_settings(TEST_SWAPPED_MODEL_BAD_MODEL='not_an_app.Target')
    def test_swappable_missing_app(self):
        # A swappable setting pointing at an uninstalled app raises
        # models.E002.
        class Model(models.Model):
            class Meta:
                swappable = 'TEST_SWAPPED_MODEL_BAD_MODEL'

        errors = Model.check()
        expected = [
            Error(
                "'TEST_SWAPPED_MODEL_BAD_MODEL' references 'not_an_app.Target', "
                'which has not been installed, or is abstract.',
                hint=None,
                obj=None,
                id='models.E002',
            ),
        ]
        self.assertEqual(errors, expected)

    def test_two_m2m_through_same_relationship(self):
        # Two M2M fields may not share the same intermediate model
        # (models.E003).
        class Person(models.Model):
            pass

        class Group(models.Model):
            primary = models.ManyToManyField(Person,
                through="Membership", related_name="primary")
            secondary = models.ManyToManyField(Person, through="Membership",
                related_name="secondary")

        class Membership(models.Model):
            person = models.ForeignKey(Person, models.CASCADE)
            group = models.ForeignKey(Group, models.CASCADE)

        errors = Group.check()
        expected = [
            Error(
                "The model has two many-to-many relations through "
                "the intermediate model 'invalid_models_tests.Membership'.",
                hint=None,
                obj=Group,
                id='models.E003',
            )
        ]
        self.assertEqual(errors, expected)
/*
* Copyright 2014-2021 JetBrains s.r.o and contributors. Use of this source code is governed by the Apache 2.0 license.
*/
package io.ktor.server.routing
import io.ktor.http.*
/**
 * A parsed routing path made up of a list of segments.
 *
 * [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.server.routing.RoutingPath)
 *
 * @property parts the parsed routing path segments
 */
public class RoutingPath private constructor(public val parts: List<RoutingPathSegment>) {
    public companion object {
        /**
         * A constant for a root routing path.
         *
         * [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.server.routing.RoutingPath.Companion.root)
         */
        public val root: RoutingPath = RoutingPath(listOf())

        /**
         * Parses [path] into a [RoutingPath].
         *
         * Empty segments (e.g. produced by consecutive slashes) are dropped. A segment containing
         * both `{` and `}` is kept verbatim as a [RoutingPathSegmentKind.Parameter]; every other
         * segment is URL-decoded and stored as a [RoutingPathSegmentKind.Constant].
         *
         * [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.server.routing.RoutingPath.Companion.parse)
         */
        public fun parse(path: String): RoutingPath {
            if (path == "/") return root
            val parsed = path
                .split("/")
                .filter(String::isNotEmpty)
                .map { rawSegment ->
                    val isParameter = rawSegment.contains('{') && rawSegment.contains('}')
                    if (isParameter) {
                        RoutingPathSegment(rawSegment, RoutingPathSegmentKind.Parameter)
                    } else {
                        RoutingPathSegment(rawSegment.decodeURLPart(), RoutingPathSegmentKind.Constant)
                    }
                }
            return RoutingPath(parsed)
        }
    }

    /** Joins the segment values back together with `/` (no leading slash). */
    override fun toString(): String = parts.joinToString("/") { it.value }
}
/**
 * A single routing path segment.
 *
 * [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.server.routing.RoutingPathSegment)
 *
 * @property value segment text; URL-decoded for [RoutingPathSegmentKind.Constant] segments,
 *   kept verbatim (including the `{`/`}` markers) for parameter segments
 * @property kind segment kind (constant or parameter)
 */
public data class RoutingPathSegment(val value: String, val kind: RoutingPathSegmentKind)
/**
 * Possible routing path segment kinds, as assigned by [RoutingPath.parse].
 *
 * [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.server.routing.RoutingPathSegmentKind)
 */
public enum class RoutingPathSegmentKind {
    /**
     * A constant path segment that matches literal text.
     *
     * [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.server.routing.RoutingPathSegmentKind.Constant)
     */
    Constant,

    /**
     * A parameter path segment (a wildcard, a named parameter, or both).
     *
     * [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.server.routing.RoutingPathSegmentKind.Parameter)
     */
    Parameter
}
# -*- coding: utf-8 -*-
import datetime
from dateutil.relativedelta import relativedelta
from openerp.addons.account.tests.account_test_classes import AccountingTestCase
class TestMembershipCommon(AccountingTestCase):
def setUp(self):
super(TestMembershipCommon, self).setUp()
# Usefull models
Product = self.env['product.product']
Partner = self.env['res.partner']
# Test memberships
self.membership_1 = Product.create({
'membership': True,
'membership_date_from': datetime.date.today() + relativedelta(days=-2),
'membership_date_to': datetime.date.today() + relativedelta(months=1),
'name': 'Basic Limited',
'type': 'service',
'list_price': 100.00,
})
# Test people
self.partner_1 = Partner.create({
'name': 'Ignasse Reblochon',
})
self.partner_2 = Partner.create({
'name': 'Martine Poulichette',
'free_member': True,
}) | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/env python2
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test spending coinbase transactions.
# The coinbase transaction in block N can appear in block
# N+100... so is valid in the mempool when the best block
# height is N+99.
# This test makes sure coinbase spends that will be mature
# in the next block are accepted into the memory pool,
# but less mature coinbase spends are NOT.
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import os
import shutil
# Create one-input, one-output, no-fee transaction:
class MempoolSpendCoinbaseTest(BitcoinTestFramework):
    """Coinbase maturity mempool test.

    A coinbase created in block N may appear in block N+100, so it becomes
    valid in the mempool once the best block height is N+99. Spends of a
    less mature coinbase must be rejected.
    """

    def setup_network(self):
        # Just need one node for this test
        args = ["-checkmempool", "-debug=mempool"]
        self.nodes = []
        self.nodes.append(start_node(0, self.options.tmpdir, args))
        self.is_network_split = False

    def create_tx(self, from_txid, to_address, amount):
        # Build a one-input, one-output, no-fee raw transaction spending
        # output 0 of `from_txid`, sign it with node 0's wallet, and return
        # the signed hex blob.
        inputs = [{ "txid" : from_txid, "vout" : 0}]
        outputs = { to_address : amount }
        rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
        signresult = self.nodes[0].signrawtransaction(rawtx)
        # Signing must succeed completely, otherwise the input is wrong.
        assert_equal(signresult["complete"], True)
        return signresult["hex"]

    def run_test(self):
        # The framework's initial chain is expected to be 200 blocks tall.
        chain_height = self.nodes[0].getblockcount()
        assert_equal(chain_height, 200)
        node0_address = self.nodes[0].getnewaddress()

        # Coinbase at height chain_height-100+1 ok in mempool, should
        # get mined. Coinbase at height chain_height-100+2 is
        # too immature to spend.
        b = [ self.nodes[0].getblockhash(n) for n in range(101, 103) ]
        coinbase_txids = [ self.nodes[0].getblock(h)['tx'][0] for h in b ]
        spends_raw = [ self.create_tx(txid, node0_address, 50) for txid in coinbase_txids ]

        spend_101_id = self.nodes[0].sendrawtransaction(spends_raw[0])

        # coinbase at height 102 should be too immature to spend
        assert_raises(JSONRPCException, self.nodes[0].sendrawtransaction, spends_raw[1])

        # mempool should have just spend_101:
        assert_equal(self.nodes[0].getrawmempool(), [ spend_101_id ])

        # mine a block, spend_101 should get confirmed
        self.nodes[0].generate(1)
        assert_equal(set(self.nodes[0].getrawmempool()), set())

        # ... and now height 102 can be spent:
        spend_102_id = self.nodes[0].sendrawtransaction(spends_raw[1])
        assert_equal(self.nodes[0].getrawmempool(), [ spend_102_id ])
import decimal
import enum
import json
import unittest
import uuid
from django import forms
from django.contrib.admin.utils import display_for_field
from django.core import checks, exceptions, serializers, validators
from django.core.exceptions import FieldError
from django.core.management import call_command
from django.db import IntegrityError, connection, models
from django.db.models import JSONNull
from django.db.models.expressions import Exists, F, OuterRef, RawSQL, Value
from django.db.models.functions import Cast, JSONObject, Upper
from django.test import TransactionTestCase, override_settings, skipUnlessDBFeature
from django.test.utils import isolate_apps
from django.utils import timezone
from django.utils.deprecation import RemovedInDjango70Warning
from . import PostgreSQLSimpleTestCase, PostgreSQLTestCase, PostgreSQLWidgetTestCase
from .models import (
ArrayEnumModel,
ArrayFieldSubclass,
CharArrayModel,
DateTimeArrayModel,
IntegerArrayModel,
NestedIntegerArrayModel,
NullableIntegerArrayModel,
OtherTypesArrayModel,
PostgreSQLModel,
Tag,
WithSizeArrayModel,
)
try:
from django.contrib.postgres.aggregates import ArrayAgg
from django.contrib.postgres.expressions import ArraySubquery
from django.contrib.postgres.fields import ArrayField
from django.contrib.postgres.fields.array import IndexTransform, SliceTransform
from django.contrib.postgres.forms import (
SimpleArrayField,
SplitArrayField,
SplitArrayWidget,
)
from django.db.backends.postgresql.psycopg_any import NumericRange
except ImportError:
pass
@isolate_apps("postgres_tests")
class BasicTests(PostgreSQLSimpleTestCase):
    """get_FOO_display() for ArrayField choices, where each choice value is
    itself a list/tuple of items (optionally nested and/or grouped)."""

    def test_get_field_display(self):
        class MyModel(PostgreSQLModel):
            field = ArrayField(
                models.CharField(max_length=16),
                choices=[
                    ["Media", [(["vinyl", "cd"], "Audio")]],
                    (("mp3", "mp4"), "Digital"),
                ],
            )

        # Values matching a choice map to its label; values not in choices
        # fall back to the string form of the raw list/tuple.
        tests = (
            (["vinyl", "cd"], "Audio"),
            (("mp3", "mp4"), "Digital"),
            (("a", "b"), "('a', 'b')"),
            (["c", "d"], "['c', 'd']"),
        )
        for value, display in tests:
            with self.subTest(value=value, display=display):
                instance = MyModel(field=value)
                self.assertEqual(instance.get_field_display(), display)

    def test_get_field_display_nested_array(self):
        # Same behavior for an ArrayField whose base field is itself an
        # ArrayField (nested choice values).
        class MyModel(PostgreSQLModel):
            field = ArrayField(
                ArrayField(models.CharField(max_length=16)),
                choices=[
                    [
                        "Media",
                        [([["vinyl", "cd"], ("x",)], "Audio")],
                    ],
                    ((["mp3"], ("mp4",)), "Digital"),
                ],
            )

        tests = (
            ([["vinyl", "cd"], ("x",)], "Audio"),
            ((["mp3"], ("mp4",)), "Digital"),
            ((("a", "b"), ("c",)), "(('a', 'b'), ('c',))"),
            ([["a", "b"], ["c"]], "[['a', 'b'], ['c']]"),
        )
        for value, display in tests:
            with self.subTest(value=value, display=display):
                instance = MyModel(field=value)
                self.assertEqual(instance.get_field_display(), display)
class TestSaveLoad(PostgreSQLTestCase):
    """Round-trip persistence tests: values assigned to ArrayFields survive
    a save()/get() cycle unchanged, and NULL handling matches the field's
    null option."""

    def test_integer(self):
        instance = IntegerArrayModel(field=[1, 2, 3])
        instance.save()
        loaded = IntegerArrayModel.objects.get()
        self.assertEqual(instance.field, loaded.field)

    def test_char(self):
        instance = CharArrayModel(field=["hello", "goodbye"])
        instance.save()
        loaded = CharArrayModel.objects.get()
        self.assertEqual(instance.field, loaded.field)

    def test_dates(self):
        # datetime, date, and time arrays round-trip independently.
        instance = DateTimeArrayModel(
            datetimes=[timezone.now()],
            dates=[timezone.now().date()],
            times=[timezone.now().time()],
        )
        instance.save()
        loaded = DateTimeArrayModel.objects.get()
        self.assertEqual(instance.datetimes, loaded.datetimes)
        self.assertEqual(instance.dates, loaded.dates)
        self.assertEqual(instance.times, loaded.times)

    def test_tuples(self):
        # Tuples are accepted on input; compare element-wise because the
        # loaded value need not be the same sequence type.
        instance = IntegerArrayModel(field=(1,))
        instance.save()
        loaded = IntegerArrayModel.objects.get()
        self.assertSequenceEqual(instance.field, loaded.field)

    def test_integers_passed_as_strings(self):
        # This checks that get_prep_value is deferred properly
        instance = IntegerArrayModel(field=["1"])
        instance.save()
        loaded = IntegerArrayModel.objects.get()
        self.assertEqual(loaded.field, [1])

    def test_default_null(self):
        # A nullable ArrayField left unset loads back as None, not as [].
        instance = NullableIntegerArrayModel()
        instance.save()
        loaded = NullableIntegerArrayModel.objects.get(pk=instance.pk)
        self.assertIsNone(loaded.field)
        self.assertEqual(instance.field, loaded.field)

    def test_null_handling(self):
        # None round-trips on a nullable field but raises IntegrityError on
        # a non-nullable one.
        instance = NullableIntegerArrayModel(field=None)
        instance.save()
        loaded = NullableIntegerArrayModel.objects.get()
        self.assertEqual(instance.field, loaded.field)

        instance = IntegerArrayModel(field=None)
        with self.assertRaises(IntegrityError):
            instance.save()

    def test_nested(self):
        instance = NestedIntegerArrayModel(field=[[1, 2], [3, 4]])
        instance.save()
        loaded = NestedIntegerArrayModel.objects.get()
        self.assertEqual(instance.field, loaded.field)

    def test_other_array_types(self):
        # Arrays of less common base fields: inet, uuid, decimal, a custom
        # field (Tag), json, and numeric range types.
        instance = OtherTypesArrayModel(
            ips=["192.168.0.1", "::1"],
            uuids=[uuid.uuid4()],
            decimals=[decimal.Decimal(1.25), 1.75],
            tags=[Tag(1), Tag(2), Tag(3)],
            json=[{"a": 1}, {"b": 2}],
            int_ranges=[NumericRange(10, 20), NumericRange(30, 40)],
            bigint_ranges=[
                NumericRange(7000000000, 10000000000),
                NumericRange(50000000000, 70000000000),
            ],
        )
        instance.save()
        loaded = OtherTypesArrayModel.objects.get()
        self.assertEqual(instance.ips, loaded.ips)
        self.assertEqual(instance.uuids, loaded.uuids)
        self.assertEqual(instance.decimals, loaded.decimals)
        self.assertEqual(instance.tags, loaded.tags)
        self.assertEqual(instance.json, loaded.json)
        self.assertEqual(instance.int_ranges, loaded.int_ranges)
        self.assertEqual(instance.bigint_ranges, loaded.bigint_ranges)

    def test_null_from_db_value_handling(self):
        # Fields left at their defaults load back with the expected values
        # (None for nullable arrays, [] for the json array's default).
        instance = OtherTypesArrayModel.objects.create(
            ips=["192.168.0.1", "::1"],
            uuids=[uuid.uuid4()],
            decimals=[decimal.Decimal(1.25), 1.75],
            tags=None,
        )
        instance.refresh_from_db()
        self.assertIsNone(instance.tags)
        self.assertEqual(instance.json, [])
        self.assertIsNone(instance.int_ranges)
        self.assertIsNone(instance.bigint_ranges)

    def test_model_set_on_base_field(self):
        # Both the ArrayField and its base_field report the owning model.
        instance = IntegerArrayModel()
        field = instance._meta.get_field("field")
        self.assertEqual(field.model, IntegerArrayModel)
        self.assertEqual(field.base_field.model, IntegerArrayModel)

    def test_nested_nullable_base_field(self):
        # None elements are allowed inside a nested array when the base
        # field is nullable.
        instance = NullableIntegerArrayModel.objects.create(
            field_nested=[[None, None], [None, None]],
        )
        self.assertEqual(instance.field_nested, [[None, None], [None, None]])
class TestQuerying(PostgreSQLTestCase):
@classmethod
def setUpTestData(cls):
cls.objs = NullableIntegerArrayModel.objects.bulk_create(
[
NullableIntegerArrayModel(order=1, field=[1]),
NullableIntegerArrayModel(order=2, field=[2]),
NullableIntegerArrayModel(order=3, field=[2, 3]),
NullableIntegerArrayModel(order=4, field=[20, 30, 40]),
NullableIntegerArrayModel(order=5, field=None),
]
)
def test_bulk_create_with_sized_arrayfield(self):
objs = WithSizeArrayModel.objects.bulk_create(
[
WithSizeArrayModel(field=[1, 2]),
WithSizeArrayModel(field=[3, 4]),
]
)
self.assertEqual(objs[0].field, [1, 2])
self.assertEqual(objs[1].field, [3, 4])
def test_empty_list(self):
NullableIntegerArrayModel.objects.create(field=[])
obj = (
NullableIntegerArrayModel.objects.annotate(
empty_array=models.Value(
[], output_field=ArrayField(models.IntegerField())
),
)
.filter(field=models.F("empty_array"))
.get()
)
self.assertEqual(obj.field, [])
self.assertEqual(obj.empty_array, [])
def test_exact(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__exact=[1]), self.objs[:1]
)
def test_exact_null_only_array(self):
obj = NullableIntegerArrayModel.objects.create(
field=[None], field_nested=[None, None]
)
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__exact=[None]), [obj]
)
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field_nested__exact=[None, None]),
[obj],
)
def test_exact_null_only_nested_array(self):
obj1 = NullableIntegerArrayModel.objects.create(field_nested=[[None, None]])
obj2 = NullableIntegerArrayModel.objects.create(
field_nested=[[None, None], [None, None]],
)
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(
field_nested__exact=[[None, None]],
),
[obj1],
)
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(
field_nested__exact=[[None, None], [None, None]],
),
[obj2],
)
def test_exact_with_expression(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__exact=[Value(1)]),
self.objs[:1],
)
def test_exact_charfield(self):
instance = CharArrayModel.objects.create(field=["text"])
self.assertSequenceEqual(
CharArrayModel.objects.filter(field=["text"]), [instance]
)
def test_exact_nested(self):
instance = NestedIntegerArrayModel.objects.create(field=[[1, 2], [3, 4]])
self.assertSequenceEqual(
NestedIntegerArrayModel.objects.filter(field=[[1, 2], [3, 4]]), [instance]
)
def test_isnull(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__isnull=True), self.objs[-1:]
)
def test_gt(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__gt=[0]), self.objs[:4]
)
def test_lt(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__lt=[2]), self.objs[:1]
)
def test_in(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__in=[[1], [2]]),
self.objs[:2],
)
def test_in_subquery(self):
IntegerArrayModel.objects.create(field=[2, 3])
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(
field__in=IntegerArrayModel.objects.values_list("field", flat=True)
),
self.objs[2:3],
)
@unittest.expectedFailure
def test_in_including_F_object(self):
# This test asserts that Array objects passed to filters can be
# constructed to contain F objects. This currently doesn't work as the
# psycopg mogrify method that generates the ARRAY() syntax is
# expecting literals, not column references (#27095).
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__in=[[models.F("id")]]),
self.objs[:2],
)
def test_in_as_F_object(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__in=[models.F("field")]),
self.objs[:4],
)
def test_contained_by(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__contained_by=[1, 2]),
self.objs[:2],
)
def test_contained_by_including_F_object(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(
field__contained_by=[models.F("order"), 2]
),
self.objs[:3],
)
def test_contains(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__contains=[2]),
self.objs[1:3],
)
def test_contains_subquery(self):
IntegerArrayModel.objects.create(field=[2, 3])
inner_qs = IntegerArrayModel.objects.values_list("field", flat=True)
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__contains=inner_qs[:1]),
self.objs[2:3],
)
inner_qs = IntegerArrayModel.objects.filter(field__contains=OuterRef("field"))
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(Exists(inner_qs)),
self.objs[1:3],
)
def test_contains_including_expression(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(
field__contains=[2, Value(6) / Value(2)],
),
self.objs[2:3],
)
def test_icontains(self):
# Using the __icontains lookup with ArrayField is inefficient.
instance = CharArrayModel.objects.create(field=["FoO"])
self.assertSequenceEqual(
CharArrayModel.objects.filter(field__icontains="foo"), [instance]
)
def test_contains_charfield(self):
# Regression for #22907
self.assertSequenceEqual(
CharArrayModel.objects.filter(field__contains=["text"]), []
)
def test_contained_by_charfield(self):
self.assertSequenceEqual(
CharArrayModel.objects.filter(field__contained_by=["text"]), []
)
def test_overlap_charfield(self):
self.assertSequenceEqual(
CharArrayModel.objects.filter(field__overlap=["text"]), []
)
def test_overlap_charfield_including_expression(self):
obj_1 = CharArrayModel.objects.create(field=["TEXT", "lower text"])
obj_2 = CharArrayModel.objects.create(field=["lower text", "TEXT"])
CharArrayModel.objects.create(field=["lower text", "text"])
self.assertSequenceEqual(
CharArrayModel.objects.filter(
field__overlap=[
Upper(Value("text")),
"other",
]
),
[obj_1, obj_2],
)
def test_overlap_values(self):
qs = NullableIntegerArrayModel.objects.filter(order__lt=3)
self.assertCountEqual(
NullableIntegerArrayModel.objects.filter(
field__overlap=qs.values_list("field"),
),
self.objs[:3],
)
self.assertCountEqual(
NullableIntegerArrayModel.objects.filter(
field__overlap=qs.values("field"),
),
self.objs[:3],
)
def test_lookups_autofield_array(self):
qs = (
NullableIntegerArrayModel.objects.filter(
field__0__isnull=False,
)
.values("field__0")
.annotate(
arrayagg=ArrayAgg("id"),
)
.order_by("field__0")
)
tests = (
("contained_by", [self.objs[1].pk, self.objs[2].pk, 0], [2]),
("contains", [self.objs[2].pk], [2]),
("exact", [self.objs[3].pk], [20]),
("overlap", [self.objs[1].pk, self.objs[3].pk], [2, 20]),
)
for lookup, value, expected in tests:
with self.subTest(lookup=lookup):
self.assertSequenceEqual(
qs.filter(
**{"arrayagg__" + lookup: value},
).values_list("field__0", flat=True),
expected,
)
    @skipUnlessDBFeature("allows_group_by_select_index")
    def test_group_by_order_by_select_index(self):
        # When the backend supports it, grouping/ordering on an array index
        # annotation should use the positional select index ("GROUP BY 1")
        # rather than repeating the expression.
        with self.assertNumQueries(1) as ctx:
            self.assertSequenceEqual(
                NullableIntegerArrayModel.objects.filter(
                    field__0__isnull=False,
                )
                .values("field__0")
                .annotate(arrayagg=ArrayAgg("id"))
                .order_by("field__0"),
                [
                    {"field__0": 1, "arrayagg": [self.objs[0].pk]},
                    {"field__0": 2, "arrayagg": [self.objs[1].pk, self.objs[2].pk]},
                    {"field__0": 20, "arrayagg": [self.objs[3].pk]},
                ],
            )
        # Inspect the captured SQL for the positional references.
        sql = ctx[0]["sql"]
        self.assertIn("GROUP BY 1", sql)
        self.assertIn("ORDER BY 1", sql)
    def test_order_by_arrayagg_index(self):
        # Ordering by an index into an aggregated array (ids__0) is allowed;
        # descending order reverses the per-object groups.
        qs = (
            NullableIntegerArrayModel.objects.values("order")
            .annotate(ids=ArrayAgg("id"))
            .order_by("-ids__0")
        )
        self.assertQuerySetEqual(
            qs, [{"order": obj.order, "ids": [obj.id]} for obj in reversed(self.objs)]
        )
    def test_index(self):
        # field__0 indexes the first element (0-based in the ORM).
        self.assertSequenceEqual(
            NullableIntegerArrayModel.objects.filter(field__0=2), self.objs[1:3]
        )
    def test_index_chained(self):
        # An index transform can be chained with further lookups (__lt).
        self.assertSequenceEqual(
            NullableIntegerArrayModel.objects.filter(field__0__lt=3), self.objs[0:3]
        )
    def test_index_nested(self):
        # Chained index transforms (field__0__0) reach into nested arrays.
        instance = NestedIntegerArrayModel.objects.create(field=[[1, 2], [3, 4]])
        self.assertSequenceEqual(
            NestedIntegerArrayModel.objects.filter(field__0__0=1), [instance]
        )
    @unittest.expectedFailure
    def test_index_used_on_nested_data(self):
        # Known limitation: indexing a nested array with a single index
        # (field__0=[1, 2]) does not match the inner array.
        instance = NestedIntegerArrayModel.objects.create(field=[[1, 2], [3, 4]])
        self.assertSequenceEqual(
            NestedIntegerArrayModel.objects.filter(field__0=[1, 2]), [instance]
        )
    def test_index_transform_expression(self):
        # IndexTransform can be applied to an arbitrary expression (a raw
        # string_to_array result), not only to a column reference.
        expr = RawSQL("string_to_array(%s, ';')", ["1;2"])
        self.assertSequenceEqual(
            NullableIntegerArrayModel.objects.filter(
                field__0=Cast(
                    IndexTransform(1, models.IntegerField, expr),
                    output_field=models.IntegerField(),
                ),
            ),
            self.objs[:1],
        )
    def test_index_annotation(self):
        # Annotating with F("field__1") yields None for rows whose array has
        # no second element (or is NULL).
        qs = NullableIntegerArrayModel.objects.annotate(second=models.F("field__1"))
        self.assertCountEqual(
            qs.values_list("second", flat=True),
            [None, None, None, 3, 30],
        )
    def test_overlap(self):
        # __overlap matches rows sharing at least one element with [1, 2].
        self.assertSequenceEqual(
            NullableIntegerArrayModel.objects.filter(field__overlap=[1, 2]),
            self.objs[0:3],
        )
    def test_len(self):
        # __len transform exposes the array length to further lookups.
        self.assertSequenceEqual(
            NullableIntegerArrayModel.objects.filter(field__len__lte=2), self.objs[0:3]
        )
    def test_len_empty_array(self):
        # An empty array has length 0 (distinct from NULL, which doesn't match).
        obj = NullableIntegerArrayModel.objects.create(field=[])
        self.assertSequenceEqual(
            NullableIntegerArrayModel.objects.filter(field__len=0), [obj]
        )
    def test_slice(self):
        # field__M_N is the slice transform: elements [M:N] compared exactly.
        self.assertSequenceEqual(
            NullableIntegerArrayModel.objects.filter(field__0_1=[2]), self.objs[1:3]
        )
        self.assertSequenceEqual(
            NullableIntegerArrayModel.objects.filter(field__0_2=[2, 3]), self.objs[2:3]
        )
    def test_order_by_index(self):
        # Ordering by field__1 sorts on the second element; rows with no
        # second element (short or NULL arrays) sort last.
        more_objs = (
            NullableIntegerArrayModel.objects.create(field=[1, 637]),
            NullableIntegerArrayModel.objects.create(field=[2, 1]),
            NullableIntegerArrayModel.objects.create(field=[3, -98123]),
            NullableIntegerArrayModel.objects.create(field=[4, 2]),
        )
        self.assertSequenceEqual(
            NullableIntegerArrayModel.objects.order_by("field__1"),
            [
                more_objs[2],
                more_objs[1],
                more_objs[3],
                self.objs[2],
                self.objs[3],
                more_objs[0],
                self.objs[4],
                self.objs[1],
                self.objs[0],
            ],
        )
    @unittest.expectedFailure
    def test_slice_nested(self):
        # Known limitation: slicing after indexing into a nested array
        # (field__0__0_1) does not work.
        instance = NestedIntegerArrayModel.objects.create(field=[[1, 2], [3, 4]])
        self.assertSequenceEqual(
            NestedIntegerArrayModel.objects.filter(field__0__0_1=[1]), [instance]
        )
    def test_slice_transform_expression(self):
        # SliceTransform can wrap an arbitrary expression (raw string_to_array),
        # not only a column reference.
        expr = RawSQL("string_to_array(%s, ';')", ["9;2;3"])
        self.assertSequenceEqual(
            NullableIntegerArrayModel.objects.filter(
                field__0_2=SliceTransform(2, 3, expr)
            ),
            self.objs[2:3],
        )
    def test_slice_annotation(self):
        # Slices can be annotated via F(); NULL arrays annotate as None.
        qs = NullableIntegerArrayModel.objects.annotate(
            first_two=models.F("field__0_2"),
        )
        self.assertCountEqual(
            qs.values_list("first_two", flat=True),
            [None, [1], [2], [2, 3], [20, 30]],
        )
    def test_slicing_of_f_expressions(self):
        # F() expressions over an ArrayField support Python-style slicing and
        # indexing, including composition; the sliced value can be saved back.
        tests = [
            (F("field")[:2], [1, 2]),
            (F("field")[2:], [3, 4]),
            (F("field")[1:3], [2, 3]),
            (F("field")[3], [4]),
            (F("field")[:3][1:], [2, 3]),  # Nested slicing.
            (F("field")[:3][1], [2]),  # Slice then index.
        ]
        for expression, expected in tests:
            with self.subTest(expression=expression, expected=expected):
                instance = IntegerArrayModel.objects.create(field=[1, 2, 3, 4])
                instance.field = expression
                instance.save()
                instance.refresh_from_db()
                self.assertEqual(instance.field, expected)
    def test_slicing_of_f_expressions_with_annotate(self):
        # Sliced F() expressions can be used directly as annotations.
        IntegerArrayModel.objects.create(field=[1, 2, 3])
        annotated = IntegerArrayModel.objects.annotate(
            first_two=F("field")[:2],
            after_two=F("field")[2:],
            random_two=F("field")[1:3],
        ).get()
        self.assertEqual(annotated.first_two, [1, 2])
        self.assertEqual(annotated.after_two, [3])
        self.assertEqual(annotated.random_two, [2, 3])
    def test_slicing_of_f_expressions_with_len(self):
        # __len can be applied to an annotation built from a sliced F();
        # matches rows whose full array is no longer than one element.
        queryset = NullableIntegerArrayModel.objects.annotate(
            subarray=F("field")[:1]
        ).filter(field__len=F("subarray__len"))
        self.assertSequenceEqual(queryset, self.objs[:2])
    def test_usage_in_subquery(self):
        # Array lookups work inside an __in subquery.
        self.assertSequenceEqual(
            NullableIntegerArrayModel.objects.filter(
                id__in=NullableIntegerArrayModel.objects.filter(field__len=3)
            ),
            [self.objs[3]],
        )
    def test_enum_lookup(self):
        # Enum members are usable as elements in a __contains lookup.
        class TestEnum(enum.Enum):
            VALUE_1 = "value_1"
        instance = ArrayEnumModel.objects.create(array_of_enums=[TestEnum.VALUE_1])
        self.assertSequenceEqual(
            ArrayEnumModel.objects.filter(array_of_enums__contains=[TestEnum.VALUE_1]),
            [instance],
        )
    def test_unsupported_lookup(self):
        # Malformed slice/index lookup names raise FieldError with a message
        # that names the offending lookup.
        msg = (
            "Unsupported lookup '0_bar' for ArrayField or join on the field not "
            "permitted."
        )
        with self.assertRaisesMessage(FieldError, msg):
            list(NullableIntegerArrayModel.objects.filter(field__0_bar=[2]))
        msg = (
            "Unsupported lookup '0bar' for ArrayField or join on the field not "
            "permitted."
        )
        with self.assertRaisesMessage(FieldError, msg):
            list(NullableIntegerArrayModel.objects.filter(field__0bar=[2]))
    def test_grouping_by_annotations_with_array_field_param(self):
        # Grouping by an annotation whose expression takes an ArrayField
        # parameter (a Value with array output_field) must not break.
        value = models.Value([1], output_field=ArrayField(models.IntegerField()))
        self.assertEqual(
            NullableIntegerArrayModel.objects.annotate(
                array_length=models.Func(
                    value,
                    1,
                    function="ARRAY_LENGTH",
                    output_field=models.IntegerField(),
                ),
            )
            .values("array_length")
            .annotate(
                count=models.Count("pk"),
            )
            .get()["array_length"],
            1,
        )
    def test_filter_by_array_subquery(self):
        # ArraySubquery results can be aliased and filtered with __len.
        inner_qs = NullableIntegerArrayModel.objects.filter(
            field__len=models.OuterRef("field__len"),
        ).values("field")
        self.assertSequenceEqual(
            NullableIntegerArrayModel.objects.alias(
                same_sized_fields=ArraySubquery(inner_qs),
            ).filter(same_sized_fields__len__gt=1),
            self.objs[0:2],
        )
    def test_annotated_array_subquery(self):
        # ArraySubquery collects a correlated subquery's values into an array
        # annotation on each row.
        inner_qs = NullableIntegerArrayModel.objects.exclude(
            pk=models.OuterRef("pk")
        ).values("order")
        self.assertSequenceEqual(
            NullableIntegerArrayModel.objects.annotate(
                sibling_ids=ArraySubquery(inner_qs),
            )
            .get(order=1)
            .sibling_ids,
            [2, 3, 4, 5],
        )
    def test_group_by_with_annotated_array_subquery(self):
        # An aggregate (Max of __len) over an ArraySubquery annotation works
        # with the implied GROUP BY.
        inner_qs = NullableIntegerArrayModel.objects.exclude(
            pk=models.OuterRef("pk")
        ).values("order")
        self.assertSequenceEqual(
            NullableIntegerArrayModel.objects.annotate(
                sibling_ids=ArraySubquery(inner_qs),
                sibling_count=models.Max("sibling_ids__len"),
            ).values_list("sibling_count", flat=True),
            [len(self.objs) - 1] * len(self.objs),
        )
    def test_annotated_ordered_array_subquery(self):
        # The inner queryset's ordering is preserved in the resulting array.
        inner_qs = NullableIntegerArrayModel.objects.order_by("-order").values("order")
        self.assertSequenceEqual(
            NullableIntegerArrayModel.objects.annotate(
                ids=ArraySubquery(inner_qs),
            )
            .first()
            .ids,
            [5, 4, 3, 2, 1],
        )
    def test_annotated_array_subquery_with_json_objects(self):
        # ArraySubquery over a JSONObject-valued inner queryset yields a list
        # of dicts when fetched.
        inner_qs = NullableIntegerArrayModel.objects.exclude(
            pk=models.OuterRef("pk")
        ).values(json=JSONObject(order="order", field="field"))
        siblings_json = (
            NullableIntegerArrayModel.objects.annotate(
                siblings_json=ArraySubquery(inner_qs),
            )
            .values_list("siblings_json", flat=True)
            .get(order=1)
        )
        self.assertSequenceEqual(
            siblings_json,
            [
                {"field": [2], "order": 2},
                {"field": [2, 3], "order": 3},
                {"field": [20, 30, 40], "order": 4},
                {"field": None, "order": 5},
            ],
        )
class TestDateTimeExactQuerying(PostgreSQLTestCase):
    """Exact-match filtering on arrays of datetimes, dates, and times."""
    @classmethod
    def setUpTestData(cls):
        # One row holding the current moment in all three temporal forms.
        now = timezone.now()
        cls.datetimes = [now]
        cls.dates = [now.date()]
        cls.times = [now.time()]
        cls.objs = [
            DateTimeArrayModel.objects.create(
                datetimes=cls.datetimes, dates=cls.dates, times=cls.times
            ),
        ]
    def test_exact_datetimes(self):
        self.assertSequenceEqual(
            DateTimeArrayModel.objects.filter(datetimes=self.datetimes), self.objs
        )
    def test_exact_dates(self):
        self.assertSequenceEqual(
            DateTimeArrayModel.objects.filter(dates=self.dates), self.objs
        )
    def test_exact_times(self):
        self.assertSequenceEqual(
            DateTimeArrayModel.objects.filter(times=self.times), self.objs
        )
class TestOtherTypesExactQuerying(PostgreSQLTestCase):
    """Exact-match filtering on arrays of IPs, UUIDs, decimals, and enums."""
    @classmethod
    def setUpTestData(cls):
        cls.ips = ["192.168.0.1", "::1"]
        cls.uuids = [uuid.uuid4()]
        # 1.25 is exactly representable in binary, so Decimal(1.25) is precise.
        cls.decimals = [decimal.Decimal(1.25), 1.75]
        cls.tags = [Tag(1), Tag(2), Tag(3)]
        cls.objs = [
            OtherTypesArrayModel.objects.create(
                ips=cls.ips,
                uuids=cls.uuids,
                decimals=cls.decimals,
                tags=cls.tags,
            )
        ]
    def test_exact_ip_addresses(self):
        self.assertSequenceEqual(
            OtherTypesArrayModel.objects.filter(ips=self.ips), self.objs
        )
    def test_exact_uuids(self):
        self.assertSequenceEqual(
            OtherTypesArrayModel.objects.filter(uuids=self.uuids), self.objs
        )
    def test_exact_decimals(self):
        self.assertSequenceEqual(
            OtherTypesArrayModel.objects.filter(decimals=self.decimals), self.objs
        )
    def test_exact_tags(self):
        self.assertSequenceEqual(
            OtherTypesArrayModel.objects.filter(tags=self.tags), self.objs
        )
@isolate_apps("postgres_tests")
class TestChecks(PostgreSQLSimpleTestCase):
    """System checks (model.check()) specific to ArrayField."""
    def test_field_checks(self):
        class MyModel(PostgreSQLModel):
            field = ArrayField(models.CharField(max_length=-1))
        model = MyModel()
        errors = model.check()
        self.assertEqual(len(errors), 1)
        # The inner CharField has a non-positive max_length.
        self.assertEqual(errors[0].id, "postgres.E001")
        self.assertIn("max_length", errors[0].msg)
    def test_base_field_check_kwargs(self):
        # Capture the kwargs ArrayField.check forwards to its base field.
        passed_kwargs = None
        class MyField(models.Field):
            def check(self, **kwargs):
                nonlocal passed_kwargs
                passed_kwargs = kwargs
                return []
        class MyModel(PostgreSQLModel):
            field = ArrayField(MyField())
        self.assertEqual(MyModel.check(databases=["default"]), [])
        self.assertEqual(
            passed_kwargs,
            {"databases": ["default"]},
            "ArrayField.check kwargs should be passed to its base_field.",
        )
    def test_invalid_base_fields(self):
        # Relational base fields (ManyToManyField) are rejected.
        class MyModel(PostgreSQLModel):
            field = ArrayField(
                models.ManyToManyField("postgres_tests.IntegerArrayModel")
            )
        model = MyModel()
        errors = model.check()
        self.assertEqual(len(errors), 1)
        self.assertEqual(errors[0].id, "postgres.E002")
    def test_invalid_default(self):
        # A mutable default ([]) triggers the shared-instance warning.
        class MyModel(PostgreSQLModel):
            field = ArrayField(models.IntegerField(), default=[])
        model = MyModel()
        self.assertEqual(
            model.check(),
            [
                checks.Warning(
                    msg=(
                        "ArrayField default should be a callable instead of an "
                        "instance so that it's not shared between all field "
                        "instances."
                    ),
                    hint="Use a callable instead, e.g., use `list` instead of `[]`.",
                    obj=MyModel._meta.get_field("field"),
                    id="fields.E010",
                )
            ],
        )
    def test_valid_default(self):
        # A callable default passes checks.
        class MyModel(PostgreSQLModel):
            field = ArrayField(models.IntegerField(), default=list)
        model = MyModel()
        self.assertEqual(model.check(), [])
    def test_valid_default_none(self):
        class MyModel(PostgreSQLModel):
            field = ArrayField(models.IntegerField(), default=None)
        model = MyModel()
        self.assertEqual(model.check(), [])
    def test_nested_field_checks(self):
        """
        Nested ArrayFields are permitted.
        """
        class MyModel(PostgreSQLModel):
            field = ArrayField(ArrayField(models.CharField(max_length=-1)))
        model = MyModel()
        errors = model.check()
        self.assertEqual(len(errors), 1)
        # The inner CharField has a non-positive max_length.
        self.assertEqual(errors[0].id, "postgres.E001")
        self.assertIn("max_length", errors[0].msg)
    def test_choices_tuple_list(self):
        # Choices may mix lists/tuples, including grouped choices, with
        # list/tuple array values as the stored option.
        class MyModel(PostgreSQLModel):
            field = ArrayField(
                models.CharField(max_length=16),
                choices=[
                    [
                        "Media",
                        [(["vinyl", "cd"], "Audio"), (("vhs", "dvd"), "Video")],
                    ],
                    (["mp3", "mp4"], "Digital"),
                ],
            )
        self.assertEqual(MyModel._meta.get_field("field").check(), [])
@unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific tests")
class TestMigrations(TransactionTestCase):
    """Deconstruction and migration behavior of ArrayField."""
    available_apps = ["postgres_tests"]
    def test_deconstruct(self):
        field = ArrayField(models.IntegerField())
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(kwargs.keys(), {"base_field"})
        new = ArrayField(*args, **kwargs)
        self.assertEqual(type(new.base_field), type(field.base_field))
        # Reconstructed field must not share the base_field instance.
        self.assertIsNot(new.base_field, field.base_field)
    def test_deconstruct_with_size(self):
        field = ArrayField(models.IntegerField(), size=3)
        name, path, args, kwargs = field.deconstruct()
        new = ArrayField(*args, **kwargs)
        self.assertEqual(new.size, field.size)
    def test_deconstruct_args(self):
        # Base field kwargs (max_length) survive the round trip.
        field = ArrayField(models.CharField(max_length=20))
        name, path, args, kwargs = field.deconstruct()
        new = ArrayField(*args, **kwargs)
        self.assertEqual(new.base_field.max_length, field.base_field.max_length)
    def test_subclass_deconstruct(self):
        # The deconstructed path is the public alias for the stock field, but
        # the concrete module path for subclasses.
        field = ArrayField(models.IntegerField())
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.contrib.postgres.fields.ArrayField")
        field = ArrayFieldSubclass()
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "postgres_tests.models.ArrayFieldSubclass")
    @override_settings(
        MIGRATION_MODULES={
            "postgres_tests": "postgres_tests.array_default_migrations",
        }
    )
    def test_adding_field_with_default(self):
        # See #22962
        # Migrate forward creates the table, migrating to zero drops it.
        table_name = "postgres_tests_integerarraydefaultmodel"
        with connection.cursor() as cursor:
            self.assertNotIn(table_name, connection.introspection.table_names(cursor))
        call_command("migrate", "postgres_tests", verbosity=0)
        with connection.cursor() as cursor:
            self.assertIn(table_name, connection.introspection.table_names(cursor))
        call_command("migrate", "postgres_tests", "zero", verbosity=0)
        with connection.cursor() as cursor:
            self.assertNotIn(table_name, connection.introspection.table_names(cursor))
    @override_settings(
        MIGRATION_MODULES={
            "postgres_tests": "postgres_tests.array_index_migrations",
        }
    )
    def test_adding_arrayfield_with_index(self):
        """
        ArrayField shouldn't have varchar_patterns_ops or text_patterns_ops
        indexes.
        """
        table_name = "postgres_tests_chartextarrayindexmodel"
        call_command("migrate", "postgres_tests", verbosity=0)
        with connection.cursor() as cursor:
            like_constraint_columns_list = [
                v["columns"]
                for k, v in list(
                    connection.introspection.get_constraints(cursor, table_name).items()
                )
                if k.endswith("_like")
            ]
        # Only the CharField should have a LIKE index.
        self.assertEqual(like_constraint_columns_list, [["char2"]])
        # All fields should have regular indexes.
        with connection.cursor() as cursor:
            indexes = [
                c["columns"][0]
                for c in connection.introspection.get_constraints(
                    cursor, table_name
                ).values()
                if c["index"] and len(c["columns"]) == 1
            ]
        self.assertIn("char", indexes)
        self.assertIn("char2", indexes)
        self.assertIn("text", indexes)
        call_command("migrate", "postgres_tests", "zero", verbosity=0)
        with connection.cursor() as cursor:
            self.assertNotIn(table_name, connection.introspection.table_names(cursor))
class TestSerialization(PostgreSQLSimpleTestCase):
    """JSON serializer round trip: array values serialize as a JSON string."""
    # Expected serializer output; None becomes JSON null inside the string.
    test_data = (
        '[{"fields": {"field": "[\\"1\\", \\"2\\", null]"}, '
        '"model": "postgres_tests.integerarraymodel", "pk": null}]'
    )
    def test_dumping(self):
        instance = IntegerArrayModel(field=[1, 2, None])
        data = serializers.serialize("json", [instance])
        self.assertEqual(json.loads(data), json.loads(self.test_data))
    def test_loading(self):
        instance = list(serializers.deserialize("json", self.test_data))[0].object
        self.assertEqual(instance.field, [1, 2, None])
class TestStringSerialization(PostgreSQLSimpleTestCase):
    """Round trip of char arrays, including non-ASCII values and None."""
    field_values = [["Django", "Python", None], ["Джанго", "פייתון", None, "król"]]
    @staticmethod
    def create_json_data(array_field_value):
        # Build serializer-shaped JSON for a single CharArrayModel fixture.
        # ensure_ascii=False keeps non-ASCII characters literal.
        fields = {"field": json.dumps(array_field_value, ensure_ascii=False)}
        return json.dumps(
            [{"model": "postgres_tests.chararraymodel", "pk": None, "fields": fields}]
        )
    def test_encode(self):
        for field_value in self.field_values:
            with self.subTest(field_value=field_value):
                instance = CharArrayModel(field=field_value)
                data = serializers.serialize("json", [instance])
                json_data = self.create_json_data(field_value)
                self.assertEqual(json.loads(data), json.loads(json_data))
    def test_decode(self):
        for field_value in self.field_values:
            with self.subTest(field_value=field_value):
                json_data = self.create_json_data(field_value)
                instance = list(serializers.deserialize("json", json_data))[0].object
                self.assertEqual(instance.field, field_value)
class TestValidation(PostgreSQLSimpleTestCase):
    """ArrayField.clean(): per-item validation, size limits, nesting."""
    def test_unbounded(self):
        # A null item fails validation of the non-null base field; the error
        # message uses 1-based item numbering ("Item 2").
        field = ArrayField(models.IntegerField())
        with self.assertRaises(exceptions.ValidationError) as cm:
            field.clean([1, None], None)
        self.assertEqual(cm.exception.code, "item_invalid")
        self.assertEqual(
            cm.exception.message % cm.exception.params,
            "Item 2 in the array did not validate: This field cannot be null.",
        )
    def test_blank_true(self):
        field = ArrayField(models.IntegerField(blank=True, null=True))
        # This should not raise a validation error
        field.clean([1, None], None)
    def test_with_size(self):
        field = ArrayField(models.IntegerField(), size=3)
        field.clean([1, 2, 3], None)
        msg = "List contains 4 items, it should contain no more than 3."
        with self.assertRaisesMessage(exceptions.ValidationError, msg):
            field.clean([1, 2, 3, 4], None)
    def test_with_size_singular(self):
        # Singular wording: "no more than 1" (not "1 items").
        field = ArrayField(models.IntegerField(), size=1)
        field.clean([1], None)
        msg = "List contains 2 items, it should contain no more than 1."
        with self.assertRaisesMessage(exceptions.ValidationError, msg):
            field.clean([1, 2], None)
    def test_nested_array_mismatch(self):
        # Inner arrays of differing lengths are rejected (PostgreSQL arrays
        # must be rectangular).
        field = ArrayField(ArrayField(models.IntegerField()))
        field.clean([[1, 2], [3, 4]], None)
        with self.assertRaises(exceptions.ValidationError) as cm:
            field.clean([[1, 2], [3, 4, 5]], None)
        self.assertEqual(cm.exception.code, "nested_array_mismatch")
        self.assertEqual(
            cm.exception.messages[0], "Nested arrays must have the same length."
        )
    def test_with_base_field_error_params(self):
        # Base-field error params are augmented with the item position (nth)
        # and offending value.
        field = ArrayField(models.CharField(max_length=2))
        with self.assertRaises(exceptions.ValidationError) as cm:
            field.clean(["abc"], None)
        self.assertEqual(len(cm.exception.error_list), 1)
        exception = cm.exception.error_list[0]
        self.assertEqual(
            exception.message,
            "Item 1 in the array did not validate: Ensure this value has at most 2 "
            "characters (it has 3).",
        )
        self.assertEqual(exception.code, "item_invalid")
        self.assertEqual(
            exception.params,
            {"nth": 1, "value": "abc", "limit_value": 2, "show_value": 3},
        )
    def test_with_validators(self):
        # Custom base-field validators run per item and report the position.
        field = ArrayField(
            models.IntegerField(validators=[validators.MinValueValidator(1)])
        )
        field.clean([1, 2], None)
        with self.assertRaises(exceptions.ValidationError) as cm:
            field.clean([0], None)
        self.assertEqual(len(cm.exception.error_list), 1)
        exception = cm.exception.error_list[0]
        self.assertEqual(
            exception.message,
            "Item 1 in the array did not validate: Ensure this value is greater than "
            "or equal to 1.",
        )
        self.assertEqual(exception.code, "item_invalid")
        self.assertEqual(
            exception.params, {"nth": 1, "value": 0, "limit_value": 1, "show_value": 0}
        )
class TestSimpleFormField(PostgreSQLSimpleTestCase):
    """SimpleArrayField: comma-delimited form input for array values."""
    def test_valid(self):
        field = SimpleArrayField(forms.CharField())
        value = field.clean("a,b,c")
        self.assertEqual(value, ["a", "b", "c"])
    def test_to_python_fail(self):
        # Conversion errors are reported per item with 1-based positions.
        field = SimpleArrayField(forms.IntegerField())
        msg = "Item 1 in the array did not validate: Enter a whole number."
        with self.assertRaisesMessage(exceptions.ValidationError, msg):
            field.clean("a,b,9")
    def test_validate_fail(self):
        # A trailing delimiter yields an empty third item, which a required
        # base field rejects.
        field = SimpleArrayField(forms.CharField(required=True))
        msg = "Item 3 in the array did not validate: This field is required."
        with self.assertRaisesMessage(exceptions.ValidationError, msg):
            field.clean("a,b,")
    def test_validate_fail_base_field_error_params(self):
        # Multiple failing items each produce an error carrying nth/value
        # params from the base field.
        field = SimpleArrayField(forms.CharField(max_length=2))
        with self.assertRaises(exceptions.ValidationError) as cm:
            field.clean("abc,c,defg")
        errors = cm.exception.error_list
        self.assertEqual(len(errors), 2)
        first_error = errors[0]
        self.assertEqual(
            first_error.message,
            "Item 1 in the array did not validate: Ensure this value has at most 2 "
            "characters (it has 3).",
        )
        self.assertEqual(first_error.code, "item_invalid")
        self.assertEqual(
            first_error.params,
            {"nth": 1, "value": "abc", "limit_value": 2, "show_value": 3},
        )
        second_error = errors[1]
        self.assertEqual(
            second_error.message,
            "Item 3 in the array did not validate: Ensure this value has at most 2 "
            "characters (it has 4).",
        )
        self.assertEqual(second_error.code, "item_invalid")
        self.assertEqual(
            second_error.params,
            {"nth": 3, "value": "defg", "limit_value": 2, "show_value": 4},
        )
    def test_validators_fail(self):
        field = SimpleArrayField(forms.RegexField("[a-e]{2}"))
        msg = "Item 1 in the array did not validate: Enter a valid value."
        with self.assertRaisesMessage(exceptions.ValidationError, msg):
            field.clean("a,bc,de")
    def test_delimiter(self):
        # The delimiter is configurable.
        field = SimpleArrayField(forms.CharField(), delimiter="|")
        value = field.clean("a|b|c")
        self.assertEqual(value, ["a", "b", "c"])
    def test_delimiter_with_nesting(self):
        # Nested SimpleArrayFields use their own delimiters at each level.
        field = SimpleArrayField(SimpleArrayField(forms.CharField()), delimiter="|")
        value = field.clean("a,b|c,d")
        self.assertEqual(value, [["a", "b"], ["c", "d"]])
    def test_prepare_value(self):
        # prepare_value joins a list back into delimited text for rendering.
        field = SimpleArrayField(forms.CharField())
        value = field.prepare_value(["a", "b", "c"])
        self.assertEqual(value, "a,b,c")
    def test_max_length(self):
        field = SimpleArrayField(forms.CharField(), max_length=2)
        msg = "List contains 3 items, it should contain no more than 2."
        with self.assertRaisesMessage(exceptions.ValidationError, msg):
            field.clean("a,b,c")
    def test_min_length(self):
        field = SimpleArrayField(forms.CharField(), min_length=4)
        msg = "List contains 3 items, it should contain no fewer than 4."
        with self.assertRaisesMessage(exceptions.ValidationError, msg):
            field.clean("a,b,c")
    def test_min_length_singular(self):
        # Singular wording: "1 item" (not "1 items").
        field = SimpleArrayField(forms.IntegerField(), min_length=2)
        field.clean([1, 2])
        msg = "List contains 1 item, it should contain no fewer than 2."
        with self.assertRaisesMessage(exceptions.ValidationError, msg):
            field.clean([1])
    def test_required(self):
        field = SimpleArrayField(forms.CharField(), required=True)
        msg = "This field is required."
        with self.assertRaisesMessage(exceptions.ValidationError, msg):
            field.clean("")
    def test_model_field_formfield(self):
        # ArrayField.formfield() produces a SimpleArrayField whose base field
        # mirrors the model base field's options.
        model_field = ArrayField(models.CharField(max_length=27))
        form_field = model_field.formfield()
        self.assertIsInstance(form_field, SimpleArrayField)
        self.assertIsInstance(form_field.base_field, forms.CharField)
        self.assertEqual(form_field.base_field.max_length, 27)
    def test_model_field_formfield_size(self):
        # The model field's size maps to the form field's max_length.
        model_field = ArrayField(models.CharField(max_length=27), size=4)
        form_field = model_field.formfield()
        self.assertIsInstance(form_field, SimpleArrayField)
        self.assertEqual(form_field.max_length, 4)
    def test_model_field_choices(self):
        model_field = ArrayField(models.IntegerField(choices=((1, "A"), (2, "B"))))
        form_field = model_field.formfield()
        self.assertEqual(form_field.clean("1,2"), [1, 2])
    def test_already_converted_value(self):
        # Cleaning a list (not a string) passes it through unchanged.
        field = SimpleArrayField(forms.CharField())
        vals = ["a", "b", "c"]
        self.assertEqual(field.clean(vals), vals)
    def test_has_changed(self):
        # String input is compared after conversion, so "1,2" equals [1, 2].
        field = SimpleArrayField(forms.IntegerField())
        self.assertIs(field.has_changed([1, 2], [1, 2]), False)
        self.assertIs(field.has_changed([1, 2], "1,2"), False)
        self.assertIs(field.has_changed([1, 2], "1,2,3"), True)
        self.assertIs(field.has_changed([1, 2], "a,b"), True)
    def test_has_changed_empty(self):
        # All empty representations (None, "", []) are considered equal.
        field = SimpleArrayField(forms.CharField())
        self.assertIs(field.has_changed(None, None), False)
        self.assertIs(field.has_changed(None, ""), False)
        self.assertIs(field.has_changed(None, []), False)
        self.assertIs(field.has_changed([], None), False)
        self.assertIs(field.has_changed([], ""), False)
class TestSplitFormField(PostgreSQLSimpleTestCase):
    """SplitArrayField: one sub-widget per array element (name_0, name_1, …)."""
    def test_valid(self):
        class SplitForm(forms.Form):
            array = SplitArrayField(forms.CharField(), size=3)
        data = {"array_0": "a", "array_1": "b", "array_2": "c"}
        form = SplitForm(data)
        self.assertTrue(form.is_valid())
        self.assertEqual(form.cleaned_data, {"array": ["a", "b", "c"]})
    def test_required(self):
        # An entirely empty required field fails with the generic message.
        class SplitForm(forms.Form):
            array = SplitArrayField(forms.CharField(), required=True, size=3)
        data = {"array_0": "", "array_1": "", "array_2": ""}
        form = SplitForm(data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form.errors, {"array": ["This field is required."]})
    def test_remove_trailing_nulls(self):
        # Only trailing empties are stripped; the interior empty stays.
        class SplitForm(forms.Form):
            array = SplitArrayField(
                forms.CharField(required=False), size=5, remove_trailing_nulls=True
            )
        data = {
            "array_0": "a",
            "array_1": "",
            "array_2": "b",
            "array_3": "",
            "array_4": "",
        }
        form = SplitForm(data)
        self.assertTrue(form.is_valid(), form.errors)
        self.assertEqual(form.cleaned_data, {"array": ["a", "", "b"]})
    def test_remove_trailing_nulls_not_required(self):
        # All-empty input cleans to an empty list when not required.
        class SplitForm(forms.Form):
            array = SplitArrayField(
                forms.CharField(required=False),
                size=2,
                remove_trailing_nulls=True,
                required=False,
            )
        data = {"array_0": "", "array_1": ""}
        form = SplitForm(data)
        self.assertTrue(form.is_valid())
        self.assertEqual(form.cleaned_data, {"array": []})
    def test_required_field(self):
        # A single missing element is reported with its 1-based position.
        class SplitForm(forms.Form):
            array = SplitArrayField(forms.CharField(), size=3)
        data = {"array_0": "a", "array_1": "b", "array_2": ""}
        form = SplitForm(data)
        self.assertFalse(form.is_valid())
        self.assertEqual(
            form.errors,
            {
                "array": [
                    "Item 3 in the array did not validate: This field is required."
                ]
            },
        )
    def test_invalid_integer(self):
        msg = (
            "Item 2 in the array did not validate: Ensure this value is less than or "
            "equal to 100."
        )
        with self.assertRaisesMessage(exceptions.ValidationError, msg):
            SplitArrayField(forms.IntegerField(max_value=100), size=2).clean([0, 101])
    def test_rendering(self):
        # Renders `size` sub-inputs with indexed ids/names.
        class SplitForm(forms.Form):
            array = SplitArrayField(forms.CharField(), size=3)
        self.assertHTMLEqual(
            str(SplitForm()),
            """
            <div>
                <label for="id_array_0">Array:</label>
                <input id="id_array_0" name="array_0" type="text" required>
                <input id="id_array_1" name="array_1" type="text" required>
                <input id="id_array_2" name="array_2" type="text" required>
            </div>
        """,
        )
    def test_invalid_char_length(self):
        # Each over-long item produces its own positioned error message.
        field = SplitArrayField(forms.CharField(max_length=2), size=3)
        with self.assertRaises(exceptions.ValidationError) as cm:
            field.clean(["abc", "c", "defg"])
        self.assertEqual(
            cm.exception.messages,
            [
                "Item 1 in the array did not validate: Ensure this value has at most 2 "
                "characters (it has 3).",
                "Item 3 in the array did not validate: Ensure this value has at most 2 "
                "characters (it has 4).",
            ],
        )
    def test_invalid_char_length_with_remove_trailing_nulls(self):
        # Trailing empties are removed before validation, so only the first
        # item's error is reported.
        field = SplitArrayField(
            forms.CharField(max_length=2, required=False),
            size=3,
            remove_trailing_nulls=True,
        )
        with self.assertRaises(exceptions.ValidationError) as cm:
            field.clean(["abc", "", ""])
        self.assertEqual(
            cm.exception.messages,
            [
                "Item 1 in the array did not validate: Ensure this value has at most 2 "
                "characters (it has 3).",
            ],
        )
    def test_splitarraywidget_value_omitted_from_data(self):
        # Presence of any sub-field key means the value wasn't omitted, so
        # the ModelForm saves the submitted values.
        class Form(forms.ModelForm):
            field = SplitArrayField(forms.IntegerField(), required=False, size=2)
            class Meta:
                model = IntegerArrayModel
                fields = ("field",)
        form = Form({"field_0": "1", "field_1": "2"})
        self.assertEqual(form.errors, {})
        obj = form.save(commit=False)
        self.assertEqual(obj.field, [1, 2])
    def test_splitarrayfield_has_changed(self):
        class Form(forms.ModelForm):
            field = SplitArrayField(forms.IntegerField(), required=False, size=2)
            class Meta:
                model = IntegerArrayModel
                fields = ("field",)
        # (initial model kwargs, submitted data, expected has_changed)
        tests = [
            ({}, {"field_0": "", "field_1": ""}, True),
            ({"field": None}, {"field_0": "", "field_1": ""}, True),
            ({"field": [1]}, {"field_0": "", "field_1": ""}, True),
            ({"field": [1]}, {"field_0": "1", "field_1": "0"}, True),
            ({"field": [1, 2]}, {"field_0": "1", "field_1": "2"}, False),
            ({"field": [1, 2]}, {"field_0": "a", "field_1": "b"}, True),
        ]
        for initial, data, expected_result in tests:
            with self.subTest(initial=initial, data=data):
                obj = IntegerArrayModel(**initial)
                form = Form(data, instance=obj)
                self.assertIs(form.has_changed(), expected_result)
    def test_splitarrayfield_remove_trailing_nulls_has_changed(self):
        # With remove_trailing_nulls, empty submissions compare equal to
        # None/[]/short initial values.
        class Form(forms.ModelForm):
            field = SplitArrayField(
                forms.IntegerField(), required=False, size=2, remove_trailing_nulls=True
            )
            class Meta:
                model = IntegerArrayModel
                fields = ("field",)
        tests = [
            ({}, {"field_0": "", "field_1": ""}, False),
            ({"field": None}, {"field_0": "", "field_1": ""}, False),
            ({"field": []}, {"field_0": "", "field_1": ""}, False),
            ({"field": [1]}, {"field_0": "1", "field_1": ""}, False),
        ]
        for initial, data, expected_result in tests:
            with self.subTest(initial=initial, data=data):
                obj = IntegerArrayModel(**initial)
                form = Form(data, instance=obj)
                self.assertIs(form.has_changed(), expected_result)
class TestSplitFormWidget(PostgreSQLWidgetTestCase):
    """SplitArrayWidget: template context and rendering of sub-widgets."""
    def test_get_context(self):
        # The context contains one subwidget per element with indexed names.
        self.assertEqual(
            SplitArrayWidget(forms.TextInput(), size=2).get_context(
                "name", ["val1", "val2"]
            ),
            {
                "widget": {
                    "name": "name",
                    "is_hidden": False,
                    "required": False,
                    "value": "['val1', 'val2']",
                    "attrs": {},
                    "template_name": "postgres/widgets/split_array.html",
                    "subwidgets": [
                        {
                            "name": "name_0",
                            "is_hidden": False,
                            "required": False,
                            "value": "val1",
                            "attrs": {},
                            "template_name": "django/forms/widgets/text.html",
                            "type": "text",
                        },
                        {
                            "name": "name_1",
                            "is_hidden": False,
                            "required": False,
                            "value": "val2",
                            "attrs": {},
                            "template_name": "django/forms/widgets/text.html",
                            "type": "text",
                        },
                    ],
                }
            },
        )
    def test_checkbox_get_context_attrs(self):
        # Checkbox subwidgets get "checked" only for truthy values; the attr
        # must not leak from one subwidget to the next.
        context = SplitArrayWidget(
            forms.CheckboxInput(),
            size=2,
        ).get_context("name", [True, False])
        self.assertEqual(context["widget"]["value"], "[True, False]")
        self.assertEqual(
            [subwidget["attrs"] for subwidget in context["widget"]["subwidgets"]],
            [{"checked": True}, {}],
        )
    def test_render(self):
        self.check_html(
            SplitArrayWidget(forms.TextInput(), size=2),
            "array",
            None,
            """
            <input name="array_0" type="text">
            <input name="array_1" type="text">
            """,
        )
    def test_render_attrs(self):
        # A supplied id is suffixed per subwidget (foo_0, foo_1).
        self.check_html(
            SplitArrayWidget(forms.TextInput(), size=2),
            "array",
            ["val1", "val2"],
            attrs={"id": "foo"},
            html=("""
            <input id="foo_0" name="array_0" type="text" value="val1">
            <input id="foo_1" name="array_1" type="text" value="val2">
            """),
        )
    def test_value_omitted_from_data(self):
        # The value counts as present if any sub-field key is in the data.
        widget = SplitArrayWidget(forms.TextInput(), size=2)
        self.assertIs(widget.value_omitted_from_data({}, {}, "field"), True)
        self.assertIs(
            widget.value_omitted_from_data({"field_0": "value"}, {}, "field"), False
        )
        self.assertIs(
            widget.value_omitted_from_data({"field_1": "value"}, {}, "field"), False
        )
        self.assertIs(
            widget.value_omitted_from_data(
                {"field_0": "value", "field_1": "value"}, {}, "field"
            ),
            False,
        )
class TestAdminUtils(PostgreSQLTestCase):
    """Admin display_for_field rendering of ArrayField values."""
    # Placeholder the admin shows for values with no display representation.
    empty_value = "-empty-"
    def test_array_display_for_field(self):
        # Without choices, elements are joined with ", ".
        array_field = ArrayField(models.IntegerField())
        display_value = display_for_field(
            [1, 2],
            array_field,
            self.empty_value,
        )
        self.assertEqual(display_value, "1, 2")
    def test_array_with_choices_display_for_field(self):
        # With choices, the whole array is matched against choice values;
        # unmatched arrays fall back to the empty value.
        array_field = ArrayField(
            models.IntegerField(),
            choices=[
                ([1, 2, 3], "1st choice"),
                ([1, 2], "2nd choice"),
            ],
        )
        display_value = display_for_field(
            [1, 2],
            array_field,
            self.empty_value,
        )
        self.assertEqual(display_value, "2nd choice")
        display_value = display_for_field(
            [99, 99],
            array_field,
            self.empty_value,
        )
        self.assertEqual(display_value, self.empty_value)
class TestJSONFieldQuerying(PostgreSQLTestCase):
    """SQL NULL vs JSON null semantics when querying arrays of JSON values."""
    def test_saving_and_querying_for_sql_null(self):
        # Python None in a JSON array is stored/queried as SQL NULL via
        # __isnull; plain `= None` is deprecated (see below).
        obj = OtherTypesArrayModel.objects.create(json=[None, None])
        self.assertSequenceEqual(
            OtherTypesArrayModel.objects.filter(json__1__isnull=True), [obj]
        )
        # RemovedInDjango70Warning.
        msg = (
            "Using None as the right-hand side of an exact lookup on JSONField to mean "
            "JSON scalar 'null' is deprecated. Use JSONNull() instead (or use the "
            "__isnull lookup if you meant SQL NULL)."
        )
        with self.assertWarnsMessage(RemovedInDjango70Warning, msg):
            # RemovedInDjango70Warning: deindent, and replace [] with [obj].
            self.assertSequenceEqual(
                OtherTypesArrayModel.objects.filter(json__1=None), []
            )
    def test_saving_and_querying_for_json_null(self):
        # JSONNull() matches the JSON null scalar but NOT SQL NULL.
        obj = OtherTypesArrayModel.objects.create(json=[JSONNull(), JSONNull()])
        self.assertSequenceEqual(
            OtherTypesArrayModel.objects.filter(json__1=JSONNull()), [obj]
        )
        self.assertSequenceEqual(
            OtherTypesArrayModel.objects.filter(json__1__isnull=True), []
        )
    def test_saving_and_querying_for_nested_json_nulls(self):
        # Nested None (inside an inner list) is a JSON null, so `= None`
        # matches it while __isnull (SQL NULL) does not.
        obj = OtherTypesArrayModel.objects.create(json=[[None, 1], [None, 2]])
        self.assertSequenceEqual(
            OtherTypesArrayModel.objects.filter(json__1__0=None), [obj]
        )
        self.assertSequenceEqual(
            OtherTypesArrayModel.objects.filter(json__1__0__isnull=True), []
        )
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.serialization;
import org.apache.kafka.common.errors.SerializationException;
import org.apache.kafka.common.header.Headers;
import java.nio.ByteBuffer;
public class FloatDeserializer implements Deserializer<Float> {
@Override
public Float deserialize(final String topic, final byte[] data) {
if (data == null)
return null;
if (data.length != 4) {
throw new SerializationException("Size of data received by Deserializer is not 4");
}
int value = 0;
for (byte b : data) {
value <<= 8;
value |= b & 0xFF;
}
return Float.intBitsToFloat(value);
}
@Override
public Float deserialize(String topic, Headers headers, ByteBuffer data) {
if (data == null) {
return null;
}
if (data.remaining() != 4) {
throw new SerializationException("Size of data received by Deserializer is not 4");
}
return data.getFloat(data.position());
}
} | java | github | https://github.com/apache/kafka | clients/src/main/java/org/apache/kafka/common/serialization/FloatDeserializer.java |
<!--Copyright 2021 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# BARTpho [[bartpho]]
## 개요 [[overview]]
BARTpho 모델은 Nguyen Luong Tran, Duong Minh Le, Dat Quoc Nguyen에 의해 [BARTpho: Pre-trained Sequence-to-Sequence Models for Vietnamese](https://huggingface.co/papers/2109.09701)에서 제안되었습니다.
이 논문의 초록은 다음과 같습니다:
*우리는 BARTpho_word와 BARTpho_syllable의 두 가지 버전으로 BARTpho를 제시합니다.
이는 베트남어를 위해 사전훈련된 최초의 대규모 단일 언어 시퀀스-투-시퀀스 모델입니다.
우리의 BARTpho는 시퀀스-투-시퀀스 디노이징 모델인 BART의 "large" 아키텍처와 사전훈련 방식을 사용하여, 생성형 NLP 작업에 특히 적합합니다.
베트남어 텍스트 요약의 다운스트림 작업 실험에서,
자동 및 인간 평가 모두에서 BARTpho가 강력한 기준인 mBART를 능가하고 최신 성능을 개선했음을 보여줍니다.
우리는 향후 연구 및 베트남어 생성형 NLP 작업의 응용을 촉진하기 위해 BARTpho를 공개합니다.*
이 모델은 [dqnguyen](https://huggingface.co/dqnguyen)이 기여했습니다. 원본 코드는 [여기](https://github.com/VinAIResearch/BARTpho)에서 찾을 수 있습니다.
## 사용 예시 [[usage-example]]
```python
>>> import torch
>>> from transformers import AutoModel, AutoTokenizer
>>> bartpho = AutoModel.from_pretrained("vinai/bartpho-syllable")
>>> tokenizer = AutoTokenizer.from_pretrained("vinai/bartpho-syllable")
>>> line = "Chúng tôi là những nghiên cứu viên."
>>> input_ids = tokenizer(line, return_tensors="pt")
>>> with torch.no_grad():
... features = bartpho(**input_ids) # 이제 모델 출력은 튜플입니다
>>> # With TensorFlow 2.0+:
>>> from transformers import TFAutoModel
>>> bartpho = TFAutoModel.from_pretrained("vinai/bartpho-syllable")
>>> input_ids = tokenizer(line, return_tensors="tf")
>>> features = bartpho(**input_ids)
```
## 사용 팁 [[usage-tips]]
- mBART를 따르며, BARTpho는 BART의 "large" 아키텍처에 인코더와 디코더의 상단에 추가적인 레이어 정규화 레이어를 사용합니다.
따라서 [BART 문서](bart)에 있는 사용 예시를 BARTpho에 맞게 적용하려면
BART 전용 클래스를 mBART 전용 클래스로 대체하여 조정해야 합니다.
예를 들어:
```python
>>> from transformers import MBartForConditionalGeneration
>>> bartpho = MBartForConditionalGeneration.from_pretrained("vinai/bartpho-syllable")
>>> TXT = "Chúng tôi là <mask> nghiên cứu viên."
>>> input_ids = tokenizer([TXT], return_tensors="pt")["input_ids"]
>>> logits = bartpho(input_ids).logits
>>> masked_index = (input_ids[0] == tokenizer.mask_token_id).nonzero().item()
>>> probs = logits[0, masked_index].softmax(dim=0)
>>> values, predictions = probs.topk(5)
>>> print(tokenizer.decode(predictions).split())
```
- 이 구현은 토큰화만을 위한 것입니다: "monolingual_vocab_file"은 다국어
XLM-RoBERTa에서 제공되는 사전훈련된 SentencePiece 모델
"vocab_file"에서 추출된 베트남어 전용 유형으로 구성됩니다.
다른 언어들도 이 사전훈련된 다국어 SentencePiece 모델 "vocab_file"을 하위 단어 분할에 사용하면, 자신의 언어 전용 "monolingual_vocab_file"과 함께 BartphoTokenizer를 재사용할 수 있습니다.
## BartphoTokenizer [[bartphotokenizer]]
[[autodoc]] BartphoTokenizer | unknown | github | https://github.com/huggingface/transformers | docs/source/ko/model_doc/bartpho.md |
#!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# create a rendering window and renderer
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
renWin.StereoCapableWindowOn()
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# Read an EnSight dataset from the VTK test-data tree.
reader = vtk.vtkGenericEnSightReader()
# Make sure all algorithms use the composite data pipeline
cdp = vtk.vtkCompositeDataPipeline()
reader.SetDefaultExecutivePrototype(cdp)
reader.SetCaseFileName("" + str(VTK_DATA_ROOT) + "/Data/EnSight/blow5_ascii.case")
reader.SetTimeValue(1)
# Extract polygonal geometry and map it, colored by component 0 of the
# point-data "displacement" array.
geom = vtk.vtkGeometryFilter()
geom.SetInputConnection(reader.GetOutputPort())
mapper = vtk.vtkHierarchicalPolyDataMapper()
mapper.SetInputConnection(geom.GetOutputPort())
mapper.SetColorModeToMapScalars()
mapper.SetScalarModeToUsePointFieldData()
mapper.ColorByArrayComponent("displacement",0)
mapper.SetScalarRange(0,2.08)
actor = vtk.vtkActor()
actor.SetMapper(mapper)
# assign our actor to the renderer
ren1.AddActor(actor)
# enable user interface interactor
iren.Initialize()
# Hard-coded camera so the rendered image is reproducible for baselines.
ren1.GetActiveCamera().SetPosition(99.3932,17.6571,-22.6071)
ren1.GetActiveCamera().SetFocalPoint(3.5,12,1.5)
ren1.GetActiveCamera().SetViewAngle(30)
ren1.GetActiveCamera().SetViewUp(0.239617,-0.01054,0.97081)
ren1.ResetCameraClippingRange()
renWin.Render()
# prevent the tk window from showing up then start the event loop
reader.SetDefaultExecutivePrototype(None)
# --- end of script --
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from novaclient.tests.fixture_data import base
class Fixture(base.Fixture):
    """Canned HTTP responses for the os-aggregates API endpoints."""

    base_url = 'os-aggregates'

    def setUp(self):
        super(Fixture, self).setUp()
        aggregate_list = {"aggregates": [
            {'id': '1',
             'name': 'test',
             'availability_zone': 'nova1'},
            {'id': '2',
             'name': 'test2',
             'availability_zone': 'nova1'},
        ]}
        # Listing returns both aggregates.
        self.requests.register_uri('GET', self.url(),
                                   json=aggregate_list,
                                   headers=self.json_headers)
        single_aggregate = {'aggregate': aggregate_list['aggregates'][0]}
        # Creation echoes the first aggregate back.
        self.requests.register_uri('POST', self.url(),
                                   json=single_aggregate,
                                   headers=self.json_headers)
        # Show/update/action on either aggregate id return that same payload.
        for agg_id in (1, 2):
            for method in ('GET', 'PUT'):
                self.requests.register_uri(method, self.url(agg_id),
                                           json=single_aggregate,
                                           headers=self.json_headers)
            self.requests.register_uri('POST', self.url(agg_id, 'action'),
                                       json=single_aggregate,
                                       headers=self.json_headers)
        self.requests.register_uri('DELETE', self.url(1), status_code=202)
"""
AppShell provides a GUI application framework.
This is an adaption of AppShell.py found in Python and Tkinter Programming
by John E. Grayson which is a streamlined adaptation of GuiAppD.py, originally
created by Doug Hellmann (doughellmann@mindspring.com).
"""
__all__ = ['AppShell']
from direct.showbase.DirectObject import DirectObject
from direct.showbase.TkGlobal import *
import Pmw, sys
from . import Dial
from . import Floater
from . import Slider
from . import EntryScale
from . import VectorWidgets
from . import ProgressBar
if sys.version_info >= (3, 0):
from tkinter.filedialog import *
else:
from tkFileDialog import *
"""
TO FIX:
Radiobutton ordering change
"""
# Create toplevel widget dictionary
try:
__builtins__["widgetDict"]
except KeyError:
__builtins__["widgetDict"] = {}
# Create toplevel variable dictionary
try:
__builtins__["variableDict"]
except KeyError:
__builtins__["variableDict"] = {}
def resetWidgetDict():
    # Rebind (not clear) the global widget registry, so any previously
    # captured references to the old dict are left untouched.
    # NOTE(review): indexing __builtins__ as a dict assumes it is the dict
    # form (main-module behavior) -- confirm for imported-module use.
    __builtins__["widgetDict"] = {}

def resetVariableDict():
    # Rebind the global Tk-variable registry; same caveats as resetWidgetDict.
    __builtins__["variableDict"] = {}
# Inherit from MegaWidget instead of Toplevel so you can pass in a toplevel
# to use as a container if you wish. If no toplevel passed in, create one
class AppShell(Pmw.MegaWidget, DirectObject):
appversion = '1.0'
appname = 'Generic Application Frame'
copyright = ('Copyright 2004 Walt Disney Imagineering.' +
' All Rights Reserved')
contactname = 'Mark R. Mine'
contactphone = '(818) 544-2921'
contactemail = 'Mark.Mine@disney.com'
frameWidth = 450
frameHeight = 320
padx = 5
pady = 5
usecommandarea = 0
usestatusarea = 0
balloonState = 'none'
panelCount = 0
def __init__(self, parent = None, **kw):
optiondefs = (
('title', self.appname, None),
('padx', 1, Pmw.INITOPT),
('pady', 1, Pmw.INITOPT),
('framewidth', self.frameWidth, Pmw.INITOPT),
('frameheight', self.frameHeight, Pmw.INITOPT),
('usecommandarea', self.usecommandarea, Pmw.INITOPT),
('usestatusarea', self.usestatusarea, Pmw.INITOPT),
)
self.defineoptions(kw, optiondefs)
# If no toplevel passed in, create one
if parent == None:
self.parent = Toplevel()
else:
self.parent = parent
# Initialize the base class
Pmw.MegaWidget.__init__(self, self.parent)
# Set window size
self.parent.geometry('%dx%d' % (self.frameWidth, self.frameHeight))
self.parent.title(self['title'])
# Create unique id
AppShell.panelCount += 1
self.id = self.appname + '-' + repr(AppShell.panelCount)
# Create a dictionary in the widgetDict to hold this panel's widgets
self.widgetDict = widgetDict[self.id] = {}
# And one to hold this panel's variables
self.variableDict = variableDict[self.id] = {}
# Get handle to the toplevels hull
self._hull = self.component('hull')
# Initialize the application
self.appInit()
# create the interface
self.__createInterface()
# Set focus to ourselves
self.focus_set()
# initialize our options
self.initialiseoptions(AppShell)
self.pack(fill = BOTH, expand = 1)
def __createInterface(self):
self.__createBalloon()
self.__createMenuBar()
self.__createDataArea()
self.__createCommandArea()
self.__createMessageBar()
self.__createAboutBox()
# Add binding for panel cleanup code
self.interior().bind('<Destroy>', self.onDestroy)
#
# Create the parts of the interface
# which can be modified by subclasses
#
self.createMenuBar()
self.createInterface()
def __createBalloon(self):
# Create the balloon help manager for the frame.
# Create the manager for the balloon help
self.__balloon = self.createcomponent('balloon', (), None,
Pmw.Balloon, (self._hull,))
self.__balloon.configure(state = self.balloonState)
def __createMenuBar(self):
self.menuFrame = Frame(self._hull)
self.menuBar = self.createcomponent('menubar', (), None,
Pmw.MenuBar,
(self.menuFrame,),
hull_relief=FLAT,
hull_borderwidth=0,
balloon=self.balloon())
self.menuBar.addmenu('Help', 'About %s' % self.appname, side = 'right')
self.menuBar.addmenu('File', 'File commands and Quit')
self.menuBar.pack(fill=X, side = LEFT)
# Force some space between pull down menus and other widgets
spacer = Label(self.menuFrame, text = ' ')
spacer.pack(side = LEFT, expand = 0)
self.menuFrame.pack(fill = X)
def __createDataArea(self):
# Create data area where data entry widgets are placed.
self.dataArea = self.createcomponent('dataarea',
(), None,
Frame, (self._hull,),
relief=GROOVE,
bd=1)
self.dataArea.pack(side=TOP, fill=BOTH, expand=YES,
padx=self['padx'], pady=self['pady'])
def __createCommandArea(self):
# Create a command area for application-wide buttons.
self.__commandFrame = self.createcomponent('commandframe', (), None,
Frame,
(self._hull,),
relief=SUNKEN,
bd=1)
self.__buttonBox = self.createcomponent('buttonbox', (), None,
Pmw.ButtonBox,
(self.__commandFrame,),
padx=0, pady=0)
self.__buttonBox.pack(side=TOP, expand=NO, fill=X)
if self['usecommandarea']:
self.__commandFrame.pack(side=TOP,
expand=NO,
fill=X,
padx=self['padx'],
pady=self['pady'])
def __createMessageBar(self):
# Create the message bar area for help and status messages.
frame = self.createcomponent('bottomtray', (), None,
Frame, (self._hull,), relief=SUNKEN)
self.__messageBar = self.createcomponent('messagebar',
(), None,
Pmw.MessageBar,
(frame,),
#entry_width = 40,
entry_relief=SUNKEN,
entry_bd=1,
labelpos=None)
self.__messageBar.pack(side=LEFT, expand=YES, fill=X)
self.__progressBar = ProgressBar.ProgressBar(
frame,
fillColor='slateblue',
doLabel=1,
width=150)
self.__progressBar.frame.pack(side=LEFT, expand=NO, fill=NONE)
self.updateProgress(0)
if self['usestatusarea']:
frame.pack(side=BOTTOM, expand=NO, fill=X)
self.__balloon.configure(statuscommand = \
self.__messageBar.helpmessage)
def __createAboutBox(self):
Pmw.aboutversion(self.appversion)
Pmw.aboutcopyright(self.copyright)
Pmw.aboutcontact(
'For more information, contact:\n %s\n Phone: %s\n Email: %s' %\
(self.contactname, self.contactphone,
self.contactemail))
self.about = Pmw.AboutDialog(self._hull,
applicationname=self.appname)
self.about.withdraw()
def toggleBalloon(self):
if self.toggleBalloonVar.get():
self.__balloon.configure(state = 'both')
else:
self.__balloon.configure(state = 'status')
def showAbout(self):
# Create the dialog to display about and contact information.
self.about.show()
self.about.focus_set()
def quit(self):
self.parent.destroy()
### USER METHODS ###
# To be overridden
def appInit(self):
# Called before interface is created (should be overridden).
pass
def createInterface(self):
# Override this method to create the interface for the app.
pass
def onDestroy(self, event):
# Override this method with actions to be performed on panel shutdown
pass
def createMenuBar(self):
# Creates default menus. Can be overridden or simply augmented
# Using button Add below
self.menuBar.addmenuitem('Help', 'command',
'Get information on application',
label='About...', command=self.showAbout)
self.toggleBalloonVar = IntVar()
if self.balloonState == 'none':
self.toggleBalloonVar.set(0)
else:
self.toggleBalloonVar.set(1)
self.menuBar.addmenuitem('Help', 'checkbutton',
'Toggle balloon help',
label='Balloon help',
variable = self.toggleBalloonVar,
command=self.toggleBalloon)
self.menuBar.addmenuitem('File', 'command', 'Quit this application',
label='Quit',
command=self.quit)
# Getters
def interior(self):
# Retrieve the interior site where widgets should go.
return self.dataArea
def balloon(self):
# Retrieve the panel's balloon widget
return self.__balloon
def buttonBox(self):
# Retrieve the button box.
return self.__buttonBox
def messageBar(self):
# Retieve the message bar
return self.__messageBar
# Utility functions
def buttonAdd(self, buttonName, helpMessage=None,
statusMessage=None, **kw):
# Add a button to the button box.
newBtn = self.__buttonBox.add(buttonName)
newBtn.configure(kw)
if helpMessage:
self.bind(newBtn, helpMessage, statusMessage)
return newBtn
def alignbuttons(self):
""" Make all buttons wide as widest """
self.__buttonBox.alignbuttons()
def bind(self, child, balloonHelpMsg, statusHelpMsg=None):
# Bind a help message and/or status message to a widget.
self.__balloon.bind(child, balloonHelpMsg, statusHelpMsg)
def updateProgress(self, newValue=0, newMax=0):
# Used to update progress bar
self.__progressBar.updateProgress(newValue, newMax)
## WIDGET UTILITY FUNCTIONS ##
def addWidget(self, category, text, widget):
self.widgetDict[category + '-' + text] = widget
def getWidget(self, category, text):
return self.widgetDict.get(category + '-' + text, None)
def addVariable(self, category, text, variable):
self.variableDict[category + '-' + text] = variable
def getVariable(self, category, text):
return self.variableDict.get(category + '-' + text, None)
def createWidget(self, parent, category, text, widgetClass,
help, command, side, fill, expand, kw):
# Update kw to reflect user inputs
kw['text'] = text
# Create widget
widget = widgetClass(parent, **kw)
# Do this after so command isn't called on widget creation
widget['command'] = command
# Pack widget
widget.pack(side = side, fill = fill, expand = expand)
# Bind help
self.bind(widget, help)
# Record widget
self.addWidget(category, text, widget)
return widget
def newCreateLabeledEntry(self, parent, category, text, help = '',
command = None, value = '',
width = 12, relief = SUNKEN,
side = LEFT, fill = X, expand = 0):
""" createLabeledEntry(parent, category, text, [options]) """
# Create labeled entry
frame = Frame(parent)
variable = StringVar()
variable.set(value)
label = Label(frame, text = text)
label.pack(side = LEFT, fill = X, expand = 0)
entry = Entry(frame, width = width, relief = relief,
textvariable = variable)
entry.pack(side = LEFT, fill = X, expand = 1)
frame.pack(side = side, fill = X, expand = expand)
if command:
entry.bind('<Return>', command)
# Add balloon help
self.bind(label, help)
self.bind(entry, help)
# Record widgets and variable
self.addWidget(category, text, entry)
self.addWidget(category, text + '-Label', label)
self.addVariable(category, text, variable)
return entry
def newCreateButton(self, parent, category, text,
help = '', command = None,
side = LEFT, fill = X, expand = 0, **kw):
""" createButton(parent, category, text, [options]) """
# Create the widget
widget = self.createWidget(parent, category, text, Button,
help, command, side, fill, expand, kw)
return widget
def newCreateCheckbutton(self, parent, category, text,
help = '', command = None,
initialState = 0, anchor = W,
side = LEFT, fill = X, expand = 0, **kw):
""" createCheckbutton(parent, category, text, [options]) """
# Create the widget
widget = self.createWidget(parent, category, text, Checkbutton,
help, command, side, fill, expand, kw)
# Perform extra customization
widget['anchor'] = anchor
variable = BooleanVar()
variable.set(initialState)
self.addVariable(category, text, variable)
widget['variable'] = variable
return widget
def newCreateRadiobutton(self, parent, category, text, variable, value,
command = None, help = '', anchor = W,
side = LEFT, fill = X, expand = 0, **kw):
"""
createRadiobutton(parent, category, text, variable, value, [options])
"""
# Create the widget
widget = self.createWidget(parent, category, text, Radiobutton,
help, command, side, fill, expand, kw)
# Perform extra customization
widget['anchor'] = anchor
widget['value'] = value
widget['variable'] = variable
return widget
def newCreateFloater(self, parent, category, text,
help = '', command = None,
side = LEFT, fill = X, expand = 0, **kw):
# Create the widget
widget = self.createWidget(parent, category, text,
Floater.Floater,
help, command, side, fill, expand, kw)
return widget
def newCreateDial(self, parent, category, text,
help = '', command = None,
side = LEFT, fill = X, expand = 0, **kw):
# Create the widget
widget = self.createWidget(parent, category, text,
Dial.Dial,
help, command, side, fill, expand, kw)
return widget
def newCreateSider(self, parent, category, text,
help = '', command = None,
side = LEFT, fill = X, expand = 0, **kw):
# Create the widget
widget = self.createWidget(parent, category, text,
Slider.Slider,
help, command, side, fill, expand, kw)
return widget
def newCreateEntryScale(self, parent, category, text,
help = '', command = None,
side = LEFT, fill = X, expand = 0, **kw):
# Create the widget
widget = self.createWidget(parent, category, text,
EntryScale.EntryScale,
help, command, side, fill, expand, kw)
return widget
def newCreateVector2Entry(self, parent, category, text,
                          help = '', command = None,
                          side = LEFT, fill = X, expand = 0, **kw):
    """Create, pack, register, and return a VectorWidgets.Vector2Entry.

    Mirrors the other newCreate* factories: the widget is packed into
    parent, bound to the balloon `help` string, and recorded under
    (category, text) for later lookup via getWidget().
    """
    # Create the widget
    widget = self.createWidget(parent, category, text,
                               VectorWidgets.Vector2Entry,
                               help, command, side, fill, expand, kw)
    # Bug fix: this method previously fell off the end and returned None,
    # unlike every sibling factory (e.g. newCreateVector3Entry).
    return widget
def newCreateVector3Entry(self, parent, category, text,
help = '', command = None,
side = LEFT, fill = X, expand = 0, **kw):
# Create the widget
widget = self.createWidget(parent, category, text,
VectorWidgets.Vector3Entry,
help, command, side, fill, expand, kw)
return widget
def newCreateColorEntry(self, parent, category, text,
help = '', command = None,
side = LEFT, fill = X, expand = 0, **kw):
# Create the widget
widget = self.createWidget(parent, category, text,
VectorWidgets.ColorEntry,
help, command, side, fill, expand, kw)
return widget
def newCreateOptionMenu(self, parent, category, text,
help = '', command = None, items = [],
labelpos = W, label_anchor = W,
label_width = 16, menu_tearoff = 1,
side = LEFT, fill = X, expand = 0, **kw):
# Create variable
variable = StringVar()
if len(items) > 0:
variable.set(items[0])
# Update kw to reflect user inputs
kw['items'] = items
kw['label_text'] = text
kw['labelpos'] = labelpos
kw['label_anchor'] = label_anchor
kw['label_width'] = label_width
kw['menu_tearoff'] = menu_tearoff
kw['menubutton_textvariable'] = variable
# Create widget
widget = Pmw.OptionMenu(parent, **kw)
# Do this after so command isn't called on widget creation
widget['command'] = command
# Pack widget
widget.pack(side = side, fill = fill, expand = expand)
# Bind help
self.bind(widget.component('menubutton'), help)
# Record widget and variable
self.addWidget(category, text, widget)
self.addVariable(category, text, variable)
return widget
def newCreateComboBox(self, parent, category, text,
help = '', command = None,
items = [], state = DISABLED, history = 0,
labelpos = W, label_anchor = W,
label_width = 16, entry_width = 16,
side = LEFT, fill = X, expand = 0, **kw):
# Update kw to reflect user inputs
kw['label_text'] = text
kw['labelpos'] = labelpos
kw['label_anchor'] = label_anchor
kw['label_width'] = label_width
kw['entry_width'] = entry_width
kw['scrolledlist_items'] = items
kw['entryfield_entry_state'] = state
# Create widget
widget = Pmw.ComboBox(parent, **kw)
# Bind selection command
widget['selectioncommand'] = command
# Select first item if it exists
if len(items) > 0:
widget.selectitem(items[0])
# Pack widget
widget.pack(side = side, fill = fill, expand = expand)
# Bind help
self.bind(widget, help)
# Record widget
self.addWidget(category, text, widget)
return widget
def transformRGB(self, rgb, max = 1.0):
    """Convert the first three channels of rgb (each in [0, max]) into a
    Tk-style '#rrggbb' hex color string, clamping out-of-range values."""
    # The 'max' parameter shadows the builtin, so clamping is spelled out.
    parts = ['#']
    for i in range(3):
        scaled = int((rgb[i] / max) * 255)
        if scaled > 255:
            scaled = 255
        elif scaled < 0:
            scaled = 0
        parts.append("%02x" % scaled)
    return ''.join(parts)
class TestAppShell(AppShell):
    """Minimal AppShell subclass used as a manual smoke test of the framework."""

    # Override class variables here
    appname = 'Test Application Shell'
    usecommandarea = 1
    usestatusarea = 1

    def __init__(self, parent = None, **kw):
        # Call superclass initialization function
        # NOTE(review): 'parent' and **kw are accepted but not forwarded to
        # AppShell.__init__, so e.g. balloon_state passed below is silently
        # dropped -- confirm whether forwarding was intended.
        AppShell.__init__(self)
        self.initialiseoptions(TestAppShell)

    def createButtons(self):
        # Single 'Ok' button that closes the window.
        self.buttonAdd('Ok',
                       helpMessage='Exit',
                       statusMessage='Exit',
                       command=self.quit)

    def createMain(self):
        # Placeholder content for the data area.
        self.label = self.createcomponent('label', (), None,
                                          Label,
                                          (self.interior(),),
                                          text='Data Area')
        self.label.pack()
        self.bind(self.label, 'Space taker')

    def createInterface(self):
        # Hook invoked by AppShell during construction.
        self.createButtons()
        self.createMain()

if __name__ == '__main__':
    test = TestAppShell(balloon_state='none')
"""
Configuration file for Atlas
All variable settings should go here so values can be propagated to the various
functions from a central location.
"""
import re
import os
from atlas.config_servers import (SERVERDEFS, VARNISH_CONTROL_TERMINALS, NFS_MOUNT_LOCATION,
BASE_URLS, API_URLS, LOAD_BALANCER_CONFIG_FILES,
LOAD_BALANCER_CONFIG_GROUP)
from atlas.config_local import (ENVIRONMENT, SSL_KEY_FILE, SSL_CRT_FILE, ALLOWED_USERS,
NFS_MOUNT_FILES_DIR, LOAD_BALANCER, DESIRED_SITE_COUNT, CODE_ROOT,
SITES_WEB_ROOT, SITES_CODE_ROOT, SITE_DOWN_PATH, BACKUPS_PATH,
DEFAULT_CORE, DEFAULT_PROFILE, ENCRYPTION_KEY, LDAP_SERVER,
LDAP_ORG_UNIT, LDAP_DNS_DOMAIN_NAME, SSH_USER, WEBSERVER_USER,
WEBSERVER_USER_GROUP, DATABASE_USER, DATABASE_PASSWORD,
SERVICE_ACCOUNT_USERNAME, SERVICE_ACCOUNT_PASSWORD,
SLACK_NOTIFICATIONS, SLACK_URL, SLACK_USERNAME,
SEND_NOTIFICATION_EMAILS, SEND_NOTIFICATION_FROM_EMAIL, EMAIL_HOST,
EMAIL_PORT, EMAIL_USERNAME, EMAIL_PASSWORD, LOG_LOCATION,
EMAIL_USERS_EXCLUDE)
# Set Atlas location
ATLAS_LOCATION = os.path.dirname(os.path.realpath(__file__))
# Verify code_root is correctly formed.
# Each configured root must begin with a slash and must not end with one.
LEADING_SLASH = re.compile("^/")
TRAILING_SLASH = re.compile("/$")
# Uses re.match primitive to look from the beginning.
if not LEADING_SLASH.match(CODE_ROOT):
    raise Exception("'code_root' should begin with a slash.")
if not LEADING_SLASH.match(SITES_WEB_ROOT):
    raise Exception("'sites_web_root' should begin with a slash.")
if not LEADING_SLASH.match(SITES_CODE_ROOT):
    raise Exception("'sites_code_root' should begin with a slash.")
# Uses re.search primitive to look anywhere in the string.
if TRAILING_SLASH.search(CODE_ROOT):
    raise Exception("'code_root' should not have a trailing slash.")
if TRAILING_SLASH.search(SITES_WEB_ROOT):
    raise Exception("'sites_web_root' should not have a trailing slash.")
# Bug fix: this check previously re-tested SITES_WEB_ROOT (copy-paste),
# leaving a trailing slash on SITES_CODE_ROOT unvalidated.
if TRAILING_SLASH.search(SITES_CODE_ROOT):
    raise Exception("'sites_code_root' should not have a trailing slash.")
# This allows us to use a self signed cert for local dev.
# Presumably passed as a requests-style verify= argument (CA path or
# False) -- confirm at call sites.
SSL_VERIFICATION = "/etc/pki/tls/certs"
if ENVIRONMENT == 'local':
    SSL_VERIFICATION = False
    import urllib3
    # Disable warnings about not being able to verify local certs.
    # https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
    urllib3.disable_warnings()

VERSION_NUMBER = '2.1.0'
"""Starts a service to scan in intervals for new devices."""
from datetime import timedelta
import json
import logging
from netdisco.discovery import NetworkDiscovery
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.components import zeroconf
from homeassistant.const import EVENT_HOMEASSISTANT_STARTED
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.discovery import async_discover, async_load_platform
from homeassistant.helpers.event import async_track_point_in_utc_time
import homeassistant.util.dt as dt_util
DOMAIN = "discovery"
SCAN_INTERVAL = timedelta(seconds=300)
SERVICE_APPLE_TV = "apple_tv"
SERVICE_DAIKIN = "daikin"
SERVICE_DLNA_DMR = "dlna_dmr"
SERVICE_ENIGMA2 = "enigma2"
SERVICE_HASS_IOS_APP = "hass_ios"
SERVICE_HASSIO = "hassio"
SERVICE_HEOS = "heos"
SERVICE_KONNECTED = "konnected"
SERVICE_MOBILE_APP = "hass_mobile_app"
SERVICE_NETGEAR = "netgear_router"
SERVICE_OCTOPRINT = "octoprint"
SERVICE_SABNZBD = "sabnzbd"
SERVICE_SAMSUNG_PRINTER = "samsung_printer"
SERVICE_TELLDUSLIVE = "tellstick"
SERVICE_YEELIGHT = "yeelight"
SERVICE_WEMO = "belkin_wemo"
SERVICE_WINK = "wink"
SERVICE_XIAOMI_GW = "xiaomi_gw"
# These have custom protocols
CONFIG_ENTRY_HANDLERS = {
SERVICE_TELLDUSLIVE: "tellduslive",
"logitech_mediaserver": "squeezebox",
}
# These have no config flows
SERVICE_HANDLERS = {
SERVICE_NETGEAR: ("device_tracker", None),
SERVICE_ENIGMA2: ("media_player", "enigma2"),
SERVICE_SABNZBD: ("sabnzbd", None),
"yamaha": ("media_player", "yamaha"),
"frontier_silicon": ("media_player", "frontier_silicon"),
"openhome": ("media_player", "openhome"),
"bose_soundtouch": ("media_player", "soundtouch"),
"bluesound": ("media_player", "bluesound"),
"lg_smart_device": ("media_player", "lg_soundbar"),
"nanoleaf_aurora": ("light", "nanoleaf"),
}
OPTIONAL_SERVICE_HANDLERS = {SERVICE_DLNA_DMR: ("media_player", "dlna_dmr")}
MIGRATED_SERVICE_HANDLERS = [
SERVICE_APPLE_TV,
"axis",
"deconz",
SERVICE_DAIKIN,
"denonavr",
"esphome",
"google_cast",
SERVICE_HASS_IOS_APP,
SERVICE_HASSIO,
SERVICE_HEOS,
"harmony",
"homekit",
"ikea_tradfri",
"kodi",
SERVICE_KONNECTED,
SERVICE_MOBILE_APP,
SERVICE_OCTOPRINT,
"philips_hue",
SERVICE_SAMSUNG_PRINTER,
"sonos",
"songpal",
SERVICE_WEMO,
SERVICE_WINK,
SERVICE_XIAOMI_GW,
"volumio",
SERVICE_YEELIGHT,
]
DEFAULT_ENABLED = (
list(CONFIG_ENTRY_HANDLERS) + list(SERVICE_HANDLERS) + MIGRATED_SERVICE_HANDLERS
)
DEFAULT_DISABLED = list(OPTIONAL_SERVICE_HANDLERS) + MIGRATED_SERVICE_HANDLERS
CONF_IGNORE = "ignore"
CONF_ENABLE = "enable"
CONFIG_SCHEMA = vol.Schema(
{
vol.Optional(DOMAIN): vol.Schema(
{
vol.Optional(CONF_IGNORE, default=[]): vol.All(
cv.ensure_list, [vol.In(DEFAULT_ENABLED)]
),
vol.Optional(CONF_ENABLE, default=[]): vol.All(
cv.ensure_list, [vol.In(DEFAULT_DISABLED + DEFAULT_ENABLED)]
),
}
)
},
extra=vol.ALLOW_EXTRA,
)
async def async_setup(hass, config):
    """Start a discovery service.

    Schedules a periodic netdisco scan once Home Assistant has started and
    dispatches each newly found service to a config-entry flow, a discovery
    event, or a platform load depending on the handler tables above.
    """
    logger = logging.getLogger(__name__)
    netdisco = NetworkDiscovery()
    # Hashes of (service, info) pairs already dispatched; shared by the
    # closures below so each service is handled only once.
    already_discovered = set()
    if DOMAIN in config:
        # Platforms ignore by config
        ignored_platforms = config[DOMAIN][CONF_IGNORE]
        # Optional platforms enabled by config
        enabled_platforms = config[DOMAIN][CONF_ENABLE]
    else:
        ignored_platforms = []
        enabled_platforms = []
    for platform in enabled_platforms:
        if platform in DEFAULT_ENABLED:
            logger.warning(
                "Please remove %s from your discovery.enable configuration "
                "as it is now enabled by default",
                platform,
            )
    zeroconf_instance = await zeroconf.async_get_instance(hass)

    async def new_service_found(service, info):
        """Handle a new service if one is found."""
        # Services migrated to their own config flows are never handled here.
        if service in MIGRATED_SERVICE_HANDLERS:
            return
        if service in ignored_platforms:
            logger.info("Ignoring service: %s %s", service, info)
            return
        # sort_keys makes the hash stable regardless of dict ordering.
        discovery_hash = json.dumps([service, info], sort_keys=True)
        if discovery_hash in already_discovered:
            logger.debug("Already discovered service %s %s.", service, info)
            return
        already_discovered.add(discovery_hash)
        if service in CONFIG_ENTRY_HANDLERS:
            await hass.config_entries.flow.async_init(
                CONFIG_ENTRY_HANDLERS[service],
                context={"source": config_entries.SOURCE_DISCOVERY},
                data=info,
            )
            return
        comp_plat = SERVICE_HANDLERS.get(service)
        # Optional handlers apply only when explicitly enabled in config.
        if not comp_plat and service in enabled_platforms:
            comp_plat = OPTIONAL_SERVICE_HANDLERS[service]
        # We do not know how to handle this service.
        if not comp_plat:
            logger.debug("Unknown service discovered: %s %s", service, info)
            return
        logger.info("Found new service: %s %s", service, info)
        component, platform = comp_plat
        if platform is None:
            await async_discover(hass, service, info, component, config)
        else:
            await async_load_platform(hass, component, platform, info, config)

    async def scan_devices(now):
        """Scan for devices."""
        try:
            # The blocking netdisco scan runs in the executor pool.
            results = await hass.async_add_executor_job(
                _discover, netdisco, zeroconf_instance
            )
            for result in results:
                hass.async_create_task(new_service_found(*result))
        except OSError:
            logger.error("Network is unreachable")
        # Reschedule the next scan even when this one failed.
        async_track_point_in_utc_time(
            hass, scan_devices, dt_util.utcnow() + SCAN_INTERVAL
        )

    @callback
    def schedule_first(event):
        """Schedule the first discovery when Home Assistant starts up."""
        async_track_point_in_utc_time(hass, scan_devices, dt_util.utcnow())

    hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STARTED, schedule_first)
    return True
def _discover(netdisco, zeroconf_instance):
"""Discover devices."""
results = []
try:
netdisco.scan(zeroconf_instance=zeroconf_instance)
for disc in netdisco.discover():
for service in netdisco.get_info(disc):
results.append((disc, service))
finally:
netdisco.stop()
return results | unknown | codeparrot/codeparrot-clean | ||
# NOTE(fix): ContentType is defined in django.contrib.contenttypes.models;
# importing it from .fields (which holds GenericForeignKey and friends)
# raises ImportError at module load time.
from django.contrib.contenttypes.models import ContentType
from django.test import TestCase

from nsync.models import ExternalSystem, ExternalKeyMapping
from tests.models import TestPerson
class TestExternalSystem(TestCase):
    """String-representation behaviour of ExternalSystem."""

    def test_it_uses_name_for_string(self):
        system = ExternalSystem(name='SystemName')
        self.assertEqual('SystemName', str(system))

    def test_it_returns_the_description_instead_of_name_if_available(
            self):
        # A description, when present, takes precedence over the name.
        sut = ExternalSystem(name='SystemName',
                             description='SystemDescription')
        self.assertEqual('SystemDescription', str(sut))
class TestExternalKeyMapping(TestCase):
    """String-representation behaviour of ExternalKeyMapping."""

    def setUp(self):
        self.external_system = ExternalSystem.objects.create(
            name='ExternalSystemName')

    def test_it_returns_as_useful_string(self):
        john = TestPerson.objects.create(first_name='John')
        content_type = ContentType.objects.get_for_model(TestPerson)
        sut = ExternalKeyMapping(
            external_system=self.external_system,
            external_key='Person123',
            content_type=content_type,
            content_object=john,
            object_id=john.id)

        text = str(sut)

        # The string form should mention every identifying part of the mapping.
        for fragment in ('ExternalSystemName',
                         'Person123',
                         content_type.model_class().__name__,
                         str(john.id)):
            self.assertIn(fragment, text)
/*
* Copyright 2010-2024 JetBrains s.r.o. and Kotlin Programming Language contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
package org.jetbrains.kotlin.analysis.api.platform
import org.jetbrains.kotlin.analysis.api.KaPlatformInterface
/**
 * A **platform component** as defined by the Platform Interface (see the README).
 *
 * Mandatory platform components must be implemented by a platform to fully support the Analysis API in the desired environment. A few
 * platform components are optional, signified by [KotlinOptionalPlatformComponent]. As a marker interface, [KotlinPlatformComponent]
 * makes it easy to find all platform components to implement.
 *
 * The Platform Interface provides a number of default and base implementations which may be specified or extended by a platform
 * implementation, such as [KotlinProjectMessageBusProvider] for [KotlinMessageBusProvider].
 *
 * Platform component interfaces and their default implementations are always prefixed with the word `Kotlin`, in contrast to
 * [KaEngineService]s which are prefixed with `Ka`. It is recommended to keep this naming convention in platform implementations. For
 * example, the Standalone API uses a `KotlinStandalone` prefix for its own platform component implementations.
 *
 * @see KotlinOptionalPlatformComponent
 */
@KaPlatformInterface
public interface KotlinPlatformComponent
/**
 * An optional [KotlinPlatformComponent]. The Analysis API engine does not require an optional platform component to be implemented and
 * will use sensible fallbacks or disable/avoid certain behaviors instead.
 *
 * @see KotlinPlatformComponent
 */
@KaPlatformInterface
public interface KotlinOptionalPlatformComponent
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
def create_parser():
    """Build the argument parser for the update command.

    Extends wptrunner's update parser with sync/upstream toggles and two
    mutually-alternative GitHub credential sources (--token / --token-file).
    """
    from wptrunner import wptcommandline

    parser = wptcommandline.create_parser_update()
    # default=None lets check_args distinguish "not given" from an explicit
    # --upstream / --no-upstream choice.
    parser.add_argument("--upstream", dest="upstream", action="store_true", default=None,
                        help="Push local changes to upstream repository even when not syncing")
    parser.add_argument("--no-upstream", dest="upstream", action="store_false", default=None,
                        help="Don't push local changes to upstream repository when syncing")  # fixed "Dont't" typo
    parser.add_argument("--token-file", action="store", type=wptcommandline.abs_path,
                        help="Path to file containing github token")
    parser.add_argument("--token", action="store", help="GitHub token to use")
    return parser
def check_args(kwargs):
    """Validate and normalise the parsed update arguments in place."""
    from wptrunner import wptcommandline

    wptcommandline.set_from_config(kwargs)

    # Default "upstream" to the sync flag when neither --upstream nor
    # --no-upstream was given.
    if kwargs["upstream"] is None:
        kwargs["upstream"] = kwargs["sync"]

    if kwargs["upstream"]:
        if kwargs["rev"]:
            raise ValueError("Setting --rev with --upstream isn't supported")
        if kwargs["token"] is None:
            if kwargs["token_file"] is None:
                raise ValueError("Must supply either a token file or a token")
            with open(kwargs["token_file"]) as f:
                kwargs["token"] = f.read().strip()

    del kwargs["token_file"]
    return kwargs
def parse_args():
    """Parse sys.argv and return the validated kwargs dictionary."""
    return check_args(vars(create_parser().parse_args()))
"""
Scrapy Item
See documentation in docs/topics/item.rst
"""
from __future__ import annotations
from abc import ABCMeta
from collections.abc import MutableMapping
from copy import deepcopy
from pprint import pformat
from typing import TYPE_CHECKING, Any, NoReturn
from scrapy.utils.trackref import object_ref
if TYPE_CHECKING:
from collections.abc import Iterator, KeysView
# typing.Self requires Python 3.11
from typing_extensions import Self
class Field(dict[str, Any]):
    """Container of field metadata.

    A plain ``dict`` subclass: keys and values are arbitrary metadata
    attached to an :class:`Item` field declaration.
    """
class ItemMeta(ABCMeta):
    """Metaclass_ of :class:`Item` that handles field definitions.

    .. _metaclass: https://realpython.com/python-metaclasses
    """

    def __new__(
        mcs, class_name: str, bases: tuple[type, ...], attrs: dict[str, Any]
    ) -> ItemMeta:
        classcell = attrs.pop("__classcell__", None)
        # Build an intermediate "x_<name>" class first so that Field
        # attributes — including inherited ones — can be collected via dir().
        new_bases = tuple(base._class for base in bases if hasattr(base, "_class"))
        _class = super().__new__(mcs, "x_" + class_name, new_bases, attrs)

        fields = getattr(_class, "fields", {})
        new_attrs = {}
        for n in dir(_class):
            v = getattr(_class, n)
            if isinstance(v, Field):
                # Field declarations are collected into the ``fields`` mapping…
                fields[n] = v
            elif n in attrs:
                # …while other attributes declared on this class are kept as-is.
                new_attrs[n] = attrs[n]

        new_attrs["fields"] = fields
        new_attrs["_class"] = _class
        if classcell is not None:
            # Preserve the class cell so zero-argument super() keeps working.
            new_attrs["__classcell__"] = classcell
        return super().__new__(mcs, class_name, bases, new_attrs)
class Item(MutableMapping[str, Any], object_ref, metaclass=ItemMeta):
    """Base class for scraped items.

    In Scrapy, an object is considered an ``item`` if it's supported by the
    `itemadapter`_ library. For example, when the output of a spider callback
    is evaluated, only such objects are passed to :ref:`item pipelines
    <topics-item-pipeline>`. :class:`Item` is one of the classes supported by
    `itemadapter`_ by default.

    Items must declare :class:`Field` attributes, which are processed and stored
    in the ``fields`` attribute. This restricts the set of allowed field names
    and prevents typos, raising ``KeyError`` when referring to undefined fields.
    Additionally, fields can be used to define metadata and control the way
    data is processed internally. Please refer to the :ref:`documentation
    about fields <topics-items-fields>` for additional information.

    Unlike instances of :class:`dict`, instances of :class:`Item` may be
    :ref:`tracked <topics-leaks-trackrefs>` to debug memory leaks.

    .. _itemadapter: https://github.com/scrapy/itemadapter
    """

    #: A dictionary containing *all declared fields* for this Item, not only
    #: those populated. The keys are the field names and the values are the
    #: :class:`Field` objects used in the :ref:`Item declaration
    #: <topics-items-declaring>`.
    fields: dict[str, Field]

    def __init__(self, *args: Any, **kwargs: Any):
        self._values: dict[str, Any] = {}
        if args or kwargs:  # avoid creating dict for most common case
            for k, v in dict(*args, **kwargs).items():
                self[k] = v

    def __getitem__(self, key: str) -> Any:
        return self._values[key]

    def __setitem__(self, key: str, value: Any) -> None:
        # Only declared fields may be assigned; anything else is a hard error.
        if key in self.fields:
            self._values[key] = value
        else:
            raise KeyError(f"{self.__class__.__name__} does not support field: {key}")

    def __delitem__(self, key: str) -> None:
        del self._values[key]

    def __getattr__(self, name: str) -> NoReturn:
        # Field values are never exposed as attributes — only item[name] works.
        if name in self.fields:
            raise AttributeError(f"Use item[{name!r}] to get field value")
        raise AttributeError(name)

    def __setattr__(self, name: str, value: Any) -> None:
        # Non-private attribute assignment is redirected to item[name] syntax.
        if not name.startswith("_"):
            raise AttributeError(f"Use item[{name!r}] = {value!r} to set field value")
        super().__setattr__(name, value)

    def __len__(self) -> int:
        return len(self._values)

    def __iter__(self) -> Iterator[str]:
        return iter(self._values)

    __hash__ = object_ref.__hash__

    def keys(self) -> KeysView[str]:
        return self._values.keys()

    def __repr__(self) -> str:
        return pformat(dict(self))

    def copy(self) -> Self:
        # Shallow copy: field values are shared with the original item.
        return self.__class__(self)

    def deepcopy(self) -> Self:
        """Return a :func:`~copy.deepcopy` of this item."""
        return deepcopy(self)
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_TESTING_TFLITE_DIFF_FLAGS_H_
#define TENSORFLOW_LITE_TESTING_TFLITE_DIFF_FLAGS_H_
#include <cstring>
#include <string>
#include "absl/strings/match.h"
#include "tensorflow/core/util/command_line_flags.h"
#include "tensorflow/lite/testing/split.h"
#include "tensorflow/lite/testing/tflite_diff_util.h"
#include "tensorflow/lite/testing/tflite_driver.h"
namespace tflite {
namespace testing {
// Parses tflite_diff's command-line flags from (argc, argv) into a
// DiffOptions struct. Prints usage and returns a default-constructed
// DiffOptions when required flags are missing, parsing fails, --helpfull is
// requested, or an unknown --delegate value is given.
inline DiffOptions ParseTfliteDiffFlags(int* argc, char** argv) {
  struct {
    string tensorflow_model;
    string tflite_model;
    string input_layer;
    string input_layer_type;
    string input_layer_shape;
    string output_layer;
    int32_t num_runs_per_pass = 100;
    string delegate_name;
    string reference_tflite_model;
  } values;

  // NOTE(fix): removed an unused local `std::string delegate_name;` — only
  // values.delegate_name is ever read below.
  std::vector<tensorflow::Flag> flags = {
      tensorflow::Flag("tensorflow_model", &values.tensorflow_model,
                       "Path of tensorflow model."),
      tensorflow::Flag("tflite_model", &values.tflite_model,
                       "Path of tensorflow lite model."),
      tensorflow::Flag("input_layer", &values.input_layer,
                       "Names of input tensors, separated by comma. Example: "
                       "input_1,input_2."),
      tensorflow::Flag("input_layer_type", &values.input_layer_type,
                       "Data types of input tensors, separated by comma. "
                       "Example: float,int."),
      tensorflow::Flag(
          "input_layer_shape", &values.input_layer_shape,
          "Shapes of input tensors, separated by colon. Example: 1,3,4,1:2."),
      tensorflow::Flag("output_layer", &values.output_layer,
                       "Names of output tensors, separated by comma. Example: "
                       "output_1,output_2."),
      tensorflow::Flag("num_runs_per_pass", &values.num_runs_per_pass,
                       "[optional] Number of full runs in each pass."),
      tensorflow::Flag("delegate", &values.delegate_name,
                       "[optional] Delegate to use for executing ops. Must be "
                       "`{\"\", NNAPI, GPU, FLEX}`"),
      tensorflow::Flag("reference_tflite_model", &values.reference_tflite_model,
                       "[optional] Path of the TensorFlow Lite model to "
                       "compare inference results against the model given in "
                       "`tflite_model`."),
  };

  bool no_inputs = *argc == 1;
  bool success = tensorflow::Flags::Parse(argc, argv, flags);
  if (!success || no_inputs || (*argc == 2 && !strcmp(argv[1], "--helpfull"))) {
    fprintf(stderr, "%s", tensorflow::Flags::Usage(argv[0], flags).c_str());
    return {};
  } else if (values.tensorflow_model.empty() || values.tflite_model.empty() ||
             values.input_layer.empty() || values.input_layer_type.empty() ||
             values.input_layer_shape.empty() || values.output_layer.empty()) {
    fprintf(stderr, "%s", tensorflow::Flags::Usage(argv[0], flags).c_str());
    return {};
  }

  // Map the textual delegate name (case-insensitive) onto the driver enum.
  TfLiteDriver::DelegateType delegate = TfLiteDriver::DelegateType::kNone;
  if (!values.delegate_name.empty()) {
    if (absl::EqualsIgnoreCase(values.delegate_name, "nnapi")) {
      delegate = TfLiteDriver::DelegateType::kNnapi;
    } else if (absl::EqualsIgnoreCase(values.delegate_name, "gpu")) {
      delegate = TfLiteDriver::DelegateType::kGpu;
    } else if (absl::EqualsIgnoreCase(values.delegate_name, "flex")) {
      delegate = TfLiteDriver::DelegateType::kFlex;
    } else {
      fprintf(stderr, "%s", tensorflow::Flags::Usage(argv[0], flags).c_str());
      return {};
    }
  }

  return {values.tensorflow_model,
          values.tflite_model,
          Split<string>(values.input_layer, ","),
          Split<string>(values.input_layer_type, ","),
          Split<string>(values.input_layer_shape, ":"),
          Split<string>(values.output_layer, ","),
          values.num_runs_per_pass,
          delegate,
          values.reference_tflite_model};
}
} // namespace testing
} // namespace tflite
#endif // TENSORFLOW_LITE_TESTING_TFLITE_DIFF_FLAGS_H_ | c | github | https://github.com/tensorflow/tensorflow | tensorflow/lite/testing/tflite_diff_flags.h |
"""
>>> import numpy.core as nx
>>> import numpy.lib.ufunclike as U
Test fix:
>>> a = nx.array([[1.0, 1.1, 1.5, 1.8], [-1.0, -1.1, -1.5, -1.8]])
>>> U.fix(a)
array([[ 1., 1., 1., 1.],
[-1., -1., -1., -1.]])
>>> y = nx.zeros(a.shape, float)
>>> U.fix(a, y)
array([[ 1., 1., 1., 1.],
[-1., -1., -1., -1.]])
>>> y
array([[ 1., 1., 1., 1.],
[-1., -1., -1., -1.]])
Test isposinf, isneginf, sign
>>> a = nx.array([nx.Inf, -nx.Inf, nx.NaN, 0.0, 3.0, -3.0])
>>> U.isposinf(a)
array([ True, False, False, False, False, False], dtype=bool)
>>> U.isneginf(a)
array([False, True, False, False, False, False], dtype=bool)
>>> olderr = nx.seterr(invalid='ignore')
>>> nx.sign(a)
array([ 1., -1., NaN, 0., 1., -1.])
>>> olderr = nx.seterr(**olderr)
Same thing with an output array:
>>> y = nx.zeros(a.shape, bool)
>>> U.isposinf(a, y)
array([ True, False, False, False, False, False], dtype=bool)
>>> y
array([ True, False, False, False, False, False], dtype=bool)
>>> U.isneginf(a, y)
array([False, True, False, False, False, False], dtype=bool)
>>> y
array([False, True, False, False, False, False], dtype=bool)
>>> olderr = nx.seterr(invalid='ignore')
>>> nx.sign(a, y)
array([ True, True, True, False, True, True], dtype=bool)
>>> olderr = nx.seterr(**olderr)
>>> y
array([ True, True, True, False, True, True], dtype=bool)
Now log2:
>>> a = nx.array([4.5, 2.3, 6.5])
>>> U.log2(a)
array([ 2.169925 , 1.20163386, 2.70043972])
>>> 2**_
array([ 4.5, 2.3, 6.5])
>>> y = nx.zeros(a.shape, float)
>>> U.log2(a, y)
array([ 2.169925 , 1.20163386, 2.70043972])
>>> y
array([ 2.169925 , 1.20163386, 2.70043972])
"""
from numpy.testing import *
def test():
    # Execute the doctests embedded in this module's docstring.
    return rundocs()


if __name__ == "__main__":
    # Run via numpy's nose-based test harness when invoked directly.
    run_module_suite()
import unittest
import warnings
from pymongo.read_preferences import ReadPreference
from mongoengine import *
from tests.utils import MongoDBTestCase
class TestQuerysetAggregate(MongoDBTestCase):
    """Integration tests for QuerySet.aggregate(): read-preference
    propagation and how skip/limit/order_by combine with user pipelines."""

    def test_read_preference_aggregation_framework(self):
        class Bar(Document):
            txt = StringField()

            meta = {"indexes": ["txt"]}

        # Aggregates with read_preference
        pipeline = []
        bars = Bar.objects.read_preference(
            ReadPreference.SECONDARY_PREFERRED
        ).aggregate(pipeline)
        # The read preference must reach the underlying command cursor.
        assert (
            bars._CommandCursor__collection.read_preference
            == ReadPreference.SECONDARY_PREFERRED
        )

    def test_queryset_aggregation_framework(self):
        class Person(Document):
            name = StringField()
            age = IntField()

        Person.drop_collection()

        p1 = Person(name="Isabella Luanna", age=16)
        p2 = Person(name="Wilson Junior", age=21)
        p3 = Person(name="Sandra Mara", age=37)
        Person.objects.insert([p1, p2, p3])

        # QuerySet filters apply before the user-supplied pipeline stages.
        pipeline = [{"$project": {"name": {"$toUpper": "$name"}}}]
        data = Person.objects(age__lte=22).aggregate(pipeline)
        assert list(data) == [
            {"_id": p1.pk, "name": "ISABELLA LUANNA"},
            {"_id": p2.pk, "name": "WILSON JUNIOR"},
        ]

        pipeline = [{"$project": {"name": {"$toUpper": "$name"}}}]
        data = Person.objects(age__lte=22).order_by("-name").aggregate(pipeline)
        assert list(data) == [
            {"_id": p2.pk, "name": "WILSON JUNIOR"},
            {"_id": p1.pk, "name": "ISABELLA LUANNA"},
        ]

        pipeline = [
            {"$group": {"_id": None, "total": {"$sum": 1}, "avg": {"$avg": "$age"}}}
        ]
        data = (
            Person.objects(age__gte=17, age__lte=40)
            .order_by("-age")
            .aggregate(pipeline)
        )
        assert list(data) == [{"_id": None, "avg": 29, "total": 2}]

        pipeline = [{"$match": {"name": "Isabella Luanna"}}]
        data = Person.objects().aggregate(pipeline)
        assert list(data) == [{"_id": p1.pk, "age": 16, "name": "Isabella Luanna"}]

    def test_queryset_aggregation_with_skip(self):
        class Person(Document):
            name = StringField()
            age = IntField()

        Person.drop_collection()

        p1 = Person(name="Isabella Luanna", age=16)
        p2 = Person(name="Wilson Junior", age=21)
        p3 = Person(name="Sandra Mara", age=37)
        Person.objects.insert([p1, p2, p3])

        pipeline = [{"$project": {"name": {"$toUpper": "$name"}}}]
        data = Person.objects.skip(1).aggregate(pipeline)

        assert list(data) == [
            {"_id": p2.pk, "name": "WILSON JUNIOR"},
            {"_id": p3.pk, "name": "SANDRA MARA"},
        ]

    def test_queryset_aggregation_with_limit(self):
        class Person(Document):
            name = StringField()
            age = IntField()

        Person.drop_collection()

        p1 = Person(name="Isabella Luanna", age=16)
        p2 = Person(name="Wilson Junior", age=21)
        p3 = Person(name="Sandra Mara", age=37)
        Person.objects.insert([p1, p2, p3])

        pipeline = [{"$project": {"name": {"$toUpper": "$name"}}}]
        data = Person.objects.limit(1).aggregate(pipeline)

        assert list(data) == [{"_id": p1.pk, "name": "ISABELLA LUANNA"}]

    def test_queryset_aggregation_with_sort(self):
        class Person(Document):
            name = StringField()
            age = IntField()

        Person.drop_collection()

        p1 = Person(name="Isabella Luanna", age=16)
        p2 = Person(name="Wilson Junior", age=21)
        p3 = Person(name="Sandra Mara", age=37)
        Person.objects.insert([p1, p2, p3])

        pipeline = [{"$project": {"name": {"$toUpper": "$name"}}}]
        data = Person.objects.order_by("name").aggregate(pipeline)

        assert list(data) == [
            {"_id": p1.pk, "name": "ISABELLA LUANNA"},
            {"_id": p3.pk, "name": "SANDRA MARA"},
            {"_id": p2.pk, "name": "WILSON JUNIOR"},
        ]

    def test_queryset_aggregation_with_skip_with_limit(self):
        class Person(Document):
            name = StringField()
            age = IntField()

        Person.drop_collection()

        p1 = Person(name="Isabella Luanna", age=16)
        p2 = Person(name="Wilson Junior", age=21)
        p3 = Person(name="Sandra Mara", age=37)
        Person.objects.insert([p1, p2, p3])

        pipeline = [{"$project": {"name": {"$toUpper": "$name"}}}]
        data = list(Person.objects.skip(1).limit(1).aggregate(pipeline))

        assert list(data) == [{"_id": p2.pk, "name": "WILSON JUNIOR"}]

        # Make sure limit/skip chaining order has no impact
        data2 = Person.objects.limit(1).skip(1).aggregate(pipeline)
        assert data == list(data2)

    def test_queryset_aggregation_with_sort_with_limit(self):
        class Person(Document):
            name = StringField()
            age = IntField()

        Person.drop_collection()

        p1 = Person(name="Isabella Luanna", age=16)
        p2 = Person(name="Wilson Junior", age=21)
        p3 = Person(name="Sandra Mara", age=37)
        Person.objects.insert([p1, p2, p3])

        pipeline = [{"$project": {"name": {"$toUpper": "$name"}}}]
        data = Person.objects.order_by("name").limit(2).aggregate(pipeline)

        assert list(data) == [
            {"_id": p1.pk, "name": "ISABELLA LUANNA"},
            {"_id": p3.pk, "name": "SANDRA MARA"},
        ]

        # Verify adding limit/skip steps works as expected
        pipeline = [{"$project": {"name": {"$toUpper": "$name"}}}, {"$limit": 1}]
        data = Person.objects.order_by("name").limit(2).aggregate(pipeline)
        assert list(data) == [{"_id": p1.pk, "name": "ISABELLA LUANNA"}]

        pipeline = [
            {"$project": {"name": {"$toUpper": "$name"}}},
            {"$skip": 1},
            {"$limit": 1},
        ]
        data = Person.objects.order_by("name").limit(2).aggregate(pipeline)
        assert list(data) == [{"_id": p3.pk, "name": "SANDRA MARA"}]

    def test_queryset_aggregation_with_sort_with_skip(self):
        class Person(Document):
            name = StringField()
            age = IntField()

        Person.drop_collection()

        p1 = Person(name="Isabella Luanna", age=16)
        p2 = Person(name="Wilson Junior", age=21)
        p3 = Person(name="Sandra Mara", age=37)
        Person.objects.insert([p1, p2, p3])

        pipeline = [{"$project": {"name": {"$toUpper": "$name"}}}]
        data = Person.objects.order_by("name").skip(2).aggregate(pipeline)

        assert list(data) == [{"_id": p2.pk, "name": "WILSON JUNIOR"}]

    def test_queryset_aggregation_with_sort_with_skip_with_limit(self):
        class Person(Document):
            name = StringField()
            age = IntField()

        Person.drop_collection()

        p1 = Person(name="Isabella Luanna", age=16)
        p2 = Person(name="Wilson Junior", age=21)
        p3 = Person(name="Sandra Mara", age=37)
        Person.objects.insert([p1, p2, p3])

        pipeline = [{"$project": {"name": {"$toUpper": "$name"}}}]
        data = Person.objects.order_by("name").skip(1).limit(1).aggregate(pipeline)

        assert list(data) == [{"_id": p3.pk, "name": "SANDRA MARA"}]

    def test_queryset_aggregation_deprecated_interface(self):
        class Person(Document):
            name = StringField()

        Person.drop_collection()

        p1 = Person(name="Isabella Luanna")
        p2 = Person(name="Wilson Junior")
        p3 = Person(name="Sandra Mara")
        Person.objects.insert([p1, p2, p3])

        pipeline = [{"$project": {"name": {"$toUpper": "$name"}}}]

        # Make sure a warning is emitted
        with warnings.catch_warnings():
            warnings.simplefilter("error", DeprecationWarning)
            with self.assertRaises(DeprecationWarning):
                Person.objects.order_by("name").limit(2).aggregate(*pipeline)

        # Make sure old interface works as expected with a 1-step pipeline
        data = Person.objects.order_by("name").limit(2).aggregate(*pipeline)

        assert list(data) == [
            {"_id": p1.pk, "name": "ISABELLA LUANNA"},
            {"_id": p3.pk, "name": "SANDRA MARA"},
        ]

        # Make sure old interface works as expected with a 2-steps pipeline
        pipeline = [{"$project": {"name": {"$toUpper": "$name"}}}, {"$limit": 1}]
        data = Person.objects.order_by("name").limit(2).aggregate(*pipeline)

        assert list(data) == [{"_id": p1.pk, "name": "ISABELLA LUANNA"}]

    def test_queryset_aggregation_geonear_aggregation_on_pointfield(self):
        """test ensures that $geonear can be used as a 1-stage pipeline and that
        MongoEngine does not interfer with such pipeline (#2473)
        """

        class Aggr(Document):
            name = StringField()
            c = PointField()

        Aggr.drop_collection()

        agg1 = Aggr(name="X", c=[10.634584, 35.8245029]).save()
        agg2 = Aggr(name="Y", c=[10.634584, 35.8245029]).save()

        pipeline = [
            {
                "$geoNear": {
                    "near": {"type": "Point", "coordinates": [10.634584, 35.8245029]},
                    "distanceField": "c",
                    "spherical": True,
                }
            }
        ]
        assert list(Aggr.objects.aggregate(*pipeline)) == [
            {"_id": agg1.id, "c": 0.0, "name": "X"},
            {"_id": agg2.id, "c": 0.0, "name": "Y"},
        ]
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
"""
Serialization
``django.core.serializers`` provides interfaces to converting Django
``QuerySet`` objects to and from "flat" data (i.e. strings).
"""
from decimal import Decimal
from django.db import models
class CategoryMetaDataManager(models.Manager):
    def get_by_natural_key(self, kind, name):
        # The natural key is the (kind, name) pair, which
        # CategoryMetaData.Meta declares unique_together.
        return self.get(kind=kind, name=name)
class CategoryMetaData(models.Model):
    # Model exercising natural-key serialization via its custom manager.
    kind = models.CharField(max_length=10)
    name = models.CharField(max_length=10)
    value = models.CharField(max_length=10)
    objects = CategoryMetaDataManager()

    class Meta:
        unique_together = (('kind', 'name'),)

    def __str__(self):
        return '[%s:%s]=%s' % (self.kind, self.name, self.value)

    def natural_key(self):
        return (self.kind, self.name)
class Category(models.Model):
    name = models.CharField(max_length=20)
    # Nullable FK so serialization of absent (None) relations is covered.
    meta_data = models.ForeignKey(CategoryMetaData, models.SET_NULL, null=True, default=None)

    class Meta:
        ordering = ('name',)

    def __str__(self):
        return self.name
class Author(models.Model):
    name = models.CharField(max_length=20)

    class Meta:
        ordering = ('name',)

    def __str__(self):
        return self.name
class Article(models.Model):
    # Covers FK, datetime and two many-to-many relations in serialization.
    author = models.ForeignKey(Author, models.CASCADE)
    headline = models.CharField(max_length=50)
    pub_date = models.DateTimeField()
    categories = models.ManyToManyField(Category)
    meta_data = models.ManyToManyField(CategoryMetaData)

    class Meta:
        ordering = ('pub_date',)

    def __str__(self):
        return self.headline
class AuthorProfile(models.Model):
    # One-to-one relation used as the primary key.
    author = models.OneToOneField(Author, models.CASCADE, primary_key=True)
    date_of_birth = models.DateField()

    def __str__(self):
        return "Profile of %s" % self.author
class Actor(models.Model):
    # Natural CharField primary key (no auto id).
    name = models.CharField(max_length=20, primary_key=True)

    class Meta:
        ordering = ('name',)

    def __str__(self):
        return self.name
class Movie(models.Model):
    actor = models.ForeignKey(Actor, models.CASCADE)
    title = models.CharField(max_length=50)
    # Decimal default covers serialization of Decimal values.
    price = models.DecimalField(max_digits=6, decimal_places=2, default=Decimal('0.00'))

    class Meta:
        ordering = ('title',)

    def __str__(self):
        return self.title
class Score(models.Model):
    # Bare float field (float round-trip coverage).
    score = models.FloatField()
class Team:
    """Plain (non-model) value object wrapped by TeamField.

    ``__str__`` deliberately raises so tests can verify the custom field
    uses :meth:`to_string` rather than ``str()``.
    """

    def __init__(self, title):
        self.title = title

    def __str__(self):
        raise NotImplementedError("Not so simple")

    def to_string(self):
        return "%s" % self.title
class TeamField(models.CharField):
    # Custom field storing Team objects as their title string.

    def __init__(self):
        super().__init__(max_length=100)

    def get_db_prep_save(self, value, connection):
        # Persisted as the plain title string.
        return str(value.title)

    def to_python(self, value):
        if isinstance(value, Team):
            return value
        return Team(value)

    def from_db_value(self, value, expression, connection):
        return Team(value)

    def value_to_string(self, obj):
        # Serializers use to_string() because Team.__str__ deliberately raises.
        return self.value_from_object(obj).to_string()

    def deconstruct(self):
        name, path, args, kwargs = super().deconstruct()
        # max_length is fixed by __init__, so drop it from the deconstruction.
        del kwargs['max_length']
        return name, path, args, kwargs
class Player(models.Model):
    name = models.CharField(max_length=50)
    rank = models.IntegerField()
    # Uses the custom TeamField defined above in this module.
    team = TeamField()

    def __str__(self):
        return '%s (%d) playing for %s' % (self.name, self.rank, self.team.to_string())
class BaseModel(models.Model):
    # Concrete base for the proxy-model chain below.
    parent_data = models.IntegerField()
class ProxyBaseModel(BaseModel):
    # First-level proxy of BaseModel (same table, no new fields).
    class Meta:
        proxy = True
class ProxyProxyBaseModel(ProxyBaseModel):
    # Proxy of a proxy — two levels removed from the concrete model.
    class Meta:
        proxy = True
class ComplexModel(models.Model):
    # Several same-typed fields for field-subset serialization tests.
    field1 = models.CharField(max_length=10)
    field2 = models.CharField(max_length=10)
    field3 = models.CharField(max_length=10)
import sys
from pdflib_py import *
from math import sin,cos
import Image
import ImageDraw
# Build a 3x3 grayscale mask with six fully-opaque (255) pixels.
img = Image.new("L", (3, 3))
draw = ImageDraw.Draw(img)
draw.point((0,1), fill=255)
draw.point((0,2), fill=255)
draw.point((1,0), fill=255)
draw.point((1,2), fill=255)
draw.point((2,0), fill=255)
draw.point((2,1), fill=255)
img.save("/tmp/mask1.png")

# 16x1 grayscale gradient mask: values 0..240 in steps of 16.
img = Image.new("L", (16, 1))
draw = ImageDraw.Draw(img)
for i in range(16):
    draw.point((i,0), fill=i*16)
img.save("/tmp/mask2.png")

# 3x3 RGB test image with assorted primary/secondary colours.
img = Image.new("RGB", (3, 3))
draw = ImageDraw.Draw(img)
draw.point((0,1), fill=(0,0,0))
draw.point((0,2), fill=(255,0,0))
draw.point((1,0), fill=(0,255,0))
draw.point((1,2), fill=(0,0,255))
draw.point((2,0), fill=(255,255,0))
draw.point((2,1), fill=(0,255,255))
img.save("/tmp/img1.png")

# 16x1 solid green strip.
img = Image.new("RGB", (16, 1))
draw = ImageDraw.Draw(img)
for i in range(16):
    draw.point((i,0), fill=(0,255,0))
img.save("/tmp/img2.png")

# Create the output PDF and start a 400x400 page.
p = PDF_new()
PDF_open_file(p, "transparency.pdf")
font = PDF_load_font(p, "Helvetica", "host", "")
PDF_set_parameter(p, "usercoordinates", "true")
width = 400
height = 400
PDF_begin_page(p, width, height)

# Fill the whole page with black as a background.
PDF_setcolor(p, "fill", "rgb", 0.0,0.0,0.0,1.0)
PDF_moveto(p, 0,0)
PDF_lineto(p, width, 0)
PDF_lineto(p, width, height)
PDF_lineto(p, 0, height)
PDF_lineto(p, 0, 0)
PDF_fill(p)

# White sample text, to be partially covered by the transparent shapes.
PDF_setfont(p, font, 10.0)
PDF_setcolor(p, "fill", "rgb", 1.0,1.0,1.0,1.0)
PDF_set_text_pos(p, 50, 205);PDF_show(p, "abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz")
PDF_set_text_pos(p, 50, 105);PDF_show(p, "abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz")

# Place the RGB images masked by the pixel mask and the gradient mask.
mask = PDF_load_image(p, "png", "/tmp/mask1.png", "mask")
i = PDF_load_image(p, "png", "/tmp/img1.png", "masked "+str(mask))
PDF_place_image(p, i, 100, 300, 20)

mask2 = PDF_load_image(p, "png", "/tmp/mask2.png", "mask")
i2 = PDF_load_image(p, "png", "/tmp/img2.png", "masked "+str(mask2))
PDF_place_image(p, i2, 0, 200, 25)

# Draw a 25%-opacity white rectangle over the lower text sample.
PDF_setcolor(p, "fill", "rgb", 1.0,1.0,1.0,1.0)
gstate = PDF_create_gstate(p, "opacityfill 0.25") # blendmode multiply opacityfill 0.5")
PDF_set_gstate(p, gstate)
PDF_moveto(p, 50, 75)
PDF_lineto(p, 50+300, 75)
PDF_lineto(p, 50+300, 150)
PDF_lineto(p, 50, 150)
PDF_lineto(p, 50, 75)
PDF_fill(p)

# Finish the page and close out the document.
PDF_end_page(p)
PDF_close(p)
PDF_delete(p);
// Copyright 2017 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package adapter
import (
"context"
"maps"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
)
// chanServerStream implements grpc.ServerStream with a chanStream
type chanServerStream struct {
	headerc  chan<- metadata.MD
	trailerc chan<- metadata.MD
	grpc.Stream

	// headers buffers metadata set via SetHeader until SendHeader flushes it.
	headers []metadata.MD
}

// SendHeader merges all buffered headers with md and delivers the combined
// metadata exactly once; subsequent calls return errAlreadySentHeader.
func (ss *chanServerStream) SendHeader(md metadata.MD) error {
	if ss.headerc == nil {
		return errAlreadySentHeader
	}

	// Flatten the buffered header list plus md into one metadata map;
	// later entries overwrite earlier ones key-by-key.
	outmd := make(map[string][]string)
	for _, h := range append(ss.headers, md) {
		maps.Copy(outmd, h)
	}
	select {
	case ss.headerc <- outmd:
		// Mark headers as sent so further SetHeader/SendHeader calls fail.
		ss.headerc = nil
		ss.headers = nil
		return nil
	case <-ss.Context().Done(): //nolint:staticcheck // TODO: remove for a supported version
	}
	return ss.Context().Err() //nolint:staticcheck // TODO: remove for a supported version
}

// SetHeader buffers md to be sent with the eventual SendHeader call.
func (ss *chanServerStream) SetHeader(md metadata.MD) error {
	if ss.headerc == nil {
		return errAlreadySentHeader
	}
	ss.headers = append(ss.headers, md)
	return nil
}

// SetTrailer forwards the trailer metadata to the client side.
func (ss *chanServerStream) SetTrailer(md metadata.MD) {
	ss.trailerc <- md
}
// chanClientStream implements grpc.ClientStream with a chanStream
type chanClientStream struct {
	headerc  <-chan metadata.MD
	trailerc <-chan metadata.MD
	*chanStream
}

// Header blocks until the server sends headers or the stream context ends.
func (cs *chanClientStream) Header() (metadata.MD, error) {
	select {
	case md := <-cs.headerc:
		return md, nil
	case <-cs.Context().Done():
	}
	return nil, cs.Context().Err()
}

// Trailer returns the trailer metadata, or nil when the context finishes first.
func (cs *chanClientStream) Trailer() metadata.MD {
	select {
	case md := <-cs.trailerc:
		return md
	case <-cs.Context().Done():
		return nil
	}
}

// CloseSend closes the send channel, signalling end-of-stream to the server.
func (cs *chanClientStream) CloseSend() error {
	close(cs.chanStream.sendc)
	return nil
}
// chanStream implements grpc.Stream using channels
type chanStream struct {
	recvc  <-chan any
	sendc  chan<- any
	ctx    context.Context
	cancel context.CancelFunc
}

func (s *chanStream) Context() context.Context { return s.ctx }

// SendMsg places m on the send channel. If m is itself an error value it is
// both delivered and returned to the caller.
func (s *chanStream) SendMsg(m any) error {
	select {
	case s.sendc <- m:
		if err, ok := m.(error); ok {
			return err
		}
		return nil
	case <-s.ctx.Done():
	}
	return s.ctx.Err()
}

// RecvMsg receives the next message into *m. Error values received on the
// channel are returned as errors; a closed channel reports codes.Canceled.
func (s *chanStream) RecvMsg(m any) error {
	v := m.(*any)
	for {
		select {
		case msg, ok := <-s.recvc:
			if !ok {
				return status.Error(codes.Canceled, "the client connection is closing")
			}
			if err, ok := msg.(error); ok {
				return err
			}
			*v = msg
			return nil
		case <-s.ctx.Done():
		}
		// Context is done at this point; loop again while messages are
		// still buffered so none are lost.
		if len(s.recvc) == 0 {
			// prioritize any pending recv messages over canceled context
			break
		}
	}
	return s.ctx.Err()
}
// newPipeStream wires a client stream and a server stream together through
// in-memory channels and runs ssHandler for the server side in a goroutine.
// It returns the client half of the pipe.
func newPipeStream(ctx context.Context, ssHandler func(chanServerStream) error) chanClientStream {
	// ch1 is buffered so server can send error on close
	ch1, ch2 := make(chan any, 1), make(chan any)
	headerc, trailerc := make(chan metadata.MD, 1), make(chan metadata.MD, 1)

	cctx, ccancel := context.WithCancel(ctx)
	cli := &chanStream{recvc: ch1, sendc: ch2, ctx: cctx, cancel: ccancel}
	cs := chanClientStream{headerc, trailerc, cli}

	sctx, scancel := context.WithCancel(ctx)
	srv := &chanStream{recvc: ch2, sendc: ch1, ctx: sctx, cancel: scancel}
	ss := chanServerStream{headerc, trailerc, srv, nil}

	go func() {
		// Propagate the handler's error to the client unless either side
		// has already finished; then cancel both contexts.
		if err := ssHandler(ss); err != nil {
			select {
			case srv.sendc <- err:
			case <-sctx.Done():
			case <-cctx.Done():
			}
		}
		scancel()
		ccancel()
	}()

	return cs
}
import datetime
import logging
import logging.handlers
import os
import pymongo
import redis
from redis.sentinel import Sentinel
import server_exceptions
import general_exceptions
import signal
import stoneredis
import sys
import time
import traceback
import utils
import cipher_utils
import serializer_utils
import queue_utils
from optparse import OptionParser
from pymongo.errors import DuplicateKeyError, PyMongoError
from twisted.internet import reactor
from twisted.internet.error import ReactorNotRunning
from twisted.internet.task import LoopingCall
try:
from bson.objectid import ObjectId
from bson.errors import InvalidId
from bson.errors import InvalidDocument
except ImportError:
from pymongo.objectid import ObjectId, InvalidId, InvalidDocument
try:
import cPickle as pickle
except:
import pickle
# LOGGER CONFIG
FACILITY = "local0"  # syslog facility used when syslog logging is enabled
logging.basicConfig()
logger = logging.getLogger()
formatter = logging.Formatter('PID:%(process)s %(filename)s %(funcName)s %(levelname)s %(message)s')
# Module-level state shared between main(), the worker thread (run) and the
# signal/shutdown handlers; populated in main().
rs = None
options = None
args = None
run_stopped = False
class RedongoServer(object):
    def __init__(self, mode, *args, **kwargs):
        """Connect to redis (direct or sentinel), load both disk queues and
        prepare the in-memory bulk buffers.

        mode: 'Redis' or 'Sentinel', as decided by validateArgs().
        """
        def __get_sk__():
            # Fetch (or create and persist) the shared AES secret key used to
            # decrypt stored mongo passwords.
            result = self.redis.get('redongo_sk')
            if not result:
                result = os.urandom(16)
                self.redis.set('redongo_sk', result)
            return result
        logger.info('Starting Redongo Server..')
        self.mode = mode
        self.create_redis_connection()
        self.keep_going = True  # cleared by sigtermHandler to stop run()
        self.redisQueue = options.redisQueue
        self.popSize = int(options.popSize)
        self.redisQueueSize = int(options.redisQueueSize)
        self.bulks = {}  # application name -> pending bulk dict
        self.completed_bulks = set()  # application names ready to flush
        self.objs = []  # raw pickled payloads waiting to be classified
        self.cipher = cipher_utils.AESCipher(__get_sk__())
        disk_queue_load_time = time.time()
        logger.info('Loading disk queues...')
        self.disk_queue = queue_utils.Queue(queue_name=options.diskQueue)
        logger.info('Loading disk queue took {0}'.format(time.time() - disk_queue_load_time))
        ret_disk_queue_load_time = time.time()
        self.returned_disk_queue = queue_utils.Queue(queue_name='{0}_returned'.format(options.diskQueue))
        logger.info('Loading returned disk queue took {0}'.format(time.time() - ret_disk_queue_load_time))
        self.lock_key = '{0}_LOCK'.format(self.redisQueue)
    def create_redis_connection(self):
        """Create self.redis according to self.mode.

        'Redis' connects directly to a single server; any other mode goes
        through sentinel and resolves the current master.
        """
        if self.mode == 'Redis':
            self.redis = stoneredis.StoneRedis(options.redisIP, db=options.redisDB, port=options.redisPort, socket_connect_timeout=5, socket_timeout=5)
            self.redis.connect()
        else:
            SENTINEL_POOL = Sentinel(
                options.sentinelServers,
                socket_timeout=0.1,
                max_connections=1000,
            )
            self.redis = SENTINEL_POOL.master_for(
                options.sentinelName,
                redis_class=stoneredis.StoneRedis,
                socket_timeout=5,
                socket_connect_timeout=5,
            )
def check_object(self, obj):
if type(obj) != list or len(obj) != 2:
raise server_exceptions.ObjectValidationError('Type not valid')
    def get_application_settings(self, application_name):
        # Delegate to the shared helper; run() expects this to raise
        # general_exceptions.ApplicationSettingsError on bad/missing settings
        # (see the except clause in run()).
        return utils.get_application_settings(application_name, self.redis)
    def save_to_failed_queue(self, application_name, bulk):
        """Push the raw (still pickled) payload of every object in the bulk
        onto the <queue>_FAILED redis list for later inspection/replay."""
        i = 0
        for obj, command, original_object in bulk['data']:
            self.redis.rpush('{0}_FAILED'.format(self.redisQueue), original_object)
            i += 1
        logger.warning('Moved {0} objects from application {1} to queue {2}_FAILED'.format(i, application_name, self.redisQueue))
    def run(self):
        """Main worker loop (started via reactor.callInThread).

        Pulls raw pickled payloads from the returned-disk queue (first pass
        only), then from the disk queue or a blocking redis pop; groups them
        into per-application bulks and flushes any bulk that
        check_completed_bulks() flagged. Sets the module-level run_stopped
        flag on exit so sigtermHandler() knows state can be persisted.
        """
        global run_stopped
        first_run = True
        try:
            logger.info('Running!')
            while self.keep_going:
                object_found = False
                # Serialize queue access with check_redis_queue() and other
                # processes via a redis lock.
                lock = self.redis.wait_for_lock(self.lock_key, 60, auto_renewal=True)
                if first_run:
                    # Re-ingest objects pushed back by a previous shutdown.
                    while self.returned_disk_queue._length > 0:
                        self.objs.append(self.returned_disk_queue.pop())
                        object_found = True
                    first_run = False
                    if object_found:
                        logger.info('Got {0} objects from returned disk queue {1}'.format(len(self.objs), self.returned_disk_queue._disk_queue_name))
                if self.disk_queue._length > 0:
                    # Drain overflow previously spilled by check_redis_queue().
                    for i in range(0, self.popSize):
                        if self.disk_queue._length:
                            self.objs.append(self.disk_queue.pop())
                            object_found = True
                            logger.debug('Got {0} objects from disk queue {1}'.format(len(self.objs), self.disk_queue._disk_queue_name))
                        else:
                            break
                else:
                    try:
                        # Blocking pop (bounded by the client socket timeout).
                        self.objs.append(self.redis.blpop(self.redisQueue)[1])
                        logger.debug('Got {0} objects from redis queue {1}'.format(len(self.objs), self.redisQueue))
                        object_found = True
                    except redis.TimeoutError:
                        pass
                    if object_found:
                        # Grab up to popSize-1 more without blocking.
                        self.objs.extend(self.redis.multi_lpop(self.redisQueue, self.popSize-1))
                if lock:
                    self.redis.release_lock(lock)
                if object_found:
                    while self.objs:
                        try:
                            orig_obj = self.objs.pop(0)
                            obj = pickle.loads(orig_obj)
                            try:
                                self.check_object(obj)
                                application_settings = self.get_application_settings(obj[0][0])
                            except (server_exceptions.ObjectValidationError, general_exceptions.ApplicationSettingsError), e:
                                logger.error('Discarding {0} object because of {1}'.format(obj[0], e))
                                continue
                            # obj[0] holds (application_name, serializer, command),
                            # obj[1] the serialized document.
                            application_bulk = self.bulks.setdefault(obj[0][0], {'serializer': obj[0][1], 'data': []})
                            application_bulk.setdefault('inserted_date', datetime.datetime.utcnow())
                            application_bulk.update(application_settings)
                            ser = serializer_utils.serializer(obj[0][1])
                            obj_data = ser.loads(obj[1])
                            application_bulk['data'].append((self.normalize_object(obj_data), obj[0][2], orig_obj))
                        except (ValueError, TypeError, IndexError, ImportError, pickle.PickleError), e:
                            logger.error('Discarding {0} object because of {1}'.format(orig_obj, e))
                            continue
                while self.completed_bulks:
                    self.consume_application(self.completed_bulks.pop())
                # Guarantee that the looping call can access the lock
                time.sleep(.05)
            logger.info('Setting run_stopped to True')
            run_stopped = True
        except:
            logger.error('Stopping redongo because unexpected exception: {0}'.format(traceback.format_exc()))
            logger.info('Setting run_stopped to True')
            run_stopped = True
            stopApp()
    def back_to_disk(self):
        """Persist all in-memory bulk payloads to the returned disk queue so
        nothing is lost across restarts (called during shutdown)."""
        logger.info('Returning memory data to Disk Queue')
        objects_returned = 0
        for application_name, bulk in self.bulks.iteritems():
            for obj, command, original_object in bulk['data']:
                self.returned_disk_queue.push(original_object)
                objects_returned += 1
        logger.info('{0} objects returned to Disk Queue'.format(objects_returned))
def get_mongo_collection(self, bulk):
mongo_client = pymongo.MongoClient('mongodb://{0}:{1}@{2}:{3}/{4}'.format(bulk['mongo_user'], self.cipher.decrypt(bulk['mongo_password']), bulk['mongo_host'], bulk['mongo_port'], bulk['mongo_database']))
mongo_db = mongo_client[bulk['mongo_database']]
collection = mongo_db[bulk['mongo_collection']]
return collection
def normalize_object(self, obj):
objectid_fields = obj.pop('objectid_fields', [])
for f in objectid_fields:
if obj.get(f, None):
try:
obj[f] = ObjectId(obj[f])
except InvalidId:
pass
except TypeError:
pass
return obj
    def deal_with_mongo(self, application_name):
        """Flush the pending bulk for application_name to MongoDB.

        Consecutive objects sharing the same command ('save' or 'add') are
        executed together; any objects the backend rejects end up on the
        <queue>_FAILED redis list via save_to_failed_queue().
        """
        bulk = self.bulks[application_name]
        set_of_objects = []
        to_failed = []
        result = None
        try:
            collection = self.get_mongo_collection(bulk)
        except (PyMongoError, InvalidDocument), e:
            # Cannot even connect: fail the whole bulk.
            logger.error('Not saving bulk {0} (moving to failed queue) from application {1} due to connection bad data: {2}'.format(bulk, application_name, e))
            self.save_to_failed_queue(application_name, bulk)
            return
        # Separates objects with different commands. When appears any object with other command, executes current command for all readed objects
        current_command = bulk['data'][0][1]
        while bulk['data']:
            obj, command, original_object = bulk['data'].pop(0)
            if command == current_command:
                set_of_objects.append((obj, command, original_object))
            else:
                # Execute command for all readed objects
                if current_command == 'save':
                    result = self.save_to_mongo(collection, set_of_objects)
                elif current_command == 'add':
                    result = self.add_in_mongo(collection, set_of_objects)
                # Notify on failure
                if result:
                    logger.error('Not saving {0} objects (moving to failed queue) from application {1} due to connection bad data'.format(len(result), application_name))
                    to_failed += result
                current_command = command
                set_of_objects = [(obj, command, original_object)]
        # Last set
        if current_command == 'save':
            result = self.save_to_mongo(collection, set_of_objects)
        elif current_command == 'add':
            result = self.add_in_mongo(collection, set_of_objects)
        # Notify on failure
        if result:
            logger.error('Not saving {0} objects (moving to failed queue) from application {1} due to connection bad data'.format(len(result), application_name))
            to_failed += result
        # If an error occurred, it notifies and inserts the required objects
        if to_failed:
            bulk['data'] = to_failed
            self.save_to_failed_queue(application_name, bulk)
def save_to_mongo(self, collection, objs):
to_insert = []
to_update = []
to_failed = []
differents = set()
while objs:
full_object = objs.pop(0)
if '_id' not in full_object[0]:
to_insert.append(full_object)
elif full_object[0]['_id'] not in differents:
to_insert.append(full_object)
differents.add(full_object[0]['_id'])
else:
to_update.append(full_object)
# Bulk insert
try:
collection.insert(map(lambda x: x[0], to_insert))
except DuplicateKeyError:
to_update = to_insert + to_update
except (PyMongoError, InvalidDocument):
to_failed.extend(to_insert)
# One-to-one update
while to_update:
full_obj = to_update.pop(0)
try:
collection.update({'_id': full_obj[0]['_id']}, full_obj[0])
except (PyMongoError, InvalidDocument):
to_failed.extend(to_update)
# Return unsaved objects
return to_failed
def create_add_query(self, obj, previous_field='', query={}):
for field, value in obj.iteritems():
if field == '_id':
continue
type_field = type(value)
# Numeric and logical fields perform an addition (Complex number are not supported by mongo)
if type_field is int or type_field is long or type_field is float or type_field is bool: # type_field is complex:
x = query.setdefault('$inc', {})
x[previous_field + field] = value
# String fields perform a set
elif type_field is str:
x = query.setdefault('$set', {})
x[previous_field + field] = value
# List fields perform a concatenation
elif type_field is list:
x = query.setdefault('$push', {})
x[previous_field + field] = {'$each': value}
# Dict fields will be treated as the original object
elif type_field is dict:
query = self.create_add_query(value, '{0}{1}.'.format(previous_field, field), query)
else:
query.setdefault('$set', {(previous_field + field): value})
return query
def add_in_mongo(self, collection, objs):
to_failed = []
# One-to-one update
while objs:
full_object = objs.pop(0)
obj = full_object[0]
try:
collection.update({'_id': obj['_id']}, self.create_add_query(obj), upsert=True)
except (PyMongoError, InvalidDocument):
to_failed.append(full_object)
# Return unadded objects and info
return to_failed
    def consume_application(self, application_name):
        """Flush and drop the bulk for application_name, if still present."""
        # In case that check_completed_bulks reads while main thread was saving on previous iteration
        if application_name in self.bulks:
            self.deal_with_mongo(application_name)
            self.bulks.pop(application_name)
def check_completed_bulks(self):
try:
for application_name, bulk in self.bulks.items():
if len(bulk['data']) >= bulk['bulk_size'] or bulk['inserted_date'] + datetime.timedelta(seconds=bulk['bulk_expiration']) <= datetime.datetime.utcnow():
self.completed_bulks.add(application_name)
except:
stopApp()
def check_redis_queue(self):
try:
if self.redis.llen(self.redisQueue) > self.redisQueueSize or self.disk_queue._length > 0:
to_disk_queue = []
object_found = False
lock = self.redis.wait_for_lock(self.lock_key, 60, auto_renewal=True)
while self.redis.llen(self.redisQueue) > self.redisQueueSize:
try:
to_disk_queue.append(self.redis.blpop(self.redisQueue)[1])
object_found = True
except redis.TimeoutError:
pass
if object_found:
to_disk_queue.extend(self.redis.multi_lpop(self.redisQueue, self.popSize-1))
self.save_to_disk_queue(to_disk_queue)
self.redis.release_lock(lock)
except redis.TimeoutError:
pass
def save_to_disk_queue(self, objs):
while objs:
obj = objs.pop(0)
self.disk_queue.push(obj)
    def close_disk_queues(self):
        """Close both disk queues, logging (but never propagating) failures
        so shutdown can proceed."""
        try:
            self.disk_queue.close()
        except:
            logger.error('Could not close disk queue {0}: {1}'.format(self.disk_queue._disk_queue_name, traceback.format_exc()))
        try:
            self.returned_disk_queue.close()
        except:
            logger.error('Could not close disk queue {0}: {1}'.format(self.returned_disk_queue._disk_queue_name, traceback.format_exc()))
def sigtermHandler():
    """Reactor 'before shutdown' hook: stop the worker loop, wait for it to
    acknowledge via run_stopped, then persist in-memory bulks to disk."""
    global rs
    global run_stopped
    rs.keep_going = False
    logger.info('Waiting for run_stopped')
    # run() (or stopApp when the reactor never started) sets run_stopped.
    while not run_stopped:
        time.sleep(0.1)
    rs.back_to_disk()
    rs.close_disk_queues()
    logger.info('Exiting program!')
def stopApp():
    """Stop the twisted reactor; if it is not running, mark the worker loop
    as stopped so sigtermHandler() does not wait forever."""
    global run_stopped
    logger.info('Stopping app')
    try:
        reactor.stop()
    except ReactorNotRunning:
        run_stopped = True
def closeApp(signum, frame):
    """Signal handler (HUP/TERM/INT/ALRM): trigger an orderly shutdown."""
    logger.info('Received signal {0}'.format(signum))
    stopApp()
def validate(parser, options, required_options, silent=True):
    """Check that every parser option whose metavar appears in
    required_options carries a truthy value on the parsed options object.

    Returns True when all required options are set; False otherwise (the
    first missing one is logged unless silent).
    """
    for option in parser.option_list:
        if option.__dict__['metavar'] not in required_options:
            continue
        if not getattr(options, option.dest):
            if not silent:
                logger.error('Option {0} not found'.format(option.metavar))
            return False
    return True
def validateRedisClient(parser, options):
    """Return True when the options needed for direct-redis mode are set."""
    required_options = ['REDIS', 'REDIS_DB']
    return validate(parser, options, required_options, silent=False)
def validateSentinelClient(parser, options):
    """Return True when the options needed for sentinel mode are set."""
    required_options = ['SENTINEL_SERVERS', 'SENTINEL_NAME']
    return validate(parser, options, required_options, silent=False)
def validateArgs(parser, options):
    """Decide the redis connection mode from the CLI options.

    Returns 'Redis' or 'Sentinel'; exits the process when neither mode is
    fully configured, or when the redis queue name is missing.
    """
    if validateRedisClient(parser, options):
        mode = 'Redis'
    elif validateSentinelClient(parser, options):
        mode = 'Sentinel'
    else:
        logger.error('Parameters for Redis connection not valid!\n\tUse -r HOST -d DB for Standard Redis mode\n\tUse -n GROUP NAME -S host1 port1 -S host2 port2 .. -S hostN:portN for Sentinel mode')
        sys.exit(-1)
    required_options = ['REDIS_QUEUE']
    if not validate(parser, options, required_options, silent=False):
        sys.exit(-1)
    return mode
def main():
    """Parse CLI options, configure logging and signal handlers, then start
    the reactor with the worker thread and two periodic housekeeping tasks
    (bulk completion check and redis-queue overflow spilling)."""
    global rs
    global options
    global args
    global logger
    parser = OptionParser(description='Startup options')
    parser.add_option('--redis', '-r', dest='redisIP', help='Redis server IP Address', metavar='REDIS')
    parser.add_option('--redisdb', '-d', dest='redisDB', help='Redis server DB', metavar='REDIS_DB')
    parser.add_option('--redisqueue', '-q', dest='redisQueue', help='Redis Queue', metavar='REDIS_QUEUE')
    parser.add_option('--popsize', '-p', dest='popSize', help='Redis Pop Size', metavar='REDIS_POP_SIZE', default=1000)
    parser.add_option('--port', '-P', dest='redisPort', help='Redis Port', metavar='REDIS_PORT', default=6379)
    parser.add_option('--sentinelservers', '-S', dest='sentinelServers', help='Sentinel Servers (-S host1 port1 -S host2 port2 .. -S hostN portN)', metavar='SENTINEL_SERVERS', action='append', nargs=2)
    parser.add_option('--sentinelname', '-n', dest='sentinelName', help='Sentinel Group Name', metavar='SENTINEL_NAME')
    parser.add_option('--queuesize', '-s', dest='redisQueueSize', help='Max Redis Queue Size', metavar='REDIS_QUEUE_SIZE', default=10000)
    parser.add_option('--diskqueue', '-Q', dest='diskQueue', help='Disk Queue', metavar='DISK_QUEUE', default='redongo_disk_queue')
    parser.add_option('--logger', '-L', dest='logger', help='Logger Usage', metavar='LOGGER_USAGE', default='1')
    parser.add_option('--log', '-l', dest='logLevel', help='Logger Level', metavar='LOG_LEVEL', default='debug')
    (options, args) = parser.parse_args()
    # NOTE(review): the getattr fallback is the *string* 'DEBUG', not
    # logging.DEBUG; setLevel accepts level names, so this works — verify.
    logger.setLevel(getattr(logging, options.logLevel.upper(), 'DEBUG'))
    mode = validateArgs(parser, options)
    # With this line the logs are sent to syslog.
    if options.logger != '0':
        handler = logging.handlers.SysLogHandler("/dev/log", FACILITY)
        handler.setFormatter(formatter)
        logger.addHandler(handler)
    signal.signal(signal.SIGHUP, closeApp)
    signal.signal(signal.SIGTERM, closeApp)
    signal.signal(signal.SIGINT, closeApp)
    signal.signal(signal.SIGALRM, closeApp)
    # Handler for SIGTERM
    reactor.addSystemEventTrigger('before', 'shutdown', sigtermHandler)
    rs = RedongoServer(mode)
    lc = LoopingCall(rs.check_completed_bulks)
    lc.start(1, now=False)
    lc_redis_queue = LoopingCall(rs.check_redis_queue)
    lc_redis_queue.start(1, now=False)
    # Run the blocking worker loop off the reactor thread.
    reactor.callInThread(rs.run)
    # Start the reactor
    reactor.run(installSignalHandlers=False)
if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        # Propagate Ctrl-C as a distinct exit code.
        sys.exit(25)
#!/usr/bin/env python2
# vim: expandtab:tabstop=4:shiftwidth=4
#
# Copyright 2015 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#This is not a module, but pylint thinks it is. This is a command.
#pylint: disable=invalid-name, import-error
"""This is a script the processes heartbeat metrics
"""
import logging
from logging.handlers import RotatingFileHandler
import socket
import yaml
from openshift_tools.monitoring.zabbix_metric_processor import ZabbixSender, ZabbixMetricProcessor
from openshift_tools.monitoring.metricmanager import MetricManager
from openshift_tools.ansible.simplezabbix import SimpleZabbix
def process_heartbeats(target):
    """Send heartbeats to the target.

    Args:
        target: the config file portion for this specific target.
    Returns:
        A list of errors (currently always empty).
    """
    mm = MetricManager(target['name'])
    zbxapi = SimpleZabbix(
        url=target['api_url'],
        user=target['api_user'],
        password=target['api_password'],
    )
    zbxsender = ZabbixSender(target['trapper_server'], target['trapper_port'])
    hostname = socket.gethostname()
    zmp = ZabbixMetricProcessor(mm, zbxapi, zbxsender, hostname, verbose=True)
    zmp.process_hb_metrics()
    return []
def process_targets(target):
    """Process one configured target based on its type; only 'zabbix'
    targets are supported for direct heartbeat processing."""
    logger.info("Sending heartbeats to target [%s]", target['name'])
    # We only process heartbeats directly against zabbix targets
    if target['type'] == 'zabbix':
        errors = process_heartbeats(target)
        # BUG FIX: 'errors' is a list; 'errors > 0' compared a list to an int
        # (always True on Python 2, TypeError on Python 3). Test truthiness.
        if errors:
            logger.error('Results: %s errors occurred.', len(errors))
    else:
        logger.error("Error: Target Type Not Supported: %s", target['type'])
        # TODO: add zabbix item and trigger for tracking this failure
if __name__ == "__main__":
    # Root logger: INFO and above, to a rotating 2 MB x 5 log file.
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    logFormatter = logging.Formatter('%(asctime)s %(levelname)s %(name)s %(message)s')
    logFile = '/var/log/ops-zagg-heartbeat-processor.log'
    logRFH = RotatingFileHandler(logFile, mode='a', maxBytes=2*1024*1024, backupCount=5, delay=0)
    logRFH.setFormatter(logFormatter)
    logRFH.setLevel(logging.INFO)
    logger.addHandler(logRFH)
    logger.info('Starting ops-zagg-heartbeat-processor...')
    # NOTE(review): file() is Python-2 only, and yaml.load without an explicit
    # Loader is deprecated/unsafe on modern PyYAML — worth migrating.
    CONFIG = yaml.load(file('/etc/openshift_tools/zagg_server.yaml'))
    TARGETS = CONFIG['targets']
    for a_target in TARGETS:
        process_targets(a_target)
# -*- coding: utf-8 -*-
"""
sphinx.writers.texinfo
~~~~~~~~~~~~~~~~~~~~~~
Custom docutils writer for Texinfo.
:copyright: Copyright 2007-2014 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
import string
import textwrap
from os import path
from docutils import nodes, writers
from sphinx import addnodes, __version__
from sphinx.locale import admonitionlabels, _
from sphinx.util import ustrftime
from sphinx.writers.latex import collected_footnote
# Boilerplate substituted into TEMPLATE's @copying block and title page.
COPYING = """\
@quotation
%(project)s %(release)s, %(date)s
%(author)s
Copyright @copyright{} %(copyright)s
@end quotation
"""

# Skeleton of the generated .texi file; %-substituted in finish() with the
# 'elements' dict built by init_settings().
TEMPLATE = """\
\\input texinfo @c -*-texinfo-*-
@c %%**start of header
@setfilename %(filename)s
@documentencoding UTF-8
@ifinfo
@*Generated by Sphinx """ + __version__ + """.@*
@end ifinfo
@settitle %(title)s
@defindex ge
@paragraphindent %(paragraphindent)s
@exampleindent %(exampleindent)s
@finalout
%(direntry)s
@definfoenclose strong,`,'
@definfoenclose emph,`,'
@c %%**end of header
@copying
%(copying)s
@end copying
@titlepage
@title %(title)s
@insertcopying
@end titlepage
@contents
@c %%** start of user preamble
%(preamble)s
@c %%** end of user preamble
@ifnottex
@node Top
@top %(title)s
@insertcopying
@end ifnottex
@c %%**start of body
%(body)s
@c %%**end of body
@bye
"""
def find_subsections(section):
    """Return a list of subsections for the given ``section``."""
    subsections = []
    for child in section.children:
        if isinstance(child, nodes.section):
            subsections.append(child)
        else:
            # Non-section children may still contain sections deeper down.
            subsections.extend(find_subsections(child))
    return subsections
def smart_capwords(s, sep=None):
    """Like string.capwords() but does not capitalize words that already
    contain a capital letter.

    BUG FIX: the previous check ``all(x.islower() for x in word)`` also left
    alone words containing digits or punctuation but no capital (e.g.
    "don't", "foo-bar"), contrary to the documented contract. A word is now
    skipped only when it actually contains an uppercase character.
    """
    words = s.split(sep)
    for i, word in enumerate(words):
        if not any(x.isupper() for x in word):
            words[i] = word.capitalize()
    return (sep or ' ').join(words)
class TexinfoWriter(writers.Writer):
    """Texinfo writer for generating Texinfo documents."""
    supported = ('texinfo', 'texi')
    # Extra docutils settings (exposed as command-line/builder options).
    settings_spec = (
        'Texinfo Specific Options', None, (
            ("Name of the Info file", ['--texinfo-filename'], {'default': ''}),
            ('Dir entry', ['--texinfo-dir-entry'], {'default': ''}),
            ('Description', ['--texinfo-dir-description'], {'default': ''}),
            ('Category', ['--texinfo-dir-category'], {'default':
                                                      'Miscellaneous'})))
    settings_defaults = {}
    output = None
    # Attributes copied off the translator after translate().
    visitor_attributes = ('output', 'fragment')

    def __init__(self, builder):
        writers.Writer.__init__(self)
        self.builder = builder

    def translate(self):
        """Walk the document with a TexinfoTranslator and collect output."""
        self.visitor = visitor = TexinfoTranslator(self.document, self.builder)
        self.document.walkabout(visitor)
        visitor.finish()
        for attr in self.visitor_attributes:
            setattr(self, attr, getattr(visitor, attr))
class TexinfoTranslator(nodes.NodeVisitor):
    # NOTE(review): declared here; consulted by image-handling visitors that
    # lie outside this excerpt.
    ignore_missing_images = False
    # Baseline substitutions for TEMPLATE; refined in init_settings().
    default_elements = {
        'author': '',
        'body': '',
        'copying': '',
        'date': '',
        'direntry': '',
        'exampleindent': 4,
        'filename': '',
        'paragraphindent': 0,
        'preamble': '',
        'project': '',
        'release': '',
        'title': '',
    }
    def __init__(self, document, builder):
        nodes.NodeVisitor.__init__(self, document)
        self.builder = builder
        self.init_settings()
        self.written_ids = set()     # node names and anchors in output
        self.referenced_ids = set()  # node names and anchors that should
                                     # be in output
        self.indices = []     # (node name, content)
        self.short_ids = {}   # anchors --> short ids
        self.node_names = {}  # node name --> node's name to display
        self.node_menus = {}  # node name --> node's menu entries
        self.rellinks = {}    # node name --> (next, previous, up)
        # Pre-compute node names/menus/links before the document walk starts.
        self.collect_indices()
        self.collect_node_names()
        self.collect_node_menus()
        self.collect_rellinks()
        self.body = []  # output chunks, joined in finish()
        self.context = []
        self.previous_section = None
        self.section_level = 0
        self.seen_title = False
        self.next_section_ids = set()
        # Counters toggled by enclosing constructs; honored in visit_Text.
        self.escape_newlines = 0
        self.escape_hyphens = 0
        self.curfilestack = []
        self.footnotestack = []
        self.in_footnote = 0
        self.handled_abbrs = set()
    def finish(self):
        """Emit the trailing document parts (Top menu if no sections were
        seen, index nodes, placeholder anchors) and assemble the output."""
        if self.previous_section is None:
            self.add_menu('Top')
        for index in self.indices:
            name, content = index
            pointers = tuple([name] + self.rellinks[name])
            self.body.append('\n@node %s,%s,%s,%s\n' % pointers)
            self.body.append('@unnumbered %s\n\n%s\n' % (name, content))
        while self.referenced_ids:
            # handle xrefs with missing anchors
            r = self.referenced_ids.pop()
            if r not in self.written_ids:
                self.body.append('@anchor{%s}@w{%s}\n' % (r, ' ' * 30))
        self.ensure_eol()
        self.fragment = ''.join(self.body)
        self.elements['body'] = self.fragment
        self.output = TEMPLATE % self.elements
    ## Helper routines

    def init_settings(self):
        """Populate self.elements (the TEMPLATE substitutions) from the
        docutils settings and the Sphinx builder configuration."""
        settings = self.settings = self.document.settings
        elements = self.elements = self.default_elements.copy()
        elements.update({
            # if empty, the title is set to the first section title
            'title': settings.title,
            'author': settings.author,
            # if empty, use basename of input file
            'filename': settings.texinfo_filename,
            'release': self.escape(self.builder.config.release),
            'project': self.escape(self.builder.config.project),
            'copyright': self.escape(self.builder.config.copyright),
            'date': self.escape(self.builder.config.today or
                                ustrftime(self.builder.config.today_fmt
                                          or _('%B %d, %Y')))
        })
        # title
        title = elements['title']
        if not title:
            title = self.document.next_node(nodes.title)
            title = (title and title.astext()) or '<untitled>'
        elements['title'] = self.escape_id(title) or '<untitled>'
        # filename
        if not elements['filename']:
            elements['filename'] = self.document.get('source') or 'untitled'
            if elements['filename'][-4:] in ('.txt', '.rst'):
                elements['filename'] = elements['filename'][:-4]
            elements['filename'] += '.info'
        # direntry
        if settings.texinfo_dir_entry:
            entry = self.format_menu_entry(
                self.escape_menu(settings.texinfo_dir_entry),
                '(%s)' % elements['filename'],
                self.escape_arg(settings.texinfo_dir_description))
            elements['direntry'] = ('@dircategory %s\n'
                                    '@direntry\n'
                                    '%s'
                                    '@end direntry\n') % (
                self.escape_id(settings.texinfo_dir_category), entry)
        elements['copying'] = COPYING % elements
        # allow the user to override them all
        elements.update(settings.texinfo_elements)
    def collect_node_names(self):
        """Generates a unique id for each section.
        Assigns the attribute ``node_name`` to each section."""
        def add_node_name(name):
            # Disambiguate clashing names with a '<n>' suffix.
            node_id = self.escape_id(name)
            nth, suffix = 1, ''
            while node_id + suffix in self.written_ids or \
                    node_id + suffix in self.node_names:
                nth += 1
                suffix = '<%s>' % nth
            node_id += suffix
            self.written_ids.add(node_id)
            self.node_names[node_id] = name
            return node_id
        # must have a "Top" node
        self.document['node_name'] = 'Top'
        add_node_name('Top')
        add_node_name('top')
        # each index is a node
        self.indices = [(add_node_name(name), content)
                        for name, content in self.indices]
        # each section is also a node
        for section in self.document.traverse(nodes.section):
            title = section.next_node(nodes.Titular)
            name = (title and title.astext()) or '<untitled>'
            section['node_name'] = add_node_name(name)
    def collect_node_menus(self):
        """Collect the menu entries for each "node" section."""
        node_menus = self.node_menus
        for node in ([self.document] +
                     self.document.traverse(nodes.section)):
            assert 'node_name' in node and node['node_name']
            entries = [s['node_name'] for s in find_subsections(node)]
            node_menus[node['node_name']] = entries
        # try to find a suitable "Top" node
        title = self.document.next_node(nodes.title)
        top = (title and title.parent) or self.document
        if not isinstance(top, (nodes.document, nodes.section)):
            top = self.document
        if top is not self.document:
            # Merge the found top section's menu into the Top node's menu.
            entries = node_menus[top['node_name']]
            entries += node_menus['Top'][1:]
            node_menus['Top'] = entries
            del node_menus[top['node_name']]
            top['node_name'] = 'Top'
        # handle the indices
        for name, content in self.indices:
            node_menus[name] = ()
            node_menus['Top'].append(name)
    def collect_rellinks(self):
        """Collect the relative links (next, previous, up) for each "node"."""
        rellinks = self.rellinks
        node_menus = self.node_menus
        for id, entries in node_menus.items():
            rellinks[id] = ['', '', '']
        # up's
        for id, entries in node_menus.items():
            for e in entries:
                rellinks[e][2] = id
        # next's and prev's
        for id, entries in node_menus.items():
            # note: the inner loop rebinds 'id' to each child entry
            for i, id in enumerate(entries):
                # First child's prev is empty
                if i != 0:
                    rellinks[id][1] = entries[i-1]
                # Last child's next is empty
                if i != len(entries) - 1:
                    rellinks[id][0] = entries[i+1]
        # top's next is its first child
        try:
            first = node_menus['Top'][0]
        except IndexError:
            pass
        else:
            rellinks['Top'][0] = first
            rellinks[first][1] = 'Top'
## Escaping
# Which characters to escape depends on the context. In some cases,
# namely menus and node names, it's not possible to escape certain
# characters.
def escape(self, s):
"""Return a string with Texinfo command characters escaped."""
s = s.replace('@', '@@')
s = s.replace('{', '@{')
s = s.replace('}', '@}')
# prevent `` and '' quote conversion
s = s.replace('``', "`@w{`}")
s = s.replace("''", "'@w{'}")
return s
def escape_arg(self, s):
"""Return an escaped string suitable for use as an argument
to a Texinfo command."""
s = self.escape(s)
# commas are the argument delimeters
s = s.replace(',', '@comma{}')
# normalize white space
s = ' '.join(s.split()).strip()
return s
def escape_id(self, s):
"""Return an escaped string suitable for node names and anchors."""
bad_chars = ',:.()'
for bc in bad_chars:
s = s.replace(bc, ' ')
s = ' '.join(s.split()).strip()
return self.escape(s)
def escape_menu(self, s):
"""Return an escaped string suitable for menu entries."""
s = self.escape_arg(s)
s = s.replace(':', ';')
s = ' '.join(s.split()).strip()
return s
def ensure_eol(self):
"""Ensure the last line in body is terminated by new line."""
if self.body and self.body[-1][-1:] != '\n':
self.body.append('\n')
    def format_menu_entry(self, name, node_name, desc):
        """Return one formatted '* name: node. description' menu line with
        the description wrapped and column-aligned."""
        if name == node_name:
            s = '* %s:: ' % (name,)
        else:
            s = '* %s: %s. ' % (name, node_name)
        # NOTE(review): the '% 78' makes the offset wrap around for very long
        # names; looks odd but is longstanding behavior — confirm before
        # changing.
        offset = max((24, (len(name) + 4) % 78))
        wdesc = '\n'.join(' ' * offset + l for l in
                          textwrap.wrap(desc, width=78-offset))
        return s + wdesc.strip() + '\n'
    def add_menu_entries(self, entries, reg=re.compile(r'\s+---?\s+')):
        """Append one formatted menu line per node name in ``entries``."""
        for entry in entries:
            name = self.node_names[entry]
            # special formatting for entries that are divided by an em-dash
            try:
                parts = reg.split(name, 1)
            except TypeError:
                # could be a gettext proxy
                parts = [name]
            if len(parts) == 2:
                name, desc = parts
            else:
                desc = ''
            name = self.escape_menu(name)
            desc = self.escape(desc)
            self.body.append(self.format_menu_entry(name, entry, desc))
def add_menu(self, node_name):
entries = self.node_menus[node_name]
if not entries:
return
self.body.append('\n@menu\n')
self.add_menu_entries(entries)
if (node_name != 'Top' or
not self.node_menus[entries[0]] or
self.builder.config.texinfo_no_detailmenu):
self.body.append('\n@end menu\n')
return
def _add_detailed_menu(name):
entries = self.node_menus[name]
if not entries:
return
self.body.append('\n%s\n\n' % (self.escape(self.node_names[name],)))
self.add_menu_entries(entries)
for subentry in entries:
_add_detailed_menu(subentry)
self.body.append('\n@detailmenu\n'
' --- The Detailed Node Listing ---\n')
for entry in entries:
_add_detailed_menu(entry)
self.body.append('\n@end detailmenu\n'
'@end menu\n')
def tex_image_length(self, width_str):
match = re.match('(\d*\.?\d*)\s*(\S*)', width_str)
if not match:
# fallback
return width_str
res = width_str
amount, unit = match.groups()[:2]
if not unit or unit == "px":
# pixels: let TeX alone
return ''
elif unit == "%":
# a4paper: textwidth=418.25368pt
res = "%d.0pt" % (float(amount) * 4.1825368)
return res
    def collect_indices(self):
        """Build (name, menu content) pairs for the configured domain indices
        plus the general index; stored in self.indices, emitted in finish()."""
        def generate(content, collapsed):
            # Render one index structure as a Texinfo menu. 'collapsed' is
            # accepted for interface parity with other writers but unused.
            ret = ['\n@menu\n']
            for letter, entries in content:
                for entry in entries:
                    if not entry[3]:
                        # entries without an anchor cannot be linked
                        continue
                    name = self.escape_menu(entry[0])
                    sid = self.get_short_id('%s:%s' % (entry[2], entry[3]))
                    desc = self.escape_arg(entry[6])
                    me = self.format_menu_entry(name, sid, desc)
                    ret.append(me)
            ret.append('@end menu\n')
            return ''.join(ret)
        indices_config = self.builder.config.texinfo_domain_indices
        if indices_config:
            for domain in self.builder.env.domains.itervalues():
                for indexcls in domain.indices:
                    indexname = '%s-%s' % (domain.name, indexcls.name)
                    if isinstance(indices_config, list):
                        # a list config acts as a whitelist of index names
                        if indexname not in indices_config:
                            continue
                    content, collapsed = indexcls(domain).generate(
                        self.builder.docnames)
                    if not content:
                        continue
                    self.indices.append((indexcls.localname,
                                         generate(content, collapsed)))
        # only add the main Index if it's not empty
        for docname in self.builder.docnames:
            if self.builder.env.indexentries[docname]:
                self.indices.append((_('Index'), '\n@printindex ge\n'))
                break
    # this is copied from the latex writer
    # TODO: move this to sphinx.util
    def collect_footnotes(self, node):
        """Map footnote label -> [collected_footnote, used-flag] for every
        footnote under ``node``, without descending into included files."""
        fnotes = {}
        def footnotes_under(n):
            if isinstance(n, nodes.footnote):
                yield n
            else:
                for c in n.children:
                    if isinstance(c, addnodes.start_of_file):
                        continue
                    for k in footnotes_under(c):
                        yield k
        for fn in footnotes_under(node):
            num = fn.children[0].astext().strip()
            fnotes[num] = [collected_footnote(*fn.children), False]
        return fnotes
## xref handling
def get_short_id(self, id):
    """Return (allocating on first use) the short alias for ``id``.

    Aliases are hex-encoded sequence numbers; shorter ids improve
    paragraph filling in places where the id is hidden by Emacs.
    """
    if id not in self.short_ids:
        self.short_ids[id] = hex(len(self.short_ids))[2:]
    return self.short_ids[id]
def add_anchor(self, id, node):
    # Emit @anchor commands for both the escaped long id and its short
    # alias, each at most once per document; 'index-*' ids are skipped
    # (they are rendered via @geindex instead).
    if id.startswith('index-'):
        return
    id = self.curfilestack[-1] + ':' + id
    eid = self.escape_id(id)
    sid = self.get_short_id(id)
    for id in (eid, sid):
        if id not in self.written_ids:
            self.body.append('@anchor{%s}' % id)
            self.written_ids.add(id)
def add_xref(self, id, name, node):
    # Emit an @ref to the short alias of 'id' and record both id forms
    # as referenced (used later to prune unreferenced anchors).
    name = self.escape_menu(name)
    sid = self.get_short_id(id)
    self.body.append('@ref{%s,,%s}' % (sid, name))
    self.referenced_ids.add(sid)
    self.referenced_ids.add(self.escape_id(id))
## Visiting

def visit_document(self, node):
    # Push per-document footnote map and docname; anchor the document
    # itself under the ':doc' label.
    self.footnotestack.append(self.collect_footnotes(node))
    self.curfilestack.append(node.get('docname', ''))
    if 'docname' in node:
        self.add_anchor(':doc', node)

def depart_document(self, node):
    self.footnotestack.pop()
    self.curfilestack.pop()

def visit_Text(self, node):
    s = self.escape(node.astext())
    if self.escape_newlines:
        s = s.replace('\n', ' ')
    if self.escape_hyphens:
        # prevent "--" and "---" conversion
        s = s.replace('-', '@w{-}')
    self.body.append(s)

def depart_Text(self, node):
    pass

def visit_section(self, node):
    # Defer ids until the @node line is written; emit the pending menu of
    # the previous sibling (or of 'Top' for the first section).
    self.next_section_ids.update(node.get('ids', []))
    if not self.seen_title:
        return
    if self.previous_section:
        self.add_menu(self.previous_section['node_name'])
    else:
        self.add_menu('Top')
    node_name = node['node_name']
    # pointers: node name plus next/previous/up relations
    pointers = tuple([node_name] + self.rellinks[node_name])
    self.body.append('\n@node %s,%s,%s,%s\n' % pointers)
    for id in self.next_section_ids:
        self.add_anchor(id, node)
    self.next_section_ids.clear()
    self.previous_section = node
    self.section_level += 1

def depart_section(self, node):
    self.section_level -= 1

# sectioning commands by nesting depth (deepest reused when exceeded)
headings = (
    '@unnumbered',
    '@chapter',
    '@section',
    '@subsection',
    '@subsubsection',
)
rubrics = (
    '@heading',
    '@subheading',
    '@subsubheading',
)

def visit_title(self, node):
    # The very first title is the document title, handled elsewhere.
    if not self.seen_title:
        self.seen_title = 1
        raise nodes.SkipNode
    parent = node.parent
    if isinstance(parent, nodes.table):
        return
    if isinstance(parent, (nodes.Admonition, nodes.sidebar, nodes.topic)):
        raise nodes.SkipNode
    elif not isinstance(parent, nodes.section):
        self.builder.warn(
            'encountered title node not in section, topic, table, '
            'admonition or sidebar', (self.curfilestack[-1], node.line))
        self.visit_rubric(node)
    else:
        try:
            heading = self.headings[self.section_level]
        except IndexError:
            heading = self.headings[-1]
        self.body.append('\n%s ' % heading)

def depart_title(self, node):
    self.body.append('\n\n')

def visit_rubric(self, node):
    # The auto-generated "Footnotes" rubric is dropped entirely.
    if len(node.children) == 1 and node.children[0].astext() in \
            ('Footnotes', _('Footnotes')):
        raise nodes.SkipNode
    try:
        rubric = self.rubrics[self.section_level]
    except IndexError:
        rubric = self.rubrics[-1]
    self.body.append('\n%s ' % rubric)

def depart_rubric(self, node):
    self.body.append('\n\n')

def visit_subtitle(self, node):
    self.body.append('\n\n@noindent\n')

def depart_subtitle(self, node):
    self.body.append('\n\n')
## References

def visit_target(self, node):
    # postpone the labels until after the sectioning command
    parindex = node.parent.index(node)
    try:
        try:
            next = node.parent[parindex+1]
        except IndexError:
            # last node in parent, look at next after parent
            # (for section of equal level)
            next = node.parent.parent[node.parent.parent.index(node.parent)]
        if isinstance(next, nodes.section):
            if node.get('refid'):
                self.next_section_ids.add(node['refid'])
            self.next_section_ids.update(node['ids'])
            return
    except IndexError:
        pass
    if 'refuri' in node:
        return
    if node.get('refid'):
        self.add_anchor(node['refid'], node)
    for id in node['ids']:
        self.add_anchor(id, node)

def depart_target(self, node):
    pass

def visit_reference(self, node):
    # Dispatch on the URI scheme: mailto -> @email, '#' -> same-document
    # xref, '%' -> cross-document xref, 'info:' -> external Info file,
    # anything else -> external URL per texinfo_show_urls.
    # an xref's target is displayed in Info so we ignore a few
    # cases for the sake of appearance
    if isinstance(node.parent, (nodes.title, addnodes.desc_type,)):
        return
    if isinstance(node[0], nodes.image):
        return
    name = node.get('name', node.astext()).strip()
    uri = node.get('refuri', '')
    if not uri and node.get('refid'):
        # internal reference: synthesize a '%docname#refid' URI
        uri = '%' + self.curfilestack[-1] + '#' + node['refid']
    if not uri:
        return
    if uri.startswith('mailto:'):
        uri = self.escape_arg(uri[7:])
        name = self.escape_arg(name)
        if not name or name == uri:
            self.body.append('@email{%s}' % uri)
        else:
            self.body.append('@email{%s,%s}' % (uri, name))
    elif uri.startswith('#'):
        # references to labels in the same document
        id = self.curfilestack[-1] + ':' + uri[1:]
        self.add_xref(id, name, node)
    elif uri.startswith('%'):
        # references to documents or labels inside documents
        hashindex = uri.find('#')
        if hashindex == -1:
            # reference to the document
            id = uri[1:] + '::doc'
        else:
            # reference to a label
            id = uri[1:].replace('#', ':')
        self.add_xref(id, name, node)
    elif uri.startswith('info:'):
        # references to an external Info file
        uri = uri[5:].replace('_', ' ')
        uri = self.escape_arg(uri)
        id = 'Top'
        if '#' in uri:
            uri, id = uri.split('#', 1)
        id = self.escape_id(id)
        name = self.escape_menu(name)
        if name == id:
            self.body.append('@ref{%s,,,%s}' % (id, uri))
        else:
            self.body.append('@ref{%s,,%s,%s}' % (id, name, uri))
    else:
        uri = self.escape_arg(uri)
        name = self.escape_arg(name)
        show_urls = self.builder.config.texinfo_show_urls
        if self.in_footnote:
            # avoid nesting a footnote inside a footnote
            show_urls = 'inline'
        if not name or uri == name:
            self.body.append('@indicateurl{%s}' % uri)
        elif show_urls == 'inline':
            self.body.append('@uref{%s,%s}' % (uri, name))
        elif show_urls == 'no':
            self.body.append('@uref{%s,,%s}' % (uri, name))
        else:
            self.body.append('%s@footnote{%s}' % (name, uri))
    raise nodes.SkipNode

def depart_reference(self, node):
    pass

def visit_title_reference(self, node):
    text = node.astext()
    self.body.append('@cite{%s}' % self.escape_arg(text))
    raise nodes.SkipNode
## Blocks

def visit_paragraph(self, node):
    self.body.append('\n')
def depart_paragraph(self, node):
    self.body.append('\n')

def visit_block_quote(self, node):
    self.body.append('\n@quotation\n')
def depart_block_quote(self, node):
    self.ensure_eol()
    self.body.append('@end quotation\n')

def visit_literal_block(self, node):
    self.body.append('\n@example\n')
def depart_literal_block(self, node):
    self.ensure_eol()
    self.body.append('@end example\n')

# doctest blocks render exactly like literal blocks
visit_doctest_block = visit_literal_block
depart_doctest_block = depart_literal_block

def visit_line_block(self, node):
    # only the outermost line block gets surrounding blank lines
    if not isinstance(node.parent, nodes.line_block):
        self.body.append('\n\n')
    self.body.append('@display\n')
def depart_line_block(self, node):
    self.body.append('@end display\n')
    if not isinstance(node.parent, nodes.line_block):
        self.body.append('\n\n')

def visit_line(self, node):
    self.escape_newlines += 1
def depart_line(self, node):
    # @w{ } keeps even an empty line from collapsing
    self.body.append('@w{ }\n')
    self.escape_newlines -= 1

## Inline
def visit_strong(self, node):
    self.body.append('@strong{')
def depart_strong(self, node):
    self.body.append('}')
def visit_emphasis(self, node):
    self.body.append('@emph{')
def depart_emphasis(self, node):
    self.body.append('}')
def visit_literal(self, node):
    self.body.append('@code{')
def depart_literal(self, node):
    self.body.append('}')
def visit_superscript(self, node):
    self.body.append('@w{^')
def depart_superscript(self, node):
    self.body.append('}')
def visit_subscript(self, node):
    self.body.append('@w{[')
def depart_subscript(self, node):
    self.body.append(']}')

## Footnotes
def visit_footnote(self, node):
    # footnote bodies are emitted at each reference, not in place
    raise nodes.SkipNode
def visit_collected_footnote(self, node):
    self.in_footnote += 1
    self.body.append('@footnote{')
def depart_collected_footnote(self, node):
    self.body.append('}')
    self.in_footnote -= 1
def visit_footnote_reference(self, node):
    num = node.astext().strip()
    try:
        footnode, used = self.footnotestack[-1][num]
    except (KeyError, IndexError):
        raise nodes.SkipNode
    # footnotes are repeated for each reference
    footnode.walkabout(self)
    raise nodes.SkipChildren
def visit_citation(self, node):
    for id in node.get('ids'):
        self.add_anchor(id, node)
def depart_citation(self, node):
    pass
def visit_citation_reference(self, node):
    self.body.append('@w{[')
def depart_citation_reference(self, node):
    self.body.append(']}')
## Lists
def visit_bullet_list(self, node):
    bullet = node.get('bullet', '*')
    self.body.append('\n\n@itemize %s\n' % bullet)
def depart_bullet_list(self, node):
    self.ensure_eol()
    self.body.append('@end itemize\n')
def visit_enumerated_list(self, node):
    # doesn't support Roman numerals
    enum = node.get('enumtype', 'arabic')
    starters = {'arabic': '',
                'loweralpha': 'a',
                'upperalpha': 'A',}
    start = node.get('start', starters.get(enum, ''))
    self.body.append('\n\n@enumerate %s\n' % start)
def depart_enumerated_list(self, node):
    self.ensure_eol()
    self.body.append('@end enumerate\n')
def visit_list_item(self, node):
    self.body.append('\n@item ')
def depart_list_item(self, node):
    pass

## Option List
def visit_option_list(self, node):
    self.body.append('\n\n@table @option\n')
def depart_option_list(self, node):
    self.ensure_eol()
    self.body.append('@end table\n')
def visit_option_list_item(self, node):
    pass
def depart_option_list_item(self, node):
    pass
def visit_option_group(self, node):
    # first option in a group gets @item, the rest @itemx
    self.at_item_x = '@item'
def depart_option_group(self, node):
    pass
def visit_option(self, node):
    # hyphens in option names must not be turned into dashes
    self.escape_hyphens += 1
    self.body.append('\n%s ' % self.at_item_x)
    self.at_item_x = '@itemx'
def depart_option(self, node):
    self.escape_hyphens -= 1
def visit_option_string(self, node):
    pass
def depart_option_string(self, node):
    pass
def visit_option_argument(self, node):
    self.body.append(node.get('delimiter', ' '))
def depart_option_argument(self, node):
    pass
def visit_description(self, node):
    self.body.append('\n')
def depart_description(self, node):
    pass
## Definitions
def visit_definition_list(self, node):
    self.body.append('\n\n@table @asis\n')
def depart_definition_list(self, node):
    self.ensure_eol()
    self.body.append('@end table\n')
def visit_definition_list_item(self, node):
    # first term gets @item, subsequent terms @itemx
    self.at_item_x = '@item'
def depart_definition_list_item(self, node):
    pass
def visit_term(self, node):
    for id in node.get('ids'):
        self.add_anchor(id, node)
    # anchors and indexes need to go in front
    for n in node[::]:
        if isinstance(n, (addnodes.index, nodes.target)):
            n.walkabout(self)
            node.remove(n)
    self.body.append('\n%s ' % self.at_item_x)
    self.at_item_x = '@itemx'
def depart_term(self, node):
    pass
def visit_termsep(self, node):
    self.body.append('\n%s ' % self.at_item_x)
def depart_termsep(self, node):
    pass
def visit_classifier(self, node):
    self.body.append(' : ')
def depart_classifier(self, node):
    pass
def visit_definition(self, node):
    self.body.append('\n')
def depart_definition(self, node):
    pass

## Tables
def visit_table(self, node):
    self.entry_sep = '@item'
def depart_table(self, node):
    self.body.append('\n@end multitable\n\n')
def visit_tabular_col_spec(self, node):
    pass
def depart_tabular_col_spec(self, node):
    pass
def visit_colspec(self, node):
    # the @multitable line is written only once all colspecs are seen,
    # using prototype rows of 'x' characters to set column widths
    self.colwidths.append(node['colwidth'])
    if len(self.colwidths) != self.n_cols:
        return
    self.body.append('\n\n@multitable ')
    for i, n in enumerate(self.colwidths):
        self.body.append('{%s} ' %('x' * (n+2)))
def depart_colspec(self, node):
    pass
def visit_tgroup(self, node):
    self.colwidths = []
    self.n_cols = node['cols']
def depart_tgroup(self, node):
    pass
def visit_thead(self, node):
    self.entry_sep = '@headitem'
def depart_thead(self, node):
    pass
def visit_tbody(self, node):
    pass
def depart_tbody(self, node):
    pass
def visit_row(self, node):
    pass
def depart_row(self, node):
    self.entry_sep = '@item'
def visit_entry(self, node):
    self.body.append('\n%s\n' % self.entry_sep)
    self.entry_sep = '@tab'
def depart_entry(self, node):
    # pad out spanned columns with empty @tab cells
    for i in xrange(node.get('morecols', 0)):
        self.body.append('\n@tab\n')

## Field Lists
def visit_field_list(self, node):
    pass
def depart_field_list(self, node):
    pass
def visit_field(self, node):
    self.body.append('\n')
def depart_field(self, node):
    self.body.append('\n')
def visit_field_name(self, node):
    self.ensure_eol()
    self.body.append('@*')
def depart_field_name(self, node):
    self.body.append(': ')
def visit_field_body(self, node):
    pass
def depart_field_body(self, node):
    pass
## Admonitions
def visit_admonition(self, node, name=''):
    # Render admonitions as a boxed (@cartouche) quotation headed by the
    # admonition's label; unnamed admonitions use their first child's text.
    if not name:
        name = self.escape(node[0].astext())
    self.body.append(u'\n@cartouche\n@quotation %s ' % name)
def depart_admonition(self, node):
    self.ensure_eol()
    self.body.append('@end quotation\n'
                     '@end cartouche\n')
def _make_visit_admonition(name):
    # Factory producing a visit method bound to a specific localized label.
    def visit(self, node):
        self.visit_admonition(node, admonitionlabels[name])
    return visit
visit_attention = _make_visit_admonition('attention')
depart_attention = depart_admonition
visit_caution = _make_visit_admonition('caution')
depart_caution = depart_admonition
visit_danger = _make_visit_admonition('danger')
depart_danger = depart_admonition
visit_error = _make_visit_admonition('error')
depart_error = depart_admonition
visit_hint = _make_visit_admonition('hint')
depart_hint = depart_admonition
visit_important = _make_visit_admonition('important')
depart_important = depart_admonition
visit_note = _make_visit_admonition('note')
depart_note = depart_admonition
visit_tip = _make_visit_admonition('tip')
depart_tip = depart_admonition
visit_warning = _make_visit_admonition('warning')
depart_warning = depart_admonition
## Misc
def visit_docinfo(self, node):
    raise nodes.SkipNode
def visit_generated(self, node):
    raise nodes.SkipNode
def visit_header(self, node):
    raise nodes.SkipNode
def visit_footer(self, node):
    raise nodes.SkipNode
def visit_container(self, node):
    pass
def depart_container(self, node):
    pass
def visit_decoration(self, node):
    pass
def depart_decoration(self, node):
    pass
def visit_topic(self, node):
    # ignore TOC's since we have to have a "menu" anyway
    if 'contents' in node.get('classes', []):
        raise nodes.SkipNode
    title = node[0]
    self.visit_rubric(title)
    self.body.append('%s\n' % self.escape(title.astext()))
def depart_topic(self, node):
    pass
def visit_transition(self, node):
    self.body.append('\n\n%s\n\n' % ('_' * 66))
def depart_transition(self, node):
    pass
def visit_attribution(self, node):
    self.body.append('\n\n@center --- ')
def depart_attribution(self, node):
    self.body.append('\n\n')
def visit_raw(self, node):
    # pass through raw content only for the texinfo/texi formats
    format = node.get('format', '').split()
    if 'texinfo' in format or 'texi' in format:
        self.body.append(node.astext())
    raise nodes.SkipNode
def visit_figure(self, node):
    self.body.append('\n\n@float Figure\n')
def depart_figure(self, node):
    self.body.append('\n@end float\n\n')
def visit_caption(self, node):
    if not isinstance(node.parent, nodes.figure):
        self.builder.warn('caption not inside a figure.',
                          (self.curfilestack[-1], node.line))
        return
    self.body.append('\n@caption{')
def depart_caption(self, node):
    if isinstance(node.parent, nodes.figure):
        self.body.append('}\n')
def visit_image(self, node):
    if node['uri'] in self.builder.images:
        uri = self.builder.images[node['uri']]
    else:
        # missing image!
        if self.ignore_missing_images:
            return
        uri = node['uri']
    if uri.find('://') != -1:
        # ignore remote images
        return
    name, ext = path.splitext(uri)
    attrs = node.attributes
    # width and height ignored in non-tex output
    width = self.tex_image_length(attrs.get('width', ''))
    height = self.tex_image_length(attrs.get('height', ''))
    alt = self.escape_arg(attrs.get('alt', ''))
    # @image{base,width,height,alt,extension}
    self.body.append('\n@image{%s,%s,%s,%s,%s}\n' %
                     (name, width, height, alt, ext[1:]))
def depart_image(self, node):
    pass
def visit_compound(self, node):
    pass
def depart_compound(self, node):
    pass
def visit_sidebar(self, node):
    self.visit_topic(node)
def depart_sidebar(self, node):
    self.depart_topic(node)
def visit_label(self, node):
    self.body.append('@w{(')
def depart_label(self, node):
    self.body.append(')} ')
def visit_legend(self, node):
    pass
def depart_legend(self, node):
    pass
def visit_substitution_reference(self, node):
    pass
def depart_substitution_reference(self, node):
    pass
def visit_substitution_definition(self, node):
    raise nodes.SkipNode
def visit_system_message(self, node):
    self.body.append('\n@verbatim\n'
                     '<SYSTEM MESSAGE: %s>\n'
                     '@end verbatim\n' % node.astext())
    raise nodes.SkipNode
def visit_comment(self, node):
    self.body.append('\n')
    for line in node.astext().splitlines():
        self.body.append('@c %s\n' % line)
    raise nodes.SkipNode
def visit_problematic(self, node):
    self.body.append('>>')
def depart_problematic(self, node):
    self.body.append('<<')
def unimplemented_visit(self, node):
    self.builder.warn("unimplemented node type: %r" % node,
                      (self.curfilestack[-1], node.line))
def unknown_visit(self, node):
    self.builder.warn("unknown node type: %r" % node,
                      (self.curfilestack[-1], node.line))
def unknown_departure(self, node):
    pass
### Sphinx specific
def visit_productionlist(self, node):
    # Render a grammar production list as a literal block, with token
    # names left-justified to the width of the longest name.
    self.visit_literal_block(None)
    names = []
    for production in node:
        names.append(production['tokenname'])
    maxlen = max(len(name) for name in names)
    for production in node:
        if production['tokenname']:
            for id in production.get('ids'):
                self.add_anchor(id, production)
            s = production['tokenname'].ljust(maxlen) + ' ::='
        else:
            # continuation line: blank padding instead of a token name
            s = '%s ' % (' '*maxlen)
        self.body.append(self.escape(s))
        self.body.append(self.escape(production.astext() + '\n'))
    self.depart_literal_block(None)
    raise nodes.SkipNode
def visit_production(self, node):
    pass
def depart_production(self, node):
    pass
def visit_literal_emphasis(self, node):
    self.body.append('@code{')
def depart_literal_emphasis(self, node):
    self.body.append('}')
def visit_index(self, node):
    # terminate the line but don't prevent paragraph breaks
    if isinstance(node.parent, nodes.paragraph):
        self.ensure_eol()
    else:
        self.body.append('\n')
    for entry in node['entries']:
        typ, text, tid, text2 = entry
        text = self.escape_menu(text)
        self.body.append('@geindex %s\n' % text)
def visit_versionmodified(self, node):
    self.body.append('\n')
def depart_versionmodified(self, node):
    self.body.append('\n')
def visit_start_of_file(self, node):
    # add a document target
    self.next_section_ids.add(':doc')
    self.curfilestack.append(node['docname'])
    self.footnotestack.append(self.collect_footnotes(node))
def depart_start_of_file(self, node):
    self.curfilestack.pop()
    self.footnotestack.pop()
def visit_centered(self, node):
    txt = self.escape_arg(node.astext())
    self.body.append('\n\n@center %s\n\n' % txt)
    raise nodes.SkipNode
def visit_seealso(self, node):
    self.body.append(u'\n\n@subsubheading %s\n\n' %
                     admonitionlabels['seealso'])
def depart_seealso(self, node):
    self.body.append('\n')
def visit_meta(self, node):
    raise nodes.SkipNode
def visit_glossary(self, node):
    pass
def depart_glossary(self, node):
    pass
def visit_acks(self, node):
    # render the acknowledgements list as one comma-separated sentence
    self.body.append('\n\n')
    self.body.append(', '.join(n.astext()
                               for n in node.children[0].children) + '.')
    self.body.append('\n\n')
    raise nodes.SkipNode
def visit_highlightlang(self, node):
    pass
def depart_highlightlang(self, node):
    pass
## Desc
def visit_desc(self, node):
    self.desc = node
    # first signature in a desc gets @deffn, the rest @deffnx
    self.at_deffnx = '@deffn'
def depart_desc(self, node):
    self.desc = None
    self.ensure_eol()
    self.body.append('@end deffn\n')
def visit_desc_signature(self, node):
    self.escape_hyphens += 1
    objtype = node.parent['objtype']
    if objtype != 'describe':
        for id in node.get('ids'):
            self.add_anchor(id, node)
    # use the full name of the objtype for the category
    try:
        domain = self.builder.env.domains[node.parent['domain']]
        primary = self.builder.config.primary_domain
        name = domain.get_type_name(domain.object_types[objtype],
                                    primary == domain.name)
    except KeyError:
        name = objtype
    # by convention, the deffn category should be capitalized like a title
    category = self.escape_arg(smart_capwords(name))
    self.body.append('\n%s {%s} ' % (self.at_deffnx, category))
    self.at_deffnx = '@deffnx'
    self.desc_type_name = name
def depart_desc_signature(self, node):
    self.body.append("\n")
    self.escape_hyphens -= 1
    self.desc_type_name = None
def visit_desc_name(self, node):
    pass
def depart_desc_name(self, node):
    pass
def visit_desc_addname(self, node):
    pass
def depart_desc_addname(self, node):
    pass
def visit_desc_type(self, node):
    pass
def depart_desc_type(self, node):
    pass
def visit_desc_returns(self, node):
    self.body.append(' -> ')
def depart_desc_returns(self, node):
    pass
def visit_desc_parameterlist(self, node):
    self.body.append(' (')
    self.first_param = 1
def depart_desc_parameterlist(self, node):
    self.body.append(')')
def visit_desc_parameter(self, node):
    if not self.first_param:
        self.body.append(', ')
    else:
        self.first_param = 0
    text = self.escape(node.astext())
    # replace no-break spaces with normal ones
    # NOTE(review): the first replace() argument should be a U+00A0
    # no-break space literal -- verify it wasn't mangled to a plain space
    text = text.replace(u' ', '@w{ }')
    self.body.append(text)
    raise nodes.SkipNode
def visit_desc_optional(self, node):
    self.body.append('[')
def depart_desc_optional(self, node):
    self.body.append(']')
def visit_desc_annotation(self, node):
    # Try to avoid duplicating info already displayed by the deffn category.
    # e.g.
    #   @deffn {Class} Foo
    #   -- instead of --
    #   @deffn {Class} class Foo
    txt = node.astext().strip()
    if txt == self.desc['desctype'] or \
       txt == self.desc['objtype'] or \
       txt in self.desc_type_name.split():
        raise nodes.SkipNode
def depart_desc_annotation(self, node):
    pass
def visit_desc_content(self, node):
    pass
def depart_desc_content(self, node):
    pass
def visit_inline(self, node):
    pass
def depart_inline(self, node):
    pass
def visit_abbreviation(self, node):
    # expand an abbreviation only the first time it is encountered
    abbr = node.astext()
    self.body.append('@abbr{')
    if node.hasattr('explanation') and abbr not in self.handled_abbrs:
        self.context.append(',%s}' % self.escape_arg(node['explanation']))
        self.handled_abbrs.add(abbr)
    else:
        self.context.append('}')
def depart_abbreviation(self, node):
    self.body.append(self.context.pop())
def visit_download_reference(self, node):
    pass
def depart_download_reference(self, node):
    pass
def visit_hlist(self, node):
    # horizontal lists degrade to plain bullet lists in Texinfo
    self.visit_bullet_list(node)
def depart_hlist(self, node):
    self.depart_bullet_list(node)
def visit_hlistcol(self, node):
    pass
def depart_hlistcol(self, node):
    pass
def visit_pending_xref(self, node):
    pass
def depart_pending_xref(self, node):
    pass
def visit_math(self, node):
    self.builder.warn('using "math" markup without a Sphinx math extension '
                      'active, please use one of the math extensions '
                      'described at http://sphinx-doc.org/ext/math.html')
    raise nodes.SkipNode
visit_math_block = visit_math
// Copyright IBM Corp. 2016, 2025
// SPDX-License-Identifier: BUSL-1.1
package http
import (
"encoding/json"
"testing"
"github.com/hashicorp/vault/helper/hostutil"
"github.com/hashicorp/vault/vault"
)
// TestSysHostInfo verifies that the sys/host-info endpoint returns fully
// populated host data on the active node and errors on a standby node.
func TestSysHostInfo(t *testing.T) {
	// Spin up a test cluster and wait until node 0 is the active node.
	cluster := vault.NewTestCluster(t, &vault.CoreConfig{}, &vault.TestClusterOptions{
		HandlerFunc: Handler,
	})
	cluster.Start()
	defer cluster.Cleanup()
	cores := cluster.Cores
	vault.TestWaitActive(t, cores[0].Core)
	// Query against the active node, should get host information back
	secret, err := cores[0].Client.Logical().Read("sys/host-info")
	if err != nil {
		t.Fatal(err)
	}
	if secret == nil || secret.Data == nil {
		t.Fatal("expected data in the response")
	}
	// Round-trip the response data through JSON into a typed HostInfo so
	// each section can be checked individually.
	dataBytes, err := json.Marshal(secret.Data)
	if err != nil {
		t.Fatal(err)
	}
	var info hostutil.HostInfo
	if err := json.Unmarshal(dataBytes, &info); err != nil {
		t.Fatal(err)
	}
	if info.Timestamp.IsZero() {
		t.Fatal("expected non-zero Timestamp")
	}
	if info.CPU == nil {
		t.Fatal("expected non-nil CPU value")
	}
	if info.Disk == nil {
		t.Fatal("expected disk info")
	}
	if info.Host == nil {
		t.Fatal("expected host info")
	}
	if info.Memory == nil {
		t.Fatal("expected memory info")
	}
	// Query against a standby, should error
	secret, err = cores[1].Client.Logical().Read("sys/host-info")
	if err == nil || secret != nil {
		t.Fatalf("expected error on standby node, HostInfo: %v", secret)
	}
}
# -*- coding:utf-8 -*-
import pymongo
import requests
import requesocks
from model import FailedUrl
from config import *
# Initialize the MongoDB client, authenticating when a user is configured.
def init_client():
    client = pymongo.MongoClient(config['db_host'], config['db_port'])
    if len(config['db_user']) != 0:
        # authenticate against the configured database
        admin = client[config['db_name']]
        admin.authenticate(config['db_user'], config['db_pass'])
    return client
# Return an HTTP session, routed through the local Tor SOCKS5 proxy when
# 'use_tor_proxy' is enabled in the config, otherwise a plain requests
# session.
def get_http_client():
    if config['use_tor_proxy']:
        session = requesocks.session()
        session.proxies = {'http': 'socks5://127.0.0.1:%d' % config['tor_proxy_port'],
                           'https': 'socks5://127.0.0.1:%d' % config['tor_proxy_port']}
        return session
    else:
        return requests.session()
# Send a GET request for 'url', retrying up to three times; returns the
# response body, or the empty string when every attempt fails.
def get_body(url):
    retry_times = 0
    client = get_http_client()
    while retry_times < 3:
        try:
            content = client.get(url, timeout=config['timeout']).content
            return content
        except Exception:
            # was a bare 'except:', which would also swallow
            # KeyboardInterrupt/SystemExit; narrowed to Exception so the
            # process can still be interrupted while retrying
            retry_times += 1
    return ''
# Record a failed URL in the database if it is not already present.
def add_failed_url(db, url):
    collection = db.failed_urls
    # NOTE(review): cursor.count() and collection.insert() are deprecated
    # in newer pymongo (count_documents / insert_one); verify the pinned
    # pymongo version before upgrading.
    if collection.find({'url': url}).count() == 0:
        collection.insert(FailedUrl(url).dict())
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import datetime
import io
import os
import platform
import shutil
import socket
import subprocess
import unittest
from absl.testing import absltest
import mock
from grr_response_core.lib.util import filesystem
from grr_response_core.lib.util import temp
# TODO(hanuszczak): This import below is less than ideal, these functions could
# be probably moved to some more fundamental test module.
from grr.test_lib import filesystem_test_lib
class StatTest(absltest.TestCase):
    """Tests for filesystem.Stat: size, path, timestamps, file-type
    predicates, and Linux/macOS extended flag bits."""

    def testGetSize(self):
        with temp.AutoTempFilePath() as temp_filepath:
            with io.open(temp_filepath, "wb") as fd:
                fd.write(b"foobarbaz")
            stat = filesystem.Stat.FromPath(temp_filepath, follow_symlink=False)
            self.assertEqual(stat.GetSize(), 9)

    def testGetPath(self):
        with temp.AutoTempFilePath() as temp_filepath:
            stat = filesystem.Stat.FromPath(temp_filepath, follow_symlink=False)
            self.assertEqual(stat.GetPath(), temp_filepath)

    @unittest.skipIf(platform.system() == "Windows", "requires Unix-like system")
    def testGetTime(self):
        # Set access/modification times with touch(1) and check they come
        # back as epoch microseconds.
        adate = datetime.datetime(2017, 10, 2, 8, 45)
        mdate = datetime.datetime(2001, 5, 3, 10, 30)
        with temp.AutoTempFilePath() as temp_filepath:
            self._Touch(temp_filepath, "-a", adate)
            self._Touch(temp_filepath, "-m", mdate)
            stat = filesystem.Stat.FromPath(temp_filepath, follow_symlink=False)
            self.assertEqual(stat.GetAccessTime(), self._EpochMicros(adate))
            self.assertEqual(stat.GetModificationTime(), self._EpochMicros(mdate))

    def testDirectory(self):
        with temp.AutoTempDirPath() as temp_dirpath:
            stat = filesystem.Stat.FromPath(temp_dirpath, follow_symlink=False)
            self.assertTrue(stat.IsDirectory())
            self.assertFalse(stat.IsRegular())
            self.assertFalse(stat.IsSocket())
            self.assertFalse(stat.IsSymlink())

    def testRegular(self):
        with temp.AutoTempFilePath() as temp_filepath:
            stat = filesystem.Stat.FromPath(temp_filepath, follow_symlink=False)
            self.assertFalse(stat.IsDirectory())
            self.assertTrue(stat.IsRegular())
            self.assertFalse(stat.IsSocket())
            self.assertFalse(stat.IsSymlink())

    @unittest.skipIf(platform.system() == "Windows", "requires Unix-like system")
    def testSocket(self):
        with temp.AutoTempDirPath(remove_non_empty=True) as temp_dirpath:
            temp_socketpath = os.path.join(temp_dirpath, "foo")
            sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            try:
                sock.bind(temp_socketpath)
                stat = filesystem.Stat.FromPath(temp_socketpath, follow_symlink=False)
                self.assertFalse(stat.IsDirectory())
                self.assertFalse(stat.IsRegular())
                self.assertTrue(stat.IsSocket())
                self.assertFalse(stat.IsSymlink())
            finally:
                sock.close()

    @unittest.skipIf(platform.system() == "Windows", "requires Unix-like system")
    def testSymlink(self):
        # follow_symlink=False must report the link itself;
        # follow_symlink=True must report the target file.
        with temp.AutoTempDirPath(remove_non_empty=True) as temp_dirpath, \
                temp.AutoTempFilePath() as temp_filepath:
            with io.open(temp_filepath, "wb") as fd:
                fd.write(b"foobar")
            temp_linkpath = os.path.join(temp_dirpath, "foo")
            os.symlink(temp_filepath, temp_linkpath)
            stat = filesystem.Stat.FromPath(temp_linkpath, follow_symlink=False)
            self.assertFalse(stat.IsDirectory())
            self.assertFalse(stat.IsRegular())
            self.assertFalse(stat.IsSocket())
            self.assertTrue(stat.IsSymlink())
            stat = filesystem.Stat.FromPath(temp_linkpath, follow_symlink=True)
            self.assertFalse(stat.IsDirectory())
            self.assertTrue(stat.IsRegular())
            self.assertFalse(stat.IsSocket())
            self.assertFalse(stat.IsSymlink())
            self.assertEqual(stat.GetSize(), 6)

    # http://elixir.free-electrons.com/linux/v4.9/source/include/uapi/linux/fs.h
    FS_COMPR_FL = 0x00000004
    FS_IMMUTABLE_FL = 0x00000010
    FS_NODUMP_FL = 0x00000040

    def testGetLinuxFlags(self):
        with temp.AutoTempFilePath() as temp_filepath:
            # set 'compressed' and 'no dump' ext attributes via chattr(1)
            filesystem_test_lib.Chattr(temp_filepath, attrs=["+c", "+d"])
            stat = filesystem.Stat.FromPath(temp_filepath, follow_symlink=False)
            self.assertTrue(stat.IsRegular())
            self.assertTrue(stat.GetLinuxFlags() & self.FS_COMPR_FL)
            self.assertTrue(stat.GetLinuxFlags() & self.FS_NODUMP_FL)
            self.assertFalse(stat.GetLinuxFlags() & self.FS_IMMUTABLE_FL)
            self.assertEqual(stat.GetOsxFlags(), 0)

    # https://github.com/apple/darwin-xnu/blob/master/bsd/sys/stat.h
    UF_NODUMP = 0x00000001
    UF_IMMUTABLE = 0x00000002
    UF_HIDDEN = 0x00008000

    def testGetOsxFlags(self):
        with temp.AutoTempFilePath() as temp_filepath:
            filesystem_test_lib.Chflags(temp_filepath, flags=["nodump", "hidden"])
            stat = filesystem.Stat.FromPath(temp_filepath, follow_symlink=False)
            self.assertTrue(stat.IsRegular())
            self.assertTrue(stat.GetOsxFlags() & self.UF_NODUMP)
            self.assertTrue(stat.GetOsxFlags() & self.UF_HIDDEN)
            self.assertFalse(stat.GetOsxFlags() & self.UF_IMMUTABLE)
            self.assertEqual(stat.GetLinuxFlags(), 0)

    @unittest.skipIf(platform.system() == "Windows",
                     "Windows does not support os.symlink().")
    def testGetFlagsSymlink(self):
        with temp.AutoTempDirPath(remove_non_empty=True) as temp_dirpath, \
                temp.AutoTempFilePath() as temp_filepath:
            temp_linkpath = os.path.join(temp_dirpath, "foo")
            os.symlink(temp_filepath, temp_linkpath)
            stat = filesystem.Stat.FromPath(temp_linkpath, follow_symlink=False)
            self.assertTrue(stat.IsSymlink())
            self.assertEqual(stat.GetLinuxFlags(), 0)
            self.assertEqual(stat.GetOsxFlags(), 0)

    @unittest.skipIf(platform.system() == "Windows",
                     "Windows does not support socket.AF_UNIX.")
    def testGetFlagsSocket(self):
        with temp.AutoTempDirPath(remove_non_empty=True) as temp_dirpath:
            temp_socketpath = os.path.join(temp_dirpath, "foo")
            # There is a limit on maximum length for a socket path [1]. Most of the
            # time, this should not be an issue (since generated paths are something
            # like `/tmp/tmppqnrQsZ/foo`, way below this limit). However, on strange
            # setups this might not always be the case. Since we don't want to fail
            # the test on such configurations, we simply skip it.
            #
            # pylint: disable=line-too-long
            # [1]: https://unix.stackexchange.com/questions/367008/why-is-socket-path-length-limited-to-a-hundred-chars
            # pylint: enable=line-too-long
            if ((platform.system() == "Linux" and len(temp_socketpath) > 108) or
                    (platform.system() == "Darwin" and len(temp_socketpath) > 104)):
                message = "Generated path '{}' is too long for a socket path"
                self.skipTest(message.format(temp_socketpath))
            sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            try:
                sock.bind(temp_socketpath)
                stat = filesystem.Stat.FromPath(temp_socketpath, follow_symlink=False)
                self.assertTrue(stat.IsSocket())
                self.assertEqual(stat.GetLinuxFlags(), 0)
                self.assertEqual(stat.GetOsxFlags(), 0)
            finally:
                sock.close()

    def _Touch(self, path, mode, date):
        # touch(1) with -a/-m and a YYYYmmddHHMM timestamp
        fmt_date = date.strftime("%Y%m%d%H%M")
        subprocess.check_call(["touch", mode, "-t", fmt_date, path])

    @staticmethod
    def _EpochMicros(date):
        return int(date.strftime("%s")) * 1000000
class StatCacheTest(absltest.TestCase):
    # Tests for filesystem.StatCache: stat results must be served from the
    # cache on repeated lookups and keyed on the follow_symlink flag.
    # (Class continues beyond this excerpt.)

    def setUp(self):
        super(StatCacheTest, self).setUp()
        self.temp_dir = temp.TempDirPath()
        self.addCleanup(shutil.rmtree, self.temp_dir)

    def Path(self, *args):
        # Join path components under the per-test temp directory.
        return os.path.join(self.temp_dir, *args)

    def testBasicUsage(self):
        with io.open(self.Path("foo"), "wb") as fd:
            fd.write(b"123")
        with io.open(self.Path("bar"), "wb") as fd:
            fd.write(b"123456")
        with io.open(self.Path("baz"), "wb") as fd:
            fd.write(b"123456789")
        stat_cache = filesystem.StatCache()
        # first lookup of each path hits the filesystem...
        with MockStat() as stat_mock:
            foo_stat = stat_cache.Get(self.Path("foo"))
            self.assertEqual(foo_stat.GetSize(), 3)
            self.assertTrue(stat_mock.FromPath.called)
        with MockStat() as stat_mock:
            bar_stat = stat_cache.Get(self.Path("bar"))
            self.assertEqual(bar_stat.GetSize(), 6)
            self.assertTrue(stat_mock.FromPath.called)
        # ...repeated lookups are served from the cache
        with MockStat() as stat_mock:
            other_foo_stat = stat_cache.Get(self.Path("foo"))
            self.assertEqual(other_foo_stat.GetSize(), 3)
            self.assertFalse(stat_mock.FromPath.called)
        with MockStat() as stat_mock:
            other_bar_stat = stat_cache.Get(self.Path("bar"))
            self.assertEqual(other_bar_stat.GetSize(), 6)
            self.assertFalse(stat_mock.FromPath.called)
        with MockStat() as stat_mock:
            baz_stat = stat_cache.Get(self.Path("baz"))
            self.assertEqual(baz_stat.GetSize(), 9)
            self.assertTrue(stat_mock.FromPath.called)
        with MockStat() as stat_mock:
            other_baz_stat = stat_cache.Get(self.Path("baz"))
            self.assertEqual(other_baz_stat.GetSize(), 9)
            self.assertFalse(stat_mock.FromPath.called)

    @unittest.skipIf(platform.system() == "Windows",
                     "Windows does not support os.symlink().")
    def testFollowSymlink(self):
        # follow_symlink=False and =True must be cached as distinct entries.
        with io.open(self.Path("foo"), "wb") as fd:
            fd.write(b"123456")
        os.symlink(self.Path("foo"), self.Path("bar"))
        stat_cache = filesystem.StatCache()
        with MockStat() as stat_mock:
            bar_stat = stat_cache.Get(self.Path("bar"), follow_symlink=False)
            self.assertTrue(bar_stat.IsSymlink())
            self.assertTrue(stat_mock.FromPath.called)
        with MockStat() as stat_mock:
            foo_stat = stat_cache.Get(self.Path("bar"), follow_symlink=True)
            self.assertFalse(foo_stat.IsSymlink())
            self.assertEqual(foo_stat.GetSize(), 6)
            self.assertTrue(stat_mock.FromPath.called)
def testSmartSymlinkCache(self):
with open(self.Path("foo"), "wb") as fd:
fd.write(b"12345")
stat_cache = filesystem.StatCache()
with MockStat() as stat_mock:
foo_stat = stat_cache.Get(self.Path("foo"), follow_symlink=False)
self.assertEqual(foo_stat.GetSize(), 5)
self.assertTrue(stat_mock.FromPath.called)
with MockStat() as stat_mock:
other_foo_stat = stat_cache.Get(self.Path("foo"), follow_symlink=True)
self.assertEqual(other_foo_stat.GetSize(), 5)
self.assertFalse(stat_mock.FromPath.called)
def MockStat():
  """Returns a patcher that spies on `filesystem.Stat` without changing it.

  The patch wraps the real class (`wraps=`), so behaviour is unchanged while
  call counts such as `stat_mock.FromPath.called` become observable.
  """
  spy_patch = mock.patch.object(filesystem, "Stat", wraps=filesystem.Stat)
  return spy_patch
if __name__ == "__main__":
  # Delegate to Abseil's test runner when the module is executed directly.
  absltest.main()
#!/usr/bin/python
#
# Generate the build trees and Makefiles for PyQwt.
import compileall
import glob
import optparse
import os
import pprint
import re
import shutil
import sys
import traceback
class Die(Exception):
    """Fatal configuration error carrying a user-facing message.

    Raised throughout configure.py to abort with an explanatory message
    instead of a traceback.
    """

    def __init__(self, info):
        # Hand the message to the base class so str(exc) yields it.
        Exception.__init__(self, info)
# class Die
# Abort at import time unless a sufficiently recent SIP (>= 4.6) with its
# development tools is installed.  (Python 2 `raise Exc, arg` syntax.)
try:
    required = 'Requires at least SIP-4.6 and its development tools.'
    import sipconfig
except ImportError:
    raise Die, required
if 0x040600 > sipconfig._pkg_config['sip_version']:
    raise Die, required
del required
def get_pyqt_configuration(options):
    """Return the PyQt configuration for Qt3 or Qt4

    Also records the Qt3 flavour of the Qwt/iqt build directories on
    `options`.  Raises Die when PyQt (>= 3.17) or its configuration
    module is missing.
    """
    required = 'Requires at least PyQt-3.17 and its development tools.'
    # Qt3 naming: the Qwt and iqt bindings live in the *4qt3 directories.
    options.qwt = 'qwt4qt3'
    options.iqt = 'iqt4qt3'
    try:
        import pyqtconfig as pyqtconfig
    except ImportError:
        raise Die, required
    # 0x031100 == PyQt-3.17; anything older is unsupported.
    if 0x031100 > pyqtconfig._pkg_config['pyqt_version']:
        raise Die, required
    try:
        configuration = pyqtconfig.Configuration()
    except AttributeError:
        raise Die, 'Check if SIP and PyQt have been installed properly.'
    return configuration
# get_pyqt_configuration()
def compile_qt_program(name, configuration,
                       extra_defines=[],
                       extra_include_dirs=[],
                       extra_lib_dirs=[],
                       extra_libs=[],
                       ):
    """Compile a simple Qt application and return its executable path.

    name is the name of the single source file
    configuration is the pyqtconfig.Configuration()
    extra_defines is a list of extra preprocessor definitions
    extra_include_dirs is a list of extra directories to search for headers
    extra_lib_dirs is a list of extra directories to search for libraries
    extra_libs is a list of extra libraries

    Returns None when no executable was produced.  The mutable default
    arguments are only ever read, never mutated, so sharing them is safe.
    """
    makefile = sipconfig.ProgramMakefile(
        configuration, console=True, qt=True, warnings=True)
    makefile.extra_defines.extend(extra_defines)
    makefile.extra_include_dirs.extend(extra_include_dirs)
    makefile.extra_lib_dirs.extend(extra_lib_dirs)
    makefile.extra_libs.extend(extra_libs)

    exe, build = makefile.build_command(name)

    # Remove any stale executable so a failed build cannot masquerade as
    # a successful one.
    try:
        os.remove(exe)
    except OSError:
        pass

    os.system(build)
    if not os.access(exe, os.X_OK):
        return None

    # Prefix with './' so the tool can be invoked from the current
    # directory on POSIX systems.
    return exe if sys.platform == 'win32' else './' + exe
# compile_qt_program()
def copy_files(sources, directory):
    """Copy every file in `sources` into `directory`.

    File metadata (timestamps, permissions) is preserved via shutil.copy2.
    """
    for path in sources:
        destination = os.path.join(directory, os.path.basename(path))
        shutil.copy2(path, destination)
# copy_files()
def fix_build_file(name, extra_sources, extra_headers, extra_moc_headers):
    """Extend the targets of a SIP build file with extra files

    The .sbf file is parsed into `key = value ...` entries, the extra
    source/header/moc-header lists are appended, and the file is rewritten
    in place.  (Python 2 only: uses the `print >> file` statement.)
    """
    keys = ('target', 'sources', 'headers', 'moc_headers')
    sbf = {}
    for key in keys:
        sbf[key] = []

    # Parse,
    nr = 0
    for line in open(name, 'r'):
        nr += 1
        # Lines starting with '#' are comments in .sbf files.
        if line[0] != '#':
            eq = line.find('=')
            if eq == -1:
                raise Die, ('"%s\" line %d: Line must be in the form '
                            '"key = value value...."' % (name, nr)
                            )
            key = line[:eq].strip()
            value = line[eq+1:].strip()
            if key in keys:
                sbf[key].append(value)
    # extend,
    sbf['sources'].extend(extra_sources)
    sbf['headers'].extend(extra_headers)
    sbf['moc_headers'].extend(extra_moc_headers)
    # and write.
    output = open(name, 'w')
    for key in keys:
        if sbf[key]:
            print >> output, '%s = %s' % (key, ' '.join(sbf[key]))
# fix_build_file()
def lazy_copy_file(source, target):
    """Copy `source` over `target` only when their contents differ.

    The SIP-generated time stamp (always the fourth line when present) is
    ignored during comparison, so regenerated-but-unchanged files do not
    trigger a rebuild.  Returns True when a copy took place, else False.
    """
    if not os.path.exists(target):
        shutil.copy2(source, target)
        return True

    source_lines = open(source).readlines()
    target_lines = open(target).readlines()

    # Different line counts: the files certainly differ.
    if len(source_lines) != len(target_lines):
        shutil.copy2(source, target)
        return True

    # Skip the SIP time stamp comment when present.
    if (len(source_lines) > 3
        and source_lines[3].startswith(' * Generated by SIP')
        ):
        start = 4
    else:
        start = 0

    for index in range(start, len(source_lines)):
        if source_lines[index] != target_lines[index]:
            shutil.copy2(source, target)
            return True

    return False
# lazy_copy_file()
def check_numarray(configuration, options, package):
    """See if the numarray extension has been installed.

    Enables the HAS_NUMARRAY feature when both the module and its C
    headers are present; otherwise (or when disabled via the command
    line) excludes the feature.  Always returns the updated options.
    """
    if options.disable_numarray:
        options.excluded_features.append("-x HAS_NUMARRAY")
        return options

    try:
        import numarray
        # Try to find numarray/arrayobject.h.
        numarray_inc = os.path.join(
            configuration.py_inc_dir, "numarray", "arrayobject.h")
        if os.access(numarray_inc, os.F_OK):
            print "Found numarray-%s.\n" % numarray.__version__
            options.extra_defines.append("HAS_NUMARRAY")
        else:
            # Module importable but headers missing: fall through to the
            # ImportError handler below.
            print ("numarray has been installed, "
                   "but its headers are not in the standard location.\n"
                   "%s will be build without support for numarray.\n"
                   "(Linux users may have to install a development package)\n"
                   ) % (package,)
            raise ImportError
    except ImportError:
        options.excluded_features.append("-x HAS_NUMARRAY")
        print ("Failed to import numarray: "
               "%s will be build without support for numarray.\n"
               ) % (package,)
    return options
# check_numarray()
def check_numeric(configuration, options, package):
"""See if the Numeric extension has been installed.
"""
if options.disable_numeric:
options.excluded_features.append("-x HAS_NUMERIC")
return options
try:
import Numeric
# Try to find Numeric/arrayobject.h.
numeric_inc = os.path.join(
configuration.py_inc_dir, "Numeric", "arrayobject.h")
if os.access(numeric_inc, os.F_OK):
print "Found Numeric-%s.\n" % Numeric.__version__
options.extra_defines.append("HAS_NUMERIC")
else:
print ("Numeric has been installed, "
"but its headers are not in the standard location.\n"
"%s will be build without support for Numeric.\n"
"(Linux users may have to install a development package)\n"
) % (package,)
raise ImportError
except ImportError:
options.excluded_features.append("-x HAS_NUMERIC")
print ("Failed to find Numeric2: "
"%s will be build without support for Numeric.\n"
) % (package,)
return options
# check_numeric()
def check_numpy(configuration, options, package):
    """See if the NumPy extension has been installed.

    Enables the HAS_NUMPY feature and adds NumPy's include directories
    when the module and its C headers are found; otherwise excludes the
    feature.  Always returns the updated options.
    """
    if options.disable_numpy:
        options.excluded_features.append("-x HAS_NUMPY")
        return options

    try:
        import numpy
        # Try to find numpy/arrayobject.h.
        from numpy.distutils.misc_util import get_numpy_include_dirs
        include_dirs = get_numpy_include_dirs()
        for inc_dir in include_dirs:
            header = os.path.join(inc_dir, 'numpy', 'arrayobject.h')
            if os.access(header, os.F_OK):
                break
        else:
            # for/else: no candidate directory contained the header.
            print ('NumPy has been installed, '
                   'but its headers are not in the standard location.\n'
                   '%s will be build without support for NumPy.\n'
                   '(Linux users may have to install a development package)\n'
                   ) % (package,)
            raise ImportError
        print 'Found NumPy-%s.\n' % numpy.__version__
        options.extra_defines.append('HAS_NUMPY')
        options.extra_include_dirs.extend(include_dirs)
    except ImportError:
        options.excluded_features.append("-x HAS_NUMPY")
        print ("Failed to find NumPy: "
               "%s will be build without support for NumPy.\n"
               ) % (package,)
    return options
# check_numpy()
def check_compiler(configuration, options):
    """Check compiler specifics.

    Probes, by compiling tiny test programs, which unsigned integer type
    is identical to size_t, and records the answer as a typedef in
    sip/<qwt>/QwtTypes.sip.  The file is rewritten only when its contents
    changed, to avoid needless rebuilds.  Returns the updated options.
    """
    print 'Do not get upset by error messages in the next 3 compiler checks:'

    makefile = sipconfig.Makefile(configuration)
    generator = makefile.optional_string('MAKEFILE_GENERATOR', 'UNIX')
    if generator in ['MSVC', 'MSVC.NET']:
        # MSVC needs RTTI enabled explicitly.
        options.extra_cxxflags.extend(['-GR'])

    # Probe: declaring a::f(size_t) and defining a::f(<candidate>) only
    # compiles when the two types are identical.
    program = '\n'.join([
        r'#include <stddef.h>',
        r'class a { public: void f(size_t); };',
        r'void a::f(%s) {};',
        r'int main() { return 0; }',
        r'',
        ])
    name = "size_t_check.cpp"
    new = [
        '// Automagically generated by configure.py',
        '',
        '// Uncomment one of the following three lines',
        ]
    for ctype in ('unsigned int', 'unsigned long', 'unsigned long long'):
        open(name, "w").write(program % ctype)
        print "Check if 'size_t' and '%s' are the same type:" % ctype
        if compile_qt_program(name, configuration):
            comment = ''
            print "YES"
        else:
            print "NO"
            comment = '// '
        # The matching type is emitted uncommented; the others commented.
        new.append('%stypedef %s size_t;' % (comment, ctype))
    new.extend(['',
                '// Local Variables:',
                '// mode: C++',
                '// c-file-style: "stroustrup"',
                '// End:',
                '',
                ])
    new = '\n'.join(new)

    # Rewrite QwtTypes.sip only when its contents actually changed.
    types_sip = os.path.join(os.pardir, 'sip', options.qwt, 'QwtTypes.sip')
    if os.access(types_sip, os.R_OK):
        old = open(types_sip, 'r').read()
    else:
        old = ''
    if old != new:
        open(types_sip, 'w').write(new)

    return options
# check_compiler()
def check_os(configuration, options):
    """Check operating system specifics.

    Reports the detected platform and, on MS-Windows, adds the WIN32
    preprocessor definition.  Returns the updated options.
    """
    print "Found '%s' operating system:" % os.name
    print sys.version

    if os.name == 'nt':
        options.extra_defines.append('WIN32')

    return options
# check_os()
def check_sip(configuration, options):
    """Check if PyQwt can be built with SIP

    Raises Die for SIP older than 4.6; excludes the HAS_PYQT_031105
    feature for PyQt older than 3.17.5 (0x031105).  Adds SIP's include
    directory and returns the updated options.
    """
    version = configuration.sip_version
    version_str = configuration.sip_version_str
    print "Found SIP-%s." % version_str

    if 0x040600 > version:
        raise Die, 'PyQwt requires at least SIP-4.6.'

    if 0x031105 > configuration.pyqt_version:
        options.excluded_features.append('-x HAS_PYQT_031105')

    options.extra_include_dirs.append(configuration.sip_inc_dir)

    return options
# check_sip()
def check_iqt(configuration, options):
    """Register the iqt module for the build.

    Appends the iqt build subdirectory and module name to the options and
    records the location of the iqt SIP specification file.  Note that
    `configuration` is accepted only for call-site symmetry with the
    other check_* helpers; it is not used here.
    """
    options.subdirs.append(options.iqt)
    options.modules.append('iqt')
    sip_dir = os.path.join(os.pardir, 'sip', options.iqt)
    options.iqt_sipfile = os.path.join(sip_dir, 'IQtModule.sip')
    return options
# check_iqt()
def check_qwt(configuration, options):
    """Check qwt module specifics.

    Builds and runs a small Qt program that writes the installed Qwt
    version into qwt_version_info.py, verifies the version is the
    supported Qwt-4.2 (0x040200), and registers the Qwt4 module for the
    build.  Returns the updated options; raises Die on any failure.
    """
    # zap all qwt_version_info*
    for name in glob.glob('qwt_version_info*'):
        try:
            os.remove(name)
        except OSError:
            pass

    # C++ helper that dumps QWT_VERSION/QWT_VERSION_STR as Python code.
    program = '\n'.join([
        r'#include <stdio.h>',
        r'#include <qwt_global.h>',
        r'',
        r'int main(int, char **)',
        r'{',
        r' FILE *file;',
        r'',
        r' if (!(file = fopen("qwt_version_info.py", "w"))) {',
        r' fprintf(stderr, "Failed to create qwt_version_info.py\n");',
        r' return 1;',
        r' }',
        r'',
        r' fprintf(file, "QWT_VERSION = %#08x\n", QWT_VERSION);',
        r' fprintf(file, "QWT_VERSION_STR = \"%s\"\n", QWT_VERSION_STR);',
        r'',
        r' fclose(file);',
        r'',
        r' return 0;',
        r'}',
        r'',
        r'// Local Variables:',
        r'// mode: C++',
        r'// c-file-style: "stroustrup"',
        r'// End:',
        r'',
        ])
    open('qwt_version_info.cpp', 'w').write(program)

    extra_include_dirs = [os.path.join(configuration.qt_inc_dir, 'Qt')]
    if options.qwt_sources:
        extra_include_dirs.append(os.path.join(options.qwt_sources, 'include'))
    if options.extra_include_dirs:
        extra_include_dirs.extend(options.extra_include_dirs)

    exe = compile_qt_program('qwt_version_info.cpp', configuration,
                             extra_include_dirs = extra_include_dirs)
    if not exe:
        raise Die, 'Failed to build the qwt_version_info tool.'

    os.system(exe)
    try:
        from qwt_version_info import QWT_VERSION, QWT_VERSION_STR
    except ImportError:
        raise Die, 'Failed to import qwt_version_info.'

    # Only Qwt-4.2.0 is supported by this binding.
    if QWT_VERSION != 0x040200:
        raise Die, 'Qwt-%s is not supported.' % QWT_VERSION_STR

    print ('Found Qwt-%s.' % QWT_VERSION_STR)
    options.extra_defines.append('HAS_QWT4')
    options.excluded_features.append('-x HAS_QWT5')
    options.subdirs.append(options.qwt)
    options.modules.append('Qwt4')
    options.qwt_sipfile = os.path.join(
        os.pardir, 'sip', options.qwt, 'QwtModule.sip')

    return options
# check_qwt()
def setup_iqt_build(configuration, options, package):
    """Setup the iqt package build

    Runs SIP on the iqt module specification into a scratch directory,
    lazily copies the generated code into the real build directory (so
    unchanged files keep their timestamps) and writes the module
    Makefile.  No-op when the iqt module is not selected.
    """
    if 'iqt' not in options.modules:
        return

    print 'Setup the iqt package build.'

    build_dir = options.iqt
    tmp_dir = 'tmp-' + build_dir
    build_file = os.path.join(tmp_dir, '%s.sbf' % options.iqt)

    # zap the temporary directory
    try:
        shutil.rmtree(tmp_dir)
    except:
        pass
    # make a clean temporary directory
    try:
        os.mkdir(tmp_dir)
    except:
        raise Die, 'Failed to create the temporary build directory.'

    # invoke SIP
    cmd = ' '.join(
        [configuration.sip_bin,
         '-b', build_file,
         '-c', tmp_dir,
         options.jobs,
         options.trace,
         ]
        + options.excluded_features
        # SIP assumes POSIX style path separators
        + [options.iqt_sipfile.replace('\\', '/')]
        )
    print 'sip invokation:'
    pprint.pprint(cmd)
    if os.path.exists(build_file):
        os.remove(build_file)
    os.system(cmd)
    # The .sbf file is SIP's success marker.
    if not os.path.exists(build_file):
        raise Die, 'SIP failed to generate the C++ code.'

    # copy lazily to the build directory to speed up recompilation
    if not os.path.exists(build_dir):
        try:
            os.mkdir(build_dir)
        except:
            raise Die, 'Failed to create the build directory.'
    lazy_copies = 0
    for pattern in ('*.c', '*.cpp', '*.h', '*.py', '*.sbf'):
        for source in glob.glob(os.path.join(tmp_dir, pattern)):
            target = os.path.join(build_dir, os.path.basename(source))
            if lazy_copy_file(source, target):
                print 'Copy %s -> %s.' % (source, target)
                lazy_copies += 1
    print '%s file(s) lazily copied.' % lazy_copies

    # Generate the module Makefile; the extension is built as '_iqt'.
    makefile = sipconfig.ModuleMakefile(
        configuration = configuration,
        build_file = os.path.basename(build_file),
        dir = build_dir,
        install_dir = options.module_install_path,
        qt = 1,
        warnings = 1,
        debug = options.debug
        )
    makefile._target = '_iqt'
    makefile.extra_cflags.extend(options.extra_cflags)
    makefile.extra_cxxflags.extend(options.extra_cxxflags)
    makefile.extra_defines.extend(options.extra_defines)
    makefile.extra_include_dirs.extend(options.extra_include_dirs)
    makefile.extra_lflags.extend(options.extra_lflags)
    makefile.extra_libs.extend(options.extra_libs)
    makefile.extra_lib_dirs.extend(options.extra_lib_dirs)
    makefile.generate()
# setup_iqt_build()
def setup_qwt4_build(configuration, options, package):
    """Setup the qwt package build

    Collects extra sources/headers (optionally Qwt's own sources for a
    static build, plus the numerical-Python support code), runs SIP,
    fixes up the generated .sbf build file, lazily copies everything
    into the build directory, byte-compiles the Python files and writes
    the module Makefile with install rules.  No-op when the Qwt4 module
    is not selected.
    """
    if 'Qwt4' not in options.modules:
        return

    print 'Setup the qwt package build.'

    build_dir = options.qwt
    tmp_dir = 'tmp-%s' % options.qwt
    build_file = os.path.join(tmp_dir, '%s.sbf' % options.qwt)

    extra_sources = []
    extra_headers = []
    extra_moc_headers = []
    extra_py_files = glob.glob(
        os.path.join(os.pardir, 'qt3lib', 'Qwt4', '*.py'))

    # do we compile and link the sources of Qwt statically into PyQwt?
    if options.qwt_sources:
        extra_sources += glob.glob(os.path.join(
            options.qwt_sources, 'src', '*.cpp'))
        extra_headers += glob.glob(os.path.join(
            options.qwt_sources, 'include', '*.h'))
        extra_moc_headers = []
        for header in extra_headers:
            text = open(header).read()
            # Only headers declaring Q_OBJECT must be run through moc.
            if re.compile(r'^\s*Q_OBJECT', re.M).search(text):
                extra_moc_headers.append(header)

    # add the interface to the numerical Python extensions
    extra_sources += glob.glob(os.path.join(os.pardir, 'support', '*.cpp'))
    extra_headers += glob.glob(os.path.join(os.pardir, 'support', '*.h'))

    # do we compile and link the sources of Qwt into PyQwt?
    if options.qwt_sources:
        # yes, zap all 'qwt'
        while options.extra_libs.count('qwt'):
            options.extra_libs.remove('qwt')
    elif 'qwt' not in options.extra_libs:
        # no, add 'qwt' if needed
        options.extra_libs.append('qwt')

    # zap the temporary directory
    try:
        shutil.rmtree(tmp_dir)
    except:
        pass
    # make a clean temporary directory
    try:
        os.mkdir(tmp_dir)
    except:
        raise Die, 'Failed to create the temporary build directory.'

    # copy the extra files
    copy_files(extra_sources, tmp_dir)
    copy_files(extra_headers, tmp_dir)
    copy_files(extra_moc_headers, tmp_dir)
    copy_files(extra_py_files, tmp_dir)

    # The attribute name differs between PyQt for Qt4 and Qt3.
    try: # Qt4
        pyqt_sip_flags = configuration.pyqt_sip_flags
    except AttributeError: # Qt3
        pyqt_sip_flags = configuration.pyqt_qt_sip_flags

    # invoke SIP
    cmd = ' '.join(
        [configuration.sip_bin,
         # SIP assumes POSIX style path separators
         '-I', configuration.pyqt_sip_dir.replace('\\', '/'),
         '-b', build_file,
         '-c', tmp_dir,
         options.jobs,
         options.trace,
         pyqt_sip_flags,
         ]
        + options.sip_include_dirs
        + options.excluded_features
        + options.timelines
        # SIP assumes POSIX style path separators
        + [options.qwt_sipfile.replace('\\', '/')]
        )
    print 'sip invokation:'
    pprint.pprint(cmd)
    if os.path.exists(build_file):
        os.remove(build_file)
    os.system(cmd)
    if not os.path.exists(build_file):
        raise Die, 'SIP failed to generate the C++ code.'

    # FIXME: sip-4.7 does not generate those include files anymore
    # Create empty stand-ins so the build does not break on #includes.
    for name in [os.path.join(tmp_dir, name) for name in [
        'sipQwtQwtArrayDouble.h',
        'sipQwtQwtArrayInt.h',
        'sipQwtQwtArrayQwtDoublePoint.h',
        ]]:
        if not os.path.exists(name):
            open(name, 'w')

    # fix the SIP build file
    fix_build_file(build_file,
                   [os.path.basename(f) for f in extra_sources],
                   [os.path.basename(f) for f in extra_headers],
                   [os.path.basename(f) for f in extra_moc_headers])

    # copy lazily to the build directory to speed up recompilation
    if not os.path.exists(build_dir):
        try:
            os.mkdir(build_dir)
        except:
            raise Die, 'Failed to create the build directory.'
    lazy_copies = 0
    for pattern in ('*.c', '*.cpp', '*.h', '*.py', '*.sbf'):
        for source in glob.glob(os.path.join(tmp_dir, pattern)):
            target = os.path.join(build_dir, os.path.basename(source))
            if lazy_copy_file(source, target):
                print 'Copy %s -> %s.' % (source, target)
                lazy_copies += 1
    print '%s file(s) lazily copied.' % lazy_copies

    # byte-compile the Python files
    compileall.compile_dir(build_dir, 1, options.module_install_path)

    # files to be installed
    installs = []
    installs.append([[os.path.basename(f) for f in glob.glob(
        os.path.join(build_dir, '*.py*'))], options.module_install_path])
    pattern = os.path.join(os.pardir, 'sip', options.qwt, '*.sip')
    installs.append(
        [[os.path.join(os.pardir, f) for f in glob.glob(pattern)],
         os.path.join(configuration.pyqt_sip_dir, 'Qwt4')])
    pattern = os.path.join(os.pardir, 'sip', options.qwt, 'common', '*.sip')
    installs.append(
        [[os.path.join(os.pardir, f) for f in glob.glob(pattern)],
         os.path.join(configuration.pyqt_sip_dir, 'Qwt4', 'common')])

    # module makefile
    makefile = sipconfig.ModuleMakefile(
        configuration = configuration,
        build_file = os.path.basename(build_file),
        dir = build_dir,
        install_dir = options.module_install_path,
        installs = installs,
        qt = 1,
        warnings = 1,
        debug = options.debug,
        )
    makefile.extra_cflags.extend(options.extra_cflags)
    makefile.extra_cxxflags.extend(options.extra_cxxflags)
    makefile.extra_defines.extend(options.extra_defines)
    makefile.extra_include_dirs.extend(options.extra_include_dirs)
    makefile.extra_lflags.extend(options.extra_lflags)
    makefile.extra_libs.extend(options.extra_libs)
    makefile.extra_lib_dirs.extend(options.extra_lib_dirs)
    makefile.generate()
# setup_qwt4_build()  (marker comment fixed; it previously said setup_qwt5_build)
def setup_parent_build(configuration, options):
    """Generate the parent Makefile

    The parent Makefile simply recurses into the per-module
    subdirectories collected in options.subdirs.
    """
    print "Setup the PyQwt build."

    sipconfig.ParentMakefile(configuration = configuration,
                             subdirs = options.subdirs).generate()
# setup_parent_build()
def parse_args():
    """Return the parsed options and args from the command line

    After parsing, the option values are post-processed into the string
    forms expected by the later sip/make invocations (e.g. '-j N',
    '-x FEATURE', '-t TIMELINE'), and the modules/subdirs accumulators
    are initialised.
    """
    usage = (
        'python configure.py [options]'
        '\n\nEach option takes at most one argument, but some options'
        '\naccumulate arguments when repeated. For example, invoke:'
        '\n\n\tpython configure.py -I %s -I %s'
        '\n\nto search the current *and* parent directories for headers.'
        ) % (os.curdir, os.pardir)

    parser = optparse.OptionParser(usage=usage)

    common_options = optparse.OptionGroup(parser, 'Common options')
    common_options.add_option(
        '-Q', '--qwt-sources', default='', action='store',
        type='string', metavar='/sources/of/qwt',
        help=('compile and link the Qwt source files in'
              ' /sources/of/qwt statically into PyQwt'))
    common_options.add_option(
        '-I', '--extra-include-dirs', default=[], action='append',
        type='string', metavar='/usr/lib/qt3/include/qwt',
        help=('add an extra directory to search for headers'
              ' (the compiler must be able to find the Qwt headers'
              ' without the -Q option)'))
    common_options.add_option(
        '-L', '--extra-lib-dirs', default=[], action='append',
        type='string', metavar='/usr/lib/qt3/lib',
        help=('add an extra directory to search for libraries'
              ' (the linker must be able to find the Qwt library'
              ' without the -Q option)'))
    common_options.add_option(
        '-j', '--jobs', default=0, action='store',
        type='int', metavar='N',
        help=('concatenate the SIP generated code into N files'
              ' [default 1 per class] (to speed up make by running '
              ' simultaneous jobs on multiprocessor systems)'))
    parser.add_option_group(common_options)

    make_options = optparse.OptionGroup(parser, 'Make options')
    make_options.add_option(
        '--debug', default=False, action='store_true',
        help='enable debugging symbols [default disabled]')
    make_options.add_option(
        '--extra-cflags', default=[], action='append',
        type='string', metavar='EXTRA_CFLAG',
        help='add an extra C compiler flag')
    make_options.add_option(
        '--extra-cxxflags', default=[], action='append',
        type='string', metavar='EXTRA_CXXFLAG',
        help='add an extra C++ compiler flag')
    make_options.add_option(
        '-D', '--extra-defines', default=[], action='append',
        type='string', metavar='HAS_EXTRA_SENSORY_PERCEPTION',
        help='add an extra preprocessor definition')
    make_options.add_option(
        '-l', '--extra-libs', default=[], action='append',
        type='string', metavar='extra_sensory_perception',
        help='add an extra library')
    make_options.add_option(
        '--extra-lflags', default=[], action='append',
        type='string', metavar='EXTRA_LFLAG',
        help='add an extra linker flag')
    parser.add_option_group(make_options)

    sip_options = optparse.OptionGroup(parser, 'SIP options')
    sip_options.add_option(
        '-x', '--excluded-features', default=[], action='append',
        type='string', metavar='EXTRA_SENSORY_PERCEPTION',
        help=('add a feature for SIP to exclude'
              ' (normally one of the features in sip/features.sip)'))
    sip_options.add_option(
        '-t', '--timelines', default=[], action='append',
        type='string', metavar='EXTRA_SENSORY_PERCEPTION',
        help=('add a timeline option for SIP'
              ' (normally one of the timeline options in sip/timelines.sip)'))
    sip_options.add_option(
        '--sip-include-dirs', default=[],
        action='append', type='string', metavar='SIP_INCLUDE_DIR',
        help='add an extra directory for SIP to search')
    sip_options.add_option(
        '--trace', default=False, action='store_true',
        help=('enable trace of the execution of the bindings'
              ' [default disabled]'))
    parser.add_option_group(sip_options)

    detection_options = optparse.OptionGroup(parser, 'Detection options')
    detection_options.add_option(
        '--disable-numarray', default=False, action='store_true',
        help='disable detection and use of numarray [default enabled]'
        )
    detection_options.add_option(
        '--disable-numeric', default=False, action='store_true',
        help='disable detection and use of Numeric [default enabled]'
        )
    detection_options.add_option(
        '--disable-numpy', default=False, action='store_true',
        help='disable detection and use of NumPy [default enabled]'
        )
    parser.add_option_group(detection_options)

    install_options = optparse.OptionGroup(parser, 'Install options')
    install_options.add_option(
        '--module-install-path', default='', action='store',
        help= 'specify the install directory for the Python modules'
        )
    parser.add_option_group(install_options)

    options, args = parser.parse_args()

    # tweak some of the options to facilitate later processing
    if options.jobs < 1:
        options.jobs = ''
    else:
        options.jobs = '-j %s' % options.jobs
    options.excluded_features = [
        ('-x %s' % f) for f in options.excluded_features
        ]
    # SIP assumes POSIX style path separators
    options.sip_include_dirs = [
        ('-I %s' % f).replace('\\', '/') for f in options.sip_include_dirs
        ]
    options.timelines = [
        ('-t %s' % t) for t in options.timelines
        ]
    if options.trace:
        # Tracing maps to SIP's -r flag plus the TRACE_PYQWT define.
        options.trace = '-r'
        options.extra_defines.append('TRACE_PYQWT')
    else:
        options.trace = ''
    # Filled in later by the check_iqt()/check_qwt() steps.
    options.modules = []
    options.subdirs = []

    return options, args
# parse_args()
def main():
    """Generate the build tree and the Makefiles
    """
    options, args = parse_args()

    print 'Command line options:'
    pprint.pprint(options.__dict__)
    print

    configuration = get_pyqt_configuration(options)

    # Each check_* step may extend the defines/features/dirs on `options`.
    options = check_sip(configuration, options)
    options = check_os(configuration, options)
    options = check_compiler(configuration, options)
    options = check_numarray(configuration, options, 'PyQwt')
    options = check_numeric(configuration, options, 'PyQwt')
    options = check_numpy(configuration, options, 'PyQwt')
    options = check_iqt(configuration, options)
    options = check_qwt(configuration, options)

    # Default install location: <PyQt modules dir>/Qwt4.
    if not options.module_install_path:
        options.module_install_path = os.path.join(
            configuration.pyqt_mod_dir, 'Qwt4')

    print
    print 'Extended command line options:'
    pprint.pprint(options.__dict__)
    print
    print 'The following modules will be built: %s.' % options.modules
    print
    setup_iqt_build(configuration, options, 'PyQwt')
    print
    setup_qwt4_build(configuration, options, 'PyQwt')
    print
    setup_parent_build(configuration, options)
    print
    print 'Great, run make or nmake to build and install PyQwt.'
# main()
if __name__ == '__main__':
    try:
        main()
    except Die, info:
        # Expected configuration failure: print the message, no traceback.
        print info
        sys.exit(1)
    except:
        # catch the optparse --help option
        # (optparse exits via an exception; exit cleanly if any traceback
        # frame originates in optparse.py)
        for entry in traceback.extract_tb(sys.exc_info()[-1]):
            if 'optparse.py' in entry[0]:
                sys.exit(0)
        else:
            # for/else: no optparse frame found -> a genuine internal error.
            print (
                'An internal error occured. Please report all the output\n'
                'from the program, including the following traceback, to\n'
                'pyqwt-users@lists.sourceforge.net'
                )
        traceback.print_exc()
        sys.exit(1)
# Local Variables: ***
# mode: python ***
# End: *** | unknown | codeparrot/codeparrot-clean | ||
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Example DAG demonstrating the usage of the @taskgroup decorator."""
from __future__ import annotations
# [START howto_task_group_decorator]
import pendulum
from airflow.sdk import DAG, task, task_group
# Creating Tasks
@task
def task_start():
    """First task of the DAG; returns a start marker string."""
    marker = "[Task_start]"
    return marker
@task
def task_1(value: int) -> str:
    """Wraps the incoming value in a Task1 marker string."""
    return "[ Task1 {} ]".format(value)
@task
def task_2(value: str) -> str:
    """Wraps the Task1 output in a Task2 marker string."""
    return "[ Task2 " + value + " ]"
@task
def task_3(value: str) -> None:
    """Prints the Task2 output wrapped in a Task3 marker."""
    message = f"[ Task3 {value} ]"
    print(message)
@task
def task_end() -> None:
    """Last task of the DAG; prints an end marker."""
    marker = "[ Task_End ]"
    print(marker)
# Creating TaskGroups
@task_group
def task_group_function(value: int) -> None:
    """TaskGroup chaining task_1 -> task_2 -> task_3 for one input value."""
    first = task_1(value)
    second = task_2(first)
    task_3(second)
# Executing Tasks and TaskGroups
with DAG(
    dag_id="example_task_group_decorator",
    schedule=None,
    start_date=pendulum.datetime(2021, 1, 1, tz="UTC"),
    catchup=False,
    tags=["example"],
) as dag:
    # Fan out: five task groups run between shared start and end tasks.
    start_task = task_start()
    end_task = task_end()
    for i in range(5):
        current_task_group = task_group_function(i)
        start_task >> current_task_group >> end_task

# [END howto_task_group_decorator]
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import mock
import nova.exception
from nova.tests.unit.virt.libvirt.volume import test_volume
from nova.virt.libvirt.volume import scality
class LibvirtScalityVolumeDriverTestCase(
        test_volume.LibvirtVolumeBaseTestCase):
    """Tests for the libvirt Scality SOFS volume driver."""

    def setUp(self):
        super(LibvirtScalityVolumeDriverTestCase, self).setUp()
        self.scality_sofs_config = 'fake.conf'
        self.scality_sofs_mount_point = '/fake'
        self.flags(scality_sofs_config=self.scality_sofs_config,
                   scality_sofs_mount_point=self.scality_sofs_mount_point,
                   group='libvirt')
        self.drv = scality.LibvirtScalityVolumeDriver(self.fake_conn)

    @mock.patch('six.moves.urllib.request.urlopen')
    def test_connect_volume(self, mock_urlopen):
        # urlopen is patched (and otherwise unused) so the driver cannot
        # reach the network during the test.
        TEST_VOLDIR = 'volumes'
        TEST_VOLNAME = 'volume_name'
        TEST_CONN_INFO = {
            'data': {
                'sofs_path': os.path.join(TEST_VOLDIR, TEST_VOLNAME)
            }
        }
        TEST_VOLPATH = os.path.join(self.scality_sofs_mount_point,
                                    TEST_VOLDIR,
                                    TEST_VOLNAME)

        def _access_wrapper(path, flags):
            # Pretend the mount.sofs helper binary exists; defer to the
            # real os.access() for every other path.
            if path == '/sbin/mount.sofs':
                return True
            else:
                return os.access(path, flags)

        self.stub_out('os.access', _access_wrapper)

        # Actual mounting is stubbed out; only path bookkeeping is tested.
        with mock.patch.object(self.drv, '_mount_sofs'):
            self.drv.connect_volume(TEST_CONN_INFO, self.disk_info)

        device_path = os.path.join(self.scality_sofs_mount_point,
                                   TEST_CONN_INFO['data']['sofs_path'])
        self.assertEqual(TEST_CONN_INFO['data']['device_path'], device_path)

        conf = self.drv.get_config(TEST_CONN_INFO, self.disk_info)
        tree = conf.format_dom()
        self._assertFileTypeEquals(tree, TEST_VOLPATH)

    @mock.patch('nova.utils.execute')
    def test_mount_sofs_when_sofs_already_mounted(self, mock_execute):
        with mock.patch.object(self.drv, '_sofs_is_mounted') as m_is_mounted:
            m_is_mounted.return_value = True
            self.drv._mount_sofs()
        # Only the mkdir is expected; the mount itself must be skipped.
        mock_execute.assert_called_once_with('mkdir', '-p',
                                             self.scality_sofs_mount_point)
        self.assertEqual(1, m_is_mounted.call_count)

    @mock.patch('nova.utils.execute', mock.Mock())
    def test_mount_sofs_when_mount_fails(self):
        with mock.patch.object(self.drv, '_sofs_is_mounted') as m_is_mounted:
            # Still unmounted after the mount attempt -> driver must raise.
            m_is_mounted.side_effect = [False, False]
            self.assertRaises(nova.exception.NovaException,
                              self.drv._mount_sofs)
        self.assertEqual(2, m_is_mounted.call_count)

    @mock.patch('nova.utils.execute')
    def test_mount_sofs_when_sofs_is_not_mounted(self, mock_execute):
        with mock.patch.object(self.drv, '_sofs_is_mounted') as m_is_mounted:
            # Unmounted before, mounted after: the success path.
            m_is_mounted.side_effect = [False, True]
            self.drv._mount_sofs()
        self.assertEqual(2, m_is_mounted.call_count)
        self.assertEqual(2, mock_execute.call_count)
        expected_calls = [
            mock.call('mkdir', '-p', self.scality_sofs_mount_point),
            mock.call('mount', '-t', 'sofs', self.scality_sofs_config,
                      self.scality_sofs_mount_point, run_as_root=True)
        ]
        mock_execute.assert_has_calls(expected_calls)

    def test_sofs_is_mounted_when_sofs_is_not_mounted(self):
        # /proc/mounts substitute without a SOFS entry.
        mock_open = mock.mock_open(read_data='tmpfs /dev/shm\n')
        with mock.patch('io.open', mock_open) as mock_open:
            self.assertFalse(self.drv._sofs_is_mounted())

    def test_sofs_is_mounted_when_sofs_is_mounted(self):
        # /proc/mounts substitute with a fuse mount at the SOFS mount point.
        proc_mount = '/dev/fuse ' + self.scality_sofs_mount_point + '\n'
        mock_open = mock.mock_open(read_data=proc_mount)
        with mock.patch('io.open', mock_open) as mock_open:
            self.assertTrue(self.drv._sofs_is_mounted())
# Validates the bundled Intl and Emoji component data: runs the
# data-dependent test groups against the pinned ICU version and checks
# that both components still pass once their data files are compressed.
name: Intl/Emoji data
# Only run when component sources, bundled data, or related tests change.
on:
    push:
        paths:
            - 'src/Symfony/Component/Emoji/*.php'
            - 'src/Symfony/Component/Emoji/Resources/data/**'
            - 'src/Symfony/Component/Emoji/Tests/*Test.php'
            - 'src/Symfony/Component/Intl/*.php'
            - 'src/Symfony/Component/Intl/Util/GitRepository.php'
            - 'src/Symfony/Component/Intl/Resources/data/**'
            - 'src/Symfony/Component/Intl/Tests/*Test.php'
            - 'src/Symfony/Component/Intl/Tests/Util/GitRepositoryTest.php'
    pull_request:
        paths:
            - 'src/Symfony/Component/Emoji/*.php'
            - 'src/Symfony/Component/Emoji/Resources/data/**'
            - 'src/Symfony/Component/Emoji/Tests/*Test.php'
            - 'src/Symfony/Component/Intl/*.php'
            - 'src/Symfony/Component/Intl/Util/GitRepository.php'
            - 'src/Symfony/Component/Intl/Resources/data/**'
            - 'src/Symfony/Component/Intl/Tests/*Test.php'
            - 'src/Symfony/Component/Intl/Tests/Util/GitRepositoryTest.php'
defaults:
    run:
        shell: bash
# Cancel superseded runs for the same branch or pull request.
concurrency:
    group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
    cancel-in-progress: true
permissions:
    contents: read
jobs:
    tests:
        name: Intl/Emoji data
        runs-on: ubuntu-24.04
        steps:
            - name: Checkout
              uses: actions/checkout@v4
            # icu-devtools provides uconv, used below to report the ICU version.
            - name: Install system dependencies
              run: |
                  echo "::group::apt-get update"
                  sudo apt-get update
                  echo "::endgroup::"
                  echo "::group::install tools & libraries"
                  sudo apt-get install icu-devtools
                  echo "::endgroup::"
            # Read the ICU version the Intl component data is pinned to, so
            # the matching intl PHP extension build can be installed next.
            - name: Define the ICU version
              run: |
                  SYMFONY_ICU_VERSION=$(php -r 'require "src/Symfony/Component/Intl/Intl.php"; echo Symfony\Component\Intl\Intl::getIcuStubVersion();')
                  echo "SYMFONY_ICU_VERSION=$SYMFONY_ICU_VERSION" >> $GITHUB_ENV
            - name: Setup PHP
              uses: shivammathur/setup-php@v2
              with:
                  coverage: "none"
                  extensions: "zip,intl-${{env.SYMFONY_ICU_VERSION}}"
                  ini-values: "memory_limit=-1"
                  php-version: "8.4"
            - name: Install dependencies
              run: |
                  COMPOSER_HOME="$(composer config home)"
                  ([ -d "$COMPOSER_HOME" ] || mkdir "$COMPOSER_HOME") && cp .github/composer-config.json "$COMPOSER_HOME/config.json"
                  export COMPOSER_ROOT_VERSION=$(grep ' VERSION = ' src/Symfony/Component/HttpKernel/Kernel.php | grep -P -o '[0-9]+\.[0-9]+').x-dev
                  echo COMPOSER_ROOT_VERSION=$COMPOSER_ROOT_VERSION >> $GITHUB_ENV
                  echo "::group::composer update"
                  composer update --no-progress --ansi
                  echo "::endgroup::"
                  echo "::group::install phpunit"
                  ./phpunit install
                  echo "::endgroup::"
            - name: Report the ICU version
              run: uconv -V && php -i | grep 'ICU version'
            - name: Run intl-data tests
              run: |
                  ./phpunit --group intl-data --exclude-group intl-data-isolate
                  ./phpunit --group intl-data --filter testWhenEnvVarNotSet
                  ./phpunit --group intl-data --filter testWhenEnvVarSetFalse
                  ./phpunit --group intl-data --filter testWhenEnvVarSetTrue
            # Compress the Intl data in place, verify the .php files were
            # replaced by .php.gz, and re-run the component test suite.
            - name: Test intl-data with compressed data
              run: |
                  [ -f src/Symfony/Component/Intl/Resources/data/locales/en.php ]
                  [ ! -f src/Symfony/Component/Intl/Resources/data/locales/en.php.gz ]
                  src/Symfony/Component/Intl/Resources/bin/compress
                  [ ! -f src/Symfony/Component/Intl/Resources/data/locales/en.php ]
                  [ -f src/Symfony/Component/Intl/Resources/data/locales/en.php.gz ]
                  ./phpunit src/Symfony/Component/Intl
            - name: Run Emoji tests
              run: ./phpunit src/Symfony/Component/Emoji
            # Same compressed-data round-trip check for the Emoji component.
            - name: Test Emoji with compressed data
              run: |
                  [ -f src/Symfony/Component/Emoji/Resources/data/emoji-en.php ]
                  [ ! -f src/Symfony/Component/Emoji/Resources/data/emoji-en.php.gz ]
                  src/Symfony/Component/Emoji/Resources/bin/compress
                  [ ! -f src/Symfony/Component/Emoji/Resources/data/emoji-en.php ]
                  [ -f src/Symfony/Component/Emoji/Resources/data/emoji-en.php.gz ]
                  ./phpunit src/Symfony/Component/Emoji
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v1
import (
"context"
"testing"
"time"
"github.com/prometheus/common/route"
"github.com/prometheus/prometheus/promql/parser"
"github.com/prometheus/prometheus/web/api/testhelpers"
)
// newTestAPI creates a new API instance for testing using testhelpers.
// It prepares fakes via testhelpers.PrepareAPI, adapts their interface
// types to the v1 equivalents, constructs the API through NewAPI with the
// optional features (admin, remote-write, OTLP, agent mode) disabled, and
// registers the routes under /api/v1 on a fresh router.
func newTestAPI(t *testing.T, cfg testhelpers.APIConfig) *testhelpers.APIWrapper {
	t.Helper()
	params := testhelpers.PrepareAPI(t, cfg)
	// Adapt the testhelpers interfaces to v1 interfaces.
	// NOTE: the arguments below are positional; keep them in sync with the
	// NewAPI signature when adding parameters.
	api := NewAPI(
		params.QueryEngine,
		params.Queryable,
		nil, nil, // appendables
		params.ExemplarQueryable,
		func(ctx context.Context) ScrapePoolsRetriever {
			return adaptScrapePoolsRetriever(params.ScrapePoolsRetriever(ctx))
		},
		func(ctx context.Context) TargetRetriever {
			return adaptTargetRetriever(params.TargetRetriever(ctx))
		},
		func(ctx context.Context) AlertmanagerRetriever {
			return adaptAlertmanagerRetriever(params.AlertmanagerRetriever(ctx))
		},
		params.ConfigFunc,
		params.FlagsMap,
		GlobalURLOptions{},
		params.ReadyFunc,
		adaptTSDBAdminStats(params.TSDBAdmin),
		params.DBDir,
		false, // enableAdmin
		params.Logger,
		func(ctx context.Context) RulesRetriever {
			return adaptRulesRetriever(params.RulesRetriever(ctx))
		},
		0, // remoteReadSampleLimit
		0, // remoteReadConcurrencyLimit
		0, // remoteReadMaxBytesInFrame
		false, // isAgent
		nil, // corsOrigin
		// Copy the testhelpers runtime info field-by-field into the v1
		// RuntimeInfo shape expected by the status endpoints.
		func() (RuntimeInfo, error) {
			info, err := params.RuntimeInfoFunc()
			return RuntimeInfo{
				StartTime:           info.StartTime,
				CWD:                 info.CWD,
				Hostname:            info.Hostname,
				ServerTime:          info.ServerTime,
				ReloadConfigSuccess: info.ReloadConfigSuccess,
				LastConfigTime:      info.LastConfigTime,
				CorruptionCount:     info.CorruptionCount,
				GoroutineCount:      info.GoroutineCount,
				GOMAXPROCS:          info.GOMAXPROCS,
				GOMEMLIMIT:          info.GOMEMLIMIT,
				GOGC:                info.GOGC,
				GODEBUG:             info.GODEBUG,
				StorageRetention:    info.StorageRetention,
			}, err
		},
		&PrometheusVersion{
			Version:   params.BuildInfo.Version,
			Revision:  params.BuildInfo.Revision,
			Branch:    params.BuildInfo.Branch,
			BuildUser: params.BuildInfo.BuildUser,
			BuildDate: params.BuildInfo.BuildDate,
			GoVersion: params.BuildInfo.GoVersion,
		},
		params.NotificationsGetter,
		params.NotificationsSub,
		params.Gatherer,
		params.Registerer,
		nil, // statsRenderer
		false, // rwEnabled
		nil, // acceptRemoteWriteProtoMsgs
		false, // otlpEnabled
		false, // otlpDeltaToCumulative
		false, // otlpNativeDeltaIngestion
		false, // stZeroIngestionEnabled
		5*time.Minute, // lookbackDelta
		false, // enableTypeAndUnitLabels
		false, // appendMetadata
		nil, // overrideErrorCode
		nil, // featureRegistry
		OpenAPIOptions{}, // openAPIOptions
		parser.NewParser(parser.Options{}), // promqlParser
	)
	// Register routes.
	router := route.New()
	api.Register(router.WithPrefix("/api/v1"))
	return &testhelpers.APIWrapper{
		Handler: router,
	}
}
// Adapter functions to convert testhelpers interfaces to v1 interfaces.
type rulesRetrieverAdapter struct {
testhelpers.RulesRetriever
}
func adaptRulesRetriever(r testhelpers.RulesRetriever) RulesRetriever {
return &rulesRetrieverAdapter{r}
}
type targetRetrieverAdapter struct {
testhelpers.TargetRetriever
}
func adaptTargetRetriever(t testhelpers.TargetRetriever) TargetRetriever {
return &targetRetrieverAdapter{t}
}
type scrapePoolsRetrieverAdapter struct {
testhelpers.ScrapePoolsRetriever
}
func adaptScrapePoolsRetriever(s testhelpers.ScrapePoolsRetriever) ScrapePoolsRetriever {
return &scrapePoolsRetrieverAdapter{s}
}
type alertmanagerRetrieverAdapter struct {
testhelpers.AlertmanagerRetriever
}
func adaptAlertmanagerRetriever(a testhelpers.AlertmanagerRetriever) AlertmanagerRetriever {
return &alertmanagerRetrieverAdapter{a}
}
type tsdbAdminStatsAdapter struct {
testhelpers.TSDBAdminStats
}
func adaptTSDBAdminStats(t testhelpers.TSDBAdminStats) TSDBAdminStats {
return &tsdbAdminStatsAdapter{t}
} | go | github | https://github.com/prometheus/prometheus | web/api/v1/test_helpers.go |
# Kubernetes HACK Alert
This is a hack folder for kubernetes codegen scripts. Oddly, a /hack/ folder seems to be standard kubernetes development practice ¯\_(ツ)\_/¯
The workflow is a WIP, however we are trying to leverage as many off-the-shelf patterns as possible.
For these scripts to work, your local `$GOPATH/src/github.com/grafana/grafana` must point to this git checkout. For my setup this is:
```
❯ pwd
/Users/ryan/go/src/github.com/grafana
❯ ls -l
total 0
lrwxr-xr-x 1 ryan staff 37 Oct 5 09:34 grafana -> /Users/ryan/workspace/grafana/grafana
```
The current workflow is to run the following:
```shell
# ensure k8s.io/code-generator pkg is up to date
go mod download
# the happy path
./hack/update-codegen.sh
```
Note that the script deletes existing openapi go code and regenerates in place so that you will temporarily see
deleted files in your `git status`. After a successful run, you should see them restored.
If a resource client is not generated for your resource, make sure that it follows the k8s guidelines for structuring the resource definition:
- the directory is named after resource version, i.e. `<resource_name>/v<version>` (e.g. service/v0alpha1)
- the resource directory contains file `types.go` that includes resource definitions
- the resource definitions are annotated with comment `// +genclient` | unknown | github | https://github.com/grafana/grafana | hack/README.md |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.