code stringlengths 1 25.8M | language stringclasses 18 values | source stringclasses 4 values | repo stringclasses 78 values | path stringlengths 0 268 |
|---|---|---|---|---|
/*-------------------------------------------------------------------------
*
* spgproc.c
* Common supporting procedures for SP-GiST opclasses.
*
*
* Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* src/backend/access/spgist/spgproc.c
*
*-------------------------------------------------------------------------
*/
#include "postgres.h"
#include <math.h>
#include "access/spgist_private.h"
#include "utils/float.h"
#include "utils/fmgrprotos.h"
#include "utils/geo_decls.h"
#define point_point_distance(p1,p2) \
DatumGetFloat8(DirectFunctionCall2(point_distance, \
PointPGetDatum(p1), PointPGetDatum(p2)))
/* Point-box distance in the assumption that box is aligned by axis */
static double
point_box_distance(Point *point, BOX *box)
{
double dx,
dy;
if (isnan(point->x) || isnan(box->low.x) ||
isnan(point->y) || isnan(box->low.y))
return get_float8_nan();
if (point->x < box->low.x)
dx = box->low.x - point->x;
else if (point->x > box->high.x)
dx = point->x - box->high.x;
else
dx = 0.0;
if (point->y < box->low.y)
dy = box->low.y - point->y;
else if (point->y > box->high.y)
dy = point->y - box->high.y;
else
dy = 0.0;
return hypot(dx, dy);
}
/*
* Returns distances from given key to array of ordering scan keys. Leaf key
* is expected to be point, non-leaf key is expected to be box. Scan key
* arguments are expected to be points.
*/
double *
spg_key_orderbys_distances(Datum key, bool isLeaf,
ScanKey orderbys, int norderbys)
{
int sk_num;
double *distances = palloc_array(double, norderbys),
*distance = distances;
for (sk_num = 0; sk_num < norderbys; ++sk_num, ++orderbys, ++distance)
{
Point *point = DatumGetPointP(orderbys->sk_argument);
*distance = isLeaf ? point_point_distance(point, DatumGetPointP(key))
: point_box_distance(point, DatumGetBoxP(key));
}
return distances;
}
BOX *
box_copy(BOX *orig)
{
BOX *result = palloc_object(BOX);
*result = *orig;
return result;
} | c | github | https://github.com/postgres/postgres | src/backend/access/spgist/spgproc.c |
from __future__ import absolute_import
import datetime
from decimal import Decimal
from django.db.models import Avg, Sum, Count, Max, Min
from django.test import TestCase, Approximate
from .models import Author, Publisher, Book, Store
class BaseAggregateTestCase(TestCase):
fixtures = ["aggregation.json"]
def test_empty_aggregate(self):
self.assertEqual(Author.objects.all().aggregate(), {})
def test_single_aggregate(self):
vals = Author.objects.aggregate(Avg("age"))
self.assertEqual(vals, {"age__avg": Approximate(37.4, places=1)})
def test_multiple_aggregates(self):
vals = Author.objects.aggregate(Sum("age"), Avg("age"))
self.assertEqual(vals, {"age__sum": 337, "age__avg": Approximate(37.4, places=1)})
def test_filter_aggregate(self):
vals = Author.objects.filter(age__gt=29).aggregate(Sum("age"))
self.assertEqual(len(vals), 1)
self.assertEqual(vals["age__sum"], 254)
def test_related_aggregate(self):
vals = Author.objects.aggregate(Avg("friends__age"))
self.assertEqual(len(vals), 1)
self.assertAlmostEqual(vals["friends__age__avg"], 34.07, places=2)
vals = Book.objects.filter(rating__lt=4.5).aggregate(Avg("authors__age"))
self.assertEqual(len(vals), 1)
self.assertAlmostEqual(vals["authors__age__avg"], 38.2857, places=2)
vals = Author.objects.all().filter(name__contains="a").aggregate(Avg("book__rating"))
self.assertEqual(len(vals), 1)
self.assertEqual(vals["book__rating__avg"], 4.0)
vals = Book.objects.aggregate(Sum("publisher__num_awards"))
self.assertEqual(len(vals), 1)
self.assertEqual(vals["publisher__num_awards__sum"], 30)
vals = Publisher.objects.aggregate(Sum("book__price"))
self.assertEqual(len(vals), 1)
self.assertEqual(vals["book__price__sum"], Decimal("270.27"))
def test_aggregate_multi_join(self):
vals = Store.objects.aggregate(Max("books__authors__age"))
self.assertEqual(len(vals), 1)
self.assertEqual(vals["books__authors__age__max"], 57)
vals = Author.objects.aggregate(Min("book__publisher__num_awards"))
self.assertEqual(len(vals), 1)
self.assertEqual(vals["book__publisher__num_awards__min"], 1)
def test_aggregate_alias(self):
vals = Store.objects.filter(name="Amazon.com").aggregate(amazon_mean=Avg("books__rating"))
self.assertEqual(len(vals), 1)
self.assertAlmostEqual(vals["amazon_mean"], 4.08, places=2)
def test_annotate_basic(self):
self.assertQuerysetEqual(
Book.objects.annotate().order_by('pk'), [
"The Definitive Guide to Django: Web Development Done Right",
"Sams Teach Yourself Django in 24 Hours",
"Practical Django Projects",
"Python Web Development with Django",
"Artificial Intelligence: A Modern Approach",
"Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp"
],
lambda b: b.name
)
books = Book.objects.annotate(mean_age=Avg("authors__age"))
b = books.get(pk=1)
self.assertEqual(
b.name,
'The Definitive Guide to Django: Web Development Done Right'
)
self.assertEqual(b.mean_age, 34.5)
def test_annotate_m2m(self):
books = Book.objects.filter(rating__lt=4.5).annotate(Avg("authors__age")).order_by("name")
self.assertQuerysetEqual(
books, [
('Artificial Intelligence: A Modern Approach', 51.5),
('Practical Django Projects', 29.0),
('Python Web Development with Django', Approximate(30.3, places=1)),
('Sams Teach Yourself Django in 24 Hours', 45.0)
],
lambda b: (b.name, b.authors__age__avg),
)
books = Book.objects.annotate(num_authors=Count("authors")).order_by("name")
self.assertQuerysetEqual(
books, [
('Artificial Intelligence: A Modern Approach', 2),
('Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', 1),
('Practical Django Projects', 1),
('Python Web Development with Django', 3),
('Sams Teach Yourself Django in 24 Hours', 1),
('The Definitive Guide to Django: Web Development Done Right', 2)
],
lambda b: (b.name, b.num_authors)
)
def test_backwards_m2m_annotate(self):
authors = Author.objects.filter(name__contains="a").annotate(Avg("book__rating")).order_by("name")
self.assertQuerysetEqual(
authors, [
('Adrian Holovaty', 4.5),
('Brad Dayley', 3.0),
('Jacob Kaplan-Moss', 4.5),
('James Bennett', 4.0),
('Paul Bissex', 4.0),
('Stuart Russell', 4.0)
],
lambda a: (a.name, a.book__rating__avg)
)
authors = Author.objects.annotate(num_books=Count("book")).order_by("name")
self.assertQuerysetEqual(
authors, [
('Adrian Holovaty', 1),
('Brad Dayley', 1),
('Jacob Kaplan-Moss', 1),
('James Bennett', 1),
('Jeffrey Forcier', 1),
('Paul Bissex', 1),
('Peter Norvig', 2),
('Stuart Russell', 1),
('Wesley J. Chun', 1)
],
lambda a: (a.name, a.num_books)
)
def test_reverse_fkey_annotate(self):
books = Book.objects.annotate(Sum("publisher__num_awards")).order_by("name")
self.assertQuerysetEqual(
books, [
('Artificial Intelligence: A Modern Approach', 7),
('Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', 9),
('Practical Django Projects', 3),
('Python Web Development with Django', 7),
('Sams Teach Yourself Django in 24 Hours', 1),
('The Definitive Guide to Django: Web Development Done Right', 3)
],
lambda b: (b.name, b.publisher__num_awards__sum)
)
publishers = Publisher.objects.annotate(Sum("book__price")).order_by("name")
self.assertQuerysetEqual(
publishers, [
('Apress', Decimal("59.69")),
("Jonno's House of Books", None),
('Morgan Kaufmann', Decimal("75.00")),
('Prentice Hall', Decimal("112.49")),
('Sams', Decimal("23.09"))
],
lambda p: (p.name, p.book__price__sum)
)
def test_annotate_values(self):
books = list(Book.objects.filter(pk=1).annotate(mean_age=Avg("authors__age")).values())
self.assertEqual(
books, [
{
"contact_id": 1,
"id": 1,
"isbn": "159059725",
"mean_age": 34.5,
"name": "The Definitive Guide to Django: Web Development Done Right",
"pages": 447,
"price": Approximate(Decimal("30")),
"pubdate": datetime.date(2007, 12, 6),
"publisher_id": 1,
"rating": 4.5,
}
]
)
books = Book.objects.filter(pk=1).annotate(mean_age=Avg('authors__age')).values('pk', 'isbn', 'mean_age')
self.assertEqual(
list(books), [
{
"pk": 1,
"isbn": "159059725",
"mean_age": 34.5,
}
]
)
books = Book.objects.filter(pk=1).annotate(mean_age=Avg("authors__age")).values("name")
self.assertEqual(
list(books), [
{
"name": "The Definitive Guide to Django: Web Development Done Right"
}
]
)
books = Book.objects.filter(pk=1).values().annotate(mean_age=Avg('authors__age'))
self.assertEqual(
list(books), [
{
"contact_id": 1,
"id": 1,
"isbn": "159059725",
"mean_age": 34.5,
"name": "The Definitive Guide to Django: Web Development Done Right",
"pages": 447,
"price": Approximate(Decimal("30")),
"pubdate": datetime.date(2007, 12, 6),
"publisher_id": 1,
"rating": 4.5,
}
]
)
books = Book.objects.values("rating").annotate(n_authors=Count("authors__id"), mean_age=Avg("authors__age")).order_by("rating")
self.assertEqual(
list(books), [
{
"rating": 3.0,
"n_authors": 1,
"mean_age": 45.0,
},
{
"rating": 4.0,
"n_authors": 6,
"mean_age": Approximate(37.16, places=1)
},
{
"rating": 4.5,
"n_authors": 2,
"mean_age": 34.5,
},
{
"rating": 5.0,
"n_authors": 1,
"mean_age": 57.0,
}
]
)
authors = Author.objects.annotate(Avg("friends__age")).order_by("name")
self.assertEqual(len(authors), 9)
self.assertQuerysetEqual(
authors, [
('Adrian Holovaty', 32.0),
('Brad Dayley', None),
('Jacob Kaplan-Moss', 29.5),
('James Bennett', 34.0),
('Jeffrey Forcier', 27.0),
('Paul Bissex', 31.0),
('Peter Norvig', 46.0),
('Stuart Russell', 57.0),
('Wesley J. Chun', Approximate(33.66, places=1))
],
lambda a: (a.name, a.friends__age__avg)
)
def test_count(self):
vals = Book.objects.aggregate(Count("rating"))
self.assertEqual(vals, {"rating__count": 6})
vals = Book.objects.aggregate(Count("rating", distinct=True))
self.assertEqual(vals, {"rating__count": 4})
def test_fkey_aggregate(self):
explicit = list(Author.objects.annotate(Count('book__id')))
implicit = list(Author.objects.annotate(Count('book')))
self.assertEqual(explicit, implicit)
def test_annotate_ordering(self):
books = Book.objects.values('rating').annotate(oldest=Max('authors__age')).order_by('oldest', 'rating')
self.assertEqual(
list(books), [
{
"rating": 4.5,
"oldest": 35,
},
{
"rating": 3.0,
"oldest": 45
},
{
"rating": 4.0,
"oldest": 57,
},
{
"rating": 5.0,
"oldest": 57,
}
]
)
books = Book.objects.values("rating").annotate(oldest=Max("authors__age")).order_by("-oldest", "-rating")
self.assertEqual(
list(books), [
{
"rating": 5.0,
"oldest": 57,
},
{
"rating": 4.0,
"oldest": 57,
},
{
"rating": 3.0,
"oldest": 45,
},
{
"rating": 4.5,
"oldest": 35,
}
]
)
def test_aggregate_annotation(self):
vals = Book.objects.annotate(num_authors=Count("authors__id")).aggregate(Avg("num_authors"))
self.assertEqual(vals, {"num_authors__avg": Approximate(1.66, places=1)})
def test_filtering(self):
p = Publisher.objects.create(name='Expensive Publisher', num_awards=0)
Book.objects.create(
name='ExpensiveBook1',
pages=1,
isbn='111',
rating=3.5,
price=Decimal("1000"),
publisher=p,
contact_id=1,
pubdate=datetime.date(2008,12,1)
)
Book.objects.create(
name='ExpensiveBook2',
pages=1,
isbn='222',
rating=4.0,
price=Decimal("1000"),
publisher=p,
contact_id=1,
pubdate=datetime.date(2008,12,2)
)
Book.objects.create(
name='ExpensiveBook3',
pages=1,
isbn='333',
rating=4.5,
price=Decimal("35"),
publisher=p,
contact_id=1,
pubdate=datetime.date(2008,12,3)
)
publishers = Publisher.objects.annotate(num_books=Count("book__id")).filter(num_books__gt=1).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Apress",
"Prentice Hall",
"Expensive Publisher",
],
lambda p: p.name,
)
publishers = Publisher.objects.filter(book__price__lt=Decimal("40.0")).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Apress",
"Apress",
"Sams",
"Prentice Hall",
"Expensive Publisher",
],
lambda p: p.name
)
publishers = Publisher.objects.annotate(num_books=Count("book__id")).filter(num_books__gt=1, book__price__lt=Decimal("40.0")).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Apress",
"Prentice Hall",
"Expensive Publisher",
],
lambda p: p.name,
)
publishers = Publisher.objects.filter(book__price__lt=Decimal("40.0")).annotate(num_books=Count("book__id")).filter(num_books__gt=1).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Apress",
],
lambda p: p.name
)
publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__range=[1, 3]).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Apress",
"Sams",
"Prentice Hall",
"Morgan Kaufmann",
"Expensive Publisher",
],
lambda p: p.name
)
publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__range=[1, 2]).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Apress",
"Sams",
"Prentice Hall",
"Morgan Kaufmann",
],
lambda p: p.name
)
publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__in=[1, 3]).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Sams",
"Morgan Kaufmann",
"Expensive Publisher",
],
lambda p: p.name,
)
publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__isnull=True)
self.assertEqual(len(publishers), 0)
def test_annotation(self):
vals = Author.objects.filter(pk=1).aggregate(Count("friends__id"))
self.assertEqual(vals, {"friends__id__count": 2})
books = Book.objects.annotate(num_authors=Count("authors__name")).filter(num_authors__ge=2).order_by("pk")
self.assertQuerysetEqual(
books, [
"The Definitive Guide to Django: Web Development Done Right",
"Artificial Intelligence: A Modern Approach",
],
lambda b: b.name
)
authors = Author.objects.annotate(num_friends=Count("friends__id", distinct=True)).filter(num_friends=0).order_by("pk")
self.assertQuerysetEqual(
authors, [
"Brad Dayley",
],
lambda a: a.name
)
publishers = Publisher.objects.annotate(num_books=Count("book__id")).filter(num_books__gt=1).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Apress",
"Prentice Hall",
],
lambda p: p.name
)
publishers = Publisher.objects.filter(book__price__lt=Decimal("40.0")).annotate(num_books=Count("book__id")).filter(num_books__gt=1)
self.assertQuerysetEqual(
publishers, [
"Apress",
],
lambda p: p.name
)
books = Book.objects.annotate(num_authors=Count("authors__id")).filter(authors__name__contains="Norvig", num_authors__gt=1)
self.assertQuerysetEqual(
books, [
"Artificial Intelligence: A Modern Approach",
],
lambda b: b.name
)
def test_more_aggregation(self):
a = Author.objects.get(name__contains='Norvig')
b = Book.objects.get(name__contains='Done Right')
b.authors.add(a)
b.save()
vals = Book.objects.annotate(num_authors=Count("authors__id")).filter(authors__name__contains="Norvig", num_authors__gt=1).aggregate(Avg("rating"))
self.assertEqual(vals, {"rating__avg": 4.25})
def test_even_more_aggregate(self):
publishers = Publisher.objects.annotate(earliest_book=Min("book__pubdate")).exclude(earliest_book=None).order_by("earliest_book").values()
self.assertEqual(
list(publishers), [
{
'earliest_book': datetime.date(1991, 10, 15),
'num_awards': 9,
'id': 4,
'name': 'Morgan Kaufmann'
},
{
'earliest_book': datetime.date(1995, 1, 15),
'num_awards': 7,
'id': 3,
'name': 'Prentice Hall'
},
{
'earliest_book': datetime.date(2007, 12, 6),
'num_awards': 3,
'id': 1,
'name': 'Apress'
},
{
'earliest_book': datetime.date(2008, 3, 3),
'num_awards': 1,
'id': 2,
'name': 'Sams'
}
]
)
vals = Store.objects.aggregate(Max("friday_night_closing"), Min("original_opening"))
self.assertEqual(
vals,
{
"friday_night_closing__max": datetime.time(23, 59, 59),
"original_opening__min": datetime.datetime(1945, 4, 25, 16, 24, 14),
}
)
def test_annotate_values_list(self):
books = Book.objects.filter(pk=1).annotate(mean_age=Avg("authors__age")).values_list("pk", "isbn", "mean_age")
self.assertEqual(
list(books), [
(1, "159059725", 34.5),
]
)
books = Book.objects.filter(pk=1).annotate(mean_age=Avg("authors__age")).values_list("isbn")
self.assertEqual(
list(books), [
('159059725',)
]
)
books = Book.objects.filter(pk=1).annotate(mean_age=Avg("authors__age")).values_list("mean_age")
self.assertEqual(
list(books), [
(34.5,)
]
)
books = Book.objects.filter(pk=1).annotate(mean_age=Avg("authors__age")).values_list("mean_age", flat=True)
self.assertEqual(list(books), [34.5])
books = Book.objects.values_list("price").annotate(count=Count("price")).order_by("-count", "price")
self.assertEqual(
list(books), [
(Decimal("29.69"), 2),
(Decimal('23.09'), 1),
(Decimal('30'), 1),
(Decimal('75'), 1),
(Decimal('82.8'), 1),
]
)
def test_dates_with_aggregation(self):
"""
Test that .dates() returns a distinct set of dates when applied to a
QuerySet with aggregation.
Refs #18056. Previously, .dates() would return distinct (date_kind,
aggregation) sets, in this case (year, num_authors), so 2008 would be
returned twice because there are books from 2008 with a different
number of authors.
"""
dates = Book.objects.annotate(num_authors=Count("authors")).dates('pubdate', 'year')
self.assertQuerysetEqual(
dates, [
"datetime.date(1991, 1, 1)",
"datetime.date(1995, 1, 1)",
"datetime.date(2007, 1, 1)",
"datetime.date(2008, 1, 1)"
]
) | unknown | codeparrot/codeparrot-clean | ||
import re
import sys
import parse_tree as pt
import mod_collins_head_finder as mchf
import warnings
import pdb
# Adapted from the Berkeley coreference system.
def extract_dependency_structure(num_words, parse_tree, head_finder):
constituents = parse_tree.get_constituents()
subtree_heads = dict()
trees = parse_tree.get_post_order_traversal()
heads = [-1] * num_words
for tree in trees:
if tree.is_leaf():
pass
elif tree.is_preterminal():
constituent = constituents[tree]
subtree_heads[tree] = constituent.start
else:
children = tree.get_children()
head = head_finder.determine_head(tree)
if head == None:
warnings.warn('No head found: ' + str(tree))
head = children[0]
head_index = subtree_heads[head]
for child in children:
if child == head:
subtree_heads[tree] = head_index
else:
heads[subtree_heads[child]] = head_index
return heads
def parse_conll_file(filename, tag_column, word_column, parse_column):
f = open(filename)
# Obtain dependencies from the parse tree using Collins' head rules.
head_finder = mchf.ModCollinsHeadFinder()
all_fields = []
parse_tree_desc = ''
for line in f:
line = line.rstrip('\n')
if line == '':
# Load the syntactic parse tree from its string description.
parse_tree = pt.ParseTree()
parse_tree.load(parse_tree_desc)
num_words = len(all_fields)
heads = extract_dependency_structure(num_words, parse_tree, head_finder)
for i, fields in enumerate(all_fields):
deprel = '_'
new_fields = fields[:(parse_column+1)] + [str(heads[i]+1), deprel] + fields[(parse_column+1):]
new_line = '\t'.join(new_fields)
print new_line
print
all_fields = []
parse_tree_desc = ''
elif line.startswith('#begin') or line.startswith('#end'):
print line
else:
line = re.sub('[ ]+', '\t', line)
fields = line.split('\t')
all_fields.append(fields)
assert len(fields) >= 3, pdb.set_trace()
word = fields[word_column]
tag = fields[tag_column]
parse_span = fields[parse_column]
parse_tree_bit = re.sub('\*', '(' + tag + ' ' + word + ')', parse_span)
parse_tree_desc += parse_tree_bit
f.close()
if __name__ == '__main__':
filename = sys.argv[1]
word_column = int(sys.argv[2])
tag_column = int(sys.argv[3])
parse_column = int(sys.argv[4])
parse_conll_file(filename, tag_column, word_column, parse_column) | unknown | codeparrot/codeparrot-clean | ||
#
# cache.py
#
# Copyright (C) 2009 Andrew Resch <andrewresch@gmail.com>
#
# Deluge is free software.
#
# You may redistribute it and/or modify it under the terms of the
# GNU General Public License, as published by the Free Software
# Foundation; either version 3 of the License, or (at your option)
# any later version.
#
# deluge is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with deluge. If not, write to:
# The Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor
# Boston, MA 02110-1301, USA.
#
# In addition, as a special exception, the copyright holders give
# permission to link the code of portions of this program with the OpenSSL
# library.
# You must obey the GNU General Public License in all respects for all of
# the code used other than OpenSSL. If you modify file(s) with this
# exception, you may extend this exception to your version of the file(s),
# but you are not obligated to do so. If you do not wish to do so, delete
# this exception statement from your version. If you delete this exception
# statement from all source files in the program, then also delete it here.
#
from deluge.ui.console.main import BaseCommand
from deluge.ui.client import client
import deluge.ui.console.colors as colors
import deluge.component as component
class Command(BaseCommand):
"""Show information about the disk cache"""
usage = "Usage: cache"
def handle(self, *args, **options):
self.console = component.get("ConsoleUI")
def on_cache_status(status):
for key, value in status.items():
self.console.write("{!info!}%s: {!input!}%s" % (key, value))
d = client.core.get_cache_status()
d.addCallback(on_cache_status)
return d | unknown | codeparrot/codeparrot-clean | ||
from os import readlink
import copy
import csv
import errno
import os
import shutil
import tempfile
import unittest
from mock import Mock, MagicMock, patch
from pulp.common.plugins.distributor_constants import MANIFEST_FILENAME
from pulp.devel.mock_distributor import get_publish_conduit
from pulp.plugins.file.distributor import FileDistributor, FilePublishProgressReport, BUILD_DIRNAME
from pulp.plugins.model import Repository, Unit
DATA_DIR = os.path.realpath("../../../data/")
SAMPLE_RPM = 'pulp-test-package-0.3.1-1.fc11.x86_64.rpm'
SAMPLE_FILE = 'test-override-pulp.conf'
class FileDistributorTest(unittest.TestCase):
"""
Tests the file distributor base class
"""
def setUp(self):
self.temp_dir = tempfile.mkdtemp()
self.target_dir = os.path.join(self.temp_dir, "target")
self.repo = MagicMock(spec=Repository)
self.repo.id = "foo"
self.repo.working_dir = self.temp_dir
self.unit = Unit('RPM', {'name': SAMPLE_RPM, 'size': 1, 'checksum': 'sum1'}, {},
os.path.join(DATA_DIR, SAMPLE_RPM))
self.publish_conduit = get_publish_conduit(existing_units=[self.unit, ])
def tearDown(self):
shutil.rmtree(self.temp_dir)
def create_distributor_with_mocked_api_calls(self):
distributor = FileDistributor()
distributor.get_hosting_locations = Mock()
distributor.get_hosting_locations.return_value = [self.target_dir, ]
distributor.post_repo_publish = Mock()
return distributor
def test_metadata_not_implemented(self):
self.assertRaises(NotImplementedError, FileDistributor.metadata)
def test_validate_config_not_implemented(self):
distributor = FileDistributor()
self.assertRaises(NotImplementedError, distributor.validate_config, None, None, None)
def test_get_hosting_locations_not_implemented(self):
distributor = FileDistributor()
host_locations = distributor.get_hosting_locations(None, None)
self.assertEquals(0, len(host_locations))
def test_post_repo_publish_not_implemented(self):
distributor = FileDistributor()
# ensure that this doesn't raise an error
distributor.post_repo_publish(None, None)
def test_repo_publish_api_calls(self):
distributor = self.create_distributor_with_mocked_api_calls()
result = distributor.publish_repo(self.repo, self.publish_conduit, {})
self.assertTrue(result.success_flag)
self.assertTrue(distributor.get_hosting_locations.called)
self.assertTrue(distributor.post_repo_publish.called)
# The publish_conduit should have had two set_progress calls. One to start the IN_PROGRESS
# state, and the second to mark it as complete
self.assertEqual(self.publish_conduit.set_progress.call_count, 2)
self.assertEqual(self.publish_conduit.set_progress.mock_calls[0][1][0]['state'],
FilePublishProgressReport.STATE_IN_PROGRESS)
self.assertEqual(self.publish_conduit.set_progress.mock_calls[1][1][0]['state'],
FilePublishProgressReport.STATE_COMPLETE)
def test_repo_publish_files_placed_properly(self):
distributor = self.create_distributor_with_mocked_api_calls()
distributor.publish_repo(self.repo, self.publish_conduit, {})
target_file = os.path.join(self.target_dir, SAMPLE_RPM)
# test if the link was created
self.assertTrue(os.path.islink(target_file))
# test if the link points to the correct place
link_target = os.readlink(target_file)
self.assertEquals(link_target, os.path.join(DATA_DIR, SAMPLE_RPM))
def test_repo_publish_metadata_writing(self):
distributor = self.create_distributor_with_mocked_api_calls()
distributor.publish_repo(self.repo, self.publish_conduit, {})
with open(os.path.join(self.target_dir, MANIFEST_FILENAME), 'rb') as f:
reader = csv.reader(f)
row = reader.next()
self.assertEquals(row[0], self.unit.unit_key['name'])
self.assertEquals(row[1], self.unit.unit_key['checksum'])
self.assertEquals(row[2], str(self.unit.unit_key['size']))
@patch('pulp.plugins.file.distributor._logger')
def test_repo_publish_handles_errors(self, mock_logger):
"""
Make sure that publish() does the right thing with the report when there is an error.
"""
distributor = self.create_distributor_with_mocked_api_calls()
distributor.post_repo_publish.side_effect = Exception('Rawr!')
report = distributor.publish_repo(self.repo, self.publish_conduit, {})
self.assertTrue(mock_logger.exception.called)
self.assertFalse(report.success_flag)
self.assertEqual(report.summary['state'], FilePublishProgressReport.STATE_FAILED)
self.assertEqual(report.summary['error_message'], 'Rawr!')
self.assertTrue('Rawr!' in report.summary['traceback'])
# The publish_conduit should have had two set_progress calls. One to start the IN_PROGRESS
# state, and the second to mark it as failed
self.assertEqual(self.publish_conduit.set_progress.call_count, 2)
self.assertEqual(self.publish_conduit.set_progress.mock_calls[0][1][0]['state'],
FilePublishProgressReport.STATE_IN_PROGRESS)
self.assertEqual(self.publish_conduit.set_progress.mock_calls[1][1][0]['state'],
FilePublishProgressReport.STATE_FAILED)
def test_republish_after_unit_removal(self):
"""
This test checks for an issue[0] we had where publishing an ISO repository, removing an ISO,
and then republishing would leave that removed ISO's symlink in the repository even though
it had been removed from the manifest. This test asserts that the republished repository no
longer contains the removed ISO.
[0] https://bugzilla.redhat.com/show_bug.cgi?id=970795
:param delete_protected_repo: The mocked version of delete_protected_repo
:type delete_protected_repo: function
"""
# Publish a repository
distributor = self.create_distributor_with_mocked_api_calls()
distributor.publish_repo(self.repo, self.publish_conduit, {})
target_file = os.path.join(self.target_dir, SAMPLE_RPM)
# test if the link was created
self.assertTrue(os.path.islink(target_file))
# publish a new repo with a different unit in it
cloned_unit = copy.deepcopy(self.unit)
cloned_unit.unit_key['name'] = 'foo.rpm'
new_conduit = get_publish_conduit(existing_units=[cloned_unit, ])
distributor.publish_repo(self.repo, new_conduit, {})
# Make sure the new rpm is linked
self.assertTrue(os.path.islink(os.path.join(self.target_dir, 'foo.rpm')))
# Ensure the old rpm is no longer included
self.assertFalse(os.path.islink(target_file))
def test_distributor_removed_calls_unpublish(self):
distributor = self.create_distributor_with_mocked_api_calls()
distributor.unpublish_repo = Mock()
distributor.distributor_removed(self.repo, {})
self.assertTrue(distributor.unpublish_repo.called)
def test_unpublish_repo(self):
distributor = self.create_distributor_with_mocked_api_calls()
distributor.publish_repo(self.repo, self.publish_conduit, {})
self.assertTrue(os.path.exists(self.target_dir))
distributor.unpublish_repo(self.repo, {})
self.assertFalse(os.path.exists(self.target_dir))
def test__rmtree_if_exists(self):
"""
Let's just make sure this simple thing doesn't barf.
"""
a_directory = os.path.join(self.temp_dir, 'a_directory')
test_filename = os.path.join(a_directory, 'test.txt')
os.makedirs(a_directory)
with open(test_filename, 'w') as test:
test.write("Please don't barf.")
# This should not cause any problems, and test.txt should still exist
distributor = self.create_distributor_with_mocked_api_calls()
distributor._rmtree_if_exists(os.path.join(self.temp_dir, 'fake_path'))
self.assertTrue(os.path.exists(test_filename))
# Now let's remove a_directory
distributor._rmtree_if_exists(a_directory)
self.assertFalse(os.path.exists(a_directory))
def test__symlink_units(self):
"""
Make sure that the _symlink_units creates all the correct symlinks.
"""
distributor = self.create_distributor_with_mocked_api_calls()
# There's some logic in _symlink_units to handle preexisting files and symlinks, so let's
# create some fakes to see if it does the right thing
build_dir = os.path.join(self.temp_dir, BUILD_DIRNAME)
os.makedirs(build_dir)
os.symlink('/some/weird/path',
os.path.join(build_dir, self.unit.unit_key['name']))
distributor._symlink_unit(build_dir, self.unit, [self.unit.unit_key['name'], ])
expected_symlink_path = os.path.join(build_dir, self.unit.unit_key['name'])
self.assertTrue(os.path.islink(expected_symlink_path))
expected_symlink_destination = os.path.join(DATA_DIR, self.unit.unit_key['name'])
self.assertEqual(os.path.realpath(expected_symlink_path), expected_symlink_destination)
@patch('os.symlink', side_effect=os.symlink)
def test__symlink_units_existing_correct_link(self, symlink):
"""
Make sure that the _symlink_units handles an existing correct link well.
"""
# There's some logic in _symlink_units to handle preexisting files and symlinks, so let's
# create some fakes to see if it does the right thing
build_dir = os.path.join(self.temp_dir, BUILD_DIRNAME)
os.makedirs(build_dir)
expected_symlink_destination = os.path.join(DATA_DIR, self.unit.unit_key['name'])
os.symlink(expected_symlink_destination,
os.path.join(build_dir, self.unit.unit_key['name']))
# Now let's reset the Mock so that we can make sure it doesn't get called during _symlink
symlink.reset_mock()
distributor = self.create_distributor_with_mocked_api_calls()
distributor._symlink_unit(build_dir, self.unit, [self.unit.unit_key['name']])
# The call count for symlink should be 0, because the _symlink_units call should have
# noticed that the symlink was already correct and thus should have skipped it
self.assertEqual(symlink.call_count, 0)
expected_symlink_path = os.path.join(build_dir, self.unit.unit_key['name'])
self.assertTrue(os.path.islink(expected_symlink_path))
self.assertEqual(os.path.realpath(expected_symlink_path),
os.path.realpath(expected_symlink_destination))
@patch('os.readlink')
def test__symlink_units_os_error(self, readlink):
"""
Make sure that the _symlink_units handles an OSError correctly, for the case where it
doesn't raise EINVAL. We already have a test that raises EINVAL (test__symlink_units places
an ordinary file there.)
"""
os_error = OSError()
# This would be an unexpected error for reading a symlink!
os_error.errno = errno.ENOSPC
readlink.side_effect = os_error
# There's some logic in _symlink_units to handle preexisting files and symlinks, so let's
# create some fakes to see if it does the right thing
build_dir = os.path.join(self.temp_dir, BUILD_DIRNAME)
os.makedirs(build_dir)
expected_symlink_destination = os.path.join(DATA_DIR, self.unit.unit_key['name'])
os.symlink(expected_symlink_destination,
os.path.join(build_dir, self.unit.unit_key['name']))
try:
distributor = self.create_distributor_with_mocked_api_calls()
distributor._symlink_unit(build_dir, self.unit, [self.unit.unit_key['name']])
self.fail('An OSError should have been raised, but was not!')
except OSError, e:
self.assertEqual(e.errno, errno.ENOSPC)
    @patch('os.readlink')
    def test__symlink_units_EINVAL_os_error(self, mock_readlink):
        """
        Make sure _symlink_units handles an OSError with errno EINVAL
        correctly: EINVAL from os.readlink() means the existing path is not a
        symlink, so _symlink_unit should remove it and create a fresh link
        rather than propagating the error.
        """
        os_error = OSError()
        # EINVAL is the one errno _symlink_unit is expected to recover from.
        os_error.errno = errno.EINVAL
        mock_readlink.side_effect = os_error
        # There's some logic in _symlink_units to handle preexisting files and
        # symlinks, so let's create some fakes to see if it does the right thing.
        build_dir = os.path.join(self.temp_dir, BUILD_DIRNAME)
        os.makedirs(build_dir)
        original_link = os.path.join(build_dir, self.unit.unit_key['name'])
        old_target = os.path.join(DATA_DIR, SAMPLE_FILE)
        os.symlink(old_target, original_link)
        distributor = self.create_distributor_with_mocked_api_calls()
        distributor._symlink_unit(build_dir, self.unit, [self.unit.unit_key['name']])
        # The old link should have been removed and re-created, so a symlink
        # must still exist at the original path.
        self.assertTrue(os.path.islink(original_link))
        # NOTE(review): `readlink` here is a bare name, not os.readlink —
        # presumably imported at module level (os.readlink is still patched
        # to raise at this point, so the bare name must bind to the real
        # function); confirm against the module's imports.
        created_link = readlink(original_link)
self.assertNotEqual(old_target, created_link) | unknown | codeparrot/codeparrot-clean | ||
# Copyright 2013 NTT Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import tarfile
import tempfile

from tempest.api.object_storage import base
from tempest.common import custom_matchers
from tempest import test
from tempest_lib import decorators
class BulkTest(base.BaseObjectTest):
    """Tests for Swift's bulk middleware: archive upload and bulk delete."""

    def setUp(self):
        super(BulkTest, self).setUp()
        # Containers created by a test; cleaned up again in tearDown().
        self.containers = []

    def tearDown(self):
        self.delete_containers(self.containers)
        super(BulkTest, self).tearDown()

    def _create_archive(self):
        """Create a tar archive for bulk upload testing.

        The temporary directory corresponds to a container and the file
        inside it to a subsidiary object.

        :returns: (tar file path, container name, object name)
        """
        tmp_dir = tempfile.mkdtemp()
        fd, tmp_path = tempfile.mkstemp(dir=tmp_dir)
        # mkstemp() hands back an *open* file descriptor; close it so the
        # test suite does not leak descriptors.
        os.close(fd)
        # Extract a container name and an object name from the paths.
        container_name = tmp_dir.split("/")[-1]
        object_name = tmp_path.split("/")[-1]
        # Create the tar file, writing through the NamedTemporaryFile object.
        tarpath = tempfile.NamedTemporaryFile(suffix=".tar")
        tar = tarfile.open(None, 'w', tarpath)
        tar.add(tmp_dir, arcname=container_name)
        tar.close()
        tarpath.flush()
        return tarpath.name, container_name, object_name

    def _upload_archive(self, filepath):
        """Upload an archived file with the extract-archive bulk operation."""
        params = {'extract-archive': 'tar'}
        # Read as binary: tar archives are not text.
        with open(filepath, 'rb') as fh:
            mydata = fh.read()
        resp, body = self.account_client.create_account(data=mydata,
                                                        params=params)
        return resp, body

    def _check_contents_deleted(self, container_name):
        """Assert that container_name no longer appears in the account."""
        param = {'format': 'txt'}
        resp, body = self.account_client.list_account_containers(param)
        self.assertHeaders(resp, 'Account', 'GET')
        self.assertNotIn(container_name, body)

    @decorators.skip_because(bug="1417457")
    @test.attr(type='gate')
    @test.idempotent_id('a407de51-1983-47cc-9f14-47c2b059413c')
    @test.requires_ext(extension='bulk', service='object')
    def test_extract_archive(self):
        # Test bulk operation of file upload with an archived file.
        filepath, container_name, object_name = self._create_archive()
        resp, _ = self._upload_archive(filepath)
        self.containers.append(container_name)
        # When uploading an archived file with the bulk operation, the response
        # does not contain a 'content-length' header, so the existence of the
        # response headers is checked without the custom matcher.
        self.assertIn('transfer-encoding', resp)
        self.assertIn('content-type', resp)
        self.assertIn('x-trans-id', resp)
        self.assertIn('date', resp)
        # Check only the format of common headers with the custom matcher.
        self.assertThat(resp, custom_matchers.AreAllWellFormatted())
        # The container extracted from the archive must now exist...
        param = {'format': 'json'}
        resp, body = self.account_client.list_account_containers(param)
        self.assertHeaders(resp, 'Account', 'GET')
        self.assertIn(container_name, [b['name'] for b in body])
        # ...and must contain the extracted object.
        param = {'format': 'json'}
        resp, contents_list = self.container_client.list_container_contents(
            container_name, param)
        self.assertHeaders(resp, 'Container', 'GET')
        self.assertIn(object_name, [c['name'] for c in contents_list])

    @decorators.skip_because(bug="1417457")
    @test.attr(type='gate')
    @test.idempotent_id('c075e682-0d2a-43b2-808d-4116200d736d')
    @test.requires_ext(extension='bulk', service='object')
    def test_bulk_delete(self):
        # Test bulk deletion of multiple files via DELETE.
        filepath, container_name, object_name = self._create_archive()
        self._upload_archive(filepath)
        data = '%s/%s\n%s' % (container_name, object_name, container_name)
        params = {'bulk-delete': ''}
        resp, body = self.account_client.delete_account(data=data,
                                                        params=params)
        # When deleting multiple files using the bulk operation, the response
        # does not contain a 'content-length' header, so the existence of the
        # response headers is checked without the custom matcher.
        self.assertIn('transfer-encoding', resp)
        self.assertIn('content-type', resp)
        self.assertIn('x-trans-id', resp)
        self.assertIn('date', resp)
        # Check only the format of common headers with the custom matcher.
        self.assertThat(resp, custom_matchers.AreAllWellFormatted())
        # Check that the uploaded contents are completely deleted.
        self._check_contents_deleted(container_name)

    @decorators.skip_because(bug="1417457")
    @test.attr(type='gate')
    @test.idempotent_id('dbea2bcb-efbb-4674-ac8a-a5a0e33d1d79')
    @test.requires_ext(extension='bulk', service='object')
    def test_bulk_delete_by_POST(self):
        # Test bulk deletion of multiple files via POST.
        filepath, container_name, object_name = self._create_archive()
        self._upload_archive(filepath)
        data = '%s/%s\n%s' % (container_name, object_name, container_name)
        params = {'bulk-delete': ''}
        resp, body = self.account_client.create_account_metadata(
            {}, data=data, params=params)
        # When deleting multiple files using the bulk operation, the response
        # does not contain a 'content-length' header, so the existence of the
        # response headers is checked without the custom matcher.
        self.assertIn('transfer-encoding', resp)
        self.assertIn('content-type', resp)
        self.assertIn('x-trans-id', resp)
        self.assertIn('date', resp)
        # Check only the format of common headers with the custom matcher.
        self.assertThat(resp, custom_matchers.AreAllWellFormatted())
        # Check that the uploaded contents are completely deleted.
        self._check_contents_deleted(container_name)
// Compiler test fixture: an object literal assigned in both branches of an
// if/else, to exercise caching of conditionally-created objects.
// NOTE(review): `someVal` is a free variable not defined in this file —
// presumably supplied by the fixture harness; confirm before running
// this standalone.  Params `a` and `d` are intentionally unused.
function foo(a, b, c, d) {
  let x = {};
  if (someVal) {
    // Shorthand property: equivalent to { b: b }.
    x = {b};
  } else {
    // Shorthand property: equivalent to { c: c }.
    x = {c};
  }
  return x;
}
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# This file was automatically generated from src/transformers/models/deepseek_vl_hybrid/modular_deepseek_vl_hybrid.py.
# Do NOT edit this file manually as any edits will be overwritten by the generation of
# the file from the modular. If any change should be done, please apply the change to the
# modular_deepseek_vl_hybrid.py file directly. One of our CI enforces this.
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# Copyright 2025 Deepseek AI and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ...image_processing_utils_fast import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessingKwargs, ProcessorMixin, Unpack
from ...tokenization_utils_base import PreTokenizedInput, TextInput
from ...utils import auto_docstring
class DeepseekVLHybridProcessorKwargs(ProcessingKwargs, total=False):
    # Defaults merged into every processor call:
    # - text_kwargs: no padding unless the caller asks for it.
    # - common_kwargs: return PyTorch tensors by default.
    _defaults = {
        "text_kwargs": {"padding": False},
        "common_kwargs": {"return_tensors": "pt"},
    }
@auto_docstring
class DeepseekVLHybridProcessor(ProcessorMixin):
    def __init__(
        self,
        image_processor,
        tokenizer,
        chat_template=None,
        num_image_tokens=576,
    ):
        r"""
        num_image_tokens (`int`, *optional*, defaults to 576):
            The number of special image tokens used as placeholders for visual content in text sequences.
        """
        # Placeholder token string taken from the tokenizer; each occurrence
        # in the prompt is expanded to `num_image_tokens` copies in __call__.
        self.image_token = tokenizer.image_token
        self.num_image_tokens = num_image_tokens
        super().__init__(image_processor, tokenizer, chat_template=chat_template)

    @auto_docstring
    def __call__(
        self,
        text: TextInput | PreTokenizedInput | list[TextInput] | list[PreTokenizedInput] = None,
        images: ImageInput | None = None,
        **kwargs: Unpack[DeepseekVLHybridProcessorKwargs],
    ) -> BatchFeature:
        r"""
        Returns:
            [`BatchFeature`]: A [`BatchFeature`] with the following fields:
            - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
            - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
              `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
              `None`).
            - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
        """
        # Merge caller kwargs with the class defaults and tokenizer defaults.
        output_kwargs = self._merge_kwargs(
            DeepseekVLHybridProcessorKwargs, tokenizer_init_kwargs=self.tokenizer.init_kwargs, **kwargs
        )

        if text is None and images is None:
            raise ValueError("You must specify either text or images.")

        if text is not None:
            if isinstance(text, str):
                text = [text]
            elif not (isinstance(text, (list, tuple)) and all(isinstance(t, str) for t in text)):
                raise ValueError("Invalid input text. Please provide a string, or a list of strings")

        # Expand each single image-token placeholder into the fixed number of
        # image tokens the vision tower produces, then tokenize.
        prompt_strings = []
        one_img_tokens = self.image_token * self.num_image_tokens
        for prompt in text:
            prompt = prompt.replace(self.image_token, one_img_tokens)
            prompt_strings.append(prompt)

        data = self.tokenizer(prompt_strings, **output_kwargs["text_kwargs"])

        # process images if pixel_values are provided
        if images is not None:
            inputs = self.image_processor(images, **output_kwargs["images_kwargs"])
            data["pixel_values"] = inputs["pixel_values"]
            # Hybrid model: a second, high-resolution view of each image.
            data["high_res_pixel_values"] = inputs["high_res_pixel_values"]

        return BatchFeature(data=data)

    def batch_decode(self, *args, **kwargs):
        """
        This method forwards all its arguments to LlamaTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
        refer to the docstring of this method for more information.
        """
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """
        This method forwards all its arguments to LlamaTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to
        the docstring of this method for more information.
        """
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # Union of tokenizer and image-processor input names, first-seen
        # order preserved and duplicates dropped via dict.fromkeys.
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
__all__ = ["DeepseekVLHybridProcessor"] | python | github | https://github.com/huggingface/transformers | src/transformers/models/deepseek_vl_hybrid/processing_deepseek_vl_hybrid.py |
# -*- coding: utf-8 -*-
# flake8: noqa
# setup.py from odoo 10.0 alpha, included as is, except for the dependencies
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import glob, os, re, setuptools, sys
from os.path import join
# List all data files
def data():
    """Collect every data file shipped with the package.

    Returns (directory, [file paths]) pairs in the shape expected by the
    setuptools ``data_files`` argument: everything under ``odoo/`` except
    compiled/backup files, plus — on Windows — the MSVC runtime and the
    babel/pytz/docutils data that py2exe misses.
    """
    r = {}
    for root, dirnames, filenames in os.walk('odoo'):
        for filename in filenames:
            if not re.match(r'.*(\.pyc|\.pyo|\~)$', filename):
                r.setdefault(root, []).append(os.path.join(root, filename))

    if os.name == 'nt':
        # Raw string so the backslashes in the Windows path are literal.
        r["Microsoft.VC90.CRT"] = glob.glob(r'C:\Microsoft.VC90.CRT\*.*')

        import babel
        # Add data, but also some .py files py2exe won't include automatically.
        # TODO This should probably go under `packages`, instead of `data`,
        # but this will work fine (especially since we don't use the ZIP file
        # approach).
        r["babel/localedata"] = glob.glob(os.path.join(os.path.dirname(babel.__file__), "localedata", '*'))
        others = ['global.dat', 'numbers.py', 'support.py', 'plural.py']
        # List comprehensions instead of map(lambda ...): identical result on
        # Python 2, and still a real list (not a lazy iterator) on Python 3.
        r["babel"] = [os.path.join(os.path.dirname(babel.__file__), f) for f in others]
        others = ['frontend.py', 'mofile.py']
        r["babel/messages"] = [os.path.join(os.path.dirname(babel.__file__), "messages", f) for f in others]

        import pytz
        tzdir = os.path.dirname(pytz.__file__)
        for root, _, filenames in os.walk(os.path.join(tzdir, "zoneinfo")):
            base = os.path.join('pytz', root[len(tzdir) + 1:])
            r[base] = [os.path.join(root, f) for f in filenames]

        import docutils
        dudir = os.path.dirname(docutils.__file__)
        for root, _, filenames in os.walk(dudir):
            base = os.path.join('docutils', root[len(dudir) + 1:])
            r[base] = [os.path.join(root, f) for f in filenames if not f.endswith(('.py', '.pyc', '.pyo'))]

    return r.items()
def gen_manifest():
    """Write a MANIFEST file listing every data file, one path per line.

    Bug fix: data() returns (directory, [file paths]) pairs, so joining its
    result directly raised ``TypeError: sequence item 0: expected string,
    tuple found``.  Flatten to the individual file paths first, and close
    the file deterministically with a context manager.
    """
    paths = []
    for _, filenames in data():
        paths.extend(filenames)
    with open('MANIFEST', 'w') as manifest:
        manifest.write("\n".join(paths))
# py2exe needs the MSVC 2008 runtime DLLs importable when building the
# Windows distribution.
if os.name == 'nt':
    sys.path.append("C:\Microsoft.VC90.CRT")
def py2exe_options():
    """Return extra setup() keyword arguments for building the Windows
    executables with py2exe; an empty dict on every other platform."""
    if os.name == 'nt':
        import py2exe
        return {
            # Three console entry points are frozen into .exe files.
            "console" : [ { "script": "odoo-bin", "icon_resources": [(1, join("install","odoo-icon.ico"))], },
                          { "script": "odoo-gevent" },
                          { "script": "odoo.py" },
            ],
            'options' : {
                "py2exe": {
                    # Ship loose files instead of a library.zip archive.
                    "skip_archive": 1,
                    "optimize": 0, # keep the assert running, because the integrated tests rely on them.
                    "dist_dir": 'dist',
                    # Packages py2exe's dependency scan would otherwise miss.
                    "packages": [
                        "HTMLParser",
                        "PIL",
                        "asynchat", "asyncore",
                        "commands",
                        "dateutil",
                        "decimal",
                        "docutils",
                        "email",
                        "encodings",
                        "imaplib",
                        "jinja2",
                        "lxml", "lxml._elementpath", "lxml.builder", "lxml.etree", "lxml.objectify",
                        "mako",
                        "markupsafe",   # dependence of jinja2 and mako
                        "mock",
                        "odoo",
                        "poplib",
                        "psutil",
                        "pychart",
                        "pydot",
                        "pyparsing",
                        "pytz",
                        "reportlab",
                        "requests",
                        "select",
                        "simplejson",
                        "smtplib",
                        "uuid",
                        "vatnumber",
                        "vobject",
                        "win32service", "win32serviceutil",
                        "xlwt",
                        "xml", "xml.dom",
                        "yaml",
                    ],
                    # Tk is never used; excluding it shrinks the build.
                    "excludes" : ["Tkconstants","Tkinter","tcl"],
                }
            }
        }
    else:
        return {}
# Loads `version`, `description`, `long_desc`, `url`, `author`, etc. into
# this module's namespace (Python 2 only: execfile was removed in Python 3).
execfile(join(os.path.dirname(__file__), 'odoo', 'release.py'))

# Notes for OpenERP developer on windows:
#
# To set up a Windows developer environment, install python2.7 then pip and
# use "pip install <dependency>" for every dependency listed below.
#
# Dependencies that require DLLs are not installable with pip install; for
# them we added comments with links where you can find the installers.
#
# OpenERP on windows also requires pywin32, the binary can be found at
# http://pywin32.sf.net
#
# Both python2.7 32bits and 64bits are known to work.

setuptools.setup(
      name             = 'odoo',
      version          = version,
      description      = description,
      long_description = long_desc,
      url              = url,
      author           = author,
      author_email     = author_email,
      classifiers      = filter(None, classifiers.split("\n")),
      license          = license,
      scripts          = ['odoo-bin', 'odoo-gevent', 'odoo.py'],
      data_files       = data(),
      packages         = setuptools.find_packages(),
      dependency_links = ['http://download.gna.org/pychart/'],
      #include_package_data = True,
      # GR voided the list, because we're interested in the test in what
      # the recipe will add
      install_requires = [],
      extras_require = {},
      tests_require = [],
      **py2exe_options()
)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | unknown | codeparrot/codeparrot-clean | ||
{
"kind": "Dashboard",
"apiVersion": "dashboard.grafana.app/v2alpha1",
"metadata": {
"name": "v0alpha1.panel-geomap.v42"
},
"spec": {
"annotations": [
{
"kind": "AnnotationQuery",
"spec": {
"datasource": {
"type": "datasource",
"uid": "grafana"
},
"query": {
"kind": "datasource",
"spec": {
"limit": 100,
"matchAny": false,
"tags": [],
"type": "dashboard"
}
},
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations \u0026 Alerts",
"builtIn": true,
"legacyOptions": {
"type": "dashboard"
}
}
}
],
"cursorSync": "Off",
"editable": true,
"elements": {
"panel-62": {
"kind": "Panel",
"spec": {
"id": 62,
"title": "Size, color mapped to different fields + share view",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "grafana-testdata-datasource",
"spec": {
"csvFileName": "flight_info_by_state.csv",
"scenarioId": "csv_file"
}
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "geomap",
"spec": {
"pluginVersion": "9.2.0-pre",
"options": {
"basemap": {
"config": {},
"name": "Layer 0",
"type": "default"
},
"controls": {
"mouseWheelZoom": true,
"showAttribution": true,
"showDebug": false,
"showMeasure": false,
"showScale": false,
"showZoom": true
},
"layers": [
{
"config": {
"color": {
"field": "Price",
"fixed": "dark-green"
},
"fillOpacity": 0.4,
"shape": "circle",
"showLegend": true,
"size": {
"field": "Count",
"fixed": 5,
"max": 15,
"min": 2
}
},
"location": {
"gazetteer": "public/gazetteer/usa-states.json",
"lookup": "State",
"mode": "auto"
},
"name": "Layer 1",
"type": "markers"
}
],
"tooltip": {
"mode": "details"
},
"view": {
"id": "coords",
"lat": 38.297683,
"lon": -99.228359,
"shared": true,
"zoom": 3.98
}
},
"fieldConfig": {
"defaults": {
"thresholds": {
"mode": "absolute",
"steps": [
{
"value": null,
"color": "green"
},
{
"value": 80,
"color": "red"
}
]
},
"color": {
"mode": "continuous-GrYlRd"
},
"custom": {
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
}
}
},
"overrides": []
}
}
}
}
},
"panel-63": {
"kind": "Panel",
"spec": {
"id": 63,
"title": "Heatmap data layer",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "grafana-testdata-datasource",
"spec": {
"csvFileName": "flight_info_by_state.csv",
"scenarioId": "csv_file"
}
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "geomap",
"spec": {
"pluginVersion": "9.2.0-pre",
"options": {
"basemap": {
"config": {},
"name": "Layer 0",
"type": "default"
},
"controls": {
"mouseWheelZoom": true,
"showAttribution": true,
"showDebug": false,
"showMeasure": false,
"showScale": false,
"showZoom": true
},
"layers": [
{
"config": {
"blur": 27,
"radius": 25,
"weight": {
"field": "Count",
"fixed": 1,
"max": 1,
"min": 0
}
},
"location": {
"gazetteer": "public/gazetteer/usa-states.json",
"lookup": "State",
"mode": "auto"
},
"name": "Layer 1",
"type": "heatmap"
}
],
"tooltip": {
"mode": "details"
},
"view": {
"id": "coords",
"lat": 38.251497,
"lon": -100.932144,
"shared": false,
"zoom": 4.15
}
},
"fieldConfig": {
"defaults": {
"thresholds": {
"mode": "absolute",
"steps": [
{
"value": null,
"color": "green"
},
{
"value": 80,
"color": "red"
}
]
},
"color": {
"mode": "continuous-BlYlRd"
},
"custom": {
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
}
}
},
"overrides": []
}
}
}
}
},
"panel-65": {
"kind": "Panel",
"spec": {
"id": 65,
"title": "Base layer ArcGIS wold imagery + star shape + share view",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "grafana-testdata-datasource",
"spec": {
"csvFileName": "flight_info_by_state.csv",
"scenarioId": "csv_file"
}
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "geomap",
"spec": {
"pluginVersion": "9.2.0-pre",
"options": {
"basemap": {
"config": {
"server": "world-imagery"
},
"name": "Layer 0",
"type": "esri-xyz"
},
"controls": {
"mouseWheelZoom": true,
"showAttribution": true,
"showDebug": false,
"showMeasure": false,
"showScale": false,
"showZoom": true
},
"layers": [
{
"config": {
"color": {
"fixed": "#ff001e"
},
"fillOpacity": 0.4,
"shape": "star",
"showLegend": true,
"size": {
"field": "Count",
"fixed": 5,
"max": 15,
"min": 2
}
},
"location": {
"gazetteer": "public/gazetteer/usa-states.json",
"lookup": "State",
"mode": "auto"
},
"name": "Layer 1",
"type": "markers"
}
],
"tooltip": {
"mode": "details"
},
"view": {
"id": "coords",
"lat": 40.159084,
"lon": -96.508021,
"shared": true,
"zoom": 3.83
}
},
"fieldConfig": {
"defaults": {
"thresholds": {
"mode": "absolute",
"steps": [
{
"value": null,
"color": "green"
},
{
"value": 80,
"color": "red"
}
]
},
"color": {
"mode": "continuous-GrYlRd"
},
"custom": {
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
}
}
},
"overrides": []
}
}
}
}
},
"panel-66": {
"kind": "Panel",
"spec": {
"id": 66,
"title": "Thresholds legend",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "grafana-testdata-datasource",
"spec": {
"csvFileName": "flight_info_by_state.csv",
"scenarioId": "csv_file"
}
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "geomap",
"spec": {
"pluginVersion": "9.2.0-pre",
"options": {
"basemap": {
"config": {},
"name": "Layer 0",
"type": "default"
},
"controls": {
"mouseWheelZoom": true,
"showAttribution": true,
"showDebug": false,
"showMeasure": false,
"showScale": false,
"showZoom": true
},
"layers": [
{
"config": {
"color": {
"field": "Price",
"fixed": "dark-green"
},
"fillOpacity": 0.4,
"shape": "circle",
"showLegend": true,
"size": {
"field": "Count",
"fixed": 5,
"max": 15,
"min": 2
}
},
"location": {
"gazetteer": "public/gazetteer/usa-states.json",
"lookup": "State",
"mode": "auto"
},
"name": "Layer 1",
"type": "markers"
}
],
"tooltip": {
"mode": "details"
},
"view": {
"id": "coords",
"lat": 38.297683,
"lon": -99.228359,
"shared": true,
"zoom": 3.98
}
},
"fieldConfig": {
"defaults": {
"thresholds": {
"mode": "absolute",
"steps": [
{
"value": null,
"color": "green"
},
{
"value": 80,
"color": "red"
},
{
"value": 90,
"color": "#EAB839"
}
]
},
"color": {
"mode": "thresholds"
},
"custom": {
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
}
}
},
"overrides": []
}
}
}
}
}
},
"layout": {
"kind": "GridLayout",
"spec": {
"items": [
{
"kind": "GridLayoutItem",
"spec": {
"x": 0,
"y": 0,
"width": 9,
"height": 11,
"element": {
"kind": "ElementReference",
"name": "panel-62"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 9,
"y": 0,
"width": 9,
"height": 11,
"element": {
"kind": "ElementReference",
"name": "panel-66"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 0,
"y": 11,
"width": 9,
"height": 11,
"element": {
"kind": "ElementReference",
"name": "panel-63"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 9,
"y": 11,
"width": 9,
"height": 11,
"element": {
"kind": "ElementReference",
"name": "panel-65"
}
}
}
]
}
},
"links": [],
"liveNow": false,
"preload": false,
"tags": [
"gdev",
"panel-tests",
"geomap"
],
"timeSettings": {
"timezone": "",
"from": "now-6h",
"to": "now",
"autoRefresh": "",
"autoRefreshIntervals": [
"10s",
"30s",
"1m",
"5m",
"15m",
"30m",
"1h",
"2h",
"1d"
],
"hideTimepicker": false,
"fiscalYearStartMonth": 0
},
"title": "Panel Tests - Geomap",
"variables": []
},
"status": {
"conversion": {
"failed": false,
"storedVersion": "v0alpha1"
}
}
} | json | github | https://github.com/grafana/grafana | apps/dashboard/pkg/migration/conversion/testdata/output/migrated_dev_dashboards/panel-geomap/v0alpha1.panel-geomap.v42.v2alpha1.json |
#!/usr/bin/env python3
'''Psychomotor Vigilance Task
Stateless CGI script (state is kept by passing a state string as a second parameter to the jsonp callback function; this will work with stapjs library).
Note that although this is task that is focused on millisecond-level timing, it can still be done as a standard CGI script;
reason being, that all of the timing functionality is client-side.
'''
import sys,os,json,random
TRIALS = 20
def send(stap):
    '''Write one CGI response to stdout: the JSON content-type header
    (with a permissive CORS header) followed by the JSON-encoded stap
    structure on its own line.'''
    header = "Content-Type: application/json\nAccess-Control-Allow-Origin: *\n"
    print(header)
    print(json.dumps(stap))
def obj(id=None, content=NotImplemented, **options):
    '''Build a display-object dict for the stap protocol.

    id      -> stored under key 'id' when given (None means "no id").
    content -> stored under key 'v'; the NotImplemented sentinel means
               "no content", so that None and False remain valid values.
    Any extra keyword options pass through into the result unchanged.
    '''
    extras = {}
    if id is not None:
        extras['id'] = id
    if content is not NotImplemented:
        extras['v'] = content
    options.update(extras)
    return options
def main():
    '''Handle one stateless CGI request for the vigilance task.

    The request body is the JSON triple [time, id, val] posted by the client:
    time = client-side timestamp (ms), id = id of the widget that fired,
    val = its value.  All task state (trial number, scheduled display time)
    is round-tripped through the button widget ids, so the script itself
    keeps no state between requests.
    '''
    # read request; any parse failure is treated as the initial page load
    try: time,id,val = json.loads(sys.stdin.buffer.read(int(os.environ['CONTENT_LENGTH'])).decode('utf-8'))
    except: time,id,val = 0,None,['onload']
    displayUpdates=[]
    display=obj(content=displayUpdates)
    if val==['onload']:
        # initial task setup: widget options plus a CSS template reserving
        # vertical space for the button bin
        display.update({'require':{'options':['U','onin']},'template':'[type="bin"]{min-height:6em}'})
        displayUpdates+=[
            obj('Trial',1,max=TRIALS),
            obj('Click a button when one appears',[])
        ]
        trial=0
    elif id and id.startswith('btn'):
        # read button id (which includes trial number and display timestamp)
        _,trial,displayTime=id.split()
        trial=int(trial)
        # response time = click timestamp minus the time the button appeared
        displayUpdates.append(obj('Your response time is',time-int(displayTime)))
    # NOTE(review): if the request is neither an onload nor a button click,
    # `trial` is unbound here and this raises NameError — presumably such
    # requests cannot occur with this task's widgets; confirm.
    if trial==TRIALS:
        # final task page: remove the button bin and thank the participant
        displayUpdates.append(obj('Click a button when one appears',None))
        displayUpdates.append('Thank you for your participation.')
    else:
        # next trial
        trial+=1
        # pick a random delay (3-10 s) before the button appears
        displayTime=time+random.randrange(3000,10000)
        # wait (U=displayTime), then show the button; the button id embeds the
        # trial number and display time to enable stateless scripting
        displayUpdates+=[
            obj('Trial',trial),
            obj('Click a button when one appears', [obj('btn %d %d'%(trial,displayTime),False,title='Click me',onin={'v':None})],U=displayTime)
        ]
    send(display)
main() | unknown | codeparrot/codeparrot-clean | ||
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#include "cvutils.hpp"
namespace cv {
namespace ndsrvp {
// fastMalloc
// [0][1][2][3][4][5][6][7][8][9]
// ^udata
// ^adata
// ^adata[-1] == udata
// Allocate `size` bytes aligned to CV_MALLOC_ALIGN.  Over-allocates by
// sizeof(void*) + CV_MALLOC_ALIGN and stores the raw malloc() pointer in the
// slot just before the aligned address so fastFree() can recover it
// (see the [0..9] diagram above).
void* fastMalloc(size_t size)
{
    // raw (unaligned) block; extra room for the back-pointer + alignment slack
    uchar* udata = (uchar*)malloc(size + sizeof(void*) + CV_MALLOC_ALIGN);
    if(!udata)
        ndsrvp_error(Error::StsNoMem, "fastMalloc(): Not enough memory");
    // first CV_MALLOC_ALIGN-aligned address past the back-pointer slot
    uchar** adata = (uchar**)align((size_t)((uchar**)udata + 1), CV_MALLOC_ALIGN);
    adata[-1] = udata;  // remember the raw pointer for fastFree()
    return adata;
}
// Release a block obtained from fastMalloc().  Recovers the raw malloc()
// pointer stored just before `ptr` and sanity-checks it before free().
// fastFree(NULL) is a no-op.
void fastFree(void* ptr)
{
    if(ptr)
    {
        // back-pointer written by fastMalloc() lives one slot before ptr
        uchar* udata = ((uchar**)ptr)[-1];
        // the raw pointer must precede ptr by no more than the padding we
        // added; anything else means ptr did not come from fastMalloc()
        if(!(udata < (uchar*)ptr && ((uchar*)ptr - udata) <= (ptrdiff_t)(sizeof(void*) + CV_MALLOC_ALIGN)))
            ndsrvp_error(Error::StsBadArg, "fastFree(): Invalid memory block");
        free(udata);
    }
}
// borderInterpolate
// Map the (possibly out-of-range) coordinate p onto [0, len) according to
// the requested border mode.  Returns -1 for CV_HAL_BORDER_CONSTANT when p
// falls outside the valid range.
int borderInterpolate(int p, int len, int borderType)
{
    // Coordinates already inside [0, len) need no remapping.
    if( (unsigned)p < (unsigned)len )
        return p;

    switch( borderType )
    {
    case CV_HAL_BORDER_REPLICATE:
        // clamp to the nearest edge
        p = p < 0 ? 0 : len - 1;
        break;
    case CV_HAL_BORDER_REFLECT:
    case CV_HAL_BORDER_REFLECT_101:
    {
        // delta == 1 excludes the edge pixel from the reflection (101 mode)
        const int delta = borderType == CV_HAL_BORDER_REFLECT_101;
        if( len == 1 )
            return 0;
        do
        {
            if( p < 0 )
                p = -p - 1 + delta;
            else
                p = len - 1 - (p - len) - delta;
        }
        while( (unsigned)p >= (unsigned)len );
        break;
    }
    case CV_HAL_BORDER_WRAP:
        ndsrvp_assert(len > 0);
        if( p < 0 )
            p -= ((p - len + 1) / len) * len;
        if( p >= len )
            p %= len;
        break;
    case CV_HAL_BORDER_CONSTANT:
        p = -1;
        break;
    default:
        ndsrvp_error(Error::StsBadArg, "borderInterpolate(): Unknown/unsupported border type");
    }
    return p;
}
// Vectorized borderInterpolate(): remaps four int16 coordinates at once.
// NOTE(review): relies on GCC vector-extension semantics where a lane-wise
// comparison such as (vp < 0) yields per-lane all-ones/all-zero masks, and
// the (long) casts hand the whole 64-bit vector to __nds__bpick as a single
// bit-select mask — confirm against the Andes RVP intrinsics reference.
int16x4_t borderInterpolate_vector(int16x4_t vp, short len, int borderType)
{
    int16x4_t vzero = (int16x4_t){0, 0, 0, 0};
    int16x4_t vone = (int16x4_t){1, 1, 1, 1};
    int16x4_t vlen = (int16x4_t){len, len, len, len};
    if(borderType == CV_HAL_BORDER_REPLICATE)
        // clamp each lane to [0, len - 1]
        vp = (int16x4_t)__nds__bpick(0, __nds__bpick((long)(vlen - 1), (long)vp, (long)(vp >= vlen)), (long)(vp < 0));
    else if(borderType == CV_HAL_BORDER_REFLECT || borderType == CV_HAL_BORDER_REFLECT_101)
    {
        // delta == 1 excludes the edge pixel from the reflection (101 mode)
        int16x4_t vdelta = (borderType == CV_HAL_BORDER_REFLECT_101) ? vone : vzero;
        if(len == 1)
            return vzero;
        do
        {
            // per-lane mirrored candidates for negative / past-the-end lanes
            int16x4_t vneg = -vp - 1 + vdelta;
            int16x4_t vpos = vlen - 1 - (vp - vlen) - vdelta;
            vp = (int16x4_t)__nds__bpick((long)vneg, __nds__bpick((long)vpos, (long)vp, (long)(vp >= vlen)), (long)(vp < 0));
        }
        // repeat until every lane has landed inside [0, len)
        while( (long)(vp >= vlen) || (long)(vp < 0) );
    }
    else if(borderType == CV_HAL_BORDER_WRAP)
    {
        ndsrvp_assert(len > 0);
        // wrap-around candidates for negative / past-the-end lanes
        int16x4_t vneg = vp - ((vp - vlen + 1) / vlen) * vlen;
        int16x4_t vpos = vp % vlen;
        vp = (int16x4_t)__nds__bpick((long)vneg, __nds__bpick((long)vpos, (long)vp, (long)(vp >= vlen)), (long)(vp < 0));
    }
    else if(borderType == CV_HAL_BORDER_CONSTANT)
        // out-of-range lanes become -1 (caller substitutes the constant)
        vp = (int16x4_t)__nds__bpick((long)-vone, (long)vp, (long)(vp < 0 || vp >= vlen));
    else
        ndsrvp_error(Error::StsBadArg, "borderInterpolate_vector(): Unknown/unsupported border type");
    return vp;
}
} // namespace ndsrvp
} // namespace cv | cpp | github | https://github.com/opencv/opencv | hal/ndsrvp/src/cvutils.cpp |
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// Intel License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of Intel Corporation may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "test_precomp.hpp"
// POSIT is not exposed to C++ API yet, so the test is disabled
#if 0
namespace opencv_test { namespace {
// Regression test for POSIT (pose from orthography and scaling with
// iterations).  Currently compiled out via the surrounding #if 0: POSIT is
// not exposed to the C++ API yet (see the comment above).
class CV_POSITTest : public cvtest::BaseTest
{
public:
    CV_POSITTest();
protected:
    // start_from: index of the first random test case to run
    void run(int);
};
CV_POSITTest::CV_POSITTest()
{
    // number of random object poses exercised by run()
    test_case_count = 20;
}
void CV_POSITTest::run( int start_from )
{
int code = cvtest::TS::OK;
/* fixed parameters output */
/*float rot[3][3]={ 0.49010f, 0.85057f, 0.19063f,
-0.56948f, 0.14671f, 0.80880f,
0.65997f, -0.50495f, 0.55629f };
float trans[3] = { 0.0f, 0.0f, 40.02637f };
*/
/* Some variables */
int i, counter;
CvTermCriteria criteria;
CvPoint3D32f* obj_points;
CvPoint2D32f* img_points;
CvPOSITObject* object;
float angleX, angleY, angleZ;
RNG& rng = ts->get_rng();
int progress = 0;
CvMat* true_rotationX = cvCreateMat( 3, 3, CV_32F );
CvMat* true_rotationY = cvCreateMat( 3, 3, CV_32F );
CvMat* true_rotationZ = cvCreateMat( 3, 3, CV_32F );
CvMat* tmp_matrix = cvCreateMat( 3, 3, CV_32F );
CvMat* true_rotation = cvCreateMat( 3, 3, CV_32F );
CvMat* rotation = cvCreateMat( 3, 3, CV_32F );
CvMat* translation = cvCreateMat( 3, 1, CV_32F );
CvMat* true_translation = cvCreateMat( 3, 1, CV_32F );
const float flFocalLength = 760.f;
const float flEpsilon = 0.5f;
/* Initialization */
criteria.type = CV_TERMCRIT_EPS|CV_TERMCRIT_ITER;
criteria.epsilon = flEpsilon;
criteria.max_iter = 10000;
/* Allocating source arrays; */
obj_points = (CvPoint3D32f*)cvAlloc( 8 * sizeof(CvPoint3D32f) );
img_points = (CvPoint2D32f*)cvAlloc( 8 * sizeof(CvPoint2D32f) );
/* Fill points arrays with values */
/* cube model with edge size 10 */
obj_points[0].x = 0; obj_points[0].y = 0; obj_points[0].z = 0;
obj_points[1].x = 10; obj_points[1].y = 0; obj_points[1].z = 0;
obj_points[2].x = 10; obj_points[2].y = 10; obj_points[2].z = 0;
obj_points[3].x = 0; obj_points[3].y = 10; obj_points[3].z = 0;
obj_points[4].x = 0; obj_points[4].y = 0; obj_points[4].z = 10;
obj_points[5].x = 10; obj_points[5].y = 0; obj_points[5].z = 10;
obj_points[6].x = 10; obj_points[6].y = 10; obj_points[6].z = 10;
obj_points[7].x = 0; obj_points[7].y = 10; obj_points[7].z = 10;
/* Loop for test some random object positions */
for( counter = start_from; counter < test_case_count; counter++ )
{
ts->update_context( this, counter, true );
progress = update_progress( progress, counter, test_case_count, 0 );
/* set all rotation matrix to zero */
cvZero( true_rotationX );
cvZero( true_rotationY );
cvZero( true_rotationZ );
/* fill random rotation matrix */
angleX = (float)(cvtest::randReal(rng)*2*CV_PI);
angleY = (float)(cvtest::randReal(rng)*2*CV_PI);
angleZ = (float)(cvtest::randReal(rng)*2*CV_PI);
true_rotationX->data.fl[0 *3+ 0] = 1;
true_rotationX->data.fl[1 *3+ 1] = (float)cos(angleX);
true_rotationX->data.fl[2 *3+ 2] = true_rotationX->data.fl[1 *3+ 1];
true_rotationX->data.fl[1 *3+ 2] = -(float)sin(angleX);
true_rotationX->data.fl[2 *3+ 1] = -true_rotationX->data.fl[1 *3+ 2];
true_rotationY->data.fl[1 *3+ 1] = 1;
true_rotationY->data.fl[0 *3+ 0] = (float)cos(angleY);
true_rotationY->data.fl[2 *3+ 2] = true_rotationY->data.fl[0 *3+ 0];
true_rotationY->data.fl[0 *3+ 2] = -(float)sin(angleY);
true_rotationY->data.fl[2 *3+ 0] = -true_rotationY->data.fl[0 *3+ 2];
true_rotationZ->data.fl[2 *3+ 2] = 1;
true_rotationZ->data.fl[0 *3+ 0] = (float)cos(angleZ);
true_rotationZ->data.fl[1 *3+ 1] = true_rotationZ->data.fl[0 *3+ 0];
true_rotationZ->data.fl[0 *3+ 1] = -(float)sin(angleZ);
true_rotationZ->data.fl[1 *3+ 0] = -true_rotationZ->data.fl[0 *3+ 1];
cvMatMul( true_rotationX, true_rotationY, tmp_matrix);
cvMatMul( tmp_matrix, true_rotationZ, true_rotation);
/* fill translation vector */
true_translation->data.fl[2] = (float)(cvtest::randReal(rng)*(2*flFocalLength-40) + 60);
true_translation->data.fl[0] = (float)((cvtest::randReal(rng)*2-1)*true_translation->data.fl[2]);
true_translation->data.fl[1] = (float)((cvtest::randReal(rng)*2-1)*true_translation->data.fl[2]);
/* calculate perspective projection */
for ( i = 0; i < 8; i++ )
{
float vec[3];
CvMat Vec = cvMat( 3, 1, CV_32F, vec );
CvMat Obj_point = cvMat( 3, 1, CV_32F, &obj_points[i].x );
cvMatMul( true_rotation, &Obj_point, &Vec );
vec[0] += true_translation->data.fl[0];
vec[1] += true_translation->data.fl[1];
vec[2] += true_translation->data.fl[2];
img_points[i].x = flFocalLength * vec[0] / vec[2];
img_points[i].y = flFocalLength * vec[1] / vec[2];
}
/*img_points[0].x = 0 ; img_points[0].y = 0;
img_points[1].x = 80; img_points[1].y = -93;
img_points[2].x = 245;img_points[2].y = -77;
img_points[3].x = 185;img_points[3].y = 32;
img_points[4].x = 32; img_points[4].y = 135;
img_points[5].x = 99; img_points[5].y = 35;
img_points[6].x = 247; img_points[6].y = 62;
img_points[7].x = 195; img_points[7].y = 179;
*/
object = cvCreatePOSITObject( obj_points, 8 );
cvPOSIT( object, img_points, flFocalLength, criteria,
rotation->data.fl, translation->data.fl );
cvReleasePOSITObject( &object );
Mat _rotation = cvarrToMat(rotation), _true_rotation = cvarrToMat(true_rotation);
Mat _translation = cvarrToMat(translation), _true_translation = cvarrToMat(true_translation);
code = cvtest::cmpEps2( ts, _rotation, _true_rotation, flEpsilon, false, "rotation matrix" );
if( code < 0 )
break;
code = cvtest::cmpEps2( ts, _translation, _true_translation, flEpsilon, false, "translation vector" );
if( code < 0 )
break;
}
cvFree( &obj_points );
cvFree( &img_points );
cvReleaseMat( &true_rotationX );
cvReleaseMat( &true_rotationY );
cvReleaseMat( &true_rotationZ );
cvReleaseMat( &tmp_matrix );
cvReleaseMat( &true_rotation );
cvReleaseMat( &rotation );
cvReleaseMat( &translation );
cvReleaseMat( &true_translation );
if( code < 0 )
ts->set_failed_test_info( code );
}
TEST(Calib3d_POSIT, accuracy) { CV_POSITTest test; test.safe_run(); }
}} // namespace
#endif
/* End of file. */ | cpp | github | https://github.com/opencv/opencv | modules/calib3d/test/test_posit.cpp |
# Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
get_vnc_console = {
'type': 'object',
'properties': {
'os-getVNCConsole': {
'type': 'object',
'properties': {
'type': {
'type': 'string',
'enum': ['novnc', 'xvpvnc'],
},
},
'required': ['type'],
'additionalProperties': False,
},
},
'required': ['os-getVNCConsole'],
'additionalProperties': False,
}
get_spice_console = {
'type': 'object',
'properties': {
'os-getSPICEConsole': {
'type': 'object',
'properties': {
'type': {
'type': 'string',
'enum': ['spice-html5'],
},
},
'required': ['type'],
'additionalProperties': False,
},
},
'required': ['os-getSPICEConsole'],
'additionalProperties': False,
}
get_rdp_console = {
'type': 'object',
'properties': {
'os-getRDPConsole': {
'type': 'object',
'properties': {
'type': {
'type': 'string',
'enum': ['rdp-html5'],
},
},
'required': ['type'],
'additionalProperties': False,
},
},
'required': ['os-getRDPConsole'],
'additionalProperties': False,
}
get_serial_console = {
'type': 'object',
'properties': {
'os-getSerialConsole': {
'type': 'object',
'properties': {
'type': {
'type': 'string',
'enum': ['serial'],
},
},
'required': ['type'],
'additionalProperties': False,
},
},
'required': ['os-getSerialConsole'],
'additionalProperties': False,
} | unknown | codeparrot/codeparrot-clean | ||
from nose.tools import *
from tests.base import OsfTestCase
from tests.factories import UserFactory
from scripts.migration.migrate_mailing_lists_to_mailchimp_field import main, get_users_with_no_mailchimp_mailing_lists
class TestMigrateMailingLists(OsfTestCase):
def setUp(self):
super(TestMigrateMailingLists, self).setUp()
self.user1 = UserFactory(mailing_lists={'mail': True})
self.user2 = UserFactory(mailing_lists={'mail': False})
self.user3 = UserFactory()
self.user1.save()
self.user2.save()
def test_get_users_with_mailing_lists(self):
users_with_mailing_list_ids = [user._id for user in get_users_with_no_mailchimp_mailing_lists()]
assert_equal(len(users_with_mailing_list_ids), 2)
assert_true(self.user1._id in users_with_mailing_list_ids)
assert_true(self.user2._id in users_with_mailing_list_ids)
assert_false(self.user3._id in users_with_mailing_list_ids)
def test_migration_of_mailing_lists(self):
assert_equal(self.user1.mailchimp_mailing_lists, {})
assert_equal(self.user2.mailchimp_mailing_lists, {})
main()
self.user1.reload()
self.user2.reload()
assert_true(self.user1.mailchimp_mailing_lists.get(u'mail'))
assert_false(self.user2.mailchimp_mailing_lists.get(u'mail')) | unknown | codeparrot/codeparrot-clean | ||
#include <gtest/gtest.h>
#include <ATen/ATen.h>
#include <ATen/Utils.h>
#include <c10/util/accumulate.h>
#include <algorithm>
#include <iostream>
#include <numeric>
using namespace at;
#define TRY_CATCH_ELSE(fn, catc, els) \
{ \
/* avoid mistakenly passing if els code throws exception*/ \
bool _passed = false; \
try { \
fn; \
_passed = true; \
els; \
} catch (std::exception&) { \
ASSERT_FALSE(_passed); \
catc; \
} \
}
void require_equal_size_dim(const Tensor &lhs, const Tensor &rhs) {
ASSERT_EQ(lhs.dim(), rhs.dim());
ASSERT_TRUE(lhs.sizes().equals(rhs.sizes()));
}
bool should_expand(const IntArrayRef &from_size, const IntArrayRef &to_size) {
if (from_size.size() > to_size.size()) {
return false;
}
for (auto from_dim_it = from_size.rbegin(); from_dim_it != from_size.rend();
++from_dim_it) {
for (auto to_dim_it = to_size.rbegin(); to_dim_it != to_size.rend();
++to_dim_it) {
if (*from_dim_it != 1 && *from_dim_it != *to_dim_it) {
return false;
}
}
}
return true;
}
void test(DeprecatedTypeProperties &T) {
std::vector<std::vector<int64_t>> sizes = {{}, {0}, {1}, {1, 1}, {2}};
// single-tensor/size tests
for (auto s = sizes.begin(); s != sizes.end(); ++s) {
// verify that the dim, sizes, strides, etc match what was requested.
auto t = ones(*s, T);
ASSERT_EQ((size_t)t.dim(), s->size());
ASSERT_EQ((size_t)t.ndimension(), s->size());
ASSERT_TRUE(t.sizes().equals(*s));
ASSERT_EQ(t.strides().size(), s->size());
const auto numel = c10::multiply_integers(s->begin(), s->end());
ASSERT_EQ(t.numel(), numel);
// verify we can output
std::stringstream ss;
// NOLINTNEXTLINE(hicpp-avoid-goto,cppcoreguidelines-avoid-goto)
ASSERT_NO_THROW(ss << t << std::endl);
// set_
auto t2 = ones(*s, T);
t2.set_();
require_equal_size_dim(t2, ones({0}, T));
// unsqueeze
ASSERT_EQ(t.unsqueeze(0).dim(), t.dim() + 1);
// unsqueeze_
{
auto t2 = ones(*s, T);
auto r = t2.unsqueeze_(0);
ASSERT_EQ(r.dim(), t.dim() + 1);
}
// squeeze (with dimension argument)
if (t.dim() == 0 || t.sizes()[0] == 1) {
ASSERT_EQ(t.squeeze(0).dim(), std::max<int64_t>(t.dim() - 1, 0));
} else {
// In PyTorch, it is a no-op to try to squeeze a dimension that has size
// != 1; in NumPy this is an error.
ASSERT_EQ(t.squeeze(0).dim(), t.dim());
}
// squeeze (with no dimension argument)
{
std::vector<int64_t> size_without_ones;
for (auto size : *s) {
if (size != 1) {
size_without_ones.push_back(size);
}
}
auto result = t.squeeze();
require_equal_size_dim(result, ones(size_without_ones, T));
}
{
// squeeze_ (with dimension argument)
auto t2 = ones(*s, T);
if (t2.dim() == 0 || t2.sizes()[0] == 1) {
ASSERT_EQ(t2.squeeze_(0).dim(), std::max<int64_t>(t.dim() - 1, 0));
} else {
// In PyTorch, it is a no-op to try to squeeze a dimension that has size
// != 1; in NumPy this is an error.
ASSERT_EQ(t2.squeeze_(0).dim(), t.dim());
}
}
// squeeze_ (with no dimension argument)
{
auto t2 = ones(*s, T);
std::vector<int64_t> size_without_ones;
for (auto size : *s) {
if (size != 1) {
size_without_ones.push_back(size);
}
}
auto r = t2.squeeze_();
require_equal_size_dim(t2, ones(size_without_ones, T));
}
// reduce (with dimension argument and with 1 return argument)
if (t.numel() != 0) {
ASSERT_EQ(t.sum(0).dim(), std::max<int64_t>(t.dim() - 1, 0));
} else {
ASSERT_TRUE(t.sum(0).equal(at::zeros({}, T)));
}
// reduce (with dimension argument and with 2 return arguments)
if (t.numel() != 0) {
auto ret = t.min(0);
ASSERT_EQ(std::get<0>(ret).dim(), std::max<int64_t>(t.dim() - 1, 0));
ASSERT_EQ(std::get<1>(ret).dim(), std::max<int64_t>(t.dim() - 1, 0));
} else {
// NOLINTNEXTLINE(hicpp-avoid-goto,cppcoreguidelines-avoid-goto)
ASSERT_ANY_THROW(t.min(0));
}
// simple indexing
if (t.dim() > 0 && t.numel() != 0) {
ASSERT_EQ(t[0].dim(), std::max<int64_t>(t.dim() - 1, 0));
} else {
// NOLINTNEXTLINE(hicpp-avoid-goto,cppcoreguidelines-avoid-goto)
ASSERT_ANY_THROW(t[0]);
}
// fill_ (argument to fill_ can only be a 0-dim tensor)
TRY_CATCH_ELSE(
t.fill_(t.sum(0)), ASSERT_GT(t.dim(), 1), ASSERT_LE(t.dim(), 1));
}
for (auto lhs_it = sizes.begin(); lhs_it != sizes.end(); ++lhs_it) {
// NOLINTNEXTLINE(modernize-loop-convert)
for (auto rhs_it = sizes.begin(); rhs_it != sizes.end(); ++rhs_it) {
// is_same_size should only match if they are the same shape
{
auto lhs = ones(*lhs_it, T);
auto rhs = ones(*rhs_it, T);
if (*lhs_it != *rhs_it) {
ASSERT_FALSE(lhs.is_same_size(rhs));
ASSERT_FALSE(rhs.is_same_size(lhs));
}
}
// forced size functions (resize_, resize_as, set_)
// resize_
{
{
auto lhs = ones(*lhs_it, T);
auto rhs = ones(*rhs_it, T);
lhs.resize_(*rhs_it);
require_equal_size_dim(lhs, rhs);
}
// resize_as_
{
auto lhs = ones(*lhs_it, T);
auto rhs = ones(*rhs_it, T);
lhs.resize_as_(rhs);
require_equal_size_dim(lhs, rhs);
}
// set_
{
{
// with tensor
auto lhs = ones(*lhs_it, T);
auto rhs = ones(*rhs_it, T);
lhs.set_(rhs);
require_equal_size_dim(lhs, rhs);
}
{
// with storage
auto lhs = ones(*lhs_it, T);
auto rhs = ones(*rhs_it, T);
lhs.set_(rhs.storage());
// should not be dim 0 because an empty storage is dim 1; all other
// storages aren't scalars
ASSERT_NE(lhs.dim(), 0);
}
{
// with storage, offset, sizes, strides
auto lhs = ones(*lhs_it, T);
auto rhs = ones(*rhs_it, T);
lhs.set_(rhs.storage(), rhs.storage_offset(), rhs.sizes(), rhs.strides());
require_equal_size_dim(lhs, rhs);
}
}
}
// view
{
auto lhs = ones(*lhs_it, T);
auto rhs = ones(*rhs_it, T);
auto rhs_size = *rhs_it;
TRY_CATCH_ELSE(auto result = lhs.view(rhs_size),
ASSERT_NE(lhs.numel(), rhs.numel()),
ASSERT_EQ(lhs.numel(), rhs.numel());
require_equal_size_dim(result, rhs););
}
// take
{
auto lhs = ones(*lhs_it, T);
auto rhs = zeros(*rhs_it, T).toType(ScalarType::Long);
TRY_CATCH_ELSE(auto result = lhs.take(rhs),
ASSERT_EQ(lhs.numel(), 0); ASSERT_NE(rhs.numel(), 0),
require_equal_size_dim(result, rhs));
}
// put
{
auto lhs = ones(*lhs_it, T);
auto rhs1 = zeros(*rhs_it, T).toType(ScalarType::Long);
auto rhs2 = zeros(*rhs_it, T);
TRY_CATCH_ELSE(auto result = lhs.put(rhs1, rhs2),
ASSERT_EQ(lhs.numel(), 0); ASSERT_NE(rhs1.numel(), 0),
require_equal_size_dim(result, lhs));
}
// ger
{
auto lhs = ones(*lhs_it, T);
auto rhs = ones(*rhs_it, T);
TRY_CATCH_ELSE(auto result = lhs.ger(rhs),
ASSERT_TRUE(
(lhs.numel() == 0 || rhs.numel() == 0 ||
lhs.dim() != 1 || rhs.dim() != 1)),
[&]() {
int64_t dim0 = lhs.dim() == 0 ? 1 : lhs.size(0);
int64_t dim1 = rhs.dim() == 0 ? 1 : rhs.size(0);
require_equal_size_dim(
result, at::empty({dim0, dim1}, result.options()));
}(););
}
// expand
{
auto lhs = ones(*lhs_it, T);
auto lhs_size = *lhs_it;
auto rhs = ones(*rhs_it, T);
auto rhs_size = *rhs_it;
bool should_pass = should_expand(lhs_size, rhs_size);
TRY_CATCH_ELSE(auto result = lhs.expand(rhs_size),
ASSERT_FALSE(should_pass),
ASSERT_TRUE(should_pass);
require_equal_size_dim(result, rhs););
// in-place functions (would be good if we can also do a non-broadcasting
// one, b/c broadcasting functions will always end up operating on tensors
// of same size; is there an example of this outside of assign_ ?)
{
bool should_pass_inplace = should_expand(rhs_size, lhs_size);
TRY_CATCH_ELSE(lhs.add_(rhs),
ASSERT_FALSE(should_pass_inplace),
ASSERT_TRUE(should_pass_inplace);
require_equal_size_dim(lhs, ones(*lhs_it, T)););
}
}
}
}
}
TEST(TestScalarTensor, TestScalarTensorCPU) {
manual_seed(123);
test(CPU(kFloat));
}
TEST(TestScalarTensor, TestScalarTensorCUDA) {
manual_seed(123);
if (at::hasCUDA()) {
test(CUDA(kFloat));
}
}
TEST(TestScalarTensor, TestScalarTensorMPS) {
manual_seed(123);
if (at::hasMPS()) {
test(MPS(kFloat));
}
} | cpp | github | https://github.com/pytorch/pytorch | aten/src/ATen/test/scalar_tensor_test.cpp |
# -*- coding: utf-8 -*-
#
# petition/data/models.py
#
# Copyright (C) 2011-16 Tomáš Pecina <tomas@pecina.cz>
#
# This file is part of petition.pecina.cz, a web-based petition
# application.
#
# This application is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This application is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Petition(models.Model):
user = models.ForeignKey(User)
name = models.CharField(max_length=30, unique=True)
domain = models.CharField(max_length=255, blank=True)
email = models.EmailField(blank=True)
closed = models.BooleanField()
longname = models.CharField(max_length=255)
keywords = models.CharField(max_length=255, blank=True)
css = models.TextField(blank=True)
text = models.TextField()
counter = models.IntegerField()
timestamp = models.DateTimeField(auto_now_add=True, db_index=True)
def __str__(self):
return self.longname
class Signature(models.Model):
petition = models.ForeignKey(Petition)
name = models.CharField(max_length=30, db_index=True)
occupation = models.CharField(max_length=255, blank=True)
occupation_hidden = models.BooleanField()
address = models.CharField(max_length=255)
address_hidden = models.BooleanField()
birthdate = models.DateField(blank=True, null=True)
birthdate_hidden = models.BooleanField()
email = models.EmailField(blank=True)
email_hidden = models.BooleanField()
note = models.TextField(blank=True)
note_hidden = models.BooleanField()
ip = models.GenericIPAddressField()
domain = models.CharField(max_length=255)
reported = models.BooleanField(default=False)
timestamp = models.DateTimeField(auto_now_add=True, db_index=True)
def __str__(self):
return self.name | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/env python
# Copyright 2012 Google Inc. All Rights Reserved.
"""Client actions related to plist files."""
import cStringIO
import types
from binplist import binplist
from grr.client import actions
from grr.client import vfs
from grr.lib import plist as plist_lib
from grr.lib.rdfvalues import plist as rdfplist
from grr.lib.rdfvalues import protodict
class PlistQuery(actions.ActionPlugin):
"""Parses the plist request specified and returns the results.
PlistQuery allows you to obtain data from a plist, optionally only if it
matches the given filter.
Querying for a plist is done in two steps. First, its contents are
retrieved.
For plists where the top level element is a dict, you can use the key
parameter of the PlistRequest to specify a path into the dict to retrieve.
When specifying a key, the requested key values are places under a dictionary
key called "key".
Whether you've specified a key or not, the query parameter allows you to
filter based on the
"""
in_rdfvalue = rdfplist.PlistRequest
out_rdfvalue = protodict.RDFValueArray
MAX_PLIST_SIZE = 1024 * 1024 * 100 # 100 MB
def Run(self, args):
self.context = args.context
self.filter_query = args.query
with vfs.VFSOpen(args.pathspec, progress_callback=self.Progress) as fd:
data = fd.Read(self.MAX_PLIST_SIZE)
plist = binplist.readPlist(cStringIO.StringIO(data))
# Create the query parser
parser = plist_lib.PlistFilterParser(self.filter_query).Parse()
filter_imp = plist_lib.PlistFilterImplementation
matcher = parser.Compile(filter_imp)
if self.context:
# Obtain the values for the context using the value expander
value_expander = filter_imp.FILTERS["ValueExpander"]
iterator = value_expander().Expand(plist, self.context)
else:
# If we didn't get a context, the context is the whole plist
iterator = [plist]
reply = protodict.RDFValueArray()
for item in iterator:
# As we're setting the context manually, we need to account for types
if isinstance(item, types.ListType):
for sub_item in item:
partial_plist = plist_lib.PlistValueToPlainValue(sub_item)
if matcher.Matches(partial_plist):
reply.Append(sub_item)
else:
partial_plist = plist_lib.PlistValueToPlainValue(item)
if matcher.Matches(partial_plist):
reply.Append(partial_plist)
self.SendReply(reply) | unknown | codeparrot/codeparrot-clean | ||
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#include "ndsrvp_hal.hpp"
#include "opencv2/imgproc/hal/interface.h"
#include "cvutils.hpp"
namespace cv {
namespace ndsrvp {
int remap32f(int src_type, const uchar* src_data, size_t src_step, int src_width, int src_height,
uchar* dst_data, size_t dst_step, int dst_width, int dst_height, float* mapx, size_t mapx_step,
float* mapy, size_t mapy_step, int interpolation, int border_type, const double border_value[4])
{
const bool isRelative = ((interpolation & CV_HAL_WARP_RELATIVE_MAP) != 0);
interpolation &= ~CV_HAL_WARP_RELATIVE_MAP;
if( interpolation == CV_HAL_INTER_AREA )
interpolation = CV_HAL_INTER_LINEAR;
if( interpolation != CV_HAL_INTER_NEAREST )
return CV_HAL_ERROR_NOT_IMPLEMENTED;
// only CV_8U
if( (src_type & CV_MAT_DEPTH_MASK) != CV_8U )
return CV_HAL_ERROR_NOT_IMPLEMENTED;
int cn = CV_MAT_CN(src_type);
src_step /= sizeof(uchar);
dst_step /= sizeof(uchar);
// mapping CV_32FC1
mapx_step /= sizeof(float);
mapy_step /= sizeof(float);
// border
uchar border_const[CV_CN_MAX];
for( int k = 0; k < CV_CN_MAX; k++ )
border_const[k] = saturate_cast<uchar>(border_value[k & 3]);
// divide into blocks
const int BLOCK_SIZE = 1024;
int x, y, x1, y1;
std::array<short, BLOCK_SIZE * BLOCK_SIZE * 2> aXY;
short* XY = aXY.data();
size_t XY_step = BLOCK_SIZE * 2;
// vectorize
const int32x2_t src_wh = {src_width, src_height};
const int32x2_t arr_index = {cn, (int)src_step};
for (y = 0; y < dst_height; y += BLOCK_SIZE)
{
int dy = std::min(BLOCK_SIZE, dst_height - y);
for (x = 0; x < dst_width; x += BLOCK_SIZE)
{
const int off_y = isRelative ? y : 0;
const int off_x = isRelative ? x : 0;
const int32x2_t voff = {off_x, off_y};
int dx = std::min(BLOCK_SIZE, dst_width - x);
// prepare mapping data XY
for (y1 = 0; y1 < dy; y1++)
{
short* rXY = XY + y1 * XY_step;
const float* sX = mapx + (y + y1) * mapx_step + x;
const float* sY = mapy + (y + y1) * mapy_step + x;
for (x1 = 0; x1 < dx; x1++)
{
rXY[x1 * 2] = saturate_cast<short>(sX[x1]);
rXY[x1 * 2 + 1] = saturate_cast<short>(sY[x1]);
}
}
// precalulate offset
if(isRelative)
{
int16x8_t voff_x;
int16x8_t voff_y = {0, 0, 1, 0, 2, 0, 3, 0};
int16x8_t vones_x = {4, 0, 4, 0, 4, 0, 4, 0};
int16x8_t vones_y = {0, 1, 0, 1, 0, 1, 0, 1};
for(y1 = 0; y1 < BLOCK_SIZE; y1++, voff_y += vones_y)
{
int16x8_t* vrXY = (int16x8_t*)(XY + y1 * XY_step);
for(x1 = 0, voff_x = voff_y; x1 < BLOCK_SIZE; x1 += 4, vrXY++, voff_x += vones_x)
{
*vrXY += voff_x;
}
}
}
// process the block
for( y1 = 0; y1 < dy; y1++ )
{
uchar* dst_row = dst_data + (y + y1) * dst_step + x * cn;
const short* rXY = XY + y1 * XY_step;
if( cn == 1 )
{
for( x1 = 0; x1 < dx; x1++ )
{
int32x2_t vsxy = (int32x2_t){rXY[x1 * 2], rXY[x1 * 2 + 1]} + voff;
if( (long)((uint32x2_t)vsxy < (uint32x2_t)src_wh) == -1 )
dst_row[x1] = src_data[__nds__v_smar64(0, vsxy, arr_index)];
else
{
if( border_type == CV_HAL_BORDER_REPLICATE )
{
vsxy = vclip(vsxy, (int32x2_t){0, 0}, src_wh);
dst_row[x1] = src_data[__nds__v_smar64(0, vsxy, arr_index)];
}
else if( border_type == CV_HAL_BORDER_CONSTANT )
dst_row[x1] = border_const[0];
else if( border_type != CV_HAL_BORDER_TRANSPARENT )
{
vsxy[0] = borderInterpolate(vsxy[0], src_width, border_type);
vsxy[1] = borderInterpolate(vsxy[1], src_height, border_type);
dst_row[x1] = src_data[__nds__v_smar64(0, vsxy, arr_index)];
}
}
}
}
else
{
uchar* dst_ptr = dst_row;
for(x1 = 0; x1 < dx; x1++, dst_ptr += cn )
{
int32x2_t vsxy = (int32x2_t){rXY[x1 * 2], rXY[x1 * 2 + 1]} + voff;
const uchar *src_ptr;
if( (long)((uint32x2_t)vsxy < (uint32x2_t)src_wh) == -1 )
{
if( cn == 3 )
{
src_ptr = (uchar*)__nds__v_smar64((long)src_data, vsxy, arr_index);
dst_ptr[0] = src_ptr[0]; dst_ptr[1] = src_ptr[1]; dst_ptr[2] = src_ptr[2];
// performance loss, commented out
// *(unsigned*)dst_ptr = __nds__bpick(*(unsigned*)dst_ptr, *(unsigned*)src_ptr, 0xFF000000);
}
else if( cn == 4 )
{
src_ptr = (uchar*)__nds__v_smar64((long)src_data, vsxy, arr_index);
*(uint8x4_t*)dst_ptr = *(uint8x4_t*)src_ptr;
}
else
{
src_ptr = (uchar*)__nds__v_smar64((long)src_data, vsxy, arr_index);
int k = cn;
for(; k >= 8; k -= 8, dst_ptr += 8, src_ptr += 8)
*(uint8x8_t*)dst_ptr = *(uint8x8_t*)src_ptr;
while( k-- )
dst_ptr[k] = src_ptr[k];
}
}
else if( border_type != CV_HAL_BORDER_TRANSPARENT )
{
if( border_type == CV_HAL_BORDER_REPLICATE )
{
vsxy = vclip(vsxy, (int32x2_t){0, 0}, src_wh);
src_ptr = (uchar*)__nds__v_smar64((long)src_data, vsxy, arr_index);
}
else if( border_type == CV_HAL_BORDER_CONSTANT )
src_ptr = &border_const[0];
else
{
vsxy[0] = borderInterpolate(vsxy[0], src_width, border_type);
vsxy[1] = borderInterpolate(vsxy[1], src_height, border_type);
src_ptr = (uchar*)__nds__v_smar64((long)src_data, vsxy, arr_index);
}
int k = cn;
for(; k >= 8; k -= 8, dst_ptr += 8, src_ptr += 8)
*(uint8x8_t*)dst_ptr = *(uint8x8_t*)src_ptr;
while( k-- )
dst_ptr[k] = src_ptr[k];
}
}
}
}
}
}
return CV_HAL_ERROR_OK;
}
} // namespace ndsrvp
} // namespace cv | cpp | github | https://github.com/opencv/opencv | hal/ndsrvp/src/remap.cpp |
// SPDX-License-Identifier: GPL-2.0
/*
* linux/mm/compaction.c
*
* Memory compaction for the reduction of external fragmentation. Note that
* this heavily depends upon page migration to do all the real heavy
* lifting
*
* Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
*/
#include <linux/cpu.h>
#include <linux/swap.h>
#include <linux/migrate.h>
#include <linux/compaction.h>
#include <linux/mm_inline.h>
#include <linux/sched/signal.h>
#include <linux/backing-dev.h>
#include <linux/sysctl.h>
#include <linux/sysfs.h>
#include <linux/page-isolation.h>
#include <linux/kasan.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/page_owner.h>
#include <linux/psi.h>
#include <linux/cpuset.h>
#include "internal.h"
#ifdef CONFIG_COMPACTION
/*
* Fragmentation score check interval for proactive compaction purposes.
*/
#define HPAGE_FRAG_CHECK_INTERVAL_MSEC (500)
static inline void count_compact_event(enum vm_event_item item)
{
count_vm_event(item);
}
static inline void count_compact_events(enum vm_event_item item, long delta)
{
count_vm_events(item, delta);
}
/*
* order == -1 is expected when compacting proactively via
* 1. /proc/sys/vm/compact_memory
* 2. /sys/devices/system/node/nodex/compact
* 3. /proc/sys/vm/compaction_proactiveness
*/
static inline bool is_via_compact_memory(int order)
{
return order == -1;
}
#else
#define count_compact_event(item) do { } while (0)
#define count_compact_events(item, delta) do { } while (0)
static inline bool is_via_compact_memory(int order) { return false; }
#endif
#if defined CONFIG_COMPACTION || defined CONFIG_CMA
#define CREATE_TRACE_POINTS
#include <trace/events/compaction.h>
#define block_start_pfn(pfn, order) round_down(pfn, 1UL << (order))
#define block_end_pfn(pfn, order) ALIGN((pfn) + 1, 1UL << (order))
/*
* Page order with-respect-to which proactive compaction
* calculates external fragmentation, which is used as
* the "fragmentation score" of a node/zone.
*/
#if defined CONFIG_TRANSPARENT_HUGEPAGE
#define COMPACTION_HPAGE_ORDER HPAGE_PMD_ORDER
#elif defined CONFIG_HUGETLBFS
#define COMPACTION_HPAGE_ORDER HUGETLB_PAGE_ORDER
#else
#define COMPACTION_HPAGE_ORDER (PMD_SHIFT - PAGE_SHIFT)
#endif
static struct page *mark_allocated_noprof(struct page *page, unsigned int order, gfp_t gfp_flags)
{
post_alloc_hook(page, order, __GFP_MOVABLE);
set_page_refcounted(page);
return page;
}
#define mark_allocated(...) alloc_hooks(mark_allocated_noprof(__VA_ARGS__))
static unsigned long release_free_list(struct list_head *freepages)
{
int order;
unsigned long high_pfn = 0;
for (order = 0; order < NR_PAGE_ORDERS; order++) {
struct page *page, *next;
list_for_each_entry_safe(page, next, &freepages[order], lru) {
unsigned long pfn = page_to_pfn(page);
list_del(&page->lru);
/*
* Convert free pages into post allocation pages, so
* that we can free them via __free_page.
*/
mark_allocated(page, order, __GFP_MOVABLE);
__free_pages(page, order);
if (pfn > high_pfn)
high_pfn = pfn;
}
}
return high_pfn;
}
#ifdef CONFIG_COMPACTION
/* Do not skip compaction more than 64 times */
#define COMPACT_MAX_DEFER_SHIFT 6
/*
* Compaction is deferred when compaction fails to result in a page
* allocation success. 1 << compact_defer_shift, compactions are skipped up
* to a limit of 1 << COMPACT_MAX_DEFER_SHIFT
*/
static void defer_compaction(struct zone *zone, int order)
{
zone->compact_considered = 0;
zone->compact_defer_shift++;
if (order < zone->compact_order_failed)
zone->compact_order_failed = order;
if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
trace_mm_compaction_defer_compaction(zone, order);
}
/* Returns true if compaction should be skipped this time */
static bool compaction_deferred(struct zone *zone, int order)
{
unsigned long defer_limit = 1UL << zone->compact_defer_shift;
if (order < zone->compact_order_failed)
return false;
/* Avoid possible overflow */
if (++zone->compact_considered >= defer_limit) {
zone->compact_considered = defer_limit;
return false;
}
trace_mm_compaction_deferred(zone, order);
return true;
}
/*
* Update defer tracking counters after successful compaction of given order,
* which means an allocation either succeeded (alloc_success == true) or is
* expected to succeed.
*/
void compaction_defer_reset(struct zone *zone, int order,
bool alloc_success)
{
if (alloc_success) {
zone->compact_considered = 0;
zone->compact_defer_shift = 0;
}
if (order >= zone->compact_order_failed)
zone->compact_order_failed = order + 1;
trace_mm_compaction_defer_reset(zone, order);
}
/* Returns true if restarting compaction after many failures */
static bool compaction_restarting(struct zone *zone, int order)
{
if (order < zone->compact_order_failed)
return false;
return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT &&
zone->compact_considered >= 1UL << zone->compact_defer_shift;
}
/* Returns true if the pageblock should be scanned for pages to isolate. */
static inline bool isolation_suitable(struct compact_control *cc,
struct page *page)
{
if (cc->ignore_skip_hint)
return true;
return !get_pageblock_skip(page);
}
static void reset_cached_positions(struct zone *zone)
{
zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn;
zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn;
zone->compact_cached_free_pfn =
pageblock_start_pfn(zone_end_pfn(zone) - 1);
}
#ifdef CONFIG_SPARSEMEM
/*
* If the PFN falls into an offline section, return the start PFN of the
* next online section. If the PFN falls into an online section or if
* there is no next online section, return 0.
*/
static unsigned long skip_offline_sections(unsigned long start_pfn)
{
unsigned long start_nr = pfn_to_section_nr(start_pfn);
if (online_section_nr(start_nr))
return 0;
while (++start_nr <= __highest_present_section_nr) {
if (online_section_nr(start_nr))
return section_nr_to_pfn(start_nr);
}
return 0;
}
/*
* If the PFN falls into an offline section, return the end PFN of the
* next online section in reverse. If the PFN falls into an online section
* or if there is no next online section in reverse, return 0.
*/
/*
 * If the PFN falls into an offline section, return the end PFN of the
 * next online section in reverse. If the PFN falls into an online section
 * or if there is no next online section in reverse, return 0.
 */
static unsigned long skip_offline_sections_reverse(unsigned long start_pfn)
{
	unsigned long nr = pfn_to_section_nr(start_pfn);

	if (!nr || online_section_nr(nr))
		return 0;

	/* Scan backward (nr - 1 down to 0) for the first online section. */
	do {
		nr--;
		if (online_section_nr(nr))
			return section_nr_to_pfn(nr) + PAGES_PER_SECTION;
	} while (nr);

	return 0;
}
#else
/* !CONFIG_SPARSEMEM stub: no offline sections exist, never skip. */
static unsigned long skip_offline_sections(unsigned long start_pfn)
{
	return 0;
}
/* !CONFIG_SPARSEMEM stub: no offline sections exist, never skip. */
static unsigned long skip_offline_sections_reverse(unsigned long start_pfn)
{
	return 0;
}
#endif
/*
* Compound pages of >= pageblock_order should consistently be skipped until
* released. It is always pointless to compact pages of such order (if they are
* migratable), and the pageblocks they occupy cannot contain any free pages.
*/
/*
 * Compound pages of >= pageblock_order should consistently be skipped until
 * released. It is always pointless to compact pages of such order (if they
 * are migratable), and the pageblocks they occupy cannot contain any free
 * pages.
 */
static bool pageblock_skip_persistent(struct page *page)
{
	struct page *head;

	if (!PageCompound(page))
		return false;

	head = compound_head(page);

	return compound_order(head) >= pageblock_order;
}
/*
 * Decide whether the pageblock containing @pfn may have its skip hint
 * cleared so a scanner can restart from it.  @check_source treats it as a
 * migration source (samples for LRU pages), @check_target as a free-page
 * target (samples for buddy pages).  Returns true when the block is (or
 * was already) clear and usable as a restart point.
 */
static bool
__reset_isolation_pfn(struct zone *zone, unsigned long pfn, bool check_source,
							bool check_target)
{
	struct page *page = pfn_to_online_page(pfn);
	struct page *block_page;
	struct page *end_page;
	unsigned long block_pfn;
	if (!page)
		return false;
	if (zone != page_zone(page))
		return false;
	if (pageblock_skip_persistent(page))
		return false;
	/*
	 * If skip is already cleared do no further checking once the
	 * restart points have been set.
	 */
	if (check_source && check_target && !get_pageblock_skip(page))
		return true;
	/*
	 * If clearing skip for the target scanner, do not select a
	 * non-movable pageblock as the starting point.
	 */
	if (!check_source && check_target &&
	    get_pageblock_migratetype(page) != MIGRATE_MOVABLE)
		return false;
	/* Ensure the start of the pageblock or zone is online and valid */
	block_pfn = pageblock_start_pfn(pfn);
	block_pfn = max(block_pfn, zone->zone_start_pfn);
	block_page = pfn_to_online_page(block_pfn);
	if (block_page) {
		page = block_page;
		pfn = block_pfn;
	}
	/* Ensure the end of the pageblock or zone is online and valid */
	block_pfn = pageblock_end_pfn(pfn) - 1;
	block_pfn = min(block_pfn, zone_end_pfn(zone) - 1);
	end_page = pfn_to_online_page(block_pfn);
	if (!end_page)
		return false;
	/*
	 * Only clear the hint if a sample indicates there is either a
	 * free page or an LRU page in the block. One or other condition
	 * is necessary for the block to be a migration source/target.
	 */
	do {
		if (check_source && PageLRU(page)) {
			clear_pageblock_skip(page);
			return true;
		}
		if (check_target && PageBuddy(page)) {
			clear_pageblock_skip(page);
			return true;
		}
		/* Sample one page every 1 << PAGE_ALLOC_COSTLY_ORDER pages. */
		page += (1 << PAGE_ALLOC_COSTLY_ORDER);
	} while (page <= end_page);
	return false;
}
/*
* This function is called to clear all cached information on pageblocks that
* should be skipped for page isolation when the migrate and free page scanner
* meet.
*/
/*
 * Walk the migrate scanner forward and the free scanner backward in
 * lock-step, clearing skip hints and recording the first suitable restart
 * point found for each scanner.
 */
static void __reset_isolation_suitable(struct zone *zone)
{
	unsigned long migrate_pfn = zone->zone_start_pfn;
	unsigned long free_pfn = zone_end_pfn(zone) - 1;
	unsigned long reset_migrate = free_pfn;
	unsigned long reset_free = migrate_pfn;
	bool source_set = false;
	bool free_set = false;
	/* Only flush if a full compaction finished recently */
	if (!zone->compact_blockskip_flush)
		return;
	zone->compact_blockskip_flush = false;
	/*
	 * Walk the zone and update pageblock skip information. Source looks
	 * for PageLRU while target looks for PageBuddy. When the scanner
	 * is found, both PageBuddy and PageLRU are checked as the pageblock
	 * is suitable as both source and target.
	 */
	for (; migrate_pfn < free_pfn; migrate_pfn += pageblock_nr_pages,
					free_pfn -= pageblock_nr_pages) {
		cond_resched();
		/* Update the migrate PFN */
		if (__reset_isolation_pfn(zone, migrate_pfn, true, source_set) &&
		    migrate_pfn < reset_migrate) {
			source_set = true;
			reset_migrate = migrate_pfn;
			zone->compact_init_migrate_pfn = reset_migrate;
			zone->compact_cached_migrate_pfn[0] = reset_migrate;
			zone->compact_cached_migrate_pfn[1] = reset_migrate;
		}
		/* Update the free PFN */
		if (__reset_isolation_pfn(zone, free_pfn, free_set, true) &&
		    free_pfn > reset_free) {
			free_set = true;
			reset_free = free_pfn;
			zone->compact_init_free_pfn = reset_free;
			zone->compact_cached_free_pfn = reset_free;
		}
	}
	/* Leave no distance if no suitable block was reset */
	if (reset_migrate >= reset_free) {
		zone->compact_cached_migrate_pfn[0] = migrate_pfn;
		zone->compact_cached_migrate_pfn[1] = migrate_pfn;
		zone->compact_cached_free_pfn = free_pfn;
	}
}
/* Reset pageblock skip hints for every populated zone of @pgdat. */
void reset_isolation_suitable(pg_data_t *pgdat)
{
	int zoneid;

	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
		struct zone *zone = &pgdat->node_zones[zoneid];

		if (populated_zone(zone))
			__reset_isolation_suitable(zone);
	}
}
/*
* Sets the pageblock skip bit if it was clear. Note that this is a hint as
* locks are not required for read/writers. Returns true if it was already set.
*/
/*
 * Sets the pageblock skip bit if it was clear. Note that this is a hint as
 * locks are not required for read/writers. Returns true if it was already
 * set.
 */
static bool test_and_set_skip(struct compact_control *cc, struct page *page)
{
	bool was_set;

	/* Do not update if skip hint is being ignored */
	if (cc->ignore_skip_hint)
		return false;

	was_set = get_pageblock_skip(page);
	if (!was_set && !cc->no_set_skip_hint)
		set_pageblock_skip(page);

	return was_set;
}
/* Push the cached migrate-scanner restart points past @pfn's pageblock. */
static void update_cached_migrate(struct compact_control *cc, unsigned long pfn)
{
	struct zone *zone = cc->zone;
	unsigned long end;

	/* Set for isolation rather than compaction */
	if (cc->no_set_skip_hint)
		return;

	end = pageblock_end_pfn(pfn);

	/* Async restart point ([0]) always advances ... */
	if (end > zone->compact_cached_migrate_pfn[0])
		zone->compact_cached_migrate_pfn[0] = end;

	/* ... the sync restart point ([1]) only for non-async modes. */
	if (cc->mode != MIGRATE_ASYNC &&
	    end > zone->compact_cached_migrate_pfn[1])
		zone->compact_cached_migrate_pfn[1] = end;
}
/*
* If no pages were isolated then mark this pageblock to be skipped in the
* future. The information is later cleared by __reset_isolation_suitable().
*/
/*
 * If no pages were isolated then mark this pageblock to be skipped in the
 * future. The information is later cleared by __reset_isolation_suitable().
 */
static void update_pageblock_skip(struct compact_control *cc,
			struct page *page, unsigned long pfn)
{
	if (cc->no_set_skip_hint)
		return;

	set_pageblock_skip(page);

	/* Pull the cached free-scanner restart point back if needed. */
	if (pfn < cc->zone->compact_cached_free_pfn)
		cc->zone->compact_cached_free_pfn = pfn;
}
#else
/* CONFIG_COMPACTION=n stub: without skip hints, every block is scanned. */
static inline bool isolation_suitable(struct compact_control *cc,
					struct page *page)
{
	return true;
}
/* CONFIG_COMPACTION=n stub: no pageblock is persistently skipped. */
static inline bool pageblock_skip_persistent(struct page *page)
{
	return false;
}
/* CONFIG_COMPACTION=n stub: no skip hints to update. */
static inline void update_pageblock_skip(struct compact_control *cc,
					struct page *page, unsigned long pfn)
{
}
/* CONFIG_COMPACTION=n stub: no cached migrate pfn to update. */
static void update_cached_migrate(struct compact_control *cc, unsigned long pfn)
{
}
/* CONFIG_COMPACTION=n stub: skip bit is never set, report "was clear". */
static bool test_and_set_skip(struct compact_control *cc, struct page *page)
{
	return false;
}
#endif /* CONFIG_COMPACTION */
/*
* Compaction requires the taking of some coarse locks that are potentially
* very heavily contended. For async compaction, trylock and record if the
* lock is contended. The lock will still be acquired but compaction will
* abort when the current block is finished regardless of success rate.
* Sync compaction acquires the lock.
*
* Always returns true which makes it easier to track lock state in callers.
*/
static bool compact_lock_irqsave(spinlock_t *lock, unsigned long *flags,
						struct compact_control *cc)
	__acquires(lock)
{
	/* Track if the lock is contended in async mode */
	if (cc->mode == MIGRATE_ASYNC && !cc->contended) {
		if (spin_trylock_irqsave(lock, *flags))
			return true;
		/* Trylock failed: record contention, fall through and block. */
		cc->contended = true;
	}
	/* Sync mode (or already-contended async) acquires unconditionally. */
	spin_lock_irqsave(lock, *flags);
	return true;
}
/*
* Compaction requires the taking of some coarse locks that are potentially
* very heavily contended. The lock should be periodically unlocked to avoid
* having disabled IRQs for a long time, even when there is nobody waiting on
* the lock. It might also be that allowing the IRQs will result in
* need_resched() becoming true. If scheduling is needed, compaction schedules.
* Either compaction type will also abort if a fatal signal is pending.
* In either case if the lock was locked, it is dropped and not regained.
*
* Returns true if compaction should abort due to fatal signal pending.
* Returns false when compaction can continue.
*/
/*
 * Drop @lock (if *locked) to give IRQs a chance, then decide whether to
 * abort.  Returns true if a fatal signal is pending and compaction must
 * abort; false when it may continue.  The lock is never re-taken here.
 */
static bool compact_unlock_should_abort(spinlock_t *lock,
		unsigned long flags, bool *locked, struct compact_control *cc)
{
	if (*locked) {
		*locked = false;
		spin_unlock_irqrestore(lock, flags);
	}

	if (fatal_signal_pending(current)) {
		cc->contended = true;
		return true;
	}

	cond_resched();

	return false;
}
/*
* Isolate free pages onto a private freelist. If @strict is true, will abort
* returning 0 on any invalid PFNs or non-free pages inside of the pageblock
* (even though it may still end up isolating some pages).
*/
static unsigned long isolate_freepages_block(struct compact_control *cc,
				unsigned long *start_pfn,
				unsigned long end_pfn,
				struct list_head *freelist,
				unsigned int stride,
				bool strict)
{
	int nr_scanned = 0, total_isolated = 0;
	struct page *page;
	unsigned long flags = 0;
	bool locked = false;
	unsigned long blockpfn = *start_pfn;
	unsigned int order;
	/* Strict mode is for isolation, speed is secondary */
	if (strict)
		stride = 1;
	page = pfn_to_page(blockpfn);
	/* Isolate free pages. */
	for (; blockpfn < end_pfn; blockpfn += stride, page += stride) {
		int isolated;
		/*
		 * Periodically drop the lock (if held) regardless of its
		 * contention, to give chance to IRQs. Abort if fatal signal
		 * pending.
		 */
		if (!(blockpfn % COMPACT_CLUSTER_MAX)
		    && compact_unlock_should_abort(&cc->zone->lock, flags,
								&locked, cc))
			break;
		nr_scanned++;
		/*
		 * For compound pages such as THP and hugetlbfs, we can save
		 * potentially a lot of iterations if we skip them at once.
		 * The check is racy, but we can consider only valid values
		 * and the only danger is skipping too much.
		 */
		if (PageCompound(page)) {
			const unsigned int order = compound_order(page);
			/* Only trust an order read within the valid range. */
			if ((order <= MAX_PAGE_ORDER) &&
			    (blockpfn + (1UL << order) <= end_pfn)) {
				blockpfn += (1UL << order) - 1;
				page += (1UL << order) - 1;
				nr_scanned += (1UL << order) - 1;
			}
			goto isolate_fail;
		}
		if (!PageBuddy(page))
			goto isolate_fail;
		/* If we already hold the lock, we can skip some rechecking. */
		if (!locked) {
			locked = compact_lock_irqsave(&cc->zone->lock,
								&flags, cc);
			/* Recheck this is a buddy page under lock */
			if (!PageBuddy(page))
				goto isolate_fail;
		}
		/* Found a free page, will break it into order-0 pages */
		order = buddy_order(page);
		isolated = __isolate_free_page(page, order);
		if (!isolated)
			break;
		set_page_private(page, order);
		nr_scanned += isolated - 1;
		total_isolated += isolated;
		cc->nr_freepages += isolated;
		list_add_tail(&page->lru, &freelist[order]);
		/* Stop early once migration demand is covered (non-strict). */
		if (!strict && cc->nr_migratepages <= cc->nr_freepages) {
			blockpfn += isolated;
			break;
		}
		/* Advance to the end of split page */
		blockpfn += isolated - 1;
		page += isolated - 1;
		continue;
isolate_fail:
		if (strict)
			break;
	}
	if (locked)
		spin_unlock_irqrestore(&cc->zone->lock, flags);
	/*
	 * Be careful to not go outside of the pageblock.
	 */
	if (unlikely(blockpfn > end_pfn))
		blockpfn = end_pfn;
	trace_mm_compaction_isolate_freepages(*start_pfn, blockpfn,
					nr_scanned, total_isolated);
	/* Record how far we have got within the block */
	*start_pfn = blockpfn;
	/*
	 * If strict isolation is requested by CMA then check that all the
	 * pages requested were isolated. If there were any failures, 0 is
	 * returned and CMA will fail.
	 */
	if (strict && blockpfn < end_pfn)
		total_isolated = 0;
	cc->total_free_scanned += nr_scanned;
	if (total_isolated)
		count_compact_events(COMPACTISOLATED, total_isolated);
	return total_isolated;
}
/**
* isolate_freepages_range() - isolate free pages.
* @cc: Compaction control structure.
* @start_pfn: The first PFN to start isolating.
* @end_pfn: The one-past-last PFN.
*
* Non-free pages, invalid PFNs, or zone boundaries within the
* [start_pfn, end_pfn) range are considered errors, cause function to
* undo its actions and return zero. cc->freepages[] are empty.
*
* Otherwise, function returns one-past-the-last PFN of isolated page
* (which may be greater then end_pfn if end fell in a middle of
* a free page). cc->freepages[] contain free pages isolated.
*/
unsigned long
isolate_freepages_range(struct compact_control *cc,
			unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long isolated, pfn, block_start_pfn, block_end_pfn;
	int order;
	/* Start with empty per-order freelists. */
	for (order = 0; order < NR_PAGE_ORDERS; order++)
		INIT_LIST_HEAD(&cc->freepages[order]);
	pfn = start_pfn;
	block_start_pfn = pageblock_start_pfn(pfn);
	if (block_start_pfn < cc->zone->zone_start_pfn)
		block_start_pfn = cc->zone->zone_start_pfn;
	block_end_pfn = pageblock_end_pfn(pfn);
	/* Walk pageblock by pageblock, isolating in strict mode. */
	for (; pfn < end_pfn; pfn += isolated,
				block_start_pfn = block_end_pfn,
				block_end_pfn += pageblock_nr_pages) {
		/* Protect pfn from changing by isolate_freepages_block */
		unsigned long isolate_start_pfn = pfn;
		/*
		 * pfn could pass the block_end_pfn if isolated freepage
		 * is more than pageblock order. In this case, we adjust
		 * scanning range to right one.
		 */
		if (pfn >= block_end_pfn) {
			block_start_pfn = pageblock_start_pfn(pfn);
			block_end_pfn = pageblock_end_pfn(pfn);
		}
		block_end_pfn = min(block_end_pfn, end_pfn);
		if (!pageblock_pfn_to_page(block_start_pfn,
					block_end_pfn, cc->zone))
			break;
		isolated = isolate_freepages_block(cc, &isolate_start_pfn,
					block_end_pfn, cc->freepages, 0, true);
		/*
		 * In strict mode, isolate_freepages_block() returns 0 if
		 * there are any holes in the block (ie. invalid PFNs or
		 * non-free pages).
		 */
		if (!isolated)
			break;
		/*
		 * If we managed to isolate pages, it is always (1 << n) *
		 * pageblock_nr_pages for some non-negative n. (Max order
		 * page may span two pageblocks).
		 */
	}
	if (pfn < end_pfn) {
		/* Loop terminated early, cleanup. */
		release_free_list(cc->freepages);
		return 0;
	}
	/* We don't use freelists for anything. */
	return pfn;
}
/* Similar to reclaim, but different enough that they don't share logic */
static bool too_many_isolated(struct compact_control *cc)
{
	pg_data_t *pgdat = cc->zone->zone_pgdat;
	bool too_many;
	unsigned long active, inactive, isolated;
	inactive = node_page_state(pgdat, NR_INACTIVE_FILE) +
			node_page_state(pgdat, NR_INACTIVE_ANON);
	active = node_page_state(pgdat, NR_ACTIVE_FILE) +
			node_page_state(pgdat, NR_ACTIVE_ANON);
	isolated = node_page_state(pgdat, NR_ISOLATED_FILE) +
			node_page_state(pgdat, NR_ISOLATED_ANON);
	/*
	 * Allow GFP_NOFS to isolate past the limit set for regular
	 * compaction runs. This prevents an ABBA deadlock when other
	 * compactors have already isolated to the limit, but are
	 * blocked on filesystem locks held by the GFP_NOFS thread.
	 * (The limit is shrunk for GFP_FS callers, so NOFS callers
	 * effectively get the larger headroom.)
	 */
	if (cc->gfp_mask & __GFP_FS) {
		inactive >>= 3;
		active >>= 3;
	}
	/* "Too many" = isolated exceeds half of the (scaled) LRU size. */
	too_many = isolated > (inactive + active) / 2;
	if (!too_many)
		wake_throttle_isolated(pgdat);
	return too_many;
}
/**
* skip_isolation_on_order() - determine when to skip folio isolation based on
* folio order and compaction target order
* @order: to-be-isolated folio order
* @target_order: compaction target order
*
* This avoids unnecessary folio isolations during compaction.
*/
static bool skip_isolation_on_order(int order, int target_order)
{
	/*
	 * Folios at or above the target order cannot help form a free
	 * folio of target_order — if one existed we would not be here —
	 * so skip them, except during global compaction
	 * (is_via_compact_memory), which has no specific target.
	 */
	if (order >= target_order && !is_via_compact_memory(target_order))
		return true;

	/* Compaction never builds free blocks beyond a pageblock. */
	return order >= pageblock_order;
}
/**
* isolate_migratepages_block() - isolate all migrate-able pages within
* a single pageblock
* @cc: Compaction control structure.
* @low_pfn: The first PFN to isolate
* @end_pfn: The one-past-the-last PFN to isolate, within same pageblock
* @mode: Isolation mode to be used.
*
* Isolate all pages that can be migrated from the range specified by
* [low_pfn, end_pfn). The range is expected to be within same pageblock.
* Returns errno, like -EAGAIN or -EINTR in case e.g signal pending or congestion,
* -ENOMEM in case we could not allocate a page, or 0.
* cc->migrate_pfn will contain the next pfn to scan.
*
* The pages are isolated on cc->migratepages list (not required to be empty),
* and cc->nr_migratepages is updated accordingly.
*/
static int
isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
			unsigned long end_pfn, isolate_mode_t mode)
{
	pg_data_t *pgdat = cc->zone->zone_pgdat;
	unsigned long nr_scanned = 0, nr_isolated = 0;
	struct lruvec *lruvec;
	unsigned long flags = 0;
	struct lruvec *locked = NULL;	/* lruvec whose lru_lock we hold */
	struct folio *folio = NULL;
	struct page *page = NULL, *valid_page = NULL;
	struct address_space *mapping;
	unsigned long start_pfn = low_pfn;
	bool skip_on_failure = false;
	unsigned long next_skip_pfn = 0;
	bool skip_updated = false;
	int ret = 0;
	cc->migrate_pfn = low_pfn;
	/*
	 * Ensure that there are not too many pages isolated from the LRU
	 * list by either parallel reclaimers or compaction. If there are,
	 * delay for some time until fewer pages are isolated
	 */
	while (unlikely(too_many_isolated(cc))) {
		/* stop isolation if there are still pages not migrated */
		if (cc->nr_migratepages)
			return -EAGAIN;
		/* async migration should just abort */
		if (cc->mode == MIGRATE_ASYNC)
			return -EAGAIN;
		reclaim_throttle(pgdat, VMSCAN_THROTTLE_ISOLATED);
		if (fatal_signal_pending(current))
			return -EINTR;
	}
	cond_resched();
	/*
	 * Async direct compaction gives up on an order-aligned sub-block
	 * as soon as isolation fails within it.
	 */
	if (cc->direct_compaction && (cc->mode == MIGRATE_ASYNC)) {
		skip_on_failure = true;
		next_skip_pfn = block_end_pfn(low_pfn, cc->order);
	}
	/* Time to isolate some pages for migration */
	for (; low_pfn < end_pfn; low_pfn++) {
		bool is_dirty, is_unevictable;
		if (skip_on_failure && low_pfn >= next_skip_pfn) {
			/*
			 * We have isolated all migration candidates in the
			 * previous order-aligned block, and did not skip it due
			 * to failure. We should migrate the pages now and
			 * hopefully succeed compaction.
			 */
			if (nr_isolated)
				break;
			/*
			 * We failed to isolate in the previous order-aligned
			 * block. Set the new boundary to the end of the
			 * current block. Note we can't simply increase
			 * next_skip_pfn by 1 << order, as low_pfn might have
			 * been incremented by a higher number due to skipping
			 * a compound or a high-order buddy page in the
			 * previous loop iteration.
			 */
			next_skip_pfn = block_end_pfn(low_pfn, cc->order);
		}
		/*
		 * Periodically drop the lock (if held) regardless of its
		 * contention, to give chance to IRQs. Abort completely if
		 * a fatal signal is pending.
		 */
		if (!(low_pfn % COMPACT_CLUSTER_MAX)) {
			if (locked) {
				unlock_page_lruvec_irqrestore(locked, flags);
				locked = NULL;
			}
			if (fatal_signal_pending(current)) {
				cc->contended = true;
				ret = -EINTR;
				goto fatal_pending;
			}
			cond_resched();
		}
		nr_scanned++;
		page = pfn_to_page(low_pfn);
		/*
		 * Check if the pageblock has already been marked skipped.
		 * Only the first PFN is checked as the caller isolates
		 * COMPACT_CLUSTER_MAX at a time so the second call must
		 * not falsely conclude that the block should be skipped.
		 */
		if (!valid_page && (pageblock_aligned(low_pfn) ||
				    low_pfn == cc->zone->zone_start_pfn)) {
			if (!isolation_suitable(cc, page)) {
				low_pfn = end_pfn;
				folio = NULL;
				goto isolate_abort;
			}
			valid_page = page;
		}
		if (PageHuge(page)) {
			const unsigned int order = compound_order(page);
			/*
			 * skip hugetlbfs if we are not compacting for pages
			 * bigger than its order. THPs and other compound pages
			 * are handled below.
			 */
			if (!cc->alloc_contig) {
				if (order <= MAX_PAGE_ORDER) {
					low_pfn += (1UL << order) - 1;
					nr_scanned += (1UL << order) - 1;
				}
				goto isolate_fail;
			}
			/* for alloc_contig case */
			if (locked) {
				unlock_page_lruvec_irqrestore(locked, flags);
				locked = NULL;
			}
			folio = page_folio(page);
			ret = isolate_or_dissolve_huge_folio(folio, &cc->migratepages);
			/*
			 * Fail isolation in case isolate_or_dissolve_huge_folio()
			 * reports an error. In case of -ENOMEM, abort right away.
			 */
			if (ret < 0) {
				 /* Do not report -EBUSY down the chain */
				if (ret == -EBUSY)
					ret = 0;
				low_pfn += (1UL << order) - 1;
				nr_scanned += (1UL << order) - 1;
				goto isolate_fail;
			}
			if (folio_test_hugetlb(folio)) {
				/*
				 * Hugepage was successfully isolated and placed
				 * on the cc->migratepages list.
				 */
				low_pfn += folio_nr_pages(folio) - folio_page_idx(folio, page) - 1;
				goto isolate_success_no_list;
			}
			/*
			 * Ok, the hugepage was dissolved. Now these pages are
			 * Buddy and cannot be re-allocated because they are
			 * isolated. Fall-through as the check below handles
			 * Buddy pages.
			 */
		}
		/*
		 * Skip if free. We read page order here without zone lock
		 * which is generally unsafe, but the race window is small and
		 * the worst thing that can happen is that we skip some
		 * potential isolation targets.
		 */
		if (PageBuddy(page)) {
			unsigned long freepage_order = buddy_order_unsafe(page);
			/*
			 * Without lock, we cannot be sure that what we got is
			 * a valid page order. Consider only values in the
			 * valid order range to prevent low_pfn overflow.
			 */
			if (freepage_order > 0 && freepage_order <= MAX_PAGE_ORDER) {
				low_pfn += (1UL << freepage_order) - 1;
				nr_scanned += (1UL << freepage_order) - 1;
			}
			continue;
		}
		/*
		 * Regardless of being on LRU, compound pages such as THP
		 * (hugetlbfs is handled above) are not to be compacted unless
		 * we are attempting an allocation larger than the compound
		 * page size. We can potentially save a lot of iterations if we
		 * skip them at once. The check is racy, but we can consider
		 * only valid values and the only danger is skipping too much.
		 */
		if (PageCompound(page) && !cc->alloc_contig) {
			const unsigned int order = compound_order(page);
			/* Skip based on page order and compaction target order. */
			if (skip_isolation_on_order(order, cc->order)) {
				if (order <= MAX_PAGE_ORDER) {
					low_pfn += (1UL << order) - 1;
					nr_scanned += (1UL << order) - 1;
				}
				goto isolate_fail;
			}
		}
		/*
		 * Check may be lockless but that's ok as we recheck later.
		 * It's possible to migrate LRU and non-lru movable pages.
		 * Skip any other type of page
		 */
		if (!PageLRU(page)) {
			/* Isolation code will deal with any races. */
			if (unlikely(page_has_movable_ops(page)) &&
			    !PageMovableOpsIsolated(page)) {
				if (locked) {
					unlock_page_lruvec_irqrestore(locked, flags);
					locked = NULL;
				}
				if (isolate_movable_ops_page(page, mode)) {
					folio = page_folio(page);
					goto isolate_success;
				}
			}
			goto isolate_fail;
		}
		/*
		 * Be careful not to clear PageLRU until after we're
		 * sure the page is not being freed elsewhere -- the
		 * page release code relies on it.
		 */
		folio = folio_get_nontail_page(page);
		if (unlikely(!folio))
			goto isolate_fail;
		/*
		 * Migration will fail if an anonymous page is pinned in memory,
		 * so avoid taking lru_lock and isolating it unnecessarily in an
		 * admittedly racy check.
		 */
		mapping = folio_mapping(folio);
		if (!mapping && (folio_ref_count(folio) - 1) > folio_mapcount(folio))
			goto isolate_fail_put;
		/*
		 * Only allow to migrate anonymous pages in GFP_NOFS context
		 * because those do not depend on fs locks.
		 */
		if (!(cc->gfp_mask & __GFP_FS) && mapping)
			goto isolate_fail_put;
		/* Only take pages on LRU: a check now makes later tests safe */
		if (!folio_test_lru(folio))
			goto isolate_fail_put;
		is_unevictable = folio_test_unevictable(folio);
		/* Compaction might skip unevictable pages but CMA takes them */
		if (!(mode & ISOLATE_UNEVICTABLE) && is_unevictable)
			goto isolate_fail_put;
		/*
		 * To minimise LRU disruption, the caller can indicate with
		 * ISOLATE_ASYNC_MIGRATE that it only wants to isolate pages
		 * it will be able to migrate without blocking - clean pages
		 * for the most part. PageWriteback would require blocking.
		 */
		if ((mode & ISOLATE_ASYNC_MIGRATE) && folio_test_writeback(folio))
			goto isolate_fail_put;
		is_dirty = folio_test_dirty(folio);
		if (((mode & ISOLATE_ASYNC_MIGRATE) && is_dirty) ||
		    (mapping && is_unevictable)) {
			bool migrate_dirty = true;
			bool is_inaccessible;
			/*
			 * Only folios without mappings or that have
			 * a ->migrate_folio callback are possible to migrate
			 * without blocking.
			 *
			 * Folios from inaccessible mappings are not migratable.
			 *
			 * However, we can be racing with truncation, which can
			 * free the mapping that we need to check. Truncation
			 * holds the folio lock until after the folio is removed
			 * from the page so holding it ourselves is sufficient.
			 *
			 * To avoid locking the folio just to check inaccessible,
			 * assume every inaccessible folio is also unevictable,
			 * which is a cheaper test. If our assumption goes
			 * wrong, it's not a correctness bug, just potentially
			 * wasted cycles.
			 */
			if (!folio_trylock(folio))
				goto isolate_fail_put;
			/* Re-read mapping under the folio lock (truncation race). */
			mapping = folio_mapping(folio);
			if ((mode & ISOLATE_ASYNC_MIGRATE) && is_dirty) {
				migrate_dirty = !mapping ||
						mapping->a_ops->migrate_folio;
			}
			is_inaccessible = mapping && mapping_inaccessible(mapping);
			folio_unlock(folio);
			if (!migrate_dirty || is_inaccessible)
				goto isolate_fail_put;
		}
		/* Try isolate the folio */
		if (!folio_test_clear_lru(folio))
			goto isolate_fail_put;
		lruvec = folio_lruvec(folio);
		/* If we already hold the lock, we can skip some rechecking */
		if (lruvec != locked) {
			if (locked)
				unlock_page_lruvec_irqrestore(locked, flags);
			compact_lock_irqsave(&lruvec->lru_lock, &flags, cc);
			locked = lruvec;
			lruvec_memcg_debug(lruvec, folio);
			/*
			 * Try get exclusive access under lock. If marked for
			 * skip, the scan is aborted unless the current context
			 * is a rescan to reach the end of the pageblock.
			 */
			if (!skip_updated && valid_page) {
				skip_updated = true;
				if (test_and_set_skip(cc, valid_page) &&
				    !cc->finish_pageblock) {
					low_pfn = end_pfn;
					goto isolate_abort;
				}
			}
			/*
			 * Check LRU folio order under the lock
			 */
			if (unlikely(skip_isolation_on_order(folio_order(folio),
							     cc->order) &&
				     !cc->alloc_contig)) {
				low_pfn += folio_nr_pages(folio) - 1;
				nr_scanned += folio_nr_pages(folio) - 1;
				folio_set_lru(folio);
				goto isolate_fail_put;
			}
		}
		/* The folio is taken off the LRU */
		if (folio_test_large(folio))
			low_pfn += folio_nr_pages(folio) - 1;
		/* Successfully isolated */
		lruvec_del_folio(lruvec, folio);
		node_stat_mod_folio(folio,
				NR_ISOLATED_ANON + folio_is_file_lru(folio),
				folio_nr_pages(folio));
isolate_success:
		list_add(&folio->lru, &cc->migratepages);
isolate_success_no_list:
		cc->nr_migratepages += folio_nr_pages(folio);
		nr_isolated += folio_nr_pages(folio);
		nr_scanned += folio_nr_pages(folio) - 1;
		/*
		 * Avoid isolating too much unless this block is being
		 * fully scanned (e.g. dirty/writeback pages, parallel allocation)
		 * or a lock is contended. For contention, isolate quickly to
		 * potentially remove one source of contention.
		 */
		if (cc->nr_migratepages >= COMPACT_CLUSTER_MAX &&
		    !cc->finish_pageblock && !cc->contended) {
			++low_pfn;
			break;
		}
		continue;
isolate_fail_put:
		/* Avoid potential deadlock in freeing page under lru_lock */
		if (locked) {
			unlock_page_lruvec_irqrestore(locked, flags);
			locked = NULL;
		}
		folio_put(folio);
isolate_fail:
		if (!skip_on_failure && ret != -ENOMEM)
			continue;
		/*
		 * We have isolated some pages, but then failed. Release them
		 * instead of migrating, as we cannot form the cc->order buddy
		 * page anyway.
		 */
		if (nr_isolated) {
			if (locked) {
				unlock_page_lruvec_irqrestore(locked, flags);
				locked = NULL;
			}
			putback_movable_pages(&cc->migratepages);
			cc->nr_migratepages = 0;
			nr_isolated = 0;
		}
		if (low_pfn < next_skip_pfn) {
			low_pfn = next_skip_pfn - 1;
			/*
			 * The check near the loop beginning would have updated
			 * next_skip_pfn too, but this is a bit simpler.
			 */
			next_skip_pfn += 1UL << cc->order;
		}
		if (ret == -ENOMEM)
			break;
	}
	/*
	 * The PageBuddy() check could have potentially brought us outside
	 * the range to be scanned.
	 */
	if (unlikely(low_pfn > end_pfn))
		low_pfn = end_pfn;
	folio = NULL;
isolate_abort:
	if (locked)
		unlock_page_lruvec_irqrestore(locked, flags);
	if (folio) {
		/* Put back a folio that was taken off the LRU but not queued. */
		folio_set_lru(folio);
		folio_put(folio);
	}
	/*
	 * Update the cached scanner pfn once the pageblock has been scanned.
	 * Pages will either be migrated in which case there is no point
	 * scanning in the near future or migration failed in which case the
	 * failure reason may persist. The block is marked for skipping if
	 * there were no pages isolated in the block or if the block is
	 * rescanned twice in a row.
	 */
	if (low_pfn == end_pfn && (!nr_isolated || cc->finish_pageblock)) {
		if (!cc->no_set_skip_hint && valid_page && !skip_updated)
			set_pageblock_skip(valid_page);
		update_cached_migrate(cc, low_pfn);
	}
	trace_mm_compaction_isolate_migratepages(start_pfn, low_pfn,
						nr_scanned, nr_isolated);
fatal_pending:
	cc->total_migrate_scanned += nr_scanned;
	if (nr_isolated)
		count_compact_events(COMPACTISOLATED, nr_isolated);
	cc->migrate_pfn = low_pfn;
	return ret;
}
/**
* isolate_migratepages_range() - isolate migrate-able pages in a PFN range
* @cc: Compaction control structure.
* @start_pfn: The first PFN to start isolating.
* @end_pfn: The one-past-last PFN.
*
* Returns -EAGAIN when contented, -EINTR in case of a signal pending, -ENOMEM
* in case we could not allocate a page, or 0.
*/
int
isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
							unsigned long end_pfn)
{
	unsigned long pfn, block_start_pfn, block_end_pfn;
	int ret = 0;
	/* Scan block by block. First and last block may be incomplete */
	pfn = start_pfn;
	block_start_pfn = pageblock_start_pfn(pfn);
	if (block_start_pfn < cc->zone->zone_start_pfn)
		block_start_pfn = cc->zone->zone_start_pfn;
	block_end_pfn = pageblock_end_pfn(pfn);
	for (; pfn < end_pfn; pfn = block_end_pfn,
				block_start_pfn = block_end_pfn,
				block_end_pfn += pageblock_nr_pages) {
		block_end_pfn = min(block_end_pfn, end_pfn);
		if (!pageblock_pfn_to_page(block_start_pfn,
					block_end_pfn, cc->zone))
			continue;
		/* ISOLATE_UNEVICTABLE: unevictable folios are taken too. */
		ret = isolate_migratepages_block(cc, pfn, block_end_pfn,
						 ISOLATE_UNEVICTABLE);
		if (ret)
			break;
		if (cc->nr_migratepages >= COMPACT_CLUSTER_MAX)
			break;
	}
	return ret;
}
#endif /* CONFIG_COMPACTION || CONFIG_CMA */
#ifdef CONFIG_COMPACTION
/* Returns true if the pageblock may be used as a migration source. */
static bool suitable_migration_source(struct compact_control *cc,
				      struct page *page)
{
	int block_mt;

	if (pageblock_skip_persistent(page))
		return false;

	/* Only async direct compaction is picky about the migratetype. */
	if (cc->mode != MIGRATE_ASYNC || !cc->direct_compaction)
		return true;

	block_mt = get_pageblock_migratetype(page);

	if (cc->migratetype == MIGRATE_MOVABLE)
		return is_migrate_movable(block_mt);

	return block_mt == cc->migratetype;
}
/* Returns true if the page is within a block suitable for migration to */
/* Returns true if the page is within a block suitable for migration to */
static bool suitable_migration_target(struct compact_control *cc,
				      struct page *page)
{
	/* If the page is a large free page, then disallow migration */
	if (PageBuddy(page)) {
		int min_order = cc->order > 0 ? cc->order : pageblock_order;

		/*
		 * Reading buddy_order without zone->lock is racy; the worst
		 * case is skipping a potentially suitable pageblock, so the
		 * value is not range-checked.
		 */
		if (buddy_order_unsafe(page) >= min_order)
			return false;
	}

	if (cc->ignore_block_suitable)
		return true;

	/* Only MIGRATE_MOVABLE or MIGRATE_CMA blocks accept migration. */
	return is_migrate_movable(get_pageblock_migratetype(page));
}
/*
 * Scan budget for the fast freelist search; halves with each consecutive
 * fast-search failure, never dropping below 1.
 */
static inline unsigned int
freelist_scan_limit(struct compact_control *cc)
{
	unsigned short max_shift = BITS_PER_LONG - 1;

	return (COMPACT_CLUSTER_MAX >> min(max_shift, cc->fast_search_fail)) + 1;
}
/*
* Test whether the free scanner has reached the same or lower pageblock than
* the migration scanner, and compaction should thus terminate.
*/
/*
 * Test whether the free scanner has reached the same or lower pageblock
 * than the migration scanner, and compaction should thus terminate.
 */
static inline bool compact_scanners_met(struct compact_control *cc)
{
	return (cc->free_pfn >> pageblock_order) <=
	       (cc->migrate_pfn >> pageblock_order);
}
/*
* Used when scanning for a suitable migration target which scans freelists
* in reverse. Reorders the list such as the unscanned pages are scanned
* first on the next iteration of the free scanner
*/
/*
 * Used when scanning for a suitable migration target which scans freelists
 * in reverse. Reorders the list such as the unscanned pages are scanned
 * first on the next iteration of the free scanner
 */
static void
move_freelist_head(struct list_head *freelist, struct page *freepage)
{
	LIST_HEAD(sublist);

	if (list_is_first(&freepage->buddy_list, freelist))
		return;

	list_cut_before(&sublist, freelist, &freepage->buddy_list);
	list_splice_tail(&sublist, freelist);
}
/*
* Similar to move_freelist_head except used by the migration scanner
* when scanning forward. It's possible for these list operations to
* move against each other if they search the free list exactly in
* lockstep.
*/
/*
 * Similar to move_freelist_head except used by the migration scanner
 * when scanning forward. It's possible for these list operations to
 * move against each other if they search the free list exactly in
 * lockstep.
 */
static void
move_freelist_tail(struct list_head *freelist, struct page *freepage)
{
	LIST_HEAD(sublist);

	if (list_is_last(&freepage->buddy_list, freelist))
		return;

	list_cut_position(&sublist, freelist, &freepage->buddy_list);
	list_splice_tail(&sublist, freelist);
}
/*
 * Opportunistically isolate more free pages from the pageblock around
 * @pfn, stopping once migration demand is covered.
 */
static void
fast_isolate_around(struct compact_control *cc, unsigned long pfn)
{
	unsigned long start_pfn, end_pfn;
	struct page *page;
	/* Do not search around if there are enough pages already */
	if (cc->nr_freepages >= cc->nr_migratepages)
		return;
	/* Minimise scanning during async compaction */
	if (cc->direct_compaction && cc->mode == MIGRATE_ASYNC)
		return;
	/* Pageblock boundaries */
	start_pfn = max(pageblock_start_pfn(pfn), cc->zone->zone_start_pfn);
	end_pfn = min(pageblock_end_pfn(pfn), zone_end_pfn(cc->zone));
	page = pageblock_pfn_to_page(start_pfn, end_pfn, cc->zone);
	if (!page)
		return;
	/* Non-strict, stride 1: best-effort scan of the whole block. */
	isolate_freepages_block(cc, &start_pfn, end_pfn, cc->freepages, 1, false);
	/* Skip this pageblock in the future as it's full or nearly full */
	if (start_pfn == end_pfn && !cc->no_set_skip_hint)
		set_pageblock_skip(page);
}
/*
 * Search orders in round-robin fashion. Returns the next lower order,
 * wrapping to cc->order - 1 below zero. Once the walk comes back around
 * to cc->search_order, advance the starting order for the next call and
 * return -1 to terminate the current search.
 */
static int next_search_order(struct compact_control *cc, int order)
{
	int next = (order == 0) ? cc->order - 1 : order - 1;

	/* Search wrapped around? */
	if (next == cc->search_order) {
		if (--cc->search_order < 0)
			cc->search_order = cc->order - 1;
		return -1;
	}

	return next;
}
/*
 * Try to quickly find free pages of cc->order or larger on the buddy
 * freelists, preferring PFNs near the top of the free scanner's search
 * space, instead of linearly scanning pageblocks. Falls back on cached
 * state if nothing suitable is found.
 */
static void fast_isolate_freepages(struct compact_control *cc)
{
	unsigned int limit = max(1U, freelist_scan_limit(cc) >> 1);
	unsigned int nr_scanned = 0, total_isolated = 0;
	unsigned long low_pfn, min_pfn, highest = 0;
	unsigned long nr_isolated = 0;
	unsigned long distance;
	struct page *page = NULL;
	bool scan_start = false;
	int order;

	/* Full compaction passes in a negative order */
	if (cc->order <= 0)
		return;

	/*
	 * If starting the scan, use a deeper search and use the highest
	 * PFN found if a suitable one is not found.
	 */
	if (cc->free_pfn >= cc->zone->compact_init_free_pfn) {
		limit = pageblock_nr_pages >> 1;
		scan_start = true;
	}

	/*
	 * Preferred point is in the top quarter of the scan space but take
	 * a pfn from the top half if the search is problematic.
	 */
	distance = (cc->free_pfn - cc->migrate_pfn);
	low_pfn = pageblock_start_pfn(cc->free_pfn - (distance >> 2));
	min_pfn = pageblock_start_pfn(cc->free_pfn - (distance >> 1));

	if (WARN_ON_ONCE(min_pfn > low_pfn))
		low_pfn = min_pfn;

	/*
	 * Search starts from the last successful isolation order or the next
	 * order to search after a previous failure
	 */
	cc->search_order = min_t(unsigned int, cc->order - 1, cc->search_order);

	for (order = cc->search_order;
	     !page && order >= 0;
	     order = next_search_order(cc, order)) {
		struct free_area *area = &cc->zone->free_area[order];
		struct list_head *freelist;
		struct page *freepage;
		unsigned long flags;
		unsigned int order_scanned = 0;
		unsigned long high_pfn = 0;

		if (!area->nr_free)
			continue;

		spin_lock_irqsave(&cc->zone->lock, flags);
		freelist = &area->free_list[MIGRATE_MOVABLE];
		/* Scan in reverse so unscanned entries are met first next time */
		list_for_each_entry_reverse(freepage, freelist, buddy_list) {
			unsigned long pfn;

			order_scanned++;
			nr_scanned++;
			pfn = page_to_pfn(freepage);

			/* Track the highest pageblock seen for the fallback below */
			if (pfn >= highest)
				highest = max(pageblock_start_pfn(pfn),
					      cc->zone->zone_start_pfn);

			/* Preferred candidate: within the top quarter */
			if (pfn >= low_pfn) {
				cc->fast_search_fail = 0;
				cc->search_order = order;
				page = freepage;
				break;
			}

			/* Acceptable candidate: within the top half */
			if (pfn >= min_pfn && pfn > high_pfn) {
				high_pfn = pfn;

				/* Shorten the scan if a candidate is found */
				limit >>= 1;
			}

			if (order_scanned >= limit)
				break;
		}

		/* Use a maximum candidate pfn if a preferred one was not found */
		if (!page && high_pfn) {
			page = pfn_to_page(high_pfn);

			/* Update freepage for the list reorder below */
			freepage = page;
		}

		/* Reorder so that a future search skips recent pages */
		move_freelist_head(freelist, freepage);

		/* Isolate the page if available */
		if (page) {
			if (__isolate_free_page(page, order)) {
				set_page_private(page, order);
				nr_isolated = 1 << order;
				nr_scanned += nr_isolated - 1;
				total_isolated += nr_isolated;
				cc->nr_freepages += nr_isolated;
				list_add_tail(&page->lru, &cc->freepages[order]);
				count_compact_events(COMPACTISOLATED, nr_isolated);
			} else {
				/* If isolation fails, abort the search */
				order = cc->search_order + 1;
				page = NULL;
			}
		}

		spin_unlock_irqrestore(&cc->zone->lock, flags);

		/* Skip fast search if enough freepages isolated */
		if (cc->nr_freepages >= cc->nr_migratepages)
			break;

		/*
		 * Smaller scan on next order so the total scan is related
		 * to freelist_scan_limit.
		 */
		if (order_scanned >= limit)
			limit = max(1U, limit >> 1);
	}

	trace_mm_compaction_fast_isolate_freepages(min_pfn, cc->free_pfn,
						   nr_scanned, total_isolated);

	if (!page) {
		cc->fast_search_fail++;
		if (scan_start) {
			/*
			 * Use the highest PFN found above min. If one was
			 * not found, be pessimistic for direct compaction
			 * and use the min mark.
			 */
			if (highest >= min_pfn) {
				page = pfn_to_page(highest);
				cc->free_pfn = highest;
			} else {
				if (cc->direct_compaction && pfn_valid(min_pfn)) {
					page = pageblock_pfn_to_page(min_pfn,
						min(pageblock_end_pfn(min_pfn),
						    zone_end_pfn(cc->zone)),
						cc->zone);
					if (page && !suitable_migration_target(cc, page))
						page = NULL;

					cc->free_pfn = min_pfn;
				}
			}
		}
	}

	/* Push the cached free PFN below the highest block we touched */
	if (highest && highest >= cc->zone->compact_cached_free_pfn) {
		highest -= pageblock_nr_pages;
		cc->zone->compact_cached_free_pfn = highest;
	}

	cc->total_free_scanned += nr_scanned;
	if (!page)
		return;

	low_pfn = page_to_pfn(page);
	fast_isolate_around(cc, low_pfn);
}
/*
 * Based on information in the current compact_control, find blocks
 * suitable for isolating free pages from and then isolate them.
 */
static void isolate_freepages(struct compact_control *cc)
{
	struct zone *zone = cc->zone;
	struct page *page;
	unsigned long block_start_pfn;	/* start of current pageblock */
	unsigned long isolate_start_pfn; /* exact pfn we start at */
	unsigned long block_end_pfn;	/* end of current pageblock */
	unsigned long low_pfn;	     /* lowest pfn scanner is able to scan */
	unsigned int stride;

	/* Try a small search of the free lists for a candidate */
	fast_isolate_freepages(cc);
	if (cc->nr_freepages)
		return;

	/*
	 * Initialise the free scanner. The starting point is where we last
	 * successfully isolated from, zone-cached value, or the end of the
	 * zone when isolating for the first time. For looping we also need
	 * this pfn aligned down to the pageblock boundary, because we do
	 * block_start_pfn -= pageblock_nr_pages in the for loop.
	 * For ending point, take care when isolating in last pageblock of a
	 * zone which ends in the middle of a pageblock.
	 * The low boundary is the end of the pageblock the migration scanner
	 * is using.
	 */
	isolate_start_pfn = cc->free_pfn;
	block_start_pfn = pageblock_start_pfn(isolate_start_pfn);
	block_end_pfn = min(block_start_pfn + pageblock_nr_pages,
						zone_end_pfn(zone));
	low_pfn = pageblock_end_pfn(cc->migrate_pfn);
	/* Async mode starts with a coarse stride and refines on success */
	stride = cc->mode == MIGRATE_ASYNC ? COMPACT_CLUSTER_MAX : 1;

	/*
	 * Isolate free pages until enough are available to migrate the
	 * pages on cc->migratepages. We stop searching if the migrate
	 * and free page scanners meet or enough free pages are isolated.
	 */
	for (; block_start_pfn >= low_pfn;
				block_end_pfn = block_start_pfn,
				block_start_pfn -= pageblock_nr_pages,
				isolate_start_pfn = block_start_pfn) {
		unsigned long nr_isolated;

		/*
		 * This can iterate a massively long zone without finding any
		 * suitable migration targets, so periodically check resched.
		 */
		if (!(block_start_pfn % (COMPACT_CLUSTER_MAX * pageblock_nr_pages)))
			cond_resched();

		page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn,
									zone);
		if (!page) {
			unsigned long next_pfn;

			/* Jump backwards over offline memory sections */
			next_pfn = skip_offline_sections_reverse(block_start_pfn);
			if (next_pfn)
				block_start_pfn = max(next_pfn, low_pfn);

			continue;
		}

		/* Check the block is suitable for migration */
		if (!suitable_migration_target(cc, page))
			continue;

		/* If isolation recently failed, do not retry */
		if (!isolation_suitable(cc, page))
			continue;

		/* Found a block suitable for isolating free pages from. */
		nr_isolated = isolate_freepages_block(cc, &isolate_start_pfn,
					block_end_pfn, cc->freepages, stride, false);

		/* Update the skip hint if the full pageblock was scanned */
		if (isolate_start_pfn == block_end_pfn)
			update_pageblock_skip(cc, page, block_start_pfn -
					      pageblock_nr_pages);

		/* Are enough freepages isolated? */
		if (cc->nr_freepages >= cc->nr_migratepages) {
			if (isolate_start_pfn >= block_end_pfn) {
				/*
				 * Restart at previous pageblock if more
				 * freepages can be isolated next time.
				 */
				isolate_start_pfn =
					block_start_pfn - pageblock_nr_pages;
			}
			break;
		} else if (isolate_start_pfn < block_end_pfn) {
			/*
			 * If isolation failed early, do not continue
			 * needlessly.
			 */
			break;
		}

		/* Adjust stride depending on isolation */
		if (nr_isolated) {
			stride = 1;
			continue;
		}
		stride = min_t(unsigned int, COMPACT_CLUSTER_MAX, stride << 1);
	}

	/*
	 * Record where the free scanner will restart next time. Either we
	 * broke from the loop and set isolate_start_pfn based on the last
	 * call to isolate_freepages_block(), or we met the migration scanner
	 * and the loop terminated due to isolate_start_pfn < low_pfn
	 */
	cc->free_pfn = isolate_start_pfn;
}
/*
 * This is a migrate-callback that "allocates" freepages by taking pages
 * from the isolated freelists in the block we are migrating to.
 */
static struct folio *compaction_alloc_noprof(struct folio *src, unsigned long data)
{
	struct compact_control *cc = (struct compact_control *)data;
	struct folio *dst;
	int order = folio_order(src);
	bool has_isolated_pages = false;
	int start_order;
	struct page *freepage;
	unsigned long size;

again:
	/* Find the smallest isolated order that can satisfy this request */
	for (start_order = order; start_order < NR_PAGE_ORDERS; start_order++)
		if (!list_empty(&cc->freepages[start_order]))
			break;

	/* no free pages in the list */
	if (start_order == NR_PAGE_ORDERS) {
		/* Only attempt to refill the freelists once per call */
		if (has_isolated_pages)
			return NULL;
		isolate_freepages(cc);
		has_isolated_pages = true;
		goto again;
	}

	freepage = list_first_entry(&cc->freepages[start_order], struct page,
				lru);
	size = 1 << start_order;
	list_del(&freepage->lru);

	/*
	 * Buddy-style split: halve the page until it matches the requested
	 * order, returning each unused half to the matching freelist.
	 */
	while (start_order > order) {
		start_order--;
		size >>= 1;

		list_add(&freepage[size].lru, &cc->freepages[start_order]);
		set_page_private(&freepage[size], start_order);
	}
	dst = (struct folio *)freepage;

	post_alloc_hook(&dst->page, order, __GFP_MOVABLE);
	set_page_refcounted(&dst->page);
	if (order)
		prep_compound_page(&dst->page, order);
	cc->nr_freepages -= 1 << order;
	cc->nr_migratepages -= 1 << order;
	return page_rmappable_folio(&dst->page);
}
/* Allocation-profiling wrapper around compaction_alloc_noprof(). */
static struct folio *compaction_alloc(struct folio *src, unsigned long data)
{
	struct folio *dst = alloc_hooks(compaction_alloc_noprof(src, data));

	return dst;
}
/*
 * This is a migrate-callback that "frees" freepages back to the isolated
 * freelist.  All pages on the freelist are from the same zone, so there is no
 * special handling needed for NUMA.
 */
static void compaction_free(struct folio *dst, unsigned long data)
{
	struct compact_control *cc = (struct compact_control *)data;
	int order = folio_order(dst);
	struct page *page = &dst->page;

	/* Only reclaim the page if we held the last reference */
	if (folio_put_testzero(dst)) {
		free_pages_prepare(page, order);
		list_add(&dst->lru, &cc->freepages[order]);
		cc->nr_freepages += 1 << order;
	}
	/* The migration target still needs pages either way */
	cc->nr_migratepages += 1 << order;
	/*
	 * someone else has referenced the page, we cannot take it back to our
	 * free list.
	 */
}
/* possible outcome of isolate_migratepages */
typedef enum {
	ISOLATE_ABORT,		/* Abort compaction now */
	ISOLATE_NONE,		/* No pages isolated, continue scanning */
	ISOLATE_SUCCESS,	/* Pages isolated, migrate */
} isolate_migrate_t;

/*
 * Allow userspace to control policy on scanning the unevictable LRU for
 * compactable pages.
 */
static int sysctl_compact_unevictable_allowed __read_mostly = CONFIG_COMPACT_UNEVICTABLE_DEFAULT;

/*
 * Tunable for proactive compaction. It determines how
 * aggressively the kernel should compact memory in the
 * background. It takes values in the range [0, 100].
 */
static unsigned int __read_mostly sysctl_compaction_proactiveness = 20;

/* Fragmentation-index threshold used by compaction_suitable() */
static int sysctl_extfrag_threshold = 500;

/* Written via sysctl to request an explicit full-memory compaction */
static int __read_mostly sysctl_compact_memory;
/*
 * Track the lowest PFN the fast migrate search has handed out, unless
 * fast searching has been disabled (marked by ULONG_MAX).
 */
static inline void
update_fast_start_pfn(struct compact_control *cc, unsigned long pfn)
{
	if (cc->fast_start_pfn == ULONG_MAX)
		return;

	/* Zero means "unset": take @pfn; otherwise keep the minimum */
	if (!cc->fast_start_pfn || pfn < cc->fast_start_pfn)
		cc->fast_start_pfn = pfn;
}
/*
 * Reset the migration scanner to the lowest PFN the fast search used,
 * consuming cc->fast_start_pfn in the process. Returns the (possibly
 * unchanged) migrate_pfn.
 */
static inline unsigned long
reinit_migrate_pfn(struct compact_control *cc)
{
	unsigned long fast_pfn = cc->fast_start_pfn;

	if (fast_pfn && fast_pfn != ULONG_MAX) {
		cc->migrate_pfn = fast_pfn;
		cc->fast_start_pfn = ULONG_MAX;
	}

	return cc->migrate_pfn;
}
/*
 * Briefly search the free lists for a migration source that already has
 * some free pages to reduce the number of pages that need migration
 * before a pageblock is free.
 */
static unsigned long fast_find_migrateblock(struct compact_control *cc)
{
	unsigned int limit = freelist_scan_limit(cc);
	unsigned int nr_scanned = 0;
	unsigned long distance;
	unsigned long pfn = cc->migrate_pfn;
	unsigned long high_pfn;
	int order;
	bool found_block = false;

	/* Skip hints are relied on to avoid repeats on the fast search */
	if (cc->ignore_skip_hint)
		return pfn;

	/*
	 * If the pageblock should be finished then do not select a different
	 * pageblock.
	 */
	if (cc->finish_pageblock)
		return pfn;

	/*
	 * If the migrate_pfn is not at the start of a zone or the start
	 * of a pageblock then assume this is a continuation of a previous
	 * scan restarted due to COMPACT_CLUSTER_MAX.
	 */
	if (pfn != cc->zone->zone_start_pfn && pfn != pageblock_start_pfn(pfn))
		return pfn;

	/*
	 * For smaller orders, just linearly scan as the number of pages
	 * to migrate should be relatively small and does not necessarily
	 * justify freeing up a large block for a small allocation.
	 */
	if (cc->order <= PAGE_ALLOC_COSTLY_ORDER)
		return pfn;

	/*
	 * Only allow kcompactd and direct requests for movable pages to
	 * quickly clear out a MOVABLE pageblock for allocation. This
	 * reduces the risk that a large movable pageblock is freed for
	 * an unmovable/reclaimable small allocation.
	 */
	if (cc->direct_compaction && cc->migratetype != MIGRATE_MOVABLE)
		return pfn;

	/*
	 * When starting the migration scanner, pick any pageblock within the
	 * first half of the search space. Otherwise try and pick a pageblock
	 * within the first eighth to reduce the chances that a migration
	 * target later becomes a source.
	 */
	distance = (cc->free_pfn - cc->migrate_pfn) >> 1;
	if (cc->migrate_pfn != cc->zone->zone_start_pfn)
		distance >>= 2;
	high_pfn = pageblock_start_pfn(cc->migrate_pfn + distance);

	/* Walk orders high to low, stopping at the scan budget */
	for (order = cc->order - 1;
	     order >= PAGE_ALLOC_COSTLY_ORDER && !found_block && nr_scanned < limit;
	     order--) {
		struct free_area *area = &cc->zone->free_area[order];
		struct list_head *freelist;
		unsigned long flags;
		struct page *freepage;

		if (!area->nr_free)
			continue;

		spin_lock_irqsave(&cc->zone->lock, flags);
		freelist = &area->free_list[MIGRATE_MOVABLE];
		list_for_each_entry(freepage, freelist, buddy_list) {
			unsigned long free_pfn;

			/* Scan budget exhausted: rotate list and stop */
			if (nr_scanned++ >= limit) {
				move_freelist_tail(freelist, freepage);
				break;
			}

			free_pfn = page_to_pfn(freepage);
			if (free_pfn < high_pfn) {
				/*
				 * Avoid if skipped recently. Ideally it would
				 * move to the tail but even safe iteration of
				 * the list assumes an entry is deleted, not
				 * reordered.
				 */
				if (get_pageblock_skip(freepage))
					continue;

				/* Reorder so that a future search skips recent pages */
				move_freelist_tail(freelist, freepage);

				update_fast_start_pfn(cc, free_pfn);
				pfn = pageblock_start_pfn(free_pfn);
				if (pfn < cc->zone->zone_start_pfn)
					pfn = cc->zone->zone_start_pfn;
				cc->fast_search_fail = 0;
				found_block = true;
				break;
			}
		}
		spin_unlock_irqrestore(&cc->zone->lock, flags);
	}

	cc->total_migrate_scanned += nr_scanned;

	/*
	 * If fast scanning failed then use a cached entry for a page block
	 * that had free pages as the basis for starting a linear scan.
	 */
	if (!found_block) {
		cc->fast_search_fail++;
		pfn = reinit_migrate_pfn(cc);
	}
	return pfn;
}
/*
 * Isolate all pages that can be migrated from the first suitable block,
 * starting at the block pointed to by the migrate scanner pfn within
 * compact_control.
 */
static isolate_migrate_t isolate_migratepages(struct compact_control *cc)
{
	unsigned long block_start_pfn;
	unsigned long block_end_pfn;
	unsigned long low_pfn;
	struct page *page;
	/* Unevictable and async-migration policy folded into the isolate mode */
	const isolate_mode_t isolate_mode =
		(sysctl_compact_unevictable_allowed ? ISOLATE_UNEVICTABLE : 0) |
		(cc->mode != MIGRATE_SYNC ? ISOLATE_ASYNC_MIGRATE : 0);
	bool fast_find_block;

	/*
	 * Start at where we last stopped, or beginning of the zone as
	 * initialized by compact_zone(). The first failure will use
	 * the lowest PFN as the starting point for linear scanning.
	 */
	low_pfn = fast_find_migrateblock(cc);
	block_start_pfn = pageblock_start_pfn(low_pfn);
	if (block_start_pfn < cc->zone->zone_start_pfn)
		block_start_pfn = cc->zone->zone_start_pfn;

	/*
	 * fast_find_migrateblock() has already ensured the pageblock is not
	 * set with a skipped flag, so to avoid the isolation_suitable check
	 * below again, check whether the fast search was successful.
	 */
	fast_find_block = low_pfn != cc->migrate_pfn && !cc->fast_search_fail;

	/* Only scan within a pageblock boundary */
	block_end_pfn = pageblock_end_pfn(low_pfn);

	/*
	 * Iterate over whole pageblocks until we find the first suitable.
	 * Do not cross the free scanner.
	 */
	for (; block_end_pfn <= cc->free_pfn;
			fast_find_block = false,
			cc->migrate_pfn = low_pfn = block_end_pfn,
			block_start_pfn = block_end_pfn,
			block_end_pfn += pageblock_nr_pages) {

		/*
		 * This can potentially iterate a massively long zone with
		 * many pageblocks unsuitable, so periodically check if we
		 * need to schedule.
		 */
		if (!(low_pfn % (COMPACT_CLUSTER_MAX * pageblock_nr_pages)))
			cond_resched();

		page = pageblock_pfn_to_page(block_start_pfn,
						block_end_pfn, cc->zone);
		if (!page) {
			unsigned long next_pfn;

			/* Jump forward over offline memory sections */
			next_pfn = skip_offline_sections(block_start_pfn);
			if (next_pfn)
				block_end_pfn = min(next_pfn, cc->free_pfn);
			continue;
		}

		/*
		 * If isolation recently failed, do not retry. Only check the
		 * pageblock once. COMPACT_CLUSTER_MAX causes a pageblock
		 * to be visited multiple times. Assume skip was checked
		 * before making it "skip" so other compaction instances do
		 * not scan the same block.
		 */
		if ((pageblock_aligned(low_pfn) ||
		     low_pfn == cc->zone->zone_start_pfn) &&
		    !fast_find_block && !isolation_suitable(cc, page))
			continue;

		/*
		 * For async direct compaction, only scan the pageblocks of the
		 * same migratetype without huge pages. Async direct compaction
		 * is optimistic to see if the minimum amount of work satisfies
		 * the allocation. The cached PFN is updated as it's possible
		 * that all remaining blocks between source and target are
		 * unsuitable and the compaction scanners fail to meet.
		 */
		if (!suitable_migration_source(cc, page)) {
			update_cached_migrate(cc, block_end_pfn);
			continue;
		}

		/* Perform the isolation */
		if (isolate_migratepages_block(cc, low_pfn, block_end_pfn,
						isolate_mode))
			return ISOLATE_ABORT;

		/*
		 * Either we isolated something and proceed with migration. Or
		 * we failed and compact_zone should decide if we should
		 * continue or not.
		 */
		break;
	}

	return cc->nr_migratepages ? ISOLATE_SUCCESS : ISOLATE_NONE;
}
/*
 * Determine whether kswapd is (or recently was!) running on this node.
 *
 * pgdat_kswapd_lock() pins pgdat->kswapd, so a concurrent kswapd_stop() can't
 * zero it.
 */
static bool kswapd_is_running(pg_data_t *pgdat)
{
	bool ret;

	pgdat_kswapd_lock(pgdat);
	ret = pgdat->kswapd != NULL && task_is_running(pgdat->kswapd);
	pgdat_kswapd_unlock(pgdat);

	return ret;
}
/*
 * A zone's fragmentation score is the external fragmentation wrt to the
 * COMPACTION_HPAGE_ORDER. It returns a value in the range [0, 100].
 */
static unsigned int fragmentation_score_zone(struct zone *zone)
{
	return extfrag_for_order(zone, COMPACTION_HPAGE_ORDER);
}
/*
 * A weighted zone's fragmentation score is the external fragmentation
 * wrt to the COMPACTION_HPAGE_ORDER scaled by the zone's size. It
 * returns a value in the range [0, 100].
 *
 * The scaling factor ensures that proactive compaction focuses on larger
 * zones like ZONE_NORMAL, rather than smaller, specialized zones like
 * ZONE_DMA32. For smaller zones, the score value remains close to zero,
 * and thus never exceeds the high threshold for proactive compaction.
 */
static unsigned int fragmentation_score_zone_weighted(struct zone *zone)
{
	unsigned long score;

	/* Weight the score by the zone's share of the node's memory */
	score = zone->present_pages * fragmentation_score_zone(zone);
	/* +1 guards against division by zero on an empty node */
	return div64_ul(score, zone->zone_pgdat->node_present_pages + 1);
}
/*
 * The per-node proactive (background) compaction process is started by its
 * corresponding kcompactd thread when the node's fragmentation score
 * exceeds the high threshold. The compaction process remains active till
 * the node's score falls below the low threshold, or one of the back-off
 * conditions is met.
 *
 * The node score is the sum of the weighted scores of its populated zones.
 */
static unsigned int fragmentation_score_node(pg_data_t *pgdat)
{
	unsigned int total = 0;
	int idx;

	for (idx = 0; idx < MAX_NR_ZONES; idx++) {
		struct zone *z = &pgdat->node_zones[idx];

		if (populated_zone(z))
			total += fragmentation_score_zone_weighted(z);
	}

	return total;
}
/*
 * Low/high fragmentation-score watermarks derived from the proactiveness
 * sysctl. The high mark sits a small leeway above the low mark, capped
 * at 100.
 */
static unsigned int fragmentation_score_wmark(bool low)
{
	unsigned int wmark_low = 100U - sysctl_compaction_proactiveness;
	unsigned int leeway = min(10U, wmark_low / 2);

	if (low)
		return wmark_low;

	return min(wmark_low + leeway, 100U);
}
/*
 * Decide whether proactive compaction should run on this node: it must be
 * enabled, kswapd must be idle, and the node score must exceed the high
 * watermark.
 */
static bool should_proactive_compact_node(pg_data_t *pgdat)
{
	if (!sysctl_compaction_proactiveness)
		return false;
	if (kswapd_is_running(pgdat))
		return false;

	return fragmentation_score_node(pgdat) >
			fragmentation_score_wmark(false);
}
/*
 * Decide whether the current compaction run may stop: scanners met,
 * proactive target reached, defrag_mode watermark met, or a suitable
 * free page of cc->order now exists.
 */
static enum compact_result __compact_finished(struct compact_control *cc)
{
	unsigned int order;
	const int migratetype = cc->migratetype;
	int ret;

	/* Compaction run completes if the migrate and free scanner meet */
	if (compact_scanners_met(cc)) {
		/* Let the next compaction start anew. */
		reset_cached_positions(cc->zone);

		/*
		 * Mark that the PG_migrate_skip information should be cleared
		 * by kswapd when it goes to sleep. kcompactd does not set the
		 * flag itself as the decision to be clear should be directly
		 * based on an allocation request.
		 */
		if (cc->direct_compaction)
			cc->zone->compact_blockskip_flush = true;

		if (cc->whole_zone)
			return COMPACT_COMPLETE;
		else
			return COMPACT_PARTIAL_SKIPPED;
	}

	if (cc->proactive_compaction) {
		int score, wmark_low;
		pg_data_t *pgdat;

		pgdat = cc->zone->zone_pgdat;
		/* Back off if reclaim is active on this node */
		if (kswapd_is_running(pgdat))
			return COMPACT_PARTIAL_SKIPPED;

		/* Continue until the score drops below the low watermark */
		score = fragmentation_score_zone(cc->zone);
		wmark_low = fragmentation_score_wmark(true);

		if (score > wmark_low)
			ret = COMPACT_CONTINUE;
		else
			ret = COMPACT_SUCCESS;

		goto out;
	}

	if (is_via_compact_memory(cc->order))
		return COMPACT_CONTINUE;

	/*
	 * Always finish scanning a pageblock to reduce the possibility of
	 * fallbacks in the future. This is particularly important when
	 * migration source is unmovable/reclaimable but it's not worth
	 * special casing.
	 */
	if (!pageblock_aligned(cc->migrate_pfn))
		return COMPACT_CONTINUE;

	/*
	 * When defrag_mode is enabled, make kcompactd target
	 * watermarks in whole pageblocks. Because they can be stolen
	 * without polluting, no further fallback checks are needed.
	 */
	if (defrag_mode && !cc->direct_compaction) {
		if (__zone_watermark_ok(cc->zone, cc->order,
					high_wmark_pages(cc->zone),
					cc->highest_zoneidx, cc->alloc_flags,
					zone_page_state(cc->zone,
							NR_FREE_PAGES_BLOCKS)))
			return COMPACT_SUCCESS;

		return COMPACT_CONTINUE;
	}

	/* Direct compactor: Is a suitable page free? */
	ret = COMPACT_NO_SUITABLE_PAGE;
	for (order = cc->order; order < NR_PAGE_ORDERS; order++) {
		struct free_area *area = &cc->zone->free_area[order];

		/* Job done if page is free of the right migratetype */
		if (!free_area_empty(area, migratetype))
			return COMPACT_SUCCESS;

#ifdef CONFIG_CMA
		/* MIGRATE_MOVABLE can fallback on MIGRATE_CMA */
		if (migratetype == MIGRATE_MOVABLE &&
			!free_area_empty(area, MIGRATE_CMA))
			return COMPACT_SUCCESS;
#endif
		/*
		 * Job done if allocation would steal freepages from
		 * other migratetype buddy lists.
		 */
		if (find_suitable_fallback(area, order, migratetype, true) >= 0)
			/*
			 * Movable pages are OK in any pageblock. If we are
			 * stealing for a non-movable allocation, make sure
			 * we finish compacting the current pageblock first
			 * (which is assured by the above migrate_pfn align
			 * check) so it is as free as possible and we won't
			 * have to steal another one soon.
			 */
			return COMPACT_SUCCESS;
	}

out:
	if (cc->contended || fatal_signal_pending(current))
		ret = COMPACT_CONTENDED;

	return ret;
}
/*
 * Tracing wrapper for __compact_finished(). Folds the internal
 * COMPACT_NO_SUITABLE_PAGE result back into COMPACT_CONTINUE for callers.
 */
static enum compact_result compact_finished(struct compact_control *cc)
{
	int ret = __compact_finished(cc);

	trace_mm_compaction_finished(cc->zone, cc->order, ret);

	return (ret == COMPACT_NO_SUITABLE_PAGE) ? COMPACT_CONTINUE : ret;
}
/*
 * Core watermark check used to decide if a zone has enough order-0 free
 * pages for compaction to make progress, given @free_pages as the free
 * count to evaluate against.
 */
static bool __compaction_suitable(struct zone *zone, int order,
				  unsigned long watermark, int highest_zoneidx,
				  unsigned long free_pages)
{
	/*
	 * Watermarks for order-0 must be met for compaction to be able to
	 * isolate free pages for migration targets. This means that the
	 * watermark have to match, or be more pessimistic than the check in
	 * __isolate_free_page().
	 *
	 * For costly orders, we require a higher watermark for compaction to
	 * proceed to increase its chances.
	 *
	 * We use the direct compactor's highest_zoneidx to skip over zones
	 * where lowmem reserves would prevent allocation even if compaction
	 * succeeds.
	 *
	 * ALLOC_CMA is used, as pages in CMA pageblocks are considered
	 * suitable migration targets.
	 */
	watermark += compact_gap(order);
	if (order > PAGE_ALLOC_COSTLY_ORDER)
		watermark += low_wmark_pages(zone) - min_wmark_pages(zone);
	return __zone_watermark_ok(zone, 0, watermark, highest_zoneidx,
				   ALLOC_CMA, free_pages);
}
/*
 * compaction_suitable: Is this suitable to run compaction on this zone now?
 */
bool compaction_suitable(struct zone *zone, int order, unsigned long watermark,
			 int highest_zoneidx)
{
	enum compact_result compact_result;
	bool suitable;

	/* Evaluate against the zone's current free-page count */
	suitable = __compaction_suitable(zone, order, watermark, highest_zoneidx,
					 zone_page_state(zone, NR_FREE_PAGES));
	/*
	 * fragmentation index determines if allocation failures are due to
	 * low memory or external fragmentation
	 *
	 * index of -1000 would imply allocations might succeed depending on
	 * watermarks, but we already failed the high-order watermark check
	 * index towards 0 implies failure is due to lack of memory
	 * index towards 1000 implies failure is due to fragmentation
	 *
	 * Only compact if a failure would be due to fragmentation. Also
	 * ignore fragindex for non-costly orders where the alternative to
	 * a successful reclaim/compaction is OOM. Fragindex and the
	 * vm.extfrag_threshold sysctl is meant as a heuristic to prevent
	 * excessive compaction for costly orders, but it should not be at the
	 * expense of system stability.
	 */
	if (suitable) {
		compact_result = COMPACT_CONTINUE;
		if (order > PAGE_ALLOC_COSTLY_ORDER) {
			int fragindex = fragmentation_index(zone, order);

			/* Low fragindex: failure is a memory shortage, not fragmentation */
			if (fragindex >= 0 &&
			    fragindex <= sysctl_extfrag_threshold) {
				suitable = false;
				compact_result = COMPACT_NOT_SUITABLE_ZONE;
			}
		}
	} else {
		compact_result = COMPACT_SKIPPED;
	}

	trace_mm_compaction_suitable(zone, order, compact_result);

	return suitable;
}
/*
 * Used by direct reclaimers: returns true if at least one zone in the
 * allocation context could pass __compaction_suitable() assuming the
 * reclaimable pages became free.
 */
bool compaction_zonelist_suitable(struct alloc_context *ac, int order,
		int alloc_flags)
{
	struct zone *zone;
	struct zoneref *z;

	/*
	 * Make sure at least one zone would pass __compaction_suitable if we continue
	 * retrying the reclaim.
	 */
	for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
				ac->highest_zoneidx, ac->nodemask) {
		unsigned long available;

		/*
		 * Do not consider all the reclaimable memory because we do not
		 * want to trash just for a single high order allocation which
		 * is even not guaranteed to appear even if __compaction_suitable
		 * is happy about the watermark check.
		 */
		available = zone_reclaimable_pages(zone) / order;
		available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
		if (__compaction_suitable(zone, order, min_wmark_pages(zone),
					  ac->highest_zoneidx, available))
			return true;
	}

	return false;
}
/*
 * Should we do compaction for target allocation order.
 * Return COMPACT_SUCCESS if allocation for target order can be already
 * satisfied
 * Return COMPACT_SKIPPED if compaction for target order is likely to fail
 * Return COMPACT_CONTINUE if compaction for target order should be ran
 */
static enum compact_result
compaction_suit_allocation_order(struct zone *zone, unsigned int order,
				 int highest_zoneidx, unsigned int alloc_flags,
				 bool async, bool kcompactd)
{
	unsigned long free_pages;
	unsigned long watermark;

	/* kcompactd in defrag_mode targets whole free pageblocks instead */
	if (kcompactd && defrag_mode)
		free_pages = zone_page_state(zone, NR_FREE_PAGES_BLOCKS);
	else
		free_pages = zone_page_state(zone, NR_FREE_PAGES);

	watermark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
	/* Allocation already satisfiable: nothing to compact for */
	if (__zone_watermark_ok(zone, order, watermark, highest_zoneidx,
				alloc_flags, free_pages))
		return COMPACT_SUCCESS;

	/*
	 * For unmovable allocations (without ALLOC_CMA), check if there is enough
	 * free memory in the non-CMA pageblocks. Otherwise compaction could form
	 * the high-order page in CMA pageblocks, which would not help the
	 * allocation to succeed. However, limit the check to costly order async
	 * compaction (such as opportunistic THP attempts) because there is the
	 * possibility that compaction would migrate pages from non-CMA to CMA
	 * pageblock.
	 */
	if (order > PAGE_ALLOC_COSTLY_ORDER && async &&
	    !(alloc_flags & ALLOC_CMA)) {
		if (!__zone_watermark_ok(zone, 0, watermark + compact_gap(order),
					 highest_zoneidx, 0,
					 zone_page_state(zone, NR_FREE_PAGES)))
			return COMPACT_SKIPPED;
	}

	if (!compaction_suitable(zone, order, watermark, highest_zoneidx))
		return COMPACT_SKIPPED;

	return COMPACT_CONTINUE;
}
static enum compact_result
compact_zone(struct compact_control *cc, struct capture_control *capc)
{
enum compact_result ret;
unsigned long start_pfn = cc->zone->zone_start_pfn;
unsigned long end_pfn = zone_end_pfn(cc->zone);
unsigned long last_migrated_pfn;
const bool sync = cc->mode != MIGRATE_ASYNC;
bool update_cached;
unsigned int nr_succeeded = 0, nr_migratepages;
int order;
/*
* These counters track activities during zone compaction. Initialize
* them before compacting a new zone.
*/
cc->total_migrate_scanned = 0;
cc->total_free_scanned = 0;
cc->nr_migratepages = 0;
cc->nr_freepages = 0;
for (order = 0; order < NR_PAGE_ORDERS; order++)
INIT_LIST_HEAD(&cc->freepages[order]);
INIT_LIST_HEAD(&cc->migratepages);
cc->migratetype = gfp_migratetype(cc->gfp_mask);
if (!is_via_compact_memory(cc->order)) {
ret = compaction_suit_allocation_order(cc->zone, cc->order,
cc->highest_zoneidx,
cc->alloc_flags,
cc->mode == MIGRATE_ASYNC,
!cc->direct_compaction);
if (ret != COMPACT_CONTINUE)
return ret;
}
/*
* Clear pageblock skip if there were failures recently and compaction
* is about to be retried after being deferred.
*/
if (compaction_restarting(cc->zone, cc->order))
__reset_isolation_suitable(cc->zone);
/*
* Setup to move all movable pages to the end of the zone. Used cached
* information on where the scanners should start (unless we explicitly
* want to compact the whole zone), but check that it is initialised
* by ensuring the values are within zone boundaries.
*/
cc->fast_start_pfn = 0;
if (cc->whole_zone) {
cc->migrate_pfn = start_pfn;
cc->free_pfn = pageblock_start_pfn(end_pfn - 1);
} else {
cc->migrate_pfn = cc->zone->compact_cached_migrate_pfn[sync];
cc->free_pfn = cc->zone->compact_cached_free_pfn;
if (cc->free_pfn < start_pfn || cc->free_pfn >= end_pfn) {
cc->free_pfn = pageblock_start_pfn(end_pfn - 1);
cc->zone->compact_cached_free_pfn = cc->free_pfn;
}
if (cc->migrate_pfn < start_pfn || cc->migrate_pfn >= end_pfn) {
cc->migrate_pfn = start_pfn;
cc->zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn;
cc->zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn;
}
if (cc->migrate_pfn <= cc->zone->compact_init_migrate_pfn)
cc->whole_zone = true;
}
last_migrated_pfn = 0;
/*
* Migrate has separate cached PFNs for ASYNC and SYNC* migration on
* the basis that some migrations will fail in ASYNC mode. However,
* if the cached PFNs match and pageblocks are skipped due to having
* no isolation candidates, then the sync state does not matter.
* Until a pageblock with isolation candidates is found, keep the
* cached PFNs in sync to avoid revisiting the same blocks.
*/
update_cached = !sync &&
cc->zone->compact_cached_migrate_pfn[0] == cc->zone->compact_cached_migrate_pfn[1];
trace_mm_compaction_begin(cc, start_pfn, end_pfn, sync);
/* lru_add_drain_all could be expensive with involving other CPUs */
lru_add_drain();
while ((ret = compact_finished(cc)) == COMPACT_CONTINUE) {
int err;
unsigned long iteration_start_pfn = cc->migrate_pfn;
/*
* Avoid multiple rescans of the same pageblock which can
* happen if a page cannot be isolated (dirty/writeback in
* async mode) or if the migrated pages are being allocated
* before the pageblock is cleared. The first rescan will
* capture the entire pageblock for migration. If it fails,
* it'll be marked skip and scanning will proceed as normal.
*/
cc->finish_pageblock = false;
if (pageblock_start_pfn(last_migrated_pfn) ==
pageblock_start_pfn(iteration_start_pfn)) {
cc->finish_pageblock = true;
}
rescan:
switch (isolate_migratepages(cc)) {
case ISOLATE_ABORT:
ret = COMPACT_CONTENDED;
putback_movable_pages(&cc->migratepages);
cc->nr_migratepages = 0;
goto out;
case ISOLATE_NONE:
if (update_cached) {
cc->zone->compact_cached_migrate_pfn[1] =
cc->zone->compact_cached_migrate_pfn[0];
}
/*
* We haven't isolated and migrated anything, but
* there might still be unflushed migrations from
* previous cc->order aligned block.
*/
goto check_drain;
case ISOLATE_SUCCESS:
update_cached = false;
last_migrated_pfn = max(cc->zone->zone_start_pfn,
pageblock_start_pfn(cc->migrate_pfn - 1));
}
/*
* Record the number of pages to migrate since the
* compaction_alloc/free() will update cc->nr_migratepages
* properly.
*/
nr_migratepages = cc->nr_migratepages;
err = migrate_pages(&cc->migratepages, compaction_alloc,
compaction_free, (unsigned long)cc, cc->mode,
MR_COMPACTION, &nr_succeeded);
trace_mm_compaction_migratepages(nr_migratepages, nr_succeeded);
/* All pages were either migrated or will be released */
cc->nr_migratepages = 0;
if (err) {
putback_movable_pages(&cc->migratepages);
/*
* migrate_pages() may return -ENOMEM when scanners meet
* and we want compact_finished() to detect it
*/
if (err == -ENOMEM && !compact_scanners_met(cc)) {
ret = COMPACT_CONTENDED;
goto out;
}
/*
* If an ASYNC or SYNC_LIGHT fails to migrate a page
* within the pageblock_order-aligned block and
* fast_find_migrateblock may be used then scan the
* remainder of the pageblock. This will mark the
* pageblock "skip" to avoid rescanning in the near
* future. This will isolate more pages than necessary
* for the request but avoid loops due to
* fast_find_migrateblock revisiting blocks that were
* recently partially scanned.
*/
if (!pageblock_aligned(cc->migrate_pfn) &&
!cc->ignore_skip_hint && !cc->finish_pageblock &&
(cc->mode < MIGRATE_SYNC)) {
cc->finish_pageblock = true;
/*
* Draining pcplists does not help THP if
* any page failed to migrate. Even after
* drain, the pageblock will not be free.
*/
if (cc->order == COMPACTION_HPAGE_ORDER)
last_migrated_pfn = 0;
goto rescan;
}
}
/* Stop if a page has been captured */
if (capc && capc->page) {
ret = COMPACT_SUCCESS;
break;
}
check_drain:
/*
* Has the migration scanner moved away from the previous
* cc->order aligned block where we migrated from? If yes,
* flush the pages that were freed, so that they can merge and
* compact_finished() can detect immediately if allocation
* would succeed.
*/
if (cc->order > 0 && last_migrated_pfn) {
unsigned long current_block_start =
block_start_pfn(cc->migrate_pfn, cc->order);
if (last_migrated_pfn < current_block_start) {
lru_add_drain_cpu_zone(cc->zone);
/* No more flushing until we migrate again */
last_migrated_pfn = 0;
}
}
}
out:
/*
* Release free pages and update where the free scanner should restart,
* so we don't leave any returned pages behind in the next attempt.
*/
if (cc->nr_freepages > 0) {
unsigned long free_pfn = release_free_list(cc->freepages);
cc->nr_freepages = 0;
VM_BUG_ON(free_pfn == 0);
/* The cached pfn is always the first in a pageblock */
free_pfn = pageblock_start_pfn(free_pfn);
/*
* Only go back, not forward. The cached pfn might have been
* already reset to zone end in compact_finished()
*/
if (free_pfn > cc->zone->compact_cached_free_pfn)
cc->zone->compact_cached_free_pfn = free_pfn;
}
count_compact_events(COMPACTMIGRATE_SCANNED, cc->total_migrate_scanned);
count_compact_events(COMPACTFREE_SCANNED, cc->total_free_scanned);
trace_mm_compaction_end(cc, start_pfn, end_pfn, sync, ret);
VM_BUG_ON(!list_empty(&cc->migratepages));
return ret;
}
/*
 * compact_zone_order() - run compaction of one zone on behalf of a direct
 * (allocation-triggered) compaction request for the given order.
 *
 * Builds a compact_control from the allocation parameters, publishes a
 * capture_control so a page freed while we run can be handed to us
 * directly, runs compact_zone(), and reports whether a page was captured.
 */
static enum compact_result compact_zone_order(struct zone *zone, int order,
		gfp_t gfp_mask, enum compact_priority prio,
		unsigned int alloc_flags, int highest_zoneidx,
		struct page **capture)
{
	enum compact_result ret;
	struct compact_control cc = {
		.order = order,
		.search_order = order,
		.gfp_mask = gfp_mask,
		.zone = zone,
		/* Lowest priority uses cheap async migration */
		.mode = (prio == COMPACT_PRIO_ASYNC) ?
					MIGRATE_ASYNC : MIGRATE_SYNC_LIGHT,
		.alloc_flags = alloc_flags,
		.highest_zoneidx = highest_zoneidx,
		.direct_compaction = true,
		/* Maximum effort: scan the whole zone, ignore skip hints */
		.whole_zone = (prio == MIN_COMPACT_PRIORITY),
		.ignore_skip_hint = (prio == MIN_COMPACT_PRIORITY),
		.ignore_block_suitable = (prio == MIN_COMPACT_PRIORITY)
	};
	struct capture_control capc = {
		.cc = &cc,
		.page = NULL,
	};

	/*
	 * Make sure the structs are really initialized before we expose the
	 * capture control, in case we are interrupted and the interrupt handler
	 * frees a page.
	 */
	barrier();
	WRITE_ONCE(current->capture_control, &capc);

	ret = compact_zone(&cc, &capc);

	/*
	 * Make sure we hide capture control first before we read the captured
	 * page pointer, otherwise an interrupt could free and capture a page
	 * and we would leak it.
	 */
	WRITE_ONCE(current->capture_control, NULL);
	*capture = READ_ONCE(capc.page);
	/*
	 * Technically, it is also possible that compaction is skipped but
	 * the page is still captured out of luck(IRQ came and freed the page).
	 * Returning COMPACT_SUCCESS in such cases helps in properly accounting
	 * the COMPACT[STALL|FAIL] when compaction is skipped.
	 */
	if (*capture)
		ret = COMPACT_SUCCESS;

	return ret;
}
/**
 * try_to_compact_pages - Direct compact to satisfy a high-order allocation
 * @gfp_mask: The GFP mask of the current allocation
 * @order: The order of the current allocation
 * @alloc_flags: The allocation flags of the current allocation
 * @ac: The context of current allocation
 * @prio: Determines how hard direct compaction should try to succeed
 * @capture: Pointer to free page created by compaction will be stored here
 *
 * This is the main entry point for direct page compaction.
 *
 * Return: the highest compact_result seen across the candidate zones.
 */
enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
		unsigned int alloc_flags, const struct alloc_context *ac,
		enum compact_priority prio, struct page **capture)
{
	struct zoneref *z;
	struct zone *zone;
	enum compact_result rc = COMPACT_SKIPPED;

	if (!gfp_compaction_allowed(gfp_mask))
		return COMPACT_SKIPPED;

	trace_mm_compaction_try_to_compact_pages(order, gfp_mask, prio);

	/* Compact each zone in the list */
	for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
					ac->highest_zoneidx, ac->nodemask) {
		enum compact_result status;

		/* Skip zones this allocation's cpuset disallows */
		if (cpusets_enabled() &&
			(alloc_flags & ALLOC_CPUSET) &&
			!__cpuset_zone_allowed(zone, gfp_mask))
			continue;

		/*
		 * Honour deferral unless this is the highest-effort
		 * (MIN_COMPACT_PRIORITY) attempt.
		 */
		if (prio > MIN_COMPACT_PRIORITY
					&& compaction_deferred(zone, order)) {
			rc = max_t(enum compact_result, COMPACT_DEFERRED, rc);
			continue;
		}

		status = compact_zone_order(zone, order, gfp_mask, prio,
				alloc_flags, ac->highest_zoneidx, capture);
		rc = max(status, rc);

		/* The allocation should succeed, stop compacting */
		if (status == COMPACT_SUCCESS) {
			/*
			 * We think the allocation will succeed in this zone,
			 * but it is not certain, hence the false. The caller
			 * will repeat this with true if allocation indeed
			 * succeeds in this zone.
			 */
			compaction_defer_reset(zone, order, false);

			break;
		}

		if (prio != COMPACT_PRIO_ASYNC && (status == COMPACT_COMPLETE ||
					status == COMPACT_PARTIAL_SKIPPED))
			/*
			 * We think that allocation won't succeed in this zone
			 * so we defer compaction there. If it ends up
			 * succeeding after all, it will be reset.
			 */
			defer_compaction(zone, order);

		/*
		 * We might have stopped compacting due to need_resched() in
		 * async compaction, or due to a fatal signal detected. In that
		 * case do not try further zones
		 */
		if ((prio == COMPACT_PRIO_ASYNC && need_resched())
					|| fatal_signal_pending(current))
			break;
	}

	return rc;
}
/*
 * compact_node() - compact all zones within a node
 * @pgdat: The node page data
 * @proactive: Whether the compaction is proactive
 *
 * For proactive compaction, compact till each zone's fragmentation score
 * reaches within proactive compaction thresholds (as determined by the
 * proactiveness tunable), it is possible that the function returns before
 * reaching score targets due to various back-off conditions, such as,
 * contention on per-node or per-zone locks.
 *
 * Return: 0 on completion, -EINTR if a fatal signal arrived mid-node.
 */
static int compact_node(pg_data_t *pgdat, bool proactive)
{
	int zoneid;
	struct zone *zone;
	/*
	 * A single compact_control is reused for every zone; cc.zone is
	 * re-pointed per iteration below.
	 */
	struct compact_control cc = {
		.order = -1,	/* -1: not targeting a specific allocation order */
		.mode = proactive ? MIGRATE_SYNC_LIGHT : MIGRATE_SYNC,
		.ignore_skip_hint = true,
		.whole_zone = true,
		.gfp_mask = GFP_KERNEL,
		.proactive_compaction = proactive,
	};

	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
		zone = &pgdat->node_zones[zoneid];
		if (!populated_zone(zone))
			continue;

		/* Bail out if a fatal signal is pending */
		if (fatal_signal_pending(current))
			return -EINTR;

		cc.zone = zone;

		compact_zone(&cc, NULL);

		/* Proactive runs feed the kcompactd scan-counter events */
		if (proactive) {
			count_compact_events(KCOMPACTD_MIGRATE_SCANNED,
					     cc.total_migrate_scanned);
			count_compact_events(KCOMPACTD_FREE_SCANNED,
					     cc.total_free_scanned);
		}
	}

	return 0;
}
/* Compact all zones of all nodes in the system */
static int compact_nodes(void)
{
	int nid;
	int err = 0;

	/* Flush pending updates to the LRU lists */
	lru_add_drain_all();

	/* Stop at the first node that reports an error (e.g. -EINTR) */
	for_each_online_node(nid) {
		err = compact_node(NODE_DATA(nid), false);
		if (err)
			break;
	}

	return err;
}
/*
 * Handler for /proc/sys/vm/compaction_proactiveness. After a write of a
 * non-zero proactiveness, trigger a proactive run on every online node so
 * the new setting takes effect immediately rather than at the next timeout.
 */
static int compaction_proactiveness_sysctl_handler(const struct ctl_table *table, int write,
		void *buffer, size_t *length, loff_t *ppos)
{
	int rc, nid;

	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
	if (rc)
		return rc;

	if (write && sysctl_compaction_proactiveness) {
		for_each_online_node(nid) {
			pg_data_t *pgdat = NODE_DATA(nid);

			/* A proactive run is already pending on this node */
			if (pgdat->proactive_compact_trigger)
				continue;

			pgdat->proactive_compact_trigger = true;
			trace_mm_compaction_wakeup_kcompactd(pgdat->node_id, -1,
							     pgdat->nr_zones - 1);
			wake_up_interruptible(&pgdat->kcompactd_wait);
		}
	}

	return 0;
}
/*
 * This is the entry point for compacting all nodes via
 * /proc/sys/vm/compact_memory
 */
static int sysctl_compaction_handler(const struct ctl_table *table, int write,
			void *buffer, size_t *length, loff_t *ppos)
{
	int ret;

	ret = proc_dointvec(table, write, buffer, length, ppos);
	if (ret)
		return ret;

	/* Only the value 1 is accepted; anything else is rejected */
	if (sysctl_compact_memory != 1)
		return -EINVAL;

	if (write)
		ret = compact_nodes();

	return ret;
}
#if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
/*
 * Sysfs write handler for /sys/devices/system/node/nodeX/compact:
 * any write compacts all zones of that node.
 */
static ssize_t compact_store(struct device *dev,
			struct device_attribute *attr,
			const char *buf, size_t count)
{
	const int nid = dev->id;

	/* Ignore writes aimed at invalid or offline nodes */
	if (nid < 0 || nid >= nr_node_ids || !node_online(nid))
		return count;

	/* Flush pending updates to the LRU lists */
	lru_add_drain_all();
	compact_node(NODE_DATA(nid), false);

	return count;
}
static DEVICE_ATTR_WO(compact);
/* Create the per-node "compact" sysfs attribute for @node. */
int compaction_register_node(struct node *node)
{
	return device_create_file(&node->dev, &dev_attr_compact);
}
/* Remove the per-node "compact" sysfs attribute for @node. */
void compaction_unregister_node(struct node *node)
{
	device_remove_file(&node->dev, &dev_attr_compact);
}
#endif /* CONFIG_SYSFS && CONFIG_NUMA */
/*
 * Wait condition for the kcompactd thread: there is work when a wakeup
 * recorded a pending order, a proactive run was triggered, or the thread
 * is being asked to stop.
 */
static inline bool kcompactd_work_requested(pg_data_t *pgdat)
{
	return pgdat->kcompactd_max_order > 0 || kthread_should_stop() ||
		pgdat->proactive_compact_trigger;
}
/*
 * Return true if at least one populated zone up to the recorded
 * kcompactd_highest_zoneidx looks like compaction could make progress for
 * the pending kcompactd_max_order request, i.e. waking kcompactd is useful.
 */
static bool kcompactd_node_suitable(pg_data_t *pgdat)
{
	int zoneid;
	struct zone *zone;
	enum zone_type highest_zoneidx = pgdat->kcompactd_highest_zoneidx;
	enum compact_result ret;
	/* defrag_mode targets the high watermark instead of the min one */
	unsigned int alloc_flags = defrag_mode ?
				ALLOC_WMARK_HIGH : ALLOC_WMARK_MIN;

	for (zoneid = 0; zoneid <= highest_zoneidx; zoneid++) {
		zone = &pgdat->node_zones[zoneid];

		if (!populated_zone(zone))
			continue;

		ret = compaction_suit_allocation_order(zone,
				pgdat->kcompactd_max_order,
				highest_zoneidx, alloc_flags,
				false, true);
		if (ret == COMPACT_CONTINUE)
			return true;
	}

	return false;
}
/*
 * Compact the populated zones of @pgdat up to the recorded
 * kcompactd_highest_zoneidx so that a page of the recorded
 * kcompactd_max_order becomes allocatable, then clear the recorded request.
 */
static void kcompactd_do_work(pg_data_t *pgdat)
{
	/*
	 * With no special task, compact all zones so that a page of requested
	 * order is allocatable.
	 */
	int zoneid;
	struct zone *zone;
	struct compact_control cc = {
		.order = pgdat->kcompactd_max_order,
		.search_order = pgdat->kcompactd_max_order,
		.highest_zoneidx = pgdat->kcompactd_highest_zoneidx,
		.mode = MIGRATE_SYNC_LIGHT,
		.ignore_skip_hint = false,
		.gfp_mask = GFP_KERNEL,
		.alloc_flags = defrag_mode ? ALLOC_WMARK_HIGH : ALLOC_WMARK_MIN,
	};
	enum compact_result ret;

	trace_mm_compaction_kcompactd_wake(pgdat->node_id, cc.order,
							cc.highest_zoneidx);
	count_compact_event(KCOMPACTD_WAKE);

	for (zoneid = 0; zoneid <= cc.highest_zoneidx; zoneid++) {
		int status;

		zone = &pgdat->node_zones[zoneid];
		if (!populated_zone(zone))
			continue;

		/* Skip zones where recent attempts were deferred */
		if (compaction_deferred(zone, cc.order))
			continue;

		ret = compaction_suit_allocation_order(zone,
				cc.order, zoneid, cc.alloc_flags,
				false, true);
		if (ret != COMPACT_CONTINUE)
			continue;

		if (kthread_should_stop())
			return;

		cc.zone = zone;
		status = compact_zone(&cc, NULL);

		if (status == COMPACT_SUCCESS) {
			compaction_defer_reset(zone, cc.order, false);
		} else if (status == COMPACT_PARTIAL_SKIPPED || status == COMPACT_COMPLETE) {
			/*
			 * Buddy pages may become stranded on pcps that could
			 * otherwise coalesce on the zone's free area for
			 * order >= cc.order. This is ratelimited by the
			 * upcoming deferral.
			 */
			drain_all_pages(zone);

			/*
			 * We use sync migration mode here, so we defer like
			 * sync direct compaction does.
			 */
			defer_compaction(zone, cc.order);
		}

		count_compact_events(KCOMPACTD_MIGRATE_SCANNED,
				     cc.total_migrate_scanned);
		count_compact_events(KCOMPACTD_FREE_SCANNED,
				     cc.total_free_scanned);
	}

	/*
	 * Regardless of success, we are done until woken up next. But remember
	 * the requested order/highest_zoneidx in case it was higher/tighter
	 * than our current ones
	 */
	if (pgdat->kcompactd_max_order <= cc.order)
		pgdat->kcompactd_max_order = 0;
	if (pgdat->kcompactd_highest_zoneidx >= cc.highest_zoneidx)
		pgdat->kcompactd_highest_zoneidx = pgdat->nr_zones - 1;
}
/*
 * Record a compaction request (order, highest usable zone index) on @pgdat
 * and wake kcompactd if it is sleeping and the node looks compactable.
 */
void wakeup_kcompactd(pg_data_t *pgdat, int order, int highest_zoneidx)
{
	if (!order)
		return;

	/* Only grow the pending order, never shrink a stronger request */
	if (pgdat->kcompactd_max_order < order)
		pgdat->kcompactd_max_order = order;

	/* Track the most restrictive (lowest) zone index requested */
	if (pgdat->kcompactd_highest_zoneidx > highest_zoneidx)
		pgdat->kcompactd_highest_zoneidx = highest_zoneidx;

	/*
	 * Pairs with implicit barrier in wait_event_freezable()
	 * such that wakeups are not missed.
	 */
	if (!wq_has_sleeper(&pgdat->kcompactd_wait))
		return;

	/* Don't wake the thread when no zone can make progress anyway */
	if (!kcompactd_node_suitable(pgdat))
		return;

	trace_mm_compaction_wakeup_kcompactd(pgdat->node_id, order,
							highest_zoneidx);
	wake_up_interruptible(&pgdat->kcompactd_wait);
}
/*
 * The background compaction daemon, started as a kernel thread
 * from the init process.
 */
static int kcompactd(void *p)
{
	pg_data_t *pgdat = (pg_data_t *)p;
	long default_timeout = msecs_to_jiffies(HPAGE_FRAG_CHECK_INTERVAL_MSEC);
	long timeout = default_timeout;

	current->flags |= PF_KCOMPACTD;
	set_freezable();

	/* Reset any stale request state before entering the main loop */
	pgdat->kcompactd_max_order = 0;
	pgdat->kcompactd_highest_zoneidx = pgdat->nr_zones - 1;

	while (!kthread_should_stop()) {
		unsigned long pflags;

		/*
		 * Avoid the unnecessary wakeup for proactive compaction
		 * when it is disabled.
		 */
		if (!sysctl_compaction_proactiveness)
			timeout = MAX_SCHEDULE_TIMEOUT;
		trace_mm_compaction_kcompactd_sleep(pgdat->node_id);
		/*
		 * A true return means an explicit wakeup rather than a
		 * timeout; do the requested-order work unless the wakeup
		 * was a proactive trigger.
		 */
		if (wait_event_freezable_timeout(pgdat->kcompactd_wait,
			kcompactd_work_requested(pgdat), timeout) &&
			!pgdat->proactive_compact_trigger) {

			psi_memstall_enter(&pflags);
			kcompactd_do_work(pgdat);
			psi_memstall_leave(&pflags);
			/*
			 * Reset the timeout value. The defer timeout from
			 * proactive compaction is lost here but that is fine
			 * as the condition of the zone changing substantially
			 * then carrying on with the previous defer interval is
			 * not useful.
			 */
			timeout = default_timeout;
			continue;
		}

		/*
		 * Start the proactive work with default timeout. Based
		 * on the fragmentation score, this timeout is updated.
		 */
		timeout = default_timeout;
		if (should_proactive_compact_node(pgdat)) {
			unsigned int prev_score, score;

			prev_score = fragmentation_score_node(pgdat);
			compact_node(pgdat, true);
			score = fragmentation_score_node(pgdat);
			/*
			 * Defer proactive compaction if the fragmentation
			 * score did not go down i.e. no progress made.
			 */
			if (unlikely(score >= prev_score))
				timeout =
				   default_timeout << COMPACT_MAX_DEFER_SHIFT;
		}
		if (unlikely(pgdat->proactive_compact_trigger))
			pgdat->proactive_compact_trigger = false;
	}

	current->flags &= ~PF_KCOMPACTD;

	return 0;
}
/*
 * This kcompactd start function will be called by init and node-hot-add.
 * On node-hot-add, kcompactd will be moved to proper cpus if cpus are hot-added.
 */
void __meminit kcompactd_run(int nid)
{
	pg_data_t *pgdat = NODE_DATA(nid);

	/* Already running (e.g. repeated hot-add of the same node) */
	if (pgdat->kcompactd)
		return;

	pgdat->kcompactd = kthread_create_on_node(kcompactd, pgdat, nid, "kcompactd%d", nid);
	if (IS_ERR(pgdat->kcompactd)) {
		pr_err("Failed to start kcompactd on node %d\n", nid);
		/* Non-fatal: the node simply runs without a kcompactd */
		pgdat->kcompactd = NULL;
	} else {
		wake_up_process(pgdat->kcompactd);
	}
}
/*
 * Called by memory hotplug when all memory in a node is offlined. Caller must
 * be holding mem_hotplug_begin/done().
 */
void __meminit kcompactd_stop(int nid)
{
	pg_data_t *pgdat = NODE_DATA(nid);

	if (!pgdat->kcompactd)
		return;

	kthread_stop(pgdat->kcompactd);
	pgdat->kcompactd = NULL;
}
/*
 * proc_dointvec_minmax wrapper that, on PREEMPT_RT kernels, warns once
 * when a write actually changes the value. Used below for the
 * compact_unevictable_allowed sysctl.
 */
static int proc_dointvec_minmax_warn_RT_change(const struct ctl_table *table,
		int write, void *buffer, size_t *lenp, loff_t *ppos)
{
	int ret, old;

	/* Reads, and all builds without PREEMPT_RT, take the plain path */
	if (!IS_ENABLED(CONFIG_PREEMPT_RT) || !write)
		return proc_dointvec_minmax(table, write, buffer, lenp, ppos);

	old = *(int *)table->data;
	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	if (ret)
		return ret;
	if (old != *(int *)table->data)
		pr_warn_once("sysctl attribute %s changed by %s[%d]\n",
			     table->procname, current->comm,
			     task_pid_nr(current));
	return ret;
}
/* Compaction sysctls registered under /proc/sys/vm/ at init time */
static const struct ctl_table vm_compaction[] = {
	{
		/* Write-only trigger: writing 1 compacts all nodes */
		.procname = "compact_memory",
		.data = &sysctl_compact_memory,
		.maxlen = sizeof(int),
		.mode = 0200,
		.proc_handler = sysctl_compaction_handler,
	},
	{
		/* 0-100 aggressiveness knob for background compaction */
		.procname = "compaction_proactiveness",
		.data = &sysctl_compaction_proactiveness,
		.maxlen = sizeof(sysctl_compaction_proactiveness),
		.mode = 0644,
		.proc_handler = compaction_proactiveness_sysctl_handler,
		.extra1 = SYSCTL_ZERO,
		.extra2 = SYSCTL_ONE_HUNDRED,
	},
	{
		/* Clamped to 0-1000 */
		.procname = "extfrag_threshold",
		.data = &sysctl_extfrag_threshold,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.extra1 = SYSCTL_ZERO,
		.extra2 = SYSCTL_ONE_THOUSAND,
	},
	{
		/* Boolean; changes are warned about once on PREEMPT_RT */
		.procname = "compact_unevictable_allowed",
		.data = &sysctl_compact_unevictable_allowed,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax_warn_RT_change,
		.extra1 = SYSCTL_ZERO,
		.extra2 = SYSCTL_ONE,
	},
};
/* Boot-time init: start kcompactd on every memory node, register sysctls */
static int __init kcompactd_init(void)
{
	int nid;

	for_each_node_state(nid, N_MEMORY)
		kcompactd_run(nid);
	register_sysctl_init("vm", vm_compaction);
	return 0;
}
subsys_initcall(kcompactd_init)
#endif /* CONFIG_COMPACTION */ | c | github | https://github.com/torvalds/linux | mm/compaction.c |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2013 Oscar Campos <oscar.campos@member.fsf.org>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
.. module:: glosarium
:platform: POSIX, Windows
:synopsis: This module loads glossary terms from a formatted web page
.. moduleauthor:: Oscar Campos <oscar.campos@member.fsf.org>
"""
import re
import httplib2
class WebParserError(Exception):
    """Raised when the glossary web page cannot be fetched or parsed.
    """
class WebParser(object):
    """
    Parse the GNU "Glosario" web page and build a glossary mapping each
    term to its translation.

    :param url: the URL to parse; defaults to the GNU Spanish translation
                team's resources page.
    :type url: str
    """

    def __init__(self, url=None):
        # Bug fix: a caller-supplied url used to be silently ignored
        # (self.url was only assigned in the default branch, leaving the
        # attribute unset when an explicit url was passed).
        if url is None:
            url = (
                'http://www.gnu.org/server/standards/translations/es/'
                'recursos.html#glosario'
            )
        self.url = url
        self.__glosary = {}
        self._parsed = False

    @property
    def glosary(self):
        """Return the glossary, fetching and parsing it on first access."""
        if not self._parsed:
            self._parse()
        return self.__glosary

    def _parse(self):
        """Fetch self.url and extract term/translation pairs.

        :raises WebParserError: if the site is unreachable, replies with
            a non-200 status, or the glossary markers are missing.
        """
        h = httplib2.Http('/tmp/.cache')
        try:
            resp, content = h.request(self.url)
        except httplib2.ServerNotFoundError:
            raise WebParserError(
                'GNU site is unavailable, check your internet connection'
            )

        # Validate the HTTP status before touching the payload; previously
        # a non-200 reply could crash while slicing the body instead of
        # raising WebParserError.
        if resp.get('status') != '200':
            raise WebParserError('error: the server at {0} reply {1}'.format(
                self.url, resp.get('status')
            ))

        lines = content.split('\n')
        try:
            content = '\n'.join(lines[
                lines.index('<!-- Begin Glosario -->') + 1:
                lines.index('<!-- End Glosario -->')
            ])
        except ValueError:
            # Markers missing: page layout changed or truncated download.
            raise WebParserError(
                'glossary markers not found at {0}'.format(self.url))

        # Terms are wrapped in <strong>; a translation is whatever follows
        # the closing tag up to the next term or paragraph break.
        regex = re.compile(r'(?<=<strong>)(?P<term>.*?)(?=</strong>)')
        regex2 = re.compile(
            r'((?<=</strong>\:)[\s\S]*?(?=<strong>|<br />|</p>))')

        terms = [term.strip('"') for term in regex.findall(content)]
        translations = [
            re.sub(r'<[^<]+?>', '', term).strip()
            for term in regex2.findall(content)
        ]
        self.__glosary = dict(zip(terms, translations))
        self._parsed = True
__all__ = ['WebParser', 'WebParserError'] | unknown | codeparrot/codeparrot-clean | ||
from ovito import *
from ovito.io import *
from ovito.data import *
from ovito.modifiers import *
import numpy
# Functional test for ovito's ElasticStrainModifier: import an FCC coherent
# twin configuration, apply the modifier and dump the computed properties.
node = import_file("../../files/CFG/fcc_coherent_twin.0.cfg")

modifier = ElasticStrainModifier()

# Report the modifier's out-of-the-box parameter values.
print("Parameter defaults:")
print(" calculate_deformation_gradients: {}".format(modifier.calculate_deformation_gradients))
print(" calculate_strain_tensors: {}".format(modifier.calculate_strain_tensors))
print(" push_strain_tensors_forward: {}".format(modifier.push_strain_tensors_forward))
print(" lattice_constant: {}".format(modifier.lattice_constant))
print(" axial_ratio: {}".format(modifier.axial_ratio))
print(" input_crystal_structure: {}".format(modifier.input_crystal_structure))

node.modifiers.append(modifier)

# Configure an FCC reference lattice and request deformation gradients
# in addition to the strain tensors.
modifier.input_crystal_structure = ElasticStrainModifier.Lattice.FCC
modifier.lattice_constant = 0.99
modifier.calculate_deformation_gradients = True

# Evaluate the modification pipeline.
node.compute()

# Dump the per-particle output properties produced by the modifier.
print("Computed structure types:")
print(node.output.particle_properties.structure_type.array)
print("Computed strain tensors:")
print(node.output.particle_properties['Elastic Strain'].array)
print("Computed deformation gradient tensors:")
print(node.output.particle_properties['Elastic Deformation Gradient'].array)
print("Computed volumetric strain:")
print(node.output.particle_properties['Volumetric Strain'].array)
imports:
- { resource: framework.yml }
- { resource: twig.yml } | unknown | github | https://github.com/symfony/symfony | src/Symfony/Bundle/SecurityBundle/Tests/Functional/app/config/default.yml |
# -*- coding: utf-8 -*-
import random
import string
from itertools import cycle
from django.core.exceptions import ValidationError
import mock
from nose.tools import eq_, ok_
import mkt.site.tests
import mkt.feed.constants as feed
from mkt.feed.models import (FeedApp, FeedBrand, FeedCollection, FeedItem,
FeedShelf)
from mkt.operators.models import OperatorPermission
from mkt.site.fixtures import fixture
from mkt.webapps.models import Webapp
class FeedTestMixin(object):
    """Factory helpers for building feed elements (apps, brands,
    collections, shelves) and FeedItems in tests.
    """
    fixtures = fixture('webapp_337141')

    def feed_app_factory(self, app_id=None, app_type=feed.FEEDAPP_ICON,
                         **kwargs):
        """Create a FeedApp with a unique slug (defaults to app 337141)."""
        count = FeedApp.objects.count()
        return FeedApp.objects.create(
            app_id=app_id or Webapp.objects.get(id=337141).id,
            slug='feed-app-%s' % count, type=app_type, **kwargs)

    def feed_brand_factory(self, app_ids=None, layout=feed.BRAND_GRID,
                           brand_type='mystery-app', **kwargs):
        """Create a FeedBrand and attach the given apps to it."""
        count = FeedBrand.objects.count()
        brand = FeedBrand.objects.create(slug='feed-brand-%s' % count,
                                         type=brand_type, **kwargs)
        brand.set_apps(app_ids or [337141])
        return brand

    def feed_collection_factory(self, app_ids=None, name='test-coll',
                                coll_type=feed.COLLECTION_LISTING,
                                grouped=False, **kwargs):
        """Create a FeedCollection with the given apps.

        If `grouped`, split the memberships into 'first-group' and
        'second-group' (the last app gets the second group when there is
        more than one app).
        """
        count = FeedCollection.objects.count()
        coll = FeedCollection.objects.create(
            name=name, slug='feed-coll-%s' % count, type=coll_type, **kwargs)
        app_ids = app_ids or [337141]
        coll.set_apps(app_ids)
        if grouped:
            for i, mem in enumerate(coll.feedcollectionmembership_set.all()):
                if i == len(app_ids) - 1 and len(app_ids) > 1:
                    mem.group = 'second-group'
                else:
                    mem.group = 'first-group'
                mem.save()
        return coll

    def feed_shelf_factory(self, app_ids=None, name='test-shelf',
                           carrier=1, region=1, grouped=False, **kwargs):
        """Create a FeedShelf bound to a carrier/region pair; the
        `grouped` behaviour matches feed_collection_factory.
        """
        count = FeedShelf.objects.count()
        shelf = FeedShelf.objects.create(
            name=name, slug='feed-shelf-%s' % count, carrier=carrier,
            region=region, **kwargs)
        app_ids = app_ids or [337141]
        shelf.set_apps(app_ids)
        if grouped:
            for i, mem in enumerate(shelf.feedshelfmembership_set.all()):
                if i == len(app_ids) - 1 and len(app_ids) > 1:
                    mem.group = 'second-group'
                else:
                    mem.group = 'first-group'
                mem.save()
        return shelf

    def feed_shelf_permission_factory(self, user, carrier=1, region=1):
        """Grant `user` operator permission for the carrier/region pair."""
        return OperatorPermission.objects.create(user=user, carrier=carrier,
                                                 region=region)

    def feed_item_factory(self, carrier=1, region=1,
                          item_type=feed.FEED_TYPE_APP, **kw):
        """Creates a single FeedItem of any feed element type specified."""
        feed_item = FeedItem(carrier=carrier, region=region,
                             item_type=item_type, **kw)
        if item_type == feed.FEED_TYPE_APP:
            feed_item.app = self.feed_app_factory()
        elif item_type == feed.FEED_TYPE_BRAND:
            feed_item.brand = self.feed_brand_factory()
        elif item_type == feed.FEED_TYPE_COLL:
            feed_item.collection = self.feed_collection_factory()
        elif item_type == feed.FEED_TYPE_SHELF:
            # Shelves are carrier/region specific, so propagate both.
            feed_item.shelf = self.feed_shelf_factory(carrier=carrier,
                                                      region=region)
        feed_item.save()
        return feed_item

    def feed_factory(self, carrier=1, region=1, item_types=None,
                     num_items=None):
        """
        Iterates over a list of feed element types and creates `num_items`
        FeedItems, cycling over those types. By default, creates one of each
        type. Returns a list of FeedItems.
        """
        item_types = item_types or [feed.FEED_TYPE_APP, feed.FEED_TYPE_BRAND,
                                    feed.FEED_TYPE_COLL, feed.FEED_TYPE_SHELF]
        if not num_items:
            num_items = len(item_types)
        item_types = cycle(item_types)

        feed_items = []
        for i in xrange(num_items):
            feed_items.append(
                self.feed_item_factory(carrier=carrier, region=region,
                                       item_type=item_types.next()))
        return feed_items
class FeedAppMixin(object):
    """Mixin providing canned FeedApp payload data and factory helpers."""
    fixtures = fixture('webapp_337141')

    def setUp(self):
        # Baseline payload for creating a FeedApp in tests.
        self.feedapp_data = {
            'app': 337141,
            'color': 'emerald',
            'type': 'icon',
            'description': {
                'en-US': u'pan-fried potatoes'
            },
            'slug': self.random_slug()
        }
        # Optional pull-quote fields that tests can merge in.
        self.pullquote_data = {
            'pullquote_text': {'en-US': u'The bést!'},
            'pullquote_rating': 4,
            'pullquote_attribution': u'Jamés Bod'
        }
        self.feedapps = []
        super(FeedAppMixin, self).setUp()

    def random_slug(self):
        """Return a random, lower-cased 10-character slug."""
        alphabet = string.ascii_uppercase + string.digits
        return ''.join(random.choice(alphabet)
                       for _ in range(10)).lower()

    def create_feedapps(self, n=2, **kwargs):
        """Create `n` FeedApps from feedapp_data overridden by kwargs."""
        data = dict(self.feedapp_data, **kwargs)
        if not isinstance(data['app'], Webapp):
            data['app'] = Webapp.objects.get(pk=data['app'])
        created = []
        for _ in xrange(n):
            # Each row needs its own unique slug.
            data['slug'] = self.random_slug()
            created.append(FeedApp.objects.create(**data))
        self.feedapps.extend(created)
        return created
class TestFeedApp(FeedAppMixin, mkt.site.tests.TestCase):
    """Model-level validation tests for FeedApp creation."""

    def setUp(self):
        super(TestFeedApp, self).setUp()
        # Fold the pull-quote fields into the base payload and resolve the
        # app pk into a Webapp instance.
        self.feedapp_data.update(**self.pullquote_data)
        self.feedapp_data['app'] = (
            Webapp.objects.get(pk=self.feedapp_data['app']))

    def test_create(self):
        # Also reused as a helper by the other tests below.
        feedapp = FeedApp(**self.feedapp_data)
        ok_(isinstance(feedapp, FeedApp))
        feedapp.clean_fields()  # Tests validators on fields.
        feedapp.clean()  # Test model validation.
        feedapp.save()  # Tests required fields.

    def test_missing_pullquote_rating(self):
        # The rating is optional when pull-quote text is present.
        del self.feedapp_data['pullquote_rating']
        self.test_create()

    def test_missing_pullquote_text(self):
        # Pull-quote text is required once other pull-quote fields are set.
        del self.feedapp_data['pullquote_text']
        with self.assertRaises(ValidationError):
            self.test_create()

    def test_pullquote_rating_fractional(self):
        """
        This passes because PositiveSmallIntegerField will coerce the float
        into an int, which effectively returns math.floor(value).
        """
        self.feedapp_data['pullquote_rating'] = 4.5
        self.test_create()

    def test_bad_pullquote_rating_low(self):
        # -1 is rejected by the rating validator.
        self.feedapp_data['pullquote_rating'] = -1
        with self.assertRaises(ValidationError):
            self.test_create()

    def test_bad_pullquote_rating_high(self):
        # 6 is rejected by the rating validator.
        self.feedapp_data['pullquote_rating'] = 6
        with self.assertRaises(ValidationError):
            self.test_create()
class TestFeedBrand(mkt.site.tests.TestCase):
    """Tests for FeedBrand app membership management."""

    def setUp(self):
        super(TestFeedBrand, self).setUp()
        self.apps = [mkt.site.tests.app_factory() for i in xrange(3)]
        self.brand = None
        self.brand_data = {
            'slug': 'potato',
            'type': 1,
            'layout': 1
        }

    def test_create(self):
        # Also reused as a setup helper by the membership tests below.
        self.brand = FeedBrand.objects.create(**self.brand_data)
        ok_(isinstance(self.brand, FeedBrand))
        for name, value in self.brand_data.iteritems():
            eq_(getattr(self.brand, name), value, name)

    def test_add_app(self):
        self.test_create()
        m = self.brand.add_app(self.apps[0], order=3)
        ok_(self.brand.apps(), [self.apps[0]])
        eq_(m.order, 3)
        eq_(m.app, self.apps[0])
        eq_(m.obj, self.brand)

    def test_add_app_sort_order_respected(self):
        # An app added with a lower order sorts before existing members.
        self.test_add_app()
        self.brand.add_app(self.apps[1], order=1)
        ok_(self.brand.apps(), [self.apps[1], self.apps[0]])

    def test_add_app_no_order_passed(self):
        # Without an explicit order, the app is appended after the rest.
        self.test_add_app()
        m = self.brand.add_app(self.apps[1])
        ok_(m.order, 4)

    def test_remove_app(self):
        self.test_add_app()
        ok_(self.apps[0] in self.brand.apps())
        removed = self.brand.remove_app(self.apps[0])
        ok_(removed)
        ok_(self.apps[0] not in self.brand.apps())

    def test_remove_app_not_in_brand(self):
        # Removing an app that is not a member reports False.
        self.test_remove_app()
        removed = self.brand.remove_app(self.apps[1])
        ok_(not removed)

    def test_set_apps(self):
        # set_apps replaces the membership and preserves the given order.
        self.test_add_app_sort_order_respected()
        new_apps = [app.pk for app in self.apps][::-1]
        self.brand.set_apps(new_apps)
        eq_(new_apps, [app.pk for app in self.brand.apps().no_cache()])

    def test_set_apps_nonexistant(self):
        # Unknown app pks raise rather than being silently dropped.
        self.test_add_app_sort_order_respected()
        with self.assertRaises(Webapp.DoesNotExist):
            self.brand.set_apps([99999])
class TestESReceivers(FeedTestMixin, mkt.site.tests.TestCase):
    """Verify feed model signals keep the search index up to date."""

    @mock.patch('mkt.search.indexers.BaseIndexer.index_ids')
    def test_update_search_index(self, update_mock):
        feed_items = self.feed_factory()
        # index_ids receives a list of ids as its first positional argument;
        # collect the first id from every recorded call.
        calls = [update_call[0][0][0] for update_call in
                 update_mock.call_args_list]
        for feed_item in feed_items:
            assert feed_item.id in calls
            # The wrapped element (app/brand/collection/shelf) is indexed
            # alongside the FeedItem itself.
            assert getattr(feed_item, feed_item.item_type).id in calls

    @mock.patch('mkt.search.indexers.BaseIndexer.unindex')
    def test_delete_search_index(self, delete_mock):
        for x in xrange(4):
            self.feed_item_factory()
        count = FeedItem.objects.count()
        FeedItem.objects.all().delete()
        # One unindex call per deleted FeedItem.
        eq_(delete_mock.call_count, count)
class TestFeedShelf(FeedTestMixin, mkt.site.tests.TestCase):
    """Tests for the FeedShelf.is_published property."""

    def test_is_published(self):
        # A shelf with no FeedItem referencing it is unpublished.
        shelf = self.feed_shelf_factory()
        ok_(not shelf.is_published)

        # Attaching a feed item publishes the shelf.
        shelf.feeditem_set.create()
        ok_(shelf.is_published)
class TestFeedCollection(FeedTestMixin, mkt.site.tests.TestCase):
    """Tests for FeedCollection app membership updates."""

    def test_update_apps(self):
        collection = self.feed_collection_factory()
        eq_(collection.apps().count(), 1)

        # Replace the membership with three apps in one call.
        new_ids = [337141, mkt.site.tests.app_factory().id,
                   mkt.site.tests.app_factory().id]
        collection.set_apps(new_ids)
        eq_(collection.apps().count(), 3)
#!/usr/bin/env python
# -*- coding: ascii -*-
#
# Copyright 2011, 2012
# Andr\xe9 Malo or his licensors, as applicable
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
==============
CSS Minifier
==============
CSS Minifier.
The minifier is based on the semantics of the `YUI compressor`_\, which itself
is based on `the rule list by Isaac Schlueter`_\.
This module is a re-implementation aiming for speed instead of maximum
compression, so it can be used at runtime (rather than during a preprocessing
step). RCSSmin does syntactical compression only (removing spaces, comments
and possibly semicolons). It does not provide semantic compression (like
removing empty blocks, collapsing redundant properties etc). It does, however,
support various CSS hacks (by keeping them working as intended).
Here's a feature list:
- Strings are kept, except that escaped newlines are stripped
- Space/Comments before the very end or before various characters are
stripped: ``:{});=>+],!`` (The colon (``:``) is a special case, a single
space is kept if it's outside a ruleset.)
- Space/Comments at the very beginning or after various characters are
stripped: ``{}(=:>+[,!``
- Optional space after unicode escapes is kept, resp. replaced by a simple
space
- whitespaces inside ``url()`` definitions are stripped
- Comments starting with an exclamation mark (``!``) can be kept optionally.
- All other comments and/or whitespace characters are replaced by a single
space.
- Multiple consecutive semicolons are reduced to one
- The last semicolon within a ruleset is stripped
- CSS Hacks supported:
- IE7 hack (``>/**/``)
- Mac-IE5 hack (``/*\*/.../**/``)
- The boxmodelhack is supported naturally because it relies on valid CSS2
strings
- Between ``:first-line`` and the following comma or curly brace a space is
inserted. (apparently it's needed for IE6)
- Same for ``:first-letter``
rcssmin.c is a reimplementation of rcssmin.py in C and improves runtime up to
factor 50 or so (depending on the input).
Both python 2 (>= 2.4) and python 3 are supported.
.. _YUI compressor: https://github.com/yui/yuicompressor/
.. _the rule list by Isaac Schlueter: https://github.com/isaacs/cssmin/tree/
"""
__author__ = "Andr\xe9 Malo"
# Py2/py3 compatibility trick: on Python 2 the byte string is decoded to
# unicode via str.decode('latin-1'); on Python 3 str has no .decode, so
# getattr falls back to the lambda, which just returns __author__ unchanged.
__author__ = getattr(__author__, 'decode', lambda x: __author__)('latin-1')
__docformat__ = "restructuredtext en"
__license__ = "Apache License, Version 2.0"
__version__ = '1.0.2'
__all__ = ['cssmin']

import re as _re
def _make_cssmin(python_only=False):
    """
    Generate CSS minifier.

    :Parameters:
      `python_only` : ``bool``
        Use only the python variant. If true, the c extension is not even
        tried to be loaded.

    :Return: Minifier
    :Rtype: ``callable``
    """
    # pylint: disable = W0612
    # ("unused" variables)
    # pylint: disable = R0911, R0912, R0914, R0915
    # (too many anything)

    # Prefer the C extension when available -- it is a drop-in replacement.
    if not python_only:
        try:
            import _rcssmin
        except ImportError:
            pass
        else:
            return _rcssmin.cssmin

    # --- Regex fragments (octal escapes: \040 = space, \047 = "'") ---
    nl = r'(?:[\n\f]|\r\n?)' # pylint: disable = C0103
    spacechar = r'[\r\n\f\040\t]'

    unicoded = r'[0-9a-fA-F]{1,6}(?:[\040\n\t\f]|\r\n?)?'
    escaped = r'[^\n\r\f0-9a-fA-F]'
    escape = r'(?:\\(?:%(unicoded)s|%(escaped)s))' % locals()

    nmchar = r'[^\000-\054\056\057\072-\100\133-\136\140\173-\177]'
    #nmstart = r'[^\000-\100\133-\136\140\173-\177]'
    #ident = (r'(?:'
    #    r'-?(?:%(nmstart)s|%(escape)s)%(nmchar)s*(?:%(escape)s%(nmchar)s*)*'
    #r')') % locals()

    comment = r'(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/)'

    # only for specific purposes. The bang is grouped:
    _bang_comment = r'(?:/\*(!?)[^*]*\*+(?:[^/*][^*]*\*+)*/)'

    string1 = \
        r'(?:\047[^\047\\\r\n\f]*(?:\\[^\r\n\f][^\047\\\r\n\f]*)*\047)'
    string2 = r'(?:"[^"\\\r\n\f]*(?:\\[^\r\n\f][^"\\\r\n\f]*)*")'
    strings = r'(?:%s|%s)' % (string1, string2)

    # "nl" variants additionally allow escaped newlines inside strings.
    nl_string1 = \
        r'(?:\047[^\047\\\r\n\f]*(?:\\(?:[^\r]|\r\n?)[^\047\\\r\n\f]*)*\047)'
    nl_string2 = r'(?:"[^"\\\r\n\f]*(?:\\(?:[^\r]|\r\n?)[^"\\\r\n\f]*)*")'
    nl_strings = r'(?:%s|%s)' % (nl_string1, nl_string2)

    uri_nl_string1 = r'(?:\047[^\047\\]*(?:\\(?:[^\r]|\r\n?)[^\047\\]*)*\047)'
    uri_nl_string2 = r'(?:"[^"\\]*(?:\\(?:[^\r]|\r\n?)[^"\\]*)*")'
    uri_nl_strings = r'(?:%s|%s)' % (uri_nl_string1, uri_nl_string2)

    nl_escaped = r'(?:\\%(nl)s)' % locals()

    space = r'(?:%(spacechar)s|%(comment)s)' % locals()

    ie7hack = r'(?:>/\*\*/)'

    uri = (r'(?:'
        r'(?:[^\000-\040"\047()\\\177]*'
            r'(?:%(escape)s[^\000-\040"\047()\\\177]*)*)'
        r'(?:'
            r'(?:%(spacechar)s+|%(nl_escaped)s+)'
            r'(?:'
                r'(?:[^\000-\040"\047()\\\177]|%(escape)s|%(nl_escaped)s)'
                r'[^\000-\040"\047()\\\177]*'
                r'(?:%(escape)s[^\000-\040"\047()\\\177]*)*'
            r')+'
        r')*'
    r')') % locals()

    nl_unesc_sub = _re.compile(nl_escaped).sub

    uri_space_sub = _re.compile((
        r'(%(escape)s+)|%(spacechar)s+|%(nl_escaped)s+'
    ) % locals()).sub
    uri_space_subber = lambda m: m.groups()[0] or ''

    space_sub_simple = _re.compile((
        r'[\r\n\f\040\t;]+|(%(comment)s+)'
    ) % locals()).sub
    space_sub_banged = _re.compile((
        r'[\r\n\f\040\t;]+|(%(_bang_comment)s+)'
    ) % locals()).sub

    post_esc_sub = _re.compile(r'[\r\n\f\t]+').sub

    # The main tokenizer. The group numbering below (1..19) is what the
    # dispatch table in main_subber indexes into -- keep them in sync.
    main_sub = _re.compile((
        r'([^\\"\047u>@\r\n\f\040\t/;:{}]+)'
        r'|(?<=[{}(=:>+[,!])(%(space)s+)'
        r'|^(%(space)s+)'
        r'|(%(space)s+)(?=(([:{});=>+\],!])|$)?)'
        r'|;(%(space)s*(?:;%(space)s*)*)(?=(\})?)'
        r'|(\{)'
        r'|(\})'
        r'|(%(strings)s)'
        r'|(?<!%(nmchar)s)url\(%(spacechar)s*('
            r'%(uri_nl_strings)s'
            r'|%(uri)s'
        r')%(spacechar)s*\)'
        r'|(@[mM][eE][dD][iI][aA])(?!%(nmchar)s)'
        r'|(%(ie7hack)s)(%(space)s*)'
        r'|(:[fF][iI][rR][sS][tT]-[lL]'
            r'(?:[iI][nN][eE]|[eE][tT][tT][eE][rR]))'
            r'(%(space)s*)(?=[{,])'
        r'|(%(nl_strings)s)'
        r'|(%(escape)s[^\\"\047u>@\r\n\f\040\t/;:{}]*)'
    ) % locals()).sub

    #print main_sub.__self__.pattern

    def main_subber(keep_bang_comments):
        """ Make main subber """
        # One-element lists act as mutable cells so the nested handler
        # functions can rewrite the shared parser state from their closures.
        in_macie5, in_rule, at_media = [0], [0], [0]

        if keep_bang_comments:
            space_sub = space_sub_banged
            def space_subber(match):
                """ Space|Comment subber """
                if match.lastindex:
                    group1, group2 = match.group(1, 2)
                    if group2:
                        # Bang comment: keep it verbatim, track Mac-IE5 state.
                        if group1.endswith(r'\*/'):
                            in_macie5[0] = 1
                        else:
                            in_macie5[0] = 0
                        return group1
                    elif group1:
                        if group1.endswith(r'\*/'):
                            if in_macie5[0]:
                                return ''
                            in_macie5[0] = 1
                            return r'/*\*/'
                        elif in_macie5[0]:
                            in_macie5[0] = 0
                            return '/**/'
                return ''
        else:
            space_sub = space_sub_simple
            def space_subber(match):
                """ Space|Comment subber """
                if match.lastindex:
                    if match.group(1).endswith(r'\*/'):
                        if in_macie5[0]:
                            return ''
                        in_macie5[0] = 1
                        return r'/*\*/'
                    elif in_macie5[0]:
                        in_macie5[0] = 0
                        return '/**/'
                return ''

        def fn_space_post(group):
            """ space with token after """
            # A single space survives only before a bare ':' outside any
            # ruleset/@media block (selector pseudo-class disambiguation).
            if group(5) is None or (
                    group(6) == ':' and not in_rule[0] and not at_media[0]):
                return ' ' + space_sub(space_subber, group(4))
            return space_sub(space_subber, group(4))

        def fn_semicolon(group):
            """ ; handler """
            return ';' + space_sub(space_subber, group(7))

        def fn_semicolon2(group):
            """ ; handler """
            # Last semicolon before '}' inside a ruleset is dropped.
            if in_rule[0]:
                return space_sub(space_subber, group(7))
            return ';' + space_sub(space_subber, group(7))

        def fn_open(group):
            """ { handler """
            # pylint: disable = W0613
            if at_media[0]:
                at_media[0] -= 1
            else:
                in_rule[0] = 1
            return '{'

        def fn_close(group):
            """ } handler """
            # pylint: disable = W0613
            in_rule[0] = 0
            return '}'

        def fn_media(group):
            """ @media handler """
            at_media[0] += 1
            return group(13)

        def fn_ie7hack(group):
            """ IE7 Hack handler """
            if not in_rule[0] and not at_media[0]:
                in_macie5[0] = 0
                return group(14) + space_sub(space_subber, group(15))
            return '>' + space_sub(space_subber, group(15))

        # Dispatch table indexed by match.lastindex of main_sub (see the
        # group numbering in the main_sub pattern above).
        table = (
            None,
            None,
            None,
            None,
            fn_space_post, # space with token after
            fn_space_post, # space with token after
            fn_space_post, # space with token after
            fn_semicolon, # semicolon
            fn_semicolon2, # semicolon
            fn_open, # {
            fn_close, # }
            lambda g: g(11), # string
            lambda g: 'url(%s)' % uri_space_sub(uri_space_subber, g(12)),
                # url(...)
            fn_media, # @media
            None,
            fn_ie7hack, # ie7hack
            None,
            lambda g: g(16) + ' ' + space_sub(space_subber, g(17)),
                # :first-line|letter followed
                # by [{,] (apparently space
                # needed for IE6)
            lambda g: nl_unesc_sub('', g(18)), # nl_string
            lambda g: post_esc_sub(' ', g(19)), # escape
        )

        def func(match):
            """ Main subber """
            idx, group = match.lastindex, match.group
            if idx > 3:
                return table[idx](group)

            # shortcuts for frequent operations below:
            elif idx == 1: # not interesting
                return group(1)
            #else: # space with token before or at the beginning
            return space_sub(space_subber, group(idx))

        return func

    def cssmin(style, keep_bang_comments=False): # pylint: disable = W0621
        """
        Minify CSS.

        :Parameters:
          `style` : ``str``
            CSS to minify

          `keep_bang_comments` : ``bool``
            Keep comments starting with an exclamation mark? (``/*!...*/``)

        :Return: Minified style
        :Rtype: ``str``
        """
        return main_sub(main_subber(keep_bang_comments), style)

    return cssmin
# Module-level minifier (C extension when available, pure python otherwise).
cssmin = _make_cssmin()

if __name__ == '__main__':
    def main():
        """ Main """
        import sys as _sys
        args = set(_sys.argv[1:])
        # -b: keep bang comments; -p: force the pure-python implementation.
        keep_bang_comments = bool(args & {'-b', '-bp', '-pb'})
        if args & {'-p', '-bp', '-pb'}:
            global cssmin # pylint: disable = W0603
            cssmin = _make_cssmin(python_only=True)
        _sys.stdout.write(cssmin(
            _sys.stdin.read(), keep_bang_comments=keep_bang_comments
        ))
    main()
"""Loading unittests."""
import os
import re
import sys
import traceback
import types
import functools
from fnmatch import fnmatch, fnmatchcase
from . import case, suite, util
# Marks frames from this module as unittest-internal -- presumably so they
# are hidden from failure tracebacks; confirm against unittest.result.
__unittest = True

# what about .pyc (etc)
# we would need to avoid loading the same tests multiple times
# from '.py', *and* '.pyc'
# Accepts only files that look like a Python identifier plus '.py'.
VALID_MODULE_NAME = re.compile(r'[_a-z]\w*\.py$', re.IGNORECASE)
class _FailedTest(case.TestCase):
    """Placeholder TestCase whose single "test" re-raises a stored exception.

    Used to surface import/load failures as failing tests in the run.
    """
    # Default None so __getattr__ below does not recurse before __init__ runs.
    _testMethodName = None

    def __init__(self, method_name, exception):
        # The exception to re-raise when the fabricated test method is run.
        self._exception = exception
        super(_FailedTest, self).__init__(method_name)

    def __getattr__(self, name):
        # Only the recorded method name resolves to the raising stub; any
        # other missing attribute defers to the base lookup (which raises).
        if name != self._testMethodName:
            return super(_FailedTest, self).__getattr__(name)

        def testFailure():
            raise self._exception
        return testFailure
def _make_failed_import_test(name, suiteClass):
    """Wrap the active import failure as a (failing suite, message) pair."""
    details = traceback.format_exc()
    message = 'Failed to import test module: %s\n%s' % (name, details)
    return _make_failed_test(name, ImportError(message), suiteClass, message)
def _make_failed_load_tests(name, exception, suiteClass):
    """Wrap a load_tests() failure as a (failing suite, message) pair."""
    message = 'Failed to call load_tests:\n%s' % (traceback.format_exc(),)
    return _make_failed_test(name, exception, suiteClass, message)
def _make_failed_test(methodname, exception, suiteClass, message):
    """Build a one-test suite whose test raises `exception`; return (suite, message)."""
    failed = _FailedTest(methodname, exception)
    return suiteClass((failed,)), message
def _make_skipped_test(methodname, exception, suiteClass):
@case.skip(str(exception))
def testSkipped(self):
pass
attrs = {methodname: testSkipped}
TestClass = type("ModuleSkipped", (case.TestCase,), attrs)
return suiteClass((TestClass(methodname),))
def _splitext(path):
return os.path.splitext(path)[0]
class TestLoader(object):
    """
    This class is responsible for loading tests according to various criteria
    and returning them wrapped in a TestSuite
    """
    # Methods whose names start with this prefix are collected as tests.
    testMethodPrefix = 'test'
    # Comparison used to order collected test-method names (None disables
    # sorting).
    sortTestMethodsUsing = staticmethod(util.three_way_cmp)
    # Optional fnmatch-style patterns restricting which tests are loaded.
    testNamePatterns = None
    suiteClass = suite.TestSuite
    # Remembered across nested discover() calls (see discover()).
    _top_level_dir = None

    def __init__(self):
        super(TestLoader, self).__init__()
        # Accumulates formatted error messages from failed imports/loads.
        self.errors = []
        # Tracks packages which we have called into via load_tests, to
        # avoid infinite re-entrancy.
        self._loading_packages = set()
def loadTestsFromTestCase(self, testCaseClass):
    """Return a suite of all test cases contained in testCaseClass"""
    if issubclass(testCaseClass, suite.TestSuite):
        raise TypeError("Test cases should not be derived from "
                        "TestSuite. Maybe you meant to derive from "
                        "TestCase?")
    if testCaseClass in (case.TestCase, case.FunctionTestCase):
        # The base types themselves contribute no tests.
        names = []
    else:
        names = self.getTestCaseNames(testCaseClass)
    if not names and hasattr(testCaseClass, 'runTest'):
        names = ['runTest']
    return self.suiteClass(map(testCaseClass, names))
def loadTestsFromModule(self, module, *, pattern=None):
    """Return a suite of all test cases contained in the given module"""
    def _is_test_class(value):
        # Concrete TestCase subclasses only; the base types are excluded.
        return (isinstance(value, type)
                and issubclass(value, case.TestCase)
                and value not in (case.TestCase, case.FunctionTestCase))

    suites = [self.loadTestsFromTestCase(getattr(module, name))
              for name in dir(module)
              if _is_test_class(getattr(module, name))]
    tests = self.suiteClass(suites)

    load_tests = getattr(module, 'load_tests', None)
    if load_tests is None:
        return tests
    try:
        # The load_tests protocol lets the module curate its own suite.
        return load_tests(self, tests, pattern)
    except Exception as e:
        error_case, error_message = _make_failed_load_tests(
            module.__name__, e, self.suiteClass)
        self.errors.append(error_message)
        return error_case
def loadTestsFromName(self, name, module=None):
    """Return a suite of all test cases given a string specifier.

    The name may resolve either to a module, a test case class, a
    test method within a test case class, or a callable object which
    returns a TestCase or TestSuite instance.

    The method optionally resolves the names relative to a given module.
    """
    parts = name.split('.')
    error_case, error_message = None, None
    if module is None:
        # Import the longest importable prefix of the dotted name; the
        # remainder is traversed as attributes below.
        parts_copy = parts[:]
        while parts_copy:
            try:
                module_name = '.'.join(parts_copy)
                module = __import__(module_name)
                break
            except ImportError:
                next_attribute = parts_copy.pop()
                # Last error so we can give it to the user if needed.
                error_case, error_message = _make_failed_import_test(
                    next_attribute, self.suiteClass)
                if not parts_copy:
                    # Even the top level import failed: report that error.
                    self.errors.append(error_message)
                    return error_case
        parts = parts[1:]
    obj = module
    for part in parts:
        try:
            parent, obj = obj, getattr(obj, part)
        except AttributeError as e:
            # We can't traverse some part of the name.
            if (getattr(obj, '__path__', None) is not None
                    and error_case is not None):
                # This is a package (no __path__ per importlib docs), and we
                # encountered an error importing something. We cannot tell
                # the difference between package.WrongNameTestClass and
                # package.wrong_module_name so we just report the
                # ImportError - it is more informative.
                self.errors.append(error_message)
                return error_case
            else:
                # Otherwise, we signal that an AttributeError has occurred.
                error_case, error_message = _make_failed_test(
                    part, e, self.suiteClass,
                    'Failed to access attribute:\n%s' % (
                        traceback.format_exc(),))
                self.errors.append(error_message)
                return error_case

    # Dispatch on what the dotted name finally resolved to.
    if isinstance(obj, types.ModuleType):
        return self.loadTestsFromModule(obj)
    elif (
        isinstance(obj, type)
        and issubclass(obj, case.TestCase)
        and obj not in (case.TestCase, case.FunctionTestCase)
    ):
        return self.loadTestsFromTestCase(obj)
    elif (isinstance(obj, types.FunctionType) and
          isinstance(parent, type) and
          issubclass(parent, case.TestCase)):
        name = parts[-1]
        inst = parent(name)
        # static methods follow a different path
        if not isinstance(getattr(inst, name), types.FunctionType):
            return self.suiteClass([inst])
    elif isinstance(obj, suite.TestSuite):
        return obj
    if callable(obj):
        test = obj()
        if isinstance(test, suite.TestSuite):
            return test
        elif isinstance(test, case.TestCase):
            return self.suiteClass([test])
        else:
            raise TypeError("calling %s returned %s, not a test" %
                            (obj, test))
    else:
        raise TypeError("don't know how to make test from: %s" % obj)
def loadTestsFromNames(self, names, module=None):
    """Return a suite of all test cases found using the given sequence
    of string specifiers. See 'loadTestsFromName()'.
    """
    loaded = []
    for name in names:
        loaded.append(self.loadTestsFromName(name, module))
    return self.suiteClass(loaded)
def getTestCaseNames(self, testCaseClass):
    """Return a sorted sequence of method names found within testCaseClass
    """
    def shouldIncludeMethod(attrname):
        # Only attributes with the configured prefix can be tests.
        if not attrname.startswith(self.testMethodPrefix):
            return False
        testFunc = getattr(testCaseClass, attrname)
        if not callable(testFunc):
            return False
        # Plain %-interpolation: the original spelled this as an f-string
        # (f'%s.%s.%s') with no replacement fields, where the 'f' prefix
        # was meaningless -- dropped.
        fullName = '%s.%s.%s' % (
            testCaseClass.__module__, testCaseClass.__qualname__, attrname
        )
        return self.testNamePatterns is None or \
            any(fnmatchcase(fullName, pattern) for pattern in self.testNamePatterns)
    testFnNames = list(filter(shouldIncludeMethod, dir(testCaseClass)))
    if self.sortTestMethodsUsing:
        testFnNames.sort(key=functools.cmp_to_key(self.sortTestMethodsUsing))
    return testFnNames
def discover(self, start_dir, pattern='test*.py', top_level_dir=None):
    """Find and return all test modules from the specified start
    directory, recursing into subdirectories to find them and return all
    tests found within them. Only test files that match the pattern will
    be loaded. (Using shell style pattern matching.)

    All test modules must be importable from the top level of the project.
    If the start directory is not the top level directory then the top
    level directory must be specified separately.

    If a test package name (directory with '__init__.py') matches the
    pattern then the package will be checked for a 'load_tests' function. If
    this exists then it will be called with (loader, tests, pattern) unless
    the package has already had load_tests called from the same discovery
    invocation, in which case the package module object is not scanned for
    tests - this ensures that when a package uses discover to further
    discover child tests that infinite recursion does not happen.

    If load_tests exists then discovery does *not* recurse into the package,
    load_tests is responsible for loading all tests in the package.

    The pattern is deliberately not stored as a loader attribute so that
    packages can continue discovery themselves. top_level_dir is stored so
    load_tests does not need to pass this argument in to loader.discover().

    Paths are sorted before being imported to ensure reproducible execution
    order even on filesystems with non-alphabetical ordering like ext3/4.
    """
    original_top_level_dir = self._top_level_dir
    set_implicit_top = False
    if top_level_dir is None and self._top_level_dir is not None:
        # make top_level_dir optional if called from load_tests in a package
        top_level_dir = self._top_level_dir
    elif top_level_dir is None:
        set_implicit_top = True
        top_level_dir = start_dir
    top_level_dir = os.path.abspath(top_level_dir)
    if not top_level_dir in sys.path:
        # all test modules must be importable from the top level directory
        # should we *unconditionally* put the start directory in first
        # in sys.path to minimise likelihood of conflicts between installed
        # modules and development versions?
        sys.path.insert(0, top_level_dir)
    self._top_level_dir = top_level_dir
    is_not_importable = False
    is_namespace = False
    tests = []
    if os.path.isdir(os.path.abspath(start_dir)):
        start_dir = os.path.abspath(start_dir)
        if start_dir != top_level_dir:
            is_not_importable = not os.path.isfile(os.path.join(start_dir, '__init__.py'))
    else:
        # support for discovery from dotted module names
        try:
            __import__(start_dir)
        except ImportError:
            is_not_importable = True
        else:
            the_module = sys.modules[start_dir]
            if not hasattr(the_module, "__file__") or the_module.__file__ is None:
                # look for namespace packages
                try:
                    spec = the_module.__spec__
                except AttributeError:
                    spec = None
                if spec and spec.submodule_search_locations is not None:
                    is_namespace = True
                    # Discover independently within each namespace portion
                    # that lives under top_level_dir.
                    for path in the_module.__path__:
                        if (not set_implicit_top and
                            not path.startswith(top_level_dir)):
                            continue
                        self._top_level_dir = \
                            (path.split(the_module.__name__
                                     .replace(".", os.path.sep))[0])
                        tests.extend(self._find_tests(path, pattern, namespace=True))
                elif the_module.__name__ in sys.builtin_module_names:
                    # builtin module
                    raise TypeError('Can not use builtin modules '
                                    'as dotted module names') from None
                else:
                    raise TypeError(
                        f"don't know how to discover from {the_module!r}"
                        ) from None
            else:
                top_part = start_dir.split('.')[0]
                start_dir = os.path.abspath(os.path.dirname((the_module.__file__)))
            if set_implicit_top:
                if not is_namespace:
                    if sys.modules[top_part].__file__ is None:
                        self._top_level_dir = os.path.dirname(the_module.__file__)
                        if self._top_level_dir not in sys.path:
                            sys.path.insert(0, self._top_level_dir)
                else:
                    self._top_level_dir = \
                        self._get_directory_containing_module(top_part)
                # The guessed top_level_dir was wrong for a dotted start;
                # undo the earlier sys.path insertion.
                sys.path.remove(top_level_dir)
    if is_not_importable:
        raise ImportError('Start directory is not importable: %r' % start_dir)
    if not is_namespace:
        tests = list(self._find_tests(start_dir, pattern))
    # Restore state so nested/subsequent discover() calls start clean.
    self._top_level_dir = original_top_level_dir
    return self.suiteClass(tests)
def _get_directory_containing_module(self, module_name):
    """Return the directory that logically contains the named module."""
    module_file = os.path.abspath(sys.modules[module_name].__file__)
    containing = os.path.dirname(module_file)
    if os.path.basename(module_file).lower().startswith('__init__.py'):
        # A package: its containing directory is one level further up.
        return os.path.dirname(containing)
    # here we have been given a module rather than a package - so
    # all we can do is search the *same* directory the module is in
    # should an exception be raised instead
    return containing
def _get_name_from_path(self, path):
    """Convert a path below top_level_dir into a dotted module name."""
    if path == self._top_level_dir:
        return '.'
    normalized = _splitext(os.path.normpath(path))
    relative = os.path.relpath(normalized, self._top_level_dir)
    assert not os.path.isabs(relative), "Path must be within the project"
    assert not relative.startswith('..'), "Path must be within the project"
    return relative.replace(os.path.sep, '.')
def _get_module_from_name(self, name):
    """Import `name` and return the (sub)module object itself."""
    __import__(name)
    # __import__ returns the top-level package for dotted names, so fetch
    # the actual submodule from sys.modules.
    return sys.modules[name]
def _match_path(self, path, full_path, pattern):
    """Return True if `path` (a basename) matches the shell-style pattern."""
    # override this method to use alternative matching strategy
    return fnmatch(path, pattern)
def _find_tests(self, start_dir, pattern, namespace=False):
    """Used by discovery. Yields test suites it loads."""
    # Handle the __init__ in this package
    name = self._get_name_from_path(start_dir)
    # name is '.' when start_dir == top_level_dir (and top_level_dir is by
    # definition not a package).
    if name != '.' and name not in self._loading_packages:
        # name is in self._loading_packages while we have called into
        # loadTestsFromModule with name.
        tests, should_recurse = self._find_test_path(
            start_dir, pattern, namespace)
        if tests is not None:
            yield tests
        if not should_recurse:
            # Either an error occurred, or load_tests was used by the
            # package.
            return
    # Handle the contents.
    # Sorted so execution order is reproducible across filesystems.
    paths = sorted(os.listdir(start_dir))
    for path in paths:
        full_path = os.path.join(start_dir, path)
        tests, should_recurse = self._find_test_path(
            full_path, pattern, False)
        if tests is not None:
            yield tests
        if should_recurse:
            # we found a package that didn't use load_tests.
            name = self._get_name_from_path(full_path)
            self._loading_packages.add(name)
            try:
                yield from self._find_tests(full_path, pattern, False)
            finally:
                self._loading_packages.discard(name)
def _find_test_path(self, full_path, pattern, namespace=False):
    """Used by discovery.

    Loads tests from a single file, or a directories' __init__.py when
    passed the directory.

    Returns a tuple (None_or_tests_from_file, should_recurse).
    """
    basename = os.path.basename(full_path)
    if os.path.isfile(full_path):
        if not VALID_MODULE_NAME.match(basename):
            # valid Python identifiers only
            return None, False
        if not self._match_path(basename, full_path, pattern):
            return None, False
        # if the test file matches, load it
        name = self._get_name_from_path(full_path)
        try:
            module = self._get_module_from_name(name)
        except case.SkipTest as e:
            # A module-level SkipTest becomes a single skipped test.
            return _make_skipped_test(name, e, self.suiteClass), False
        except:
            # Deliberately bare: any import-time failure (including
            # SystemExit et al.) is converted into a failing test.
            error_case, error_message = \
                _make_failed_import_test(name, self.suiteClass)
            self.errors.append(error_message)
            return error_case, False
        else:
            # Guard against picking up a same-named module that was
            # actually imported from somewhere else on sys.path.
            mod_file = os.path.abspath(
                getattr(module, '__file__', full_path))
            realpath = _splitext(
                os.path.realpath(mod_file))
            fullpath_noext = _splitext(
                os.path.realpath(full_path))
            if realpath.lower() != fullpath_noext.lower():
                module_dir = os.path.dirname(realpath)
                mod_name = _splitext(
                    os.path.basename(full_path))
                expected_dir = os.path.dirname(full_path)
                msg = ("%r module incorrectly imported from %r. Expected "
                       "%r. Is this module globally installed?")
                raise ImportError(
                    msg % (mod_name, module_dir, expected_dir))
            return self.loadTestsFromModule(module, pattern=pattern), False
    elif os.path.isdir(full_path):
        if (not namespace and
            not os.path.isfile(os.path.join(full_path, '__init__.py'))):
            return None, False
        load_tests = None
        tests = None
        name = self._get_name_from_path(full_path)
        try:
            package = self._get_module_from_name(name)
        except case.SkipTest as e:
            return _make_skipped_test(name, e, self.suiteClass), False
        except:
            error_case, error_message = \
                _make_failed_import_test(name, self.suiteClass)
            self.errors.append(error_message)
            return error_case, False
        else:
            load_tests = getattr(package, 'load_tests', None)
            # Mark this package as being in load_tests (possibly ;))
            self._loading_packages.add(name)
            try:
                tests = self.loadTestsFromModule(package, pattern=pattern)
                if load_tests is not None:
                    # loadTestsFromModule(package) has loaded tests for us.
                    return tests, False
                return tests, True
            finally:
                self._loading_packages.discard(name)
    else:
        # Neither a file nor a directory (e.g. a broken symlink).
        return None, False
# Shared module-level loader instance used when no custom loader is supplied.
defaultTestLoader = TestLoader()
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
# For license information, please see license.txt
from __future__ import unicode_literals
import webnotes
import urllib
import json
import gdata
from apiclient.discovery import build
from oauth2client.file import Storage
from oauth2client.tools import run
import oauth2client.client
from oauth2client.client import Credentials
from oauth2client.client import OAuth2WebServerFlow
from oauth2client.client import flow_from_clientsecrets
from oauth2client.client import FlowExchangeError
import logging
from webnotes.model.doc import Document
# The URL root for accessing Google Accounts.
GOOGLE_ACCOUNTS_BASE_URL = 'https://accounts.google.com'

# Hardcoded dummy redirect URI for non-web apps.
# ('oob' = out-of-band: the auth code is displayed for manual copy/paste.)
REDIRECT_URI = 'urn:ietf:wg:oauth:2.0:oob'
class DocType:
    """Controller shim: webnotes hands in the document and its doclist."""

    def __init__(self, d, dl):
        # Keep references exactly as supplied by the framework.
        self.doc, self.doclist = d, dl
@webnotes.whitelist()
def get_verification_code(client_id, client_secret, app_name):
    """Return the Google consent-page URL for the Gmail scope.

    NOTE(review): client_secret and app_name are unused here but are part of
    the whitelisted signature called from the client -- confirm before
    removing them.
    """
    scope = 'https://mail.google.com/'
    webnotes.errprint(client_id)  # debug output, kept to preserve behavior
    return GeneratePermissionUrl(client_id, scope)
def GeneratePermissionUrl(client_id, scope='https://mail.google.com/'):
    """Build the OAuth2 authorization URL for the given client and scope."""
    params = {
        'client_id': client_id,
        'redirect_uri': REDIRECT_URI,
        'scope': scope,
        'response_type': 'code',
    }
    return '%s?%s' % (AccountsUrl('o/oauth2/auth'),
                      FormatUrlParams(params))
def FormatUrlParams(params):
    """URL-encode a mapping into a query string, sorted by key for stability.

    Fix: ``dict.iteritems`` is Python-2-only; ``items()`` with the same sort
    key behaves identically on Python 2 and 3 (keys are unique, so sorting
    by key gives the same order as before).
    """
    param_fragments = []
    for key, value in sorted(params.items(), key=lambda kv: kv[0]):
        param_fragments.append('%s=%s' % (key, UrlEscape(value)))
    return '&'.join(param_fragments)
def AccountsUrl(command):
    """Join `command` onto the Google Accounts base URL."""
    return '/'.join((GOOGLE_ACCOUNTS_BASE_URL, command))
def UrlEscape(text):
    """Percent-encode `text` for use in a URL query component.

    Only '~', '-', '.' and '_' are left unescaped.
    See OAUTH 5.1 for a definition of which characters need to be escaped.

    Fix: ``urllib.quote`` exists only on Python 2; fall back between
    ``urllib.parse.quote`` (py3) and ``urllib.quote`` (py2) so the helper
    works on both.
    """
    try:
        from urllib.parse import quote  # Python 3
    except ImportError:
        from urllib import quote  # Python 2
    return quote(text, safe='~-._')
# methods for generate_token
@webnotes.whitelist()
def generate_token(client_id, client_secret, authorization_code,user_name,app_name):
    # Whitelisted entry point: exchange the OAuth authorization code for
    # tokens and store them on the user's Profile.
    # NOTE(review): app_name is accepted but unused -- confirm the
    # client-side caller requires it before removing.
    AuthorizeTokens(client_id,client_secret,authorization_code,user_name)
@webnotes.whitelist()
def generate_token_calender(client_id, client_secret, authorization_code, app_name, user_name=None):
    """Exchange the calendar authorization code for credentials and persist them.

    Fix: generate_credentials() requires five arguments (user_name is
    forwarded to set_values_calender), but the old call passed only four and
    would always raise TypeError. user_name is now accepted here (default
    None keeps existing callers working) and passed through.
    """
    generate_credentials(client_id, client_secret, authorization_code, app_name, user_name)
def AuthorizeTokens(client_id, client_secret, authorization_code, user_name):
    """POST the authorization code to Google and store the resulting tokens
    on the user's Profile (via set_values)."""
    import urllib
    payload = urllib.urlencode({
        'client_id': client_id,
        'client_secret': client_secret,
        'code': authorization_code,
        'redirect_uri': REDIRECT_URI,
        'grant_type': 'authorization_code',
    })
    token_url = AccountsUrl('o/oauth2/token')
    response = json.loads(urllib.urlopen(token_url, payload).read())
    set_values(response["access_token"], response["refresh_token"], user_name)
def set_values(access_token, refresh_token, user_name):
    """Persist the Gmail OAuth tokens on the given user's Profile document."""
    profile = Document('Profile', user_name)
    profile.response = access_token
    profile.refresh_token = refresh_token
    profile.save()
@webnotes.whitelist()
def genearate_calendar_cred(client_id, client_secret, app_name):
    """Return the Google Calendar OAuth2 authorization URL.

    NOTE(review): the function name is misspelled ("genearate") but is kept
    as-is -- it is whitelisted and may be invoked by name from the client.
    """
    flow = get_gcalendar_flow(client_id, client_secret, app_name)
    return {
        "authorize_url": flow.step1_get_authorize_url(),
    }
def get_gcalendar_flow(client_id, client_secret, app_name):
    """Build the OAuth2 web-server flow for the Google Calendar scope.

    NOTE(review): when any argument is falsy this falls through and
    implicitly returns None -- confirm callers handle that (step1/step2
    on None would raise AttributeError).
    """
    from oauth2client.client import OAuth2WebServerFlow
    if client_secret and client_id and app_name:
        flow = OAuth2WebServerFlow(client_id=client_id,
            client_secret=client_secret,
            scope='https://www.googleapis.com/auth/calendar',
            redirect_uri='urn:ietf:wg:oauth:2.0:oob',
            user_agent=app_name)
        # 'https://www.googleapis.com/auth/calendar',
        return flow
@webnotes.whitelist()
def generate_credentials(client_id, client_secret, authorization_code, app_name, user_name):
    """Exchange an authorization code for calendar credentials and persist
    their JSON serialization on the user's Profile."""
    flow = get_gcalendar_flow(client_id, client_secret, app_name)
    if authorization_code:
        credentials = flow.step2_exchange(authorization_code)
        set_values_calender(credentials.to_json(), user_name)
def set_values_calender(final_credentials, user_name):
    """Store the serialized calendar credentials on the user's Profile."""
    profile = Document('Profile', user_name)
    # NOTE(review): "credentails" looks misspelled but must match the field
    # name on the Profile DocType -- confirm before renaming.
    profile.credentails = final_credentials
    profile.save()
"use strict";
/** @type {import("webpack").Configuration} */
const config = {
// mode: "development" || "production",
optimization: {
usedExports: true,
concatenateModules: true,
chunkIds: "named" // To keep filename consistent between different modes (for example building only)
}
};
module.exports = config; | javascript | github | https://github.com/webpack/webpack | examples/scope-hoisting/webpack.config.js |
"""
osgDB module
Part of osgpyplusplus python bindings for OpenSceneGraph C++ library
https://github.com/JaneliaSciComp/osgpyplusplus
The osgDB library provides support for reading and writing scene graphs,
providing a plugin framework and file utility classes.
The plugin framework is centered around the osgDB::Registry, and allows
plugins which provide specific file format support to be dynamically
loaded on demand.
"""
# modify PATH to be able to load DLLs from osgpyplusplus installation, like zlib and jpeg
import os
import platform
if platform.system() == "Windows":
    # Prepend the package directory so dependent DLLs resolve when the
    # binary extension below is imported.
    osgpypp_path = os.path.dirname(os.path.realpath(__file__))
    os.environ['PATH'] = osgpypp_path + ';' + os.environ['PATH']
    # print os.environ['PATH']

# osgDB depends on upstream modules, so always load these
from . import osgUtil

# delegate to binary module, created with pyplusplus and boost::python
from _osgDB import *

# Translate C++ typedefs into python: expose the callback types as
# attributes of Registry, mirroring the C++ nested-typedef spelling.
Registry.FindFileCallback = FindFileCallback
Registry.ReadFileCallback = ReadFileCallback
Registry.WriteFileCallback = WriteFileCallback
Registry.FileLocationCallback = FileLocationCallback
#include "ruby.h"
/*
 * Type descriptor for MyInteger wrapper objects.  The object carries no C
 * payload, so no mark/free/size callbacks are registered ({0} / 0 / 0);
 * RUBY_TYPED_FREE_IMMEDIATELY lets the GC reclaim it without deferring to
 * a finalizer queue.
 */
static const rb_data_type_t my_integer_type = {
    "MyInteger", {0}, 0, 0, RUBY_TYPED_FREE_IMMEDIATELY
};
/* MyInteger.new: wrap a typed data object with a NULL payload. */
static VALUE
my_integer_s_new(VALUE klass)
{
    return TypedData_Wrap_Struct(klass, &my_integer_type, 0);
}
/*
 * Extension entry point: define <klass>::MyInteger as a subclass of Integer
 * and give it a singleton `new` (plain Integer has no public allocator).
 */
void
Init_my_integer(VALUE klass)
{
    VALUE cMyInteger;
    cMyInteger = rb_define_class_under(klass, "MyInteger", rb_cInteger);
    rb_define_singleton_method(cMyInteger, "new", my_integer_s_new, 0);
} | c | github | https://github.com/ruby/ruby | ext/-test-/integer/my_integer.c
# -*- coding: utf-8 -*-
#
# Copyright 2015-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from helpers import LuigiTestCase, RunOnceTask, with_config
import luigi
import luigi.worker
import luigi.execution_summary
import threading
import datetime
import mock
from enum import Enum
class ExecutionSummaryTest(LuigiTestCase):
    def setUp(self):
        """Create a fresh scheduler/worker pair for every test.

        prune_on_get_work=False keeps task state in the scheduler after a
        get_work call, so the execution summary can still observe it.
        """
        super(ExecutionSummaryTest, self).setUp()
        self.scheduler = luigi.scheduler.Scheduler(prune_on_get_work=False)
        self.worker = luigi.worker.Worker(scheduler=self.scheduler)
    def run_task(self, task):
        """Schedule `task` (with its dependency graph) and run it to completion."""
        self.worker.add(task) # schedule
        self.worker.run() # run
    def summary_dict(self):
        """Return the raw category -> task-set mapping behind the summary."""
        return luigi.execution_summary._summary_dict(self.worker)
    def summary(self):
        """Return the human-readable multi-line execution summary string."""
        return luigi.execution_summary.summary(self.worker)
def test_all_statuses(self):
class Bar(luigi.Task):
num = luigi.IntParameter()
def run(self):
if self.num == 0:
raise ValueError()
def complete(self):
if self.num == 1:
return True
return False
class Foo(luigi.Task):
def requires(self):
for i in range(5):
yield Bar(i)
self.run_task(Foo())
d = self.summary_dict()
self.assertEqual({Bar(num=1)}, d['already_done'])
self.assertEqual({Bar(num=2), Bar(num=3), Bar(num=4)}, d['completed'])
self.assertEqual({Bar(num=0)}, d['failed'])
self.assertEqual({Foo()}, d['upstream_failure'])
self.assertFalse(d['upstream_missing_dependency'])
self.assertFalse(d['run_by_other_worker'])
self.assertFalse(d['still_pending_ext'])
summary = self.summary()
expected = ['',
'===== Luigi Execution Summary =====',
'',
'Scheduled 6 tasks of which:',
'* 1 present dependencies were encountered:',
' - 1 Bar(num=1)',
'* 3 ran successfully:',
' - 3 Bar(num=2,3,4)',
'* 1 failed:',
' - 1 Bar(num=0)',
'* 1 were left pending, among these:',
' * 1 had failed dependencies:',
' - 1 Foo()',
'',
'This progress looks :( because there were failed tasks',
'',
'===== Luigi Execution Summary =====',
'']
result = summary.split('\n')
self.assertEqual(len(result), len(expected))
for i, line in enumerate(result):
self.assertEqual(line, expected[i])
def test_batch_complete(self):
ran_tasks = set()
class MaxBatchTask(luigi.Task):
param = luigi.IntParameter(batch_method=max)
def run(self):
ran_tasks.add(self.param)
def complete(self):
return any(self.param <= ran_param for ran_param in ran_tasks)
class MaxBatches(luigi.WrapperTask):
def requires(self):
return map(MaxBatchTask, range(5))
self.run_task(MaxBatches())
d = self.summary_dict()
expected_completed = {
MaxBatchTask(0),
MaxBatchTask(1),
MaxBatchTask(2),
MaxBatchTask(3),
MaxBatchTask(4),
MaxBatches(),
}
self.assertEqual(expected_completed, d['completed'])
def test_batch_fail(self):
class MaxBatchFailTask(luigi.Task):
param = luigi.IntParameter(batch_method=max)
def run(self):
assert self.param < 4
def complete(self):
return False
class MaxBatches(luigi.WrapperTask):
def requires(self):
return map(MaxBatchFailTask, range(5))
self.run_task(MaxBatches())
d = self.summary_dict()
expected_failed = {
MaxBatchFailTask(0),
MaxBatchFailTask(1),
MaxBatchFailTask(2),
MaxBatchFailTask(3),
MaxBatchFailTask(4),
}
self.assertEqual(expected_failed, d['failed'])
def test_check_complete_error(self):
class Bar(luigi.Task):
def run(self):
pass
def complete(self):
raise Exception
return True
class Foo(luigi.Task):
def requires(self):
yield Bar()
self.run_task(Foo())
d = self.summary_dict()
self.assertEqual({Foo()}, d['still_pending_not_ext'])
self.assertEqual({Foo()}, d['upstream_scheduling_error'])
self.assertEqual({Bar()}, d['scheduling_error'])
self.assertFalse(d['not_run'])
self.assertFalse(d['already_done'])
self.assertFalse(d['completed'])
self.assertFalse(d['failed'])
self.assertFalse(d['upstream_failure'])
self.assertFalse(d['upstream_missing_dependency'])
self.assertFalse(d['run_by_other_worker'])
self.assertFalse(d['still_pending_ext'])
summary = self.summary()
expected = ['',
'===== Luigi Execution Summary =====',
'',
'Scheduled 2 tasks of which:',
'* 1 failed scheduling:',
' - 1 Bar()',
'* 1 were left pending, among these:',
" * 1 had dependencies whose scheduling failed:",
' - 1 Foo()',
'',
'Did not run any tasks',
'This progress looks :( because there were tasks whose scheduling failed',
'',
'===== Luigi Execution Summary =====',
'']
result = summary.split('\n')
self.assertEqual(len(result), len(expected))
for i, line in enumerate(result):
self.assertEqual(line, expected[i])
def test_not_run_error(self):
class Bar(luigi.Task):
def complete(self):
return True
class Foo(luigi.Task):
def requires(self):
yield Bar()
def new_func(*args, **kwargs):
return None
with mock.patch('luigi.scheduler.Scheduler.add_task', new_func):
self.run_task(Foo())
d = self.summary_dict()
self.assertEqual({Foo()}, d['still_pending_not_ext'])
self.assertEqual({Foo()}, d['not_run'])
self.assertEqual({Bar()}, d['already_done'])
self.assertFalse(d['upstream_scheduling_error'])
self.assertFalse(d['scheduling_error'])
self.assertFalse(d['completed'])
self.assertFalse(d['failed'])
self.assertFalse(d['upstream_failure'])
self.assertFalse(d['upstream_missing_dependency'])
self.assertFalse(d['run_by_other_worker'])
self.assertFalse(d['still_pending_ext'])
summary = self.summary()
expected = ['',
'===== Luigi Execution Summary =====',
'',
'Scheduled 2 tasks of which:',
'* 1 present dependencies were encountered:',
' - 1 Bar()',
'* 1 were left pending, among these:',
" * 1 was not granted run permission by the scheduler:",
' - 1 Foo()',
'',
'Did not run any tasks',
'This progress looks :| because there were tasks that were not granted run permission by the scheduler',
'',
'===== Luigi Execution Summary =====',
'']
result = summary.split('\n')
self.assertEqual(len(result), len(expected))
for i, line in enumerate(result):
self.assertEqual(line, expected[i])
def test_deps_error(self):
class Bar(luigi.Task):
def run(self):
pass
def complete(self):
return True
class Foo(luigi.Task):
def requires(self):
raise Exception
yield Bar()
self.run_task(Foo())
d = self.summary_dict()
self.assertEqual({Foo()}, d['scheduling_error'])
self.assertFalse(d['upstream_scheduling_error'])
self.assertFalse(d['not_run'])
self.assertFalse(d['already_done'])
self.assertFalse(d['completed'])
self.assertFalse(d['failed'])
self.assertFalse(d['upstream_failure'])
self.assertFalse(d['upstream_missing_dependency'])
self.assertFalse(d['run_by_other_worker'])
self.assertFalse(d['still_pending_ext'])
summary = self.summary()
expected = ['',
'===== Luigi Execution Summary =====',
'',
'Scheduled 1 tasks of which:',
'* 1 failed scheduling:',
' - 1 Foo()',
'',
'Did not run any tasks',
'This progress looks :( because there were tasks whose scheduling failed',
'',
'===== Luigi Execution Summary =====',
'']
result = summary.split('\n')
self.assertEqual(len(result), len(expected))
for i, line in enumerate(result):
self.assertEqual(line, expected[i])
@with_config({'execution_summary': {'summary_length': '1'}})
def test_config_summary_limit(self):
class Bar(luigi.Task):
num = luigi.IntParameter()
def run(self):
pass
def complete(self):
return True
class Biz(Bar):
pass
class Bat(Bar):
pass
class Wut(Bar):
pass
class Foo(luigi.Task):
def requires(self):
yield Bat(1)
yield Wut(1)
yield Biz(1)
for i in range(4):
yield Bar(i)
def complete(self):
return False
self.run_task(Foo())
d = self.summary_dict()
self.assertEqual({Bat(1), Wut(1), Biz(1), Bar(0), Bar(1), Bar(2), Bar(3)}, d['already_done'])
self.assertEqual({Foo()}, d['completed'])
self.assertFalse(d['failed'])
self.assertFalse(d['upstream_failure'])
self.assertFalse(d['upstream_missing_dependency'])
self.assertFalse(d['run_by_other_worker'])
self.assertFalse(d['still_pending_ext'])
summary = self.summary()
expected = ['',
'===== Luigi Execution Summary =====',
'',
'Scheduled 8 tasks of which:',
'* 7 present dependencies were encountered:',
' - 4 Bar(num=0...3)',
' ...',
'* 1 ran successfully:',
' - 1 Foo()',
'',
'This progress looks :) because there were no failed tasks or missing external dependencies',
'',
'===== Luigi Execution Summary =====',
'']
result = summary.split('\n')
self.assertEqual(len(result), len(expected))
for i, line in enumerate(result):
self.assertEqual(line, expected[i])
def test_upstream_not_running(self):
class ExternalBar(luigi.ExternalTask):
num = luigi.IntParameter()
def complete(self):
if self.num == 1:
return True
return False
class Bar(luigi.Task):
num = luigi.IntParameter()
def run(self):
if self.num == 0:
raise ValueError()
class Foo(luigi.Task):
def requires(self):
for i in range(5):
yield ExternalBar(i)
yield Bar(i)
self.run_task(Foo())
d = self.summary_dict()
self.assertEqual({ExternalBar(num=1)}, d['already_done'])
self.assertEqual({Bar(num=1), Bar(num=2), Bar(num=3), Bar(num=4)}, d['completed'])
self.assertEqual({Bar(num=0)}, d['failed'])
self.assertEqual({Foo()}, d['upstream_failure'])
self.assertEqual({Foo()}, d['upstream_missing_dependency'])
self.assertFalse(d['run_by_other_worker'])
self.assertEqual({ExternalBar(num=0), ExternalBar(num=2), ExternalBar(num=3), ExternalBar(num=4)}, d['still_pending_ext'])
s = self.summary()
self.assertIn('\n* 1 present dependencies were encountered:\n - 1 ExternalBar(num=1)\n', s)
self.assertIn('\n* 4 ran successfully:\n - 4 Bar(num=1...4)\n', s)
self.assertIn('\n* 1 failed:\n - 1 Bar(num=0)\n', s)
self.assertIn('\n* 5 were left pending, among these:\n * 4 were missing external dependencies:\n - 4 ExternalBar(num=', s)
self.assertIn('\n * 1 had failed dependencies:\n'
' - 1 Foo()\n'
' * 1 had missing external dependencies:\n'
' - 1 Foo()\n\n'
'This progress looks :( because there were failed tasks\n', s)
self.assertNotIn('\n\n\n', s)
def test_already_running(self):
lock1 = threading.Lock()
lock2 = threading.Lock()
class ParentTask(RunOnceTask):
def requires(self):
yield LockTask()
class LockTask(RunOnceTask):
def run(self):
lock2.release()
lock1.acquire()
self.comp = True
lock1.acquire()
lock2.acquire()
other_worker = luigi.worker.Worker(scheduler=self.scheduler, worker_id="other_worker")
other_worker.add(ParentTask())
t1 = threading.Thread(target=other_worker.run)
t1.start()
lock2.acquire()
self.run_task(ParentTask())
lock1.release()
t1.join()
d = self.summary_dict()
self.assertEqual({LockTask()}, d['run_by_other_worker'])
self.assertEqual({ParentTask()}, d['upstream_run_by_other_worker'])
s = self.summary()
self.assertIn('\nScheduled 2 tasks of which:\n'
'* 2 were left pending, among these:\n'
' * 1 were being run by another worker:\n'
' - 1 LockTask()\n'
' * 1 had dependencies that were being run by other worker:\n'
' - 1 ParentTask()\n', s)
self.assertIn('\n\nThe other workers were:\n'
' - other_worker ran 1 tasks\n\n'
'Did not run any tasks\n'
'This progress looks :) because there were no failed '
'tasks or missing external dependencies\n', s)
self.assertNotIn('\n\n\n', s)
def test_already_running_2(self):
class AlreadyRunningTask(luigi.Task):
def run(self):
pass
other_worker = luigi.worker.Worker(scheduler=self.scheduler, worker_id="other_worker")
other_worker.add(AlreadyRunningTask()) # This also registers this worker
old_func = luigi.scheduler.Scheduler.get_work
def new_func(*args, **kwargs):
new_kwargs = kwargs.copy()
new_kwargs['worker'] = 'other_worker'
old_func(*args, **new_kwargs)
return old_func(*args, **kwargs)
with mock.patch('luigi.scheduler.Scheduler.get_work', new_func):
self.run_task(AlreadyRunningTask())
d = self.summary_dict()
self.assertFalse(d['already_done'])
self.assertFalse(d['completed'])
self.assertFalse(d['not_run'])
self.assertEqual({AlreadyRunningTask()}, d['run_by_other_worker'])
def test_not_run(self):
class AlreadyRunningTask(luigi.Task):
def run(self):
pass
other_worker = luigi.worker.Worker(scheduler=self.scheduler, worker_id="other_worker")
other_worker.add(AlreadyRunningTask()) # This also registers this worker
old_func = luigi.scheduler.Scheduler.get_work
def new_func(*args, **kwargs):
kwargs['current_tasks'] = None
old_func(*args, **kwargs)
return old_func(*args, **kwargs)
with mock.patch('luigi.scheduler.Scheduler.get_work', new_func):
self.run_task(AlreadyRunningTask())
d = self.summary_dict()
self.assertFalse(d['already_done'])
self.assertFalse(d['completed'])
self.assertFalse(d['run_by_other_worker'])
self.assertEqual({AlreadyRunningTask()}, d['not_run'])
s = self.summary()
self.assertIn('\nScheduled 1 tasks of which:\n'
'* 1 were left pending, among these:\n'
' * 1 was not granted run permission by the scheduler:\n'
' - 1 AlreadyRunningTask()\n', s)
self.assertNotIn('\n\n\n', s)
    def test_somebody_else_finish_task(self):
        # Two workers schedule the same RunOnceTask; the other worker runs
        # first and completes it.  From this worker's perspective the task
        # was neither already done at scheduling time nor completed by us:
        # it must land in 'not_run' (scheduler denied run permission), and
        # notably NOT in 'run_by_other_worker'.
        class SomeTask(RunOnceTask):
            pass
        other_worker = luigi.worker.Worker(scheduler=self.scheduler, worker_id="other_worker")
        self.worker.add(SomeTask())
        other_worker.add(SomeTask())
        other_worker.run()
        self.worker.run()
        d = self.summary_dict()
        self.assertFalse(d['already_done'])
        self.assertFalse(d['completed'])
        self.assertFalse(d['run_by_other_worker'])
        self.assertEqual({SomeTask()}, d['not_run'])
def test_somebody_else_disables_task(self):
class SomeTask(luigi.Task):
def complete(self):
return False
def run(self):
raise ValueError()
other_worker = luigi.worker.Worker(scheduler=self.scheduler, worker_id="other_worker")
self.worker.add(SomeTask())
other_worker.add(SomeTask())
other_worker.run() # Assuming it is disabled for a while after this
self.worker.run()
d = self.summary_dict()
self.assertFalse(d['already_done'])
self.assertFalse(d['completed'])
self.assertFalse(d['run_by_other_worker'])
self.assertEqual({SomeTask()}, d['not_run'])
def test_larger_tree(self):
class Dog(RunOnceTask):
def requires(self):
yield Cat(2)
class Cat(luigi.Task):
num = luigi.IntParameter()
def __init__(self, *args, **kwargs):
super(Cat, self).__init__(*args, **kwargs)
self.comp = False
def run(self):
if self.num == 2:
raise ValueError()
self.comp = True
def complete(self):
if self.num == 1:
return True
else:
return self.comp
class Bar(RunOnceTask):
num = luigi.IntParameter()
def requires(self):
if self.num == 0:
yield ExternalBar()
yield Cat(0)
if self.num == 1:
yield Cat(0)
yield Cat(1)
if self.num == 2:
yield Dog()
class Foo(luigi.Task):
def requires(self):
for i in range(3):
yield Bar(i)
class ExternalBar(luigi.ExternalTask):
def complete(self):
return False
self.run_task(Foo())
d = self.summary_dict()
self.assertEqual({Cat(num=1)}, d['already_done'])
self.assertEqual({Cat(num=0), Bar(num=1)}, d['completed'])
self.assertEqual({Cat(num=2)}, d['failed'])
self.assertEqual({Dog(), Bar(num=2), Foo()}, d['upstream_failure'])
self.assertEqual({Bar(num=0), Foo()}, d['upstream_missing_dependency'])
self.assertFalse(d['run_by_other_worker'])
self.assertEqual({ExternalBar()}, d['still_pending_ext'])
s = self.summary()
self.assertNotIn('\n\n\n', s)
def test_with_dates(self):
""" Just test that it doesn't crash with date params """
start = datetime.date(1998, 3, 23)
class Bar(RunOnceTask):
date = luigi.DateParameter()
class Foo(luigi.Task):
def requires(self):
for i in range(10):
new_date = start + datetime.timedelta(days=i)
yield Bar(date=new_date)
self.run_task(Foo())
d = self.summary_dict()
exp_set = {Bar(start + datetime.timedelta(days=i)) for i in range(10)}
exp_set.add(Foo())
self.assertEqual(exp_set, d['completed'])
s = self.summary()
self.assertIn('date=1998-0', s)
self.assertIn('Scheduled 11 tasks', s)
self.assertIn('Luigi Execution Summary', s)
self.assertNotIn('00:00:00', s)
self.assertNotIn('\n\n\n', s)
def test_with_ranges_minutes(self):
start = datetime.datetime(1998, 3, 23, 1, 50)
class Bar(RunOnceTask):
time = luigi.DateMinuteParameter()
class Foo(luigi.Task):
def requires(self):
for i in range(300):
new_time = start + datetime.timedelta(minutes=i)
yield Bar(time=new_time)
self.run_task(Foo())
d = self.summary_dict()
exp_set = {Bar(start + datetime.timedelta(minutes=i)) for i in range(300)}
exp_set.add(Foo())
self.assertEqual(exp_set, d['completed'])
s = self.summary()
self.assertIn('Bar(time=1998-03-23T0150...1998-03-23T0649)', s)
self.assertNotIn('\n\n\n', s)
def test_with_ranges_one_param(self):
class Bar(RunOnceTask):
num = luigi.IntParameter()
class Foo(luigi.Task):
def requires(self):
for i in range(11):
yield Bar(i)
self.run_task(Foo())
d = self.summary_dict()
exp_set = {Bar(i) for i in range(11)}
exp_set.add(Foo())
self.assertEqual(exp_set, d['completed'])
s = self.summary()
self.assertIn('Bar(num=0...10)', s)
self.assertNotIn('\n\n\n', s)
def test_with_ranges_multiple_params(self):
class Bar(RunOnceTask):
num1 = luigi.IntParameter()
num2 = luigi.IntParameter()
num3 = luigi.IntParameter()
class Foo(luigi.Task):
def requires(self):
for i in range(5):
yield Bar(5, i, 25)
self.run_task(Foo())
d = self.summary_dict()
exp_set = {Bar(5, i, 25) for i in range(5)}
exp_set.add(Foo())
self.assertEqual(exp_set, d['completed'])
s = self.summary()
self.assertIn('- 5 Bar(num1=5, num2=0...4, num3=25)', s)
self.assertNotIn('\n\n\n', s)
def test_with_two_tasks(self):
class Bar(RunOnceTask):
num = luigi.IntParameter()
num2 = luigi.IntParameter()
class Foo(luigi.Task):
def requires(self):
for i in range(2):
yield Bar(i, 2 * i)
self.run_task(Foo())
d = self.summary_dict()
self.assertEqual({Foo(), Bar(num=0, num2=0), Bar(num=1, num2=2)}, d['completed'])
summary = self.summary()
result = summary.split('\n')
expected = ['',
'===== Luigi Execution Summary =====',
'',
'Scheduled 3 tasks of which:',
'* 3 ran successfully:',
' - 2 Bar(num=0, num2=0) and Bar(num=1, num2=2)',
' - 1 Foo()',
'',
'This progress looks :) because there were no failed tasks or missing external dependencies',
'',
'===== Luigi Execution Summary =====',
'']
self.assertEqual(len(result), len(expected))
for i, line in enumerate(result):
self.assertEqual(line, expected[i])
def test_really_long_param_name(self):
class Bar(RunOnceTask):
This_is_a_really_long_parameter_that_we_should_not_print_out_because_people_will_get_annoyed = luigi.IntParameter()
class Foo(luigi.Task):
def requires(self):
yield Bar(0)
self.run_task(Foo())
s = self.summary()
self.assertIn('Bar(...)', s)
self.assertNotIn("Did not run any tasks", s)
self.assertNotIn('\n\n\n', s)
def test_multiple_params_multiple_same_task_family(self):
class Bar(RunOnceTask):
num = luigi.IntParameter()
num2 = luigi.IntParameter()
class Foo(luigi.Task):
def requires(self):
for i in range(4):
yield Bar(i, 2 * i)
self.run_task(Foo())
summary = self.summary()
result = summary.split('\n')
expected = ['',
'===== Luigi Execution Summary =====',
'',
'Scheduled 5 tasks of which:',
'* 5 ran successfully:',
' - 4 Bar(num=0, num2=0) ...',
' - 1 Foo()',
'',
'This progress looks :) because there were no failed tasks or missing external dependencies',
'',
'===== Luigi Execution Summary =====',
'']
self.assertEqual(len(result), len(expected))
for i, line in enumerate(result):
self.assertEqual(line, expected[i])
def test_happy_smiley_face_normal(self):
class Bar(RunOnceTask):
num = luigi.IntParameter()
num2 = luigi.IntParameter()
class Foo(luigi.Task):
def requires(self):
for i in range(4):
yield Bar(i, 2 * i)
self.run_task(Foo())
s = self.summary()
self.assertIn('\nThis progress looks :) because there were no failed tasks or missing external dependencies', s)
self.assertNotIn("Did not run any tasks", s)
self.assertNotIn('\n\n\n', s)
def test_happy_smiley_face_other_workers(self):
lock1 = threading.Lock()
lock2 = threading.Lock()
class ParentTask(RunOnceTask):
def requires(self):
yield LockTask()
class LockTask(RunOnceTask):
def run(self):
lock2.release()
lock1.acquire()
self.comp = True
lock1.acquire()
lock2.acquire()
other_worker = luigi.worker.Worker(scheduler=self.scheduler, worker_id="other_worker")
other_worker.add(ParentTask())
t1 = threading.Thread(target=other_worker.run)
t1.start()
lock2.acquire()
self.run_task(ParentTask())
lock1.release()
t1.join()
s = self.summary()
self.assertIn('\nThis progress looks :) because there were no failed tasks or missing external dependencies', s)
self.assertNotIn('\n\n\n', s)
def test_sad_smiley_face(self):
class ExternalBar(luigi.ExternalTask):
def complete(self):
return False
class Bar(luigi.Task):
num = luigi.IntParameter()
def run(self):
if self.num == 0:
raise ValueError()
class Foo(luigi.Task):
def requires(self):
for i in range(5):
yield Bar(i)
yield ExternalBar()
self.run_task(Foo())
s = self.summary()
self.assertIn('\nThis progress looks :( because there were failed tasks', s)
self.assertNotIn("Did not run any tasks", s)
self.assertNotIn('\n\n\n', s)
def test_neutral_smiley_face(self):
class ExternalBar(luigi.ExternalTask):
def complete(self):
return False
class Foo(luigi.Task):
def requires(self):
yield ExternalBar()
self.run_task(Foo())
s = self.summary()
self.assertIn('\nThis progress looks :| because there were missing external dependencies', s)
self.assertNotIn('\n\n\n', s)
def test_did_not_run_any_tasks(self):
class ExternalBar(luigi.ExternalTask):
num = luigi.IntParameter()
def complete(self):
if self.num == 5:
return True
return False
class Foo(luigi.Task):
def requires(self):
for i in range(10):
yield ExternalBar(i)
self.run_task(Foo())
d = self.summary_dict()
self.assertEqual({ExternalBar(5)}, d['already_done'])
self.assertEqual({ExternalBar(i) for i in range(10) if i != 5}, d['still_pending_ext'])
self.assertEqual({Foo()}, d['upstream_missing_dependency'])
s = self.summary()
self.assertIn('\n\nDid not run any tasks\nThis progress looks :| because there were missing external dependencies', s)
self.assertNotIn('\n\n\n', s)
def test_example(self):
class MyExternal(luigi.ExternalTask):
def complete(self):
return False
class Boom(luigi.Task):
this_is_a_really_long_I_mean_way_too_long_and_annoying_parameter = luigi.IntParameter()
def requires(self):
for i in range(5, 200):
yield Bar(i)
class Foo(luigi.Task):
num = luigi.IntParameter()
num2 = luigi.IntParameter()
def requires(self):
yield MyExternal()
yield Boom(0)
class Bar(luigi.Task):
num = luigi.IntParameter()
def complete(self):
return True
class DateTask(luigi.Task):
date = luigi.DateParameter()
num = luigi.IntParameter()
def requires(self):
yield MyExternal()
yield Boom(0)
class EntryPoint(luigi.Task):
def requires(self):
for i in range(10):
yield Foo(100, 2 * i)
for i in range(10):
yield DateTask(datetime.date(1998, 3, 23) + datetime.timedelta(days=i), 5)
self.run_task(EntryPoint())
summary = self.summary()
expected = ['',
'===== Luigi Execution Summary =====',
'',
'Scheduled 218 tasks of which:',
'* 195 present dependencies were encountered:',
' - 195 Bar(num=5...199)',
'* 1 ran successfully:',
' - 1 Boom(...)',
'* 22 were left pending, among these:',
' * 1 were missing external dependencies:',
' - 1 MyExternal()',
' * 21 had missing external dependencies:',
' - 10 DateTask(date=1998-03-23...1998-04-01, num=5)',
' - 1 EntryPoint()',
' - 10 Foo(num=100, num2=0) ...',
'',
'This progress looks :| because there were missing external dependencies',
'',
'===== Luigi Execution Summary =====',
'']
result = summary.split('\n')
self.assertEqual(len(result), len(expected))
for i, line in enumerate(result):
self.assertEqual(line, expected[i])
def test_with_datehours(self):
""" Just test that it doesn't crash with datehour params """
start = datetime.datetime(1998, 3, 23, 5)
class Bar(RunOnceTask):
datehour = luigi.DateHourParameter()
class Foo(luigi.Task):
def requires(self):
for i in range(10):
new_date = start + datetime.timedelta(hours=i)
yield Bar(datehour=new_date)
self.run_task(Foo())
d = self.summary_dict()
exp_set = {Bar(start + datetime.timedelta(hours=i)) for i in range(10)}
exp_set.add(Foo())
self.assertEqual(exp_set, d['completed'])
s = self.summary()
self.assertIn('datehour=1998-03-23T0', s)
self.assertIn('Scheduled 11 tasks', s)
self.assertIn('Luigi Execution Summary', s)
self.assertNotIn('00:00:00', s)
self.assertNotIn('\n\n\n', s)
def test_with_months(self):
""" Just test that it doesn't crash with month params """
start = datetime.datetime(1998, 3, 23)
class Bar(RunOnceTask):
month = luigi.MonthParameter()
class Foo(luigi.Task):
def requires(self):
for i in range(3):
new_date = start + datetime.timedelta(days=30*i)
yield Bar(month=new_date)
self.run_task(Foo())
d = self.summary_dict()
exp_set = {Bar(start + datetime.timedelta(days=30*i)) for i in range(3)}
exp_set.add(Foo())
self.assertEqual(exp_set, d['completed'])
s = self.summary()
self.assertIn('month=1998-0', s)
self.assertIn('Scheduled 4 tasks', s)
self.assertIn('Luigi Execution Summary', s)
self.assertNotIn('00:00:00', s)
self.assertNotIn('\n\n\n', s)
def test_multiple_dash_dash_workers(self):
"""
Don't print own worker with ``--workers 2`` setting.
"""
self.worker = luigi.worker.Worker(scheduler=self.scheduler, worker_processes=2)
class Foo(RunOnceTask):
pass
self.run_task(Foo())
d = self.summary_dict()
self.assertEqual(set(), d['run_by_other_worker'])
s = self.summary()
self.assertNotIn('The other workers were', s)
self.assertIn('This progress looks :) because there were no failed ', s)
self.assertNotIn('\n\n\n', s)
def test_with_uncomparable_parameters(self):
"""
Don't rely on parameters being sortable
"""
class Color(Enum):
red = 1
yellow = 2
class Bar(RunOnceTask):
eparam = luigi.EnumParameter(enum=Color)
class Baz(RunOnceTask):
eparam = luigi.EnumParameter(enum=Color)
another_param = luigi.IntParameter()
class Foo(luigi.Task):
def requires(self):
yield Bar(Color.red)
yield Bar(Color.yellow)
yield Baz(Color.red, 5)
yield Baz(Color.yellow, 5)
self.run_task(Foo())
s = self.summary()
self.assertIn('yellow', s)
def test_with_dict_dependency(self):
""" Just test that it doesn't crash with dict params in dependencies """
args = dict(start=datetime.date(1998, 3, 23), num=3)
class Bar(RunOnceTask):
args = luigi.DictParameter()
class Foo(luigi.Task):
def requires(self):
for i in range(10):
new_dict = args.copy()
new_dict['start'] = str(new_dict['start'] + datetime.timedelta(days=i))
yield Bar(args=new_dict)
self.run_task(Foo())
d = self.summary_dict()
exp_set = set()
for i in range(10):
new_dict = args.copy()
new_dict['start'] = str(new_dict['start'] + datetime.timedelta(days=i))
exp_set.add(Bar(new_dict))
exp_set.add(Foo())
self.assertEqual(exp_set, d['completed'])
s = self.summary()
self.assertIn('"num": 3', s)
self.assertIn('"start": "1998-0', s)
self.assertIn('Scheduled 11 tasks', s)
self.assertIn('Luigi Execution Summary', s)
self.assertNotIn('00:00:00', s)
self.assertNotIn('\n\n\n', s)
def test_with_dict_argument(self):
""" Just test that it doesn't crash with dict params """
args = dict(start=str(datetime.date(1998, 3, 23)), num=3)
class Bar(RunOnceTask):
args = luigi.DictParameter()
self.run_task(Bar(args=args))
d = self.summary_dict()
exp_set = set()
exp_set.add(Bar(args=args))
self.assertEqual(exp_set, d['completed'])
s = self.summary()
self.assertIn('"num": 3', s)
self.assertIn('"start": "1998-0', s)
self.assertIn('Scheduled 1 task', s)
self.assertIn('Luigi Execution Summary', s)
self.assertNotIn('00:00:00', s)
self.assertNotIn('\n\n\n', s)
"""
Test that a task once crashing and then succeeding should be counted as no failure.
"""
def test_status_with_task_retry(self):
class Foo(luigi.Task):
run_count = 0
def run(self):
self.run_count += 1
if self.run_count == 1:
raise ValueError()
def complete(self):
return self.run_count > 0
self.run_task(Foo())
self.run_task(Foo())
d = self.summary_dict()
self.assertEqual({Foo()}, d['completed'])
self.assertEqual({Foo()}, d['ever_failed'])
self.assertFalse(d['failed'])
self.assertFalse(d['upstream_failure'])
self.assertFalse(d['upstream_missing_dependency'])
self.assertFalse(d['run_by_other_worker'])
self.assertFalse(d['still_pending_ext'])
s = self.summary()
self.assertIn('Scheduled 1 task', s)
self.assertIn('Luigi Execution Summary', s)
self.assertNotIn('ever failed', s)
self.assertIn('\n\nThis progress looks :) because there were failed tasks but they all suceeded in a retry', s) | unknown | codeparrot/codeparrot-clean | ||
# -*- encoding: utf-8 -*-
# Module iacorr
def iacorr(X, Y):
    """Pearson correlation coefficient between image X and image(s) Y.

    If X and Y have the same number of elements, returns the scalar
    correlation of the two flattened arrays.  If Y stacks n images of X's
    shape along axis 0 (Y.ndim == X.ndim + 1), returns a length-n array
    with the correlation of X against each image in Y.

    :param X: ndarray, reference image
    :param Y: ndarray, image of X.size elements or a stack of such images
    :raises ValueError: for any other shape combination.  (BUG FIX: the
        original code fell through both branches with cov/v1/v2 unbound
        and crashed with UnboundLocalError at the return statement.)
    """
    import numpy as np
    if X.size == Y.size:
        # Scalar case: flatten both operands and apply the standard
        # covariance / sqrt(var*var) formula.
        X = X.ravel()
        Y = Y.ravel()
        Xc = X - X.mean()
        Yc = Y - Y.mean()
        cov = (Xc * Yc).mean()
        v1 = (Xc * Xc).mean()
        v2 = (Yc * Yc).mean()
    elif Y.ndim == X.ndim + 1:
        # Batched case: correlate X against each of the n stacked images.
        n = Y.shape[0]                           # number of images in Y
        X = X.ravel().reshape(1, -1)             # shape (1, m)
        Y = Y.reshape(n, -1)                     # shape (n, m)
        Xc = X - X.mean()                        # shape (1, m)
        Yc = Y - Y.mean(axis=1, keepdims=True)   # broadcast -> (n, m)
        cov = (Xc * Yc).mean(axis=1)             # reduction -> (n,)
        v1 = (Xc * Xc).mean()                    # scalar
        v2 = (Yc * Yc).mean(axis=1)              # reduction -> (n,)
    else:
        raise ValueError(
            "Y must have the same size as X, or stack images of X's shape "
            "along axis 0 (Y.ndim == X.ndim + 1)")
    return cov / (np.sqrt(v1 * v2))
#!/usr/bin/env python
#Copyright (C) 2012 by Glenn Hickey (hickey@soe.ucsc.edu)
#
#Released under the MIT license, see LICENSE.txt
import unittest
import sys
import os
import copy
from contigSim.src.model import Model
from contigSim.src.contig import LinearContig
from contigSim.src.contig import CircularContig
from sonLib.bioio import TestStatus
from sonLib.bioio import system
from sonLib.bioio import getLogLevelString
class TestCase(unittest.TestCase):
    """Smoke tests for contigSim's Model: starting-state construction and
    event-driven simulation over a pool of linear/circular contigs."""
    def setUp(self):
        self.testNo = TestStatus.getTestSetup()
        self.tempFiles = []
        unittest.TestCase.setUp(self)
    def tearDown(self):
        # Remove any temp files a test registered in self.tempFiles.
        for tempFile in self.tempFiles:
            os.remove(tempFile)
        unittest.TestCase.tearDown(self)
    def testModelInit(self):
        # A new model starts with an empty pool; setStartingState populates
        # it, and pool weight tracks the first setParameters argument.
        model = Model()
        assert model.pool.size() == 0
        model.setParameters(100, 0.1)
        model.setStartingState(0, 21, 3)
        assert model.pool.size() == 24
        assert model.pool.weight() == 100
        # Verify the 21 linear / 3 circular split of the created contigs.
        linCount = 0
        cirCount = 0
        for contig in model.pool.dataElements():
            if type(contig) == LinearContig:
                linCount += 1
            elif type(contig) == CircularContig:
                cirCount += 1
            else:
                assert False
        assert linCount == 21
        assert cirCount == 3
        # Re-seeding replaces the previous pool contents.
        model.setStartingState(1, 3, 0)
        assert model.pool.size() == 4
        assert model.pool.weight() == 100
        model.setParameters(55, 0.1)
        # NOTE(review): size 5 for args (11, 0, 4) implies the first arg is
        # not a plain contig count -- confirm against Model's documentation.
        model.setStartingState(11, 0, 4)
        assert model.pool.size() == 5
        assert model.pool.weight() == 55
    def testSimulateCircularPool(self):
        # Simulation must advance the event clock to exactly the requested
        # time; pool size stays within the expected bounds.
        model = Model()
        model.setParameters(100, 0.1)
        model.setStartingState(0, 0, 2)
        model.simulate(100)
        assert model.eventQueue.time == 100
        assert model.pool.size() >= 1 and model.pool.size() <= 102
    def testSimulateMixedPool(self):
        # Same clock/size invariants on a larger, all-linear starting pool.
        model = Model()
        model.setParameters(10000, 0.000001)
        model.setStartingState(0, 100, 0)
        model.simulate(100000)
        assert model.eventQueue.time == 100000
        assert model.pool.size() >= 1 and model.pool.size() <= 1000
    def testSimulateAllParams(self):
        # Exercises the full 7-parameter signature; asserts nothing beyond
        # "does not crash".
        model = Model()
        model.setParameters(10000, 0.00001, 0.00001, 0.00001, 0.1, 0.1, 0.5)
        model.setStartingState(100, 30, 30)
        model.simulate(100000)
def main():
    """Entry point: run this module's unittest suite.

    BUG FIX: the original first called parseCactusSuiteTestOptions(), a name
    that is never imported anywhere in this module, so main() always raised
    NameError before a single test ran.  The call is dropped; argv is still
    trimmed so unittest.main() does not try to parse leftover CLI options.
    """
    sys.argv = sys.argv[:1]
    unittest.main()
if __name__ == '__main__':
main() | unknown | codeparrot/codeparrot-clean | ||
# coding=utf-8
from __future__ import absolute_import
from octoprint.printer.estimation import TimeEstimationHelper
__author__ = "Gina Häußge <osd@foosel.net>"
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
__copyright__ = "Copyright (C) 2014 The OctoPrint Project - Released under terms of the AGPLv3 License"
import unittest
from ddt import ddt, data, unpack
import octoprint.printer
@ddt
class EstimationTestCase(unittest.TestCase):
    """
    Tests for TimeEstimationHelper's rolling print-time estimation.

    setUp builds a dynamic subclass of TimeEstimationHelper so the
    stability thresholds are small enough for short input sequences.
    """

    def setUp(self):
        # Dynamically subclass TimeEstimationHelper with tighter stability
        # parameters, then instantiate it (note the trailing () call).
        self.estimation_helper = type(TimeEstimationHelper)(TimeEstimationHelper.__name__, (TimeEstimationHelper,), {
            'STABLE_THRESHOLD': 0.1,
            'STABLE_ROLLING_WINDOW': 3,
            'STABLE_COUNTDOWN': 1
        })()

    @data(
        ((1.0, 2.0, 3.0, 4.0, 5.0), 3.0),
        ((1.0, 2.0, 0.0, 1.0, 2.0), 1.2),
        ((1.0, -2.0, -1.0, -2.0, 3.0), -0.2)
    )
    @unpack
    def test_average_total(self, estimates, expected):
        # average_total is the running mean over all estimates seen so far.
        for estimate in estimates:
            self.estimation_helper.update(estimate)
        # assertEqual: assertEquals is a deprecated alias (removed in 3.12).
        self.assertEqual(self.estimation_helper.average_total, expected)

    @data(
        ((1.0, 2.0), None),               # not enough values, have 1, need 3
        ((1.0, 2.0, 3.0), None),          # not enough values, have 2, need 3
        ((1.0, 2.0, 3.0, 4.0), 0.5),      # average totals: 1.0, 1.5, 2.0, 2.5 => (3 * 0.5 / 3 = 0.5
        ((1.0, 2.0, 3.0, 4.0, 5.0), 0.5), # average totals: 1.0, 1.5, 2.0, 2.5, 3.0 => (0.5 + 0.5 + 0.5) / 3 = 0.5
        ((1.0, 2.0, 0.0, 1.0, 2.0), 0.7 / 3) # average totals: 1.0, 1.5, 1.0, 1.0, 1.2 => (0.5 + 0.0 + 0.2) / 3 = 0.7 / 3
    )
    @unpack
    def test_average_distance(self, estimates, expected):
        # average_distance is the mean step between consecutive average
        # totals over the rolling window; None until the window is full.
        for estimate in estimates:
            self.estimation_helper.update(estimate)
        self.assertEqual(self.estimation_helper.average_distance, expected)

    @data(
        ((1.0, 1.0), None),
        ((1.0, 1.0, 1.0), 1.0),
        ((1.0, 2.0, 3.0, 4.0, 5.0), 4.0),
    )
    @unpack
    def test_average_total_rolling(self, estimates, expected):
        # average_total_rolling only covers the last STABLE_ROLLING_WINDOW
        # estimates; None before the window is filled.
        for estimate in estimates:
            self.estimation_helper.update(estimate)
        self.assertEqual(self.estimation_helper.average_total_rolling, expected)

    @data(
        ((1.0, 1.0, 1.0, 1.0), False),     # average totals: 1.0, 1.0, 1.0, 1.0 => 3.0 / 3 = 1.0
        ((1.0, 1.0, 1.0, 1.0, 1.0), True), # average totals: 1.0, 1.0, 1.0, 1.0, 1.0 => 0.0 / 3 = 0.0
        ((1.0, 2.0, 3.0, 4.0, 5.0), False),  # average totals: 1.0, 1.5, 2.0, 2.5, 3.0 => 1.5 / 3 = 0.5
        ((0.0, 0.09, 0.18, 0.27, 0.36), True)  # average totals: 0.0, 0.045, 0.09, 0.135, 0.18 => (0.045 + 0.045 + 0.045) / 3 = 0.045
    )
    @unpack
    def test_is_stable(self, estimates, expected):
        # Stability: average distance must stay below STABLE_THRESHOLD for
        # STABLE_COUNTDOWN consecutive updates.
        for estimate in estimates:
            self.estimation_helper.update(estimate)
        self.assertEqual(self.estimation_helper.is_stable(), expected)
# -*- test-case-name: twisted.web.test.test_xmlrpc -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for XML-RPC support in L{twisted.web.xmlrpc}.
"""
import datetime
import xmlrpclib
from StringIO import StringIO
from twisted.trial import unittest
from twisted.web import xmlrpc
from twisted.web.xmlrpc import (
XMLRPC, payloadTemplate, addIntrospection, _QueryFactory, Proxy, withRequest)
from twisted.web import server, static, client, error, http
from twisted.internet import reactor, defer
from twisted.internet.error import ConnectionDone
from twisted.python import failure
from twisted.test.proto_helpers import MemoryReactor
from twisted.web.test.test_web import DummyRequest
try:
import twisted.internet.ssl
except ImportError:
sslSkip = "OpenSSL not present"
else:
sslSkip = None
class AsyncXMLRPCTests(unittest.TestCase):
    """
    Tests for L{XMLRPC}'s support of Deferreds.
    """
    def setUp(self):
        # Build a POST request whose body is an XML-RPC call to 'async';
        # the resource implementation returns a Deferred under our control.
        self.request = DummyRequest([''])
        self.request.method = 'POST'
        self.request.content = StringIO(
            payloadTemplate % ('async', xmlrpclib.dumps(())))

        result = self.result = defer.Deferred()

        class AsyncResource(XMLRPC):
            def xmlrpc_async(self):
                return result

        self.resource = AsyncResource()

    def test_deferredResponse(self):
        """
        If an L{XMLRPC} C{xmlrpc_*} method returns a L{defer.Deferred}, the
        response to the request is the result of that L{defer.Deferred}.
        """
        self.resource.render(self.request)
        # Nothing written until the Deferred fires.
        self.assertEquals(self.request.written, [])
        self.result.callback("result")
        resp = xmlrpclib.loads("".join(self.request.written))
        self.assertEquals(resp, (('result',), None))
        self.assertEquals(self.request.finished, 1)

    def test_interruptedDeferredResponse(self):
        """
        While waiting for the L{Deferred} returned by an L{XMLRPC} C{xmlrpc_*}
        method to fire, the connection the request was issued over may close.
        If this happens, neither C{write} nor C{finish} is called on the
        request.
        """
        self.resource.render(self.request)
        # Simulate the connection closing before the result arrives.
        self.request.processingFailed(
            failure.Failure(ConnectionDone("Simulated")))
        self.result.callback("result")
        self.assertEquals(self.request.written, [])
        self.assertEquals(self.request.finished, 0)
class TestRuntimeError(RuntimeError):
    """Distinct RuntimeError subclass so tests can flush exactly these errors."""
class TestValueError(ValueError):
    """Distinct ValueError subclass so tests can flush exactly these errors."""
class Test(XMLRPC):
    # XML-RPC resource exposing a grab-bag of methods used by the tests below.
    # NOTE: several docstrings (or their deliberate absence) are fixtures for
    # the introspection tests — do not edit them.
    # If you add xmlrpc_ methods to this class, go change test_listMethods
    # below.

    FAILURE = 666
    NOT_FOUND = 23
    SESSION_EXPIRED = 42

    def xmlrpc_echo(self, arg):
        return arg

    # the doc string is part of the test
    def xmlrpc_add(self, a, b):
        """
        This function add two numbers.
        """
        return a + b

    xmlrpc_add.signature = [['int', 'int', 'int'],
                            ['double', 'double', 'double']]

    # the doc string is part of the test
    def xmlrpc_pair(self, string, num):
        """
        This function puts the two arguments in an array.
        """
        return [string, num]

    xmlrpc_pair.signature = [['array', 'string', 'int']]

    # the doc string is part of the test
    def xmlrpc_defer(self, x):
        """Help for defer."""
        return defer.succeed(x)

    def xmlrpc_deferFail(self):
        return defer.fail(TestValueError())

    # don't add a doc string, it's part of the test
    def xmlrpc_fail(self):
        raise TestRuntimeError

    def xmlrpc_fault(self):
        return xmlrpc.Fault(12, "hello")

    def xmlrpc_deferFault(self):
        return defer.fail(xmlrpc.Fault(17, "hi"))

    def xmlrpc_complex(self):
        return {"a": ["b", "c", 12, []], "D": "foo"}

    def xmlrpc_dict(self, map, key):
        return map[key]
    xmlrpc_dict.help = 'Help for dict.'

    @withRequest
    def xmlrpc_withRequest(self, request, other):
        """
        A method decorated with L{withRequest} which can be called by
        a test to verify that the request object really is passed as
        an argument.
        """
        return (
            # as a proof that request is a request
            request.method +
            # plus proof other arguments are still passed along
            ' ' + other)

    def _getFunction(self, functionPath):
        # Unknown SESSION* method names are converted into a
        # SESSION_EXPIRED fault; other unknown names propagate.
        try:
            return XMLRPC._getFunction(self, functionPath)
        except xmlrpc.NoSuchFunction:
            if functionPath.startswith("SESSION"):
                raise xmlrpc.Fault(self.SESSION_EXPIRED,
                                   "Session non-existant/expired.")
            else:
                raise
class TestAuthHeader(Test):
    """
    This is used to get the header info so that we can test
    authentication.
    """
    def __init__(self):
        Test.__init__(self)
        # Last request rendered; captured so xmlrpc_authinfo can read it.
        self.request = None

    def render(self, request):
        # Remember the request before delegating to the base resource.
        self.request = request
        return Test.render(self, request)

    def xmlrpc_authinfo(self):
        # Echo back the credentials the server extracted from the request.
        return self.request.getUser(), self.request.getPassword()
class TestQueryProtocol(xmlrpc.QueryProtocol):
    """
    QueryProtocol for tests that saves headers received inside the factory.
    """
    def connectionMade(self):
        # Expose the transport on the factory so tests can inspect
        # its connected/disconnected state later.
        self.factory.transport = self.transport
        xmlrpc.QueryProtocol.connectionMade(self)

    def handleHeader(self, key, val):
        # Record every response header, lower-cased, on the factory.
        self.factory.headers[key.lower()] = val
class TestQueryFactory(xmlrpc._QueryFactory):
    """
    QueryFactory using L{TestQueryProtocol} for saving headers.
    """
    protocol = TestQueryProtocol

    def __init__(self, *args, **kwargs):
        # Headers captured by TestQueryProtocol.handleHeader.
        self.headers = {}
        xmlrpc._QueryFactory.__init__(self, *args, **kwargs)
class TestQueryFactoryCancel(xmlrpc._QueryFactory):
    """
    QueryFactory that saves a reference to the
    L{twisted.internet.interfaces.IConnector} to test connection lost.
    """
    def startedConnecting(self, connector):
        # Keep the connector so tests can assert on its state after cancel.
        self.connector = connector
class XMLRPCTestCase(unittest.TestCase):
    # End-to-end client/server tests: a real Site serving Test() is bound
    # to an ephemeral local TCP port and queried through xmlrpc.Proxy.

    def setUp(self):
        # Listen on port 0 so the OS picks a free port; record it.
        self.p = reactor.listenTCP(0, server.Site(Test()),
                                   interface="127.0.0.1")
        self.port = self.p.getHost().port
        self.factories = []

    def tearDown(self):
        self.factories = []
        return self.p.stopListening()

    def queryFactory(self, *args, **kwargs):
        """
        Specific queryFactory for proxy that uses our custom
        L{TestQueryFactory}, and save factories.
        """
        factory = TestQueryFactory(*args, **kwargs)
        self.factories.append(factory)
        return factory

    def proxy(self, factory=None):
        """
        Return a new xmlrpc.Proxy for the test site created in
        setUp(), using the given factory as the queryFactory, or
        self.queryFactory if no factory is provided.
        """
        p = xmlrpc.Proxy("http://127.0.0.1:%d/" % self.port)
        if factory is None:
            p.queryFactory = self.queryFactory
        else:
            p.queryFactory = factory
        return p

    def test_results(self):
        # Round-trip a handful of (method, args, expected-result) triples.
        inputOutput = [
            ("add", (2, 3), 5),
            ("defer", ("a",), "a"),
            ("dict", ({"a": 1}, "a"), 1),
            ("pair", ("a", 1), ["a", 1]),
            ("complex", (), {"a": ["b", "c", 12, []], "D": "foo"})]

        dl = []
        for meth, args, outp in inputOutput:
            d = self.proxy().callRemote(meth, *args)
            d.addCallback(self.assertEquals, outp)
            dl.append(d)
        return defer.DeferredList(dl, fireOnOneErrback=True)

    def test_errors(self):
        """
        Verify that for each way a method exposed via XML-RPC can fail, the
        correct 'Content-type' header is set in the response and that the
        client-side Deferred is errbacked with an appropriate C{Fault}
        instance.
        """
        dl = []
        for code, methodName in [(666, "fail"), (666, "deferFail"),
                                 (12, "fault"), (23, "noSuchMethod"),
                                 (17, "deferFault"), (42, "SESSION_TEST")]:
            d = self.proxy().callRemote(methodName)
            d = self.assertFailure(d, xmlrpc.Fault)
            # Bind code via default argument so each lambda keeps its own.
            d.addCallback(lambda exc, code=code:
                          self.assertEquals(exc.faultCode, code))
            dl.append(d)
        d = defer.DeferredList(dl, fireOnOneErrback=True)
        def cb(ign):
            for factory in self.factories:
                self.assertEquals(factory.headers['content-type'],
                                  'text/xml')
            self.flushLoggedErrors(TestRuntimeError, TestValueError)
        d.addCallback(cb)
        return d

    def test_cancel(self):
        """
        A deferred from the Proxy can be cancelled, disconnecting
        the L{twisted.internet.interfaces.IConnector}.
        """
        def factory(*args, **kw):
            factory.f = TestQueryFactoryCancel(*args, **kw)
            return factory.f
        d = self.proxy(factory).callRemote('add', 2, 3)
        self.assertNotEquals(factory.f.connector.state, "disconnected")
        d.cancel()
        self.assertEquals(factory.f.connector.state, "disconnected")
        d = self.assertFailure(d, defer.CancelledError)
        return d

    def test_errorGet(self):
        """
        A classic GET on the xml server should return a NOT_ALLOWED.
        """
        d = client.getPage("http://127.0.0.1:%d/" % (self.port,))
        d = self.assertFailure(d, error.Error)
        d.addCallback(
            lambda exc: self.assertEquals(int(exc.args[0]), http.NOT_ALLOWED))
        return d

    def test_errorXMLContent(self):
        """
        Test that an invalid XML input returns an L{xmlrpc.Fault}.
        """
        d = client.getPage("http://127.0.0.1:%d/" % (self.port,),
                           method="POST", postdata="foo")
        def cb(result):
            self.assertRaises(xmlrpc.Fault, xmlrpclib.loads, result)
        d.addCallback(cb)
        return d

    def test_datetimeRoundtrip(self):
        """
        If an L{xmlrpclib.DateTime} is passed as an argument to an XML-RPC
        call and then returned by the server unmodified, the result should
        be equal to the original object.
        """
        when = xmlrpclib.DateTime()
        d = self.proxy().callRemote("echo", when)
        d.addCallback(self.assertEqual, when)
        return d

    def test_doubleEncodingError(self):
        """
        If it is not possible to encode a response to the request (for example,
        because L{xmlrpclib.dumps} raises an exception when encoding a
        L{Fault}) the exception which prevents the response from being
        generated is logged and the request object is finished anyway.
        """
        d = self.proxy().callRemote("echo", "")

        # *Now* break xmlrpclib.dumps.  Hopefully the client already used it.
        def fakeDumps(*args, **kwargs):
            raise RuntimeError("Cannot encode anything at all!")
        self.patch(xmlrpclib, 'dumps', fakeDumps)

        # It doesn't matter how it fails, so long as it does.  Also, it happens
        # to fail with an implementation detail exception right now, not
        # something suitable as part of a public interface.
        d = self.assertFailure(d, Exception)

        def cbFailed(ignored):
            # The fakeDumps exception should have been logged.
            self.assertEqual(len(self.flushLoggedErrors(RuntimeError)), 1)
        d.addCallback(cbFailed)
        return d

    def test_closeConnectionAfterRequest(self):
        """
        The connection to the web server is closed when the request is done.
        """
        d = self.proxy().callRemote('echo', '')
        def responseDone(ignored):
            [factory] = self.factories
            self.assertFalse(factory.transport.connected)
            self.assertTrue(factory.transport.disconnected)
        return d.addCallback(responseDone)

    def test_tcpTimeout(self):
        """
        For I{HTTP} URIs, L{xmlrpc.Proxy.callRemote} passes the value it
        received for the C{connectTimeout} parameter as the C{timeout} argument
        to the underlying connectTCP call.
        """
        reactor = MemoryReactor()
        proxy = xmlrpc.Proxy("http://127.0.0.1:69", connectTimeout=2.0,
                             reactor=reactor)
        proxy.callRemote("someMethod")
        self.assertEquals(reactor.tcpClients[0][3], 2.0)

    def test_sslTimeout(self):
        """
        For I{HTTPS} URIs, L{xmlrpc.Proxy.callRemote} passes the value it
        received for the C{connectTimeout} parameter as the C{timeout} argument
        to the underlying connectSSL call.
        """
        reactor = MemoryReactor()
        proxy = xmlrpc.Proxy("https://127.0.0.1:69", connectTimeout=3.0,
                             reactor=reactor)
        proxy.callRemote("someMethod")
        self.assertEquals(reactor.sslClients[0][4], 3.0)
    # Skipped entirely when OpenSSL is unavailable.
    test_sslTimeout.skip = sslSkip
class XMLRPCTestCase2(XMLRPCTestCase):
    """
    Test with proxy that doesn't add a slash.
    """
    def proxy(self, factory=None):
        # Same as XMLRPCTestCase.proxy but the URL has no trailing '/'.
        p = xmlrpc.Proxy("http://127.0.0.1:%d" % self.port)
        if factory is None:
            p.queryFactory = self.queryFactory
        else:
            p.queryFactory = factory
        return p
class SerializationConfigMixin:
    """
    Mixin which defines a couple tests which should pass when a particular flag
    is passed to L{XMLRPC}.

    These are not meant to be exhaustive serialization tests, since L{xmlrpclib}
    does all of the actual serialization work.  They are just meant to exercise
    a few codepaths to make sure we are calling into xmlrpclib correctly.

    @ivar flagName: A C{str} giving the name of the flag which must be passed to
        L{XMLRPC} to allow the tests to pass.  Subclasses should set this.

    @ivar value: A value which the specified flag will allow the serialization
        of.  Subclasses should set this.
    """
    def setUp(self):
        """
        Create a new XML-RPC server with C{allowNone} set to C{True}.
        """
        # Pass the subclass-selected flag to both server and proxy.
        kwargs = {self.flagName: True}
        self.p = reactor.listenTCP(
            0, server.Site(Test(**kwargs)), interface="127.0.0.1")
        self.addCleanup(self.p.stopListening)
        self.port = self.p.getHost().port
        self.proxy = xmlrpc.Proxy(
            "http://127.0.0.1:%d/" % (self.port,), **kwargs)

    def test_roundtripValue(self):
        """
        C{self.value} can be round-tripped over an XMLRPC method call/response.
        """
        d = self.proxy.callRemote('defer', self.value)
        d.addCallback(self.assertEquals, self.value)
        return d

    def test_roundtripNestedValue(self):
        """
        A C{dict} which contains C{self.value} can be round-tripped over an
        XMLRPC method call/response.
        """
        d = self.proxy.callRemote('defer', {'a': self.value})
        d.addCallback(self.assertEquals, {'a': self.value})
        return d
class XMLRPCAllowNoneTestCase(SerializationConfigMixin, unittest.TestCase):
    """
    Tests for passing C{None} when the C{allowNone} flag is set.
    """
    # Flag forwarded to both XMLRPC server and Proxy by the mixin's setUp.
    flagName = "allowNone"
    value = None
# Probe whether this xmlrpclib supports marshalling datetime objects
# (the use_datetime flag appeared in Python 2.5's xmlrpclib).
try:
    xmlrpclib.loads(xmlrpclib.dumps(({}, {})), use_datetime=True)
except TypeError:
    _datetimeSupported = False
else:
    _datetimeSupported = True
class XMLRPCUseDateTimeTestCase(SerializationConfigMixin, unittest.TestCase):
    """
    Tests for passing a C{datetime.datetime} instance when the C{useDateTime}
    flag is set.
    """
    flagName = "useDateTime"
    value = datetime.datetime(2000, 12, 28, 3, 45, 59)

    # Skip on interpreters whose xmlrpclib lacks use_datetime support.
    if not _datetimeSupported:
        skip = (
            "Available version of xmlrpclib does not support datetime "
            "objects.")
class XMLRPCDisableUseDateTimeTestCase(unittest.TestCase):
    """
    Tests for the C{useDateTime} flag on Python 2.4.
    """
    # Only meaningful where xmlrpclib does NOT support use_datetime.
    if _datetimeSupported:
        skip = (
            "Available version of xmlrpclib supports datetime objects.")

    def test_cannotInitializeWithDateTime(self):
        """
        L{XMLRPC} raises L{RuntimeError} if passed C{True} for C{useDateTime}.
        """
        self.assertRaises(RuntimeError, XMLRPC, useDateTime=True)
        self.assertRaises(
            RuntimeError, Proxy, "http://localhost/", useDateTime=True)

    def test_cannotSetDateTime(self):
        """
        Setting L{XMLRPC.useDateTime} to C{True} after initialization raises
        L{RuntimeError}.
        """
        xmlrpc = XMLRPC(useDateTime=False)
        self.assertRaises(RuntimeError, setattr, xmlrpc, "useDateTime", True)
        proxy = Proxy("http://localhost/", useDateTime=False)
        self.assertRaises(RuntimeError, setattr, proxy, "useDateTime", True)
class XMLRPCTestAuthenticated(XMLRPCTestCase):
    """
    Test with authenticated proxy. We run this with the same input/output as
    above.
    """
    user = "username"
    password = "asecret"

    def setUp(self):
        # Serve TestAuthHeader so the request's credentials can be echoed back.
        self.p = reactor.listenTCP(0, server.Site(TestAuthHeader()),
                                   interface="127.0.0.1")
        self.port = self.p.getHost().port
        self.factories = []

    def test_authInfoInURL(self):
        # Credentials embedded directly in the URL.
        p = xmlrpc.Proxy("http://%s:%s@127.0.0.1:%d/" % (
            self.user, self.password, self.port))
        d = p.callRemote("authinfo")
        d.addCallback(self.assertEquals, [self.user, self.password])
        return d

    def test_explicitAuthInfo(self):
        # Credentials supplied as Proxy constructor arguments.
        p = xmlrpc.Proxy("http://127.0.0.1:%d/" % (
            self.port,), self.user, self.password)
        d = p.callRemote("authinfo")
        d.addCallback(self.assertEquals, [self.user, self.password])
        return d

    def test_explicitAuthInfoOverride(self):
        # Explicit constructor credentials take precedence over URL ones.
        p = xmlrpc.Proxy("http://wrong:info@127.0.0.1:%d/" % (
            self.port,), self.user, self.password)
        d = p.callRemote("authinfo")
        d.addCallback(self.assertEquals, [self.user, self.password])
        return d
class XMLRPCTestIntrospection(XMLRPCTestCase):
    # Tests the system.listMethods / system.methodHelp /
    # system.methodSignature endpoints added by addIntrospection.

    def setUp(self):
        # NOTE: local name 'xmlrpc' shadows the imported module inside setUp.
        xmlrpc = Test()
        addIntrospection(xmlrpc)
        self.p = reactor.listenTCP(0, server.Site(xmlrpc),interface="127.0.0.1")
        self.port = self.p.getHost().port
        self.factories = []

    def test_listMethods(self):

        def cbMethods(meths):
            meths.sort()
            self.assertEqual(
                meths,
                ['add', 'complex', 'defer', 'deferFail',
                 'deferFault', 'dict', 'echo', 'fail', 'fault',
                 'pair', 'system.listMethods',
                 'system.methodHelp',
                 'system.methodSignature', 'withRequest'])

        d = self.proxy().callRemote("system.listMethods")
        d.addCallback(cbMethods)
        return d

    def test_methodHelp(self):
        # Help strings come from .help attributes or docstrings;
        # methods without either report an empty string.
        inputOutputs = [
            ("defer", "Help for defer."),
            ("fail", ""),
            ("dict", "Help for dict.")]

        dl = []
        for meth, expected in inputOutputs:
            d = self.proxy().callRemote("system.methodHelp", meth)
            d.addCallback(self.assertEquals, expected)
            dl.append(d)
        return defer.DeferredList(dl, fireOnOneErrback=True)

    def test_methodSignature(self):
        # Signatures come from .signature attributes; absent ones are "".
        inputOutputs = [
            ("defer", ""),
            ("add", [['int', 'int', 'int'],
                     ['double', 'double', 'double']]),
            ("pair", [['array', 'string', 'int']])]

        dl = []
        for meth, expected in inputOutputs:
            d = self.proxy().callRemote("system.methodSignature", meth)
            d.addCallback(self.assertEquals, expected)
            dl.append(d)
        return defer.DeferredList(dl, fireOnOneErrback=True)
class XMLRPCClientErrorHandling(unittest.TestCase):
    """
    Test error handling on the xmlrpc client.
    """
    def setUp(self):
        # Serve a static text resource that is not a valid XML-RPC response.
        self.resource = static.Data(
            "This text is not a valid XML-RPC response.",
            "text/plain")
        self.resource.isLeaf = True
        self.port = reactor.listenTCP(0, server.Site(self.resource),
                                      interface='127.0.0.1')

    def tearDown(self):
        return self.port.stopListening()

    def test_erroneousResponse(self):
        """
        Test that calling the xmlrpc client on a static http server raises
        an exception.
        """
        proxy = xmlrpc.Proxy("http://127.0.0.1:%d/" %
                             (self.port.getHost().port,))
        return self.assertFailure(proxy.callRemote("someMethod"), Exception)
class TestQueryFactoryParseResponse(unittest.TestCase):
    """
    Test the behaviour of L{_QueryFactory.parseResponse}.
    """

    def setUp(self):
        # The _QueryFactory that we are testing. We don't care about any
        # of the constructor parameters.
        self.queryFactory = _QueryFactory(
            path=None, host=None, method='POST', user=None, password=None,
            allowNone=False, args=())

        # An XML-RPC response that will parse without raising an error.
        self.goodContents = xmlrpclib.dumps(('',))
        # An 'XML-RPC response' that will raise a parsing error.
        self.badContents = 'invalid xml'
        # A dummy 'reason' to pass to clientConnectionLost. We don't care
        # what it is.
        self.reason = failure.Failure(ConnectionDone())

    def test_parseResponseCallbackSafety(self):
        """
        We can safely call L{_QueryFactory.clientConnectionLost} as a callback
        of L{_QueryFactory.parseResponse}.
        """
        d = self.queryFactory.deferred
        # The failure mode is that this callback raises an AlreadyCalled
        # error. We have to add it now so that it gets called synchronously
        # and triggers the race condition.
        d.addCallback(self.queryFactory.clientConnectionLost, self.reason)
        self.queryFactory.parseResponse(self.goodContents)
        return d

    def test_parseResponseErrbackSafety(self):
        """
        We can safely call L{_QueryFactory.clientConnectionLost} as an errback
        of L{_QueryFactory.parseResponse}.
        """
        d = self.queryFactory.deferred
        # The failure mode is that this callback raises an AlreadyCalled
        # error. We have to add it now so that it gets called synchronously
        # and triggers the race condition.
        d.addErrback(self.queryFactory.clientConnectionLost, self.reason)
        self.queryFactory.parseResponse(self.badContents)
        return d

    def test_badStatusErrbackSafety(self):
        """
        We can safely call L{_QueryFactory.clientConnectionLost} as an errback
        of L{_QueryFactory.badStatus}.
        """
        d = self.queryFactory.deferred
        # The failure mode is that this callback raises an AlreadyCalled
        # error. We have to add it now so that it gets called synchronously
        # and triggers the race condition.
        d.addErrback(self.queryFactory.clientConnectionLost, self.reason)
        self.queryFactory.badStatus('status', 'message')
        return d

    def test_parseResponseWithoutData(self):
        """
        Some server can send a response without any data:
        L{_QueryFactory.parseResponse} should catch the error and call the
        result errback.
        """
        content = """
<methodResponse>
<params>
<param>
</param>
</params>
</methodResponse>"""
        d = self.queryFactory.deferred
        self.queryFactory.parseResponse(content)
        return self.assertFailure(d, IndexError)
class XMLRPCTestWithRequest(unittest.TestCase):
    # Exercises the @withRequest decorator via a rendered DummyRequest.

    def setUp(self):
        self.resource = Test()

    def test_withRequest(self):
        """
        When an XML-RPC method is called and the implementation is
        decorated with L{withRequest}, the request object is passed as
        the first argument.
        """
        request = DummyRequest('/RPC2')
        request.method = "POST"
        request.content = StringIO(xmlrpclib.dumps(("foo",), 'withRequest'))
        def valid(n, request):
            # The method returns "<HTTP method> <argument>".
            data = xmlrpclib.loads(request.written[0])
            self.assertEquals(data, (('POST foo',), None))
        d = request.notifyFinish().addCallback(valid, request)
        self.resource.render_POST(request)
        return d
#! /usr/bin/env python
"""RFC 3548: Base16, Base32, Base64 Data Encodings"""
# Modified 04-Oct-1995 by Jack Jansen to use binascii module
# Modified 30-Dec-2003 by Barry Warsaw to add full RFC 3548 support
import re
import struct
import binascii
__all__ = [
    # Legacy interface exports traditional RFC 1521 Base64 encodings
    'encode', 'decode', 'encodestring', 'decodestring',
    # Generalized interface for other encodings
    'b64encode', 'b64decode', 'b32encode', 'b32decode',
    'b16encode', 'b16decode',
    # Standard Base64 encoding
    'standard_b64encode', 'standard_b64decode',
    # Some common Base64 alternatives.  As referenced by RFC 3458, see thread
    # starting at:
    #
    # http://zgp.org/pipermail/p2p-hackers/2001-September/000316.html
    'urlsafe_b64encode', 'urlsafe_b64decode',
    ]

# Identity mapping over all 256 byte values; _translate() copies and
# patches it to remap individual characters.
_translation = [chr(_x) for _x in range(256)]
EMPTYSTRING = ''
def _translate(s, altchars):
    # Build a 256-entry translation table from the identity mapping, with
    # the characters named in altchars remapped, then apply it to s.
    # (Python 2 str.translate expects a 256-character string table.)
    translation = _translation[:]
    for k, v in altchars.items():
        translation[ord(k)] = v
    return s.translate(''.join(translation))
# Base64 encoding/decoding uses binascii
def b64encode(s, altchars=None):
    """Encode a string using Base64.

    s is the string to encode.  Optional altchars must be a string of at
    least length 2 (additional characters are ignored) which specifies an
    alternative alphabet for the '+' and '/' characters.  This allows an
    application to e.g. generate url or filesystem safe Base64 strings.

    The encoded string is returned.
    """
    # b2a_base64 appends a newline; drop it.
    encoded = binascii.b2a_base64(s)[:-1]
    if altchars is None:
        return encoded
    return _translate(encoded, {'+': altchars[0], '/': altchars[1]})
def b64decode(s, altchars=None):
    """Decode a Base64 encoded string.

    s is the string to decode.  Optional altchars must be a string of at least
    length 2 (additional characters are ignored) which specifies the
    alternative alphabet used instead of the '+' and '/' characters.

    The decoded string is returned.  A TypeError is raised if s were
    incorrectly padded or if there are non-alphabet characters present in the
    string.
    """
    # Map alternative alphabet characters back to '+' and '/' first.
    if altchars is not None:
        s = _translate(s, {altchars[0]: '+', altchars[1]: '/'})
    try:
        return binascii.a2b_base64(s)
    except binascii.Error, msg:
        # Transform this exception for consistency
        raise TypeError(msg)
def standard_b64encode(s):
    """Encode a string using the standard Base64 alphabet.

    s is the string to encode.  The encoded string is returned.
    """
    # Thin wrapper: no altchars means the standard '+'/'/' alphabet.
    return b64encode(s)
def standard_b64decode(s):
    """Decode a string encoded with the standard Base64 alphabet.

    s is the string to decode.  The decoded string is returned.  A TypeError
    is raised if the string is incorrectly padded or if there are non-alphabet
    characters present in the string.
    """
    return b64decode(s)
def urlsafe_b64encode(s):
    """Encode a string using a url-safe Base64 alphabet.

    s is the string to encode.  The encoded string is returned.  The alphabet
    uses '-' instead of '+' and '_' instead of '/'.
    """
    return b64encode(s, '-_')
def urlsafe_b64decode(s):
    """Decode a string encoded with the url-safe Base64 alphabet.

    s is the string to decode.  The decoded string is returned.  A TypeError
    is raised if the string is incorrectly padded or if there are non-alphabet
    characters present in the string.

    The alphabet uses '-' instead of '+' and '_' instead of '/'.
    """
    return b64decode(s, '-_')
# Base32 encoding/decoding must be done in Python
# RFC 3548 Base32 alphabet: 5-bit value -> letter/digit.
_b32alphabet = {
    0: 'A',  9: 'J', 18: 'S', 27: '3',
    1: 'B', 10: 'K', 19: 'T', 28: '4',
    2: 'C', 11: 'L', 20: 'U', 29: '5',
    3: 'D', 12: 'M', 21: 'V', 30: '6',
    4: 'E', 13: 'N', 22: 'W', 31: '7',
    5: 'F', 14: 'O', 23: 'X',
    6: 'G', 15: 'P', 24: 'Y',
    7: 'H', 16: 'Q', 25: 'Z',
    8: 'I', 17: 'R', 26: '2',
    }

# Forward table: index by 5-bit value to get the letter.
_b32tab = _b32alphabet.items()
_b32tab.sort()
_b32tab = [v for k, v in _b32tab]
# Reverse table: letter -> value (stored as long for the shift arithmetic).
_b32rev = dict([(v, long(k)) for k, v in _b32alphabet.items()])
def b32encode(s):
    """Encode a string using Base32.

    s is the string to encode.  The encoded string is returned.
    """
    parts = []
    quanta, leftover = divmod(len(s), 5)
    # Pad the last quantum with zero bits if necessary
    if leftover:
        s += ('\0' * (5 - leftover))
        quanta += 1
    for i in range(quanta):
        # c1 and c2 are 16 bits wide, c3 is 8 bits wide.  The intent of this
        # code is to process the 40 bits in units of 5 bits.  So we take the 1
        # leftover bit of c1 and tack it onto c2.  Then we take the 2 leftover
        # bits of c2 and tack them onto c3.  The shifts and masks are intended
        # to give us values of exactly 5 bits in width.
        c1, c2, c3 = struct.unpack('!HHB', s[i*5:(i+1)*5])
        c2 += (c1 & 1) << 16 # 17 bits wide
        c3 += (c2 & 3) << 8  # 10 bits wide
        parts.extend([_b32tab[c1 >> 11],         # bits 1 - 5
                      _b32tab[(c1 >> 6) & 0x1f], # bits 6 - 10
                      _b32tab[(c1 >> 1) & 0x1f], # bits 11 - 15
                      _b32tab[c2 >> 12],         # bits 16 - 20 (1 - 5)
                      _b32tab[(c2 >> 7) & 0x1f], # bits 21 - 25 (6 - 10)
                      _b32tab[(c2 >> 2) & 0x1f], # bits 26 - 30 (11 - 15)
                      _b32tab[c3 >> 5],          # bits 31 - 35 (1 - 5)
                      _b32tab[c3 & 0x1f],        # bits 36 - 40 (1 - 5)
                      ])
    encoded = EMPTYSTRING.join(parts)
    # Adjust for any leftover partial quanta: replace the zero-padding's
    # output characters with the required number of '=' pad characters.
    if leftover == 1:
        return encoded[:-6] + '======'
    elif leftover == 2:
        return encoded[:-4] + '===='
    elif leftover == 3:
        return encoded[:-3] + '==='
    elif leftover == 4:
        return encoded[:-1] + '='
    return encoded
def b32decode(s, casefold=False, map01=None):
    """Decode a Base32 encoded string.

    s is the string to decode.  Optional casefold is a flag specifying whether
    a lowercase alphabet is acceptable as input.  For security purposes, the
    default is False.

    RFC 3548 allows for optional mapping of the digit 0 (zero) to the letter O
    (oh), and for optional mapping of the digit 1 (one) to either the letter I
    (eye) or letter L (el).  The optional argument map01 when not None,
    specifies which letter the digit 1 should be mapped to (when map01 is not
    None, the digit 0 is always mapped to the letter O).  For security
    purposes the default is None, so that 0 and 1 are not allowed in the
    input.

    The decoded string is returned.  A TypeError is raised if s were
    incorrectly padded or if there are non-alphabet characters present in the
    string.
    """
    quanta, leftover = divmod(len(s), 8)
    if leftover:
        raise TypeError('Incorrect padding')
    # Handle section 2.4 zero and one mapping.  The flag map01 will be either
    # False, or the character to map the digit 1 (one) to.  It should be
    # either L (el) or I (eye).
    if map01:
        s = _translate(s, {'0': 'O', '1': map01})
    if casefold:
        s = s.upper()
    # Strip off pad characters from the right.  We need to count the pad
    # characters because this will tell us how many null bytes to remove from
    # the end of the decoded string.
    padchars = 0
    mo = re.search('(?P<pad>[=]*)$', s)
    if mo:
        padchars = len(mo.group('pad'))
        if padchars > 0:
            s = s[:-padchars]
    # Now decode the full quanta: accumulate 40 bits (8 * 5) at a time,
    # then emit them as 5 bytes.
    parts = []
    acc = 0
    shift = 35
    for c in s:
        val = _b32rev.get(c)
        if val is None:
            raise TypeError('Non-base32 digit found')
        # Use the value already fetched instead of a redundant second
        # dictionary lookup (_b32rev[c]).
        acc += val << shift
        shift -= 5
        if shift < 0:
            parts.append(binascii.unhexlify('%010x' % acc))
            acc = 0
            shift = 35
    # Process the last, partial quanta: the pad count determines how many
    # trailing zero bytes the padding contributed.
    last = binascii.unhexlify('%010x' % acc)
    if padchars == 0:
        last = ''                       # No characters
    elif padchars == 1:
        last = last[:-1]
    elif padchars == 3:
        last = last[:-2]
    elif padchars == 4:
        last = last[:-3]
    elif padchars == 6:
        last = last[:-4]
    else:
        raise TypeError('Incorrect padding')
    parts.append(last)
    return EMPTYSTRING.join(parts)
# RFC 3548, Base 16 Alphabet specifies uppercase, but hexlify() returns
# lowercase. The RFC also recommends against accepting input case
# insensitively.
def b16encode(s):
    """Encode a string using Base16.

    s is the string to encode.  The encoded string is returned.
    RFC 3548 specifies an uppercase alphabet, but hexlify() produces
    lowercase, so the result is upper-cased.
    """
    hexed = binascii.hexlify(s)
    return hexed.upper()
def b16decode(s, casefold=False):
    """Decode a Base16 encoded string.

    s is the string to decode.  Optional casefold is a flag specifying whether
    a lowercase alphabet is acceptable as input.  For security purposes, the
    default is False.

    The decoded string is returned.  A TypeError is raised if s were
    incorrectly padded or if there are non-alphabet characters present in the
    string.
    """
    data = s.upper() if casefold else s
    # Reject anything outside the uppercase hex alphabet (RFC 3548
    # recommends against case-insensitive acceptance by default).
    if re.search('[^0-9A-F]', data):
        raise TypeError('Non-base16 digit found')
    return binascii.unhexlify(data)
# Legacy interface. This code could be cleaned up since I don't believe
# binascii has any line length limitations. It just doesn't seem worth it
# though.
MAXLINESIZE = 76 # Excluding the CRLF
MAXBINSIZE = (MAXLINESIZE//4)*3  # max raw bytes that encode to one output line
def encode(input, output):
    """Encode a file."""
    while True:
        s = input.read(MAXBINSIZE)
        if not s:
            break
        # Top up short reads so each output line is full length (except
        # possibly the last); b2a_base64 appends the newline itself.
        while len(s) < MAXBINSIZE:
            ns = input.read(MAXBINSIZE-len(s))
            if not ns:
                break
            s += ns
        line = binascii.b2a_base64(s)
        output.write(line)
def decode(input, output):
    """Decode a file.

    Reads base64 lines from the *input* file object until EOF and writes
    the decoded raw data to *output*.
    """
    line = input.readline()
    while line:
        output.write(binascii.a2b_base64(line))
        line = input.readline()
def encodestring(s):
    """Encode a string into multiple lines of base-64 data.

    Each output line encodes at most MAXBINSIZE raw bytes and ends with
    the newline b2a_base64 appends.
    """
    pieces = [binascii.b2a_base64(s[start:start + MAXBINSIZE])
              for start in range(0, len(s), MAXBINSIZE)]
    return "".join(pieces)
def decodestring(s):
    """Decode a string."""
    # a2b_base64 ignores embedded newlines, so the multi-line output of
    # encodestring() round-trips directly.
    return binascii.a2b_base64(s)
# Useable as a script...
def test():
    """Small test program"""
    # Command-line driver: -e encode (default), -d/-u decode,
    # -t run the built-in round-trip check.  Python 2 syntax throughout.
    import sys, getopt
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'deut')
    except getopt.error, msg:
        sys.stdout = sys.stderr
        print msg
        print """usage: %s [-d|-e|-u|-t] [file|-]
        -d, -u: decode
        -e: encode (default)
        -t: encode and decode string 'Aladdin:open sesame'"""%sys.argv[0]
        sys.exit(2)
    func = encode
    for o, a in opts:
        if o == '-e': func = encode
        if o == '-d': func = decode
        if o == '-u': func = decode
        if o == '-t': test1(); return
    if args and args[0] != '-':
        with open(args[0], 'rb') as f:
            func(f, sys.stdout)
    else:
        func(sys.stdin, sys.stdout)
def test1():
s0 = "Aladdin:open sesame"
s1 = encodestring(s0)
s2 = decodestring(s1)
print s0, repr(s1), s2
if __name__ == '__main__':
    test()
#
# * This program is free software; you can redistribute it and/or modify
# * it under the terms of the GNU General Public License version 2 as
# * published by the Free Software Foundation;
# *
# * This program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; if not, write to the Free Software
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# Network topology
#
# n0 n1 n2 n3
# | | | |
# =================
# LAN
#
# - UDP flows from n0 to n1 and back
# - DropTail queues
# - Tracing of queues and packet receptions to file "udp-echo.tr"
import ns.applications
import ns.core
import ns.csma
import ns.internet
import ns.network
def main(argv):
    """Build a 4-node CSMA LAN, run a UDP echo client (node 0) against a
    UDP echo server (node 1) under the ns-3 realtime simulator, and trace
    traffic to realtime-udp-echo.tr / .pcap files.

    argv -- command-line argument list forwarded to ns.core.CommandLine.
    """
    #
    # Allow the user to override any of the defaults and the above Bind() at
    # run-time, via command-line arguments
    #
    cmd = ns.core.CommandLine()
    cmd.Parse(argv)

    #
    # But since this is a realtime script, don't allow the user to mess with
    # that.
    #
    ns.core.GlobalValue.Bind("SimulatorImplementationType", ns.core.StringValue("ns3::RealtimeSimulatorImpl"))

    #
    # Explicitly create the nodes required by the topology (shown above).
    #
    # Use call-form print for consistency with the rest of this function
    # (the original mixed a bare Python 2 print statement in here).
    print ("Create nodes.")
    n = ns.network.NodeContainer()
    n.Create(4)

    internet = ns.internet.InternetStackHelper()
    internet.Install(n)

    #
    # Explicitly create the channels required by the topology (shown above).
    #
    print ("Create channels.")
    csma = ns.csma.CsmaHelper()
    csma.SetChannelAttribute("DataRate", ns.network.DataRateValue(ns.network.DataRate(5000000)))
    csma.SetChannelAttribute("Delay", ns.core.TimeValue(ns.core.MilliSeconds(2)))
    csma.SetDeviceAttribute("Mtu", ns.core.UintegerValue(1400))
    d = csma.Install(n)

    #
    # We've got the "hardware" in place.  Now we need to add IP addresses.
    #
    print ("Assign IP Addresses.")
    ipv4 = ns.internet.Ipv4AddressHelper()
    ipv4.SetBase(ns.network.Ipv4Address("10.1.1.0"), ns.network.Ipv4Mask("255.255.255.0"))
    i = ipv4.Assign(d)

    print ("Create Applications.")
    #
    # Create a UdpEchoServer application on node one.
    #
    port = 9  # well-known echo port number
    server = ns.applications.UdpEchoServerHelper(port)
    apps = server.Install(n.Get(1))
    apps.Start(ns.core.Seconds(1.0))
    apps.Stop(ns.core.Seconds(10.0))

    #
    # Create a UdpEchoClient application to send UDP datagrams from node zero to
    # node one.
    #
    packetSize = 1024
    maxPacketCount = 500
    interPacketInterval = ns.core.Seconds(0.01)
    client = ns.applications.UdpEchoClientHelper(i.GetAddress (1), port)
    client.SetAttribute("MaxPackets", ns.core.UintegerValue(maxPacketCount))
    client.SetAttribute("Interval", ns.core.TimeValue(interPacketInterval))
    client.SetAttribute("PacketSize", ns.core.UintegerValue(packetSize))
    apps = client.Install(n.Get(0))
    apps.Start(ns.core.Seconds(2.0))
    apps.Stop(ns.core.Seconds(10.0))

    # Enable ascii and pcap tracing on all CSMA devices.
    ascii = ns.network.AsciiTraceHelper()
    csma.EnableAsciiAll(ascii.CreateFileStream("realtime-udp-echo.tr"))
    csma.EnablePcapAll("realtime-udp-echo", False)

    #
    # Now, do the actual simulation.
    #
    print ("Run Simulation.")
    ns.core.Simulator.Run()
    ns.core.Simulator.Destroy()
    print ("Done.")
if __name__ == '__main__':
import sys
    main(sys.argv)
from __future__ import absolute_import, division, unicode_literals
from six import with_metaclass
import types
from . import inputstream
from . import tokenizer
from . import treebuilders
from .treebuilders._base import Marker
from . import utils
from . import constants
from .constants import spaceCharacters, asciiUpper2Lower
from .constants import specialElements
from .constants import headingElements
from .constants import cdataElements, rcdataElements
from .constants import tokenTypes, ReparseException, namespaces
from .constants import htmlIntegrationPointElements, mathmlTextIntegrationPointElements
from .constants import adjustForeignAttributes as adjustForeignAttributesMap
def parse(doc, treebuilder="etree", encoding=None,
          namespaceHTMLElements=True):
    """Convenience wrapper: parse `doc` (a string or file-like object)
    into a document tree built by the named treebuilder."""
    builder_class = treebuilders.getTreeBuilder(treebuilder)
    parser = HTMLParser(builder_class,
                        namespaceHTMLElements=namespaceHTMLElements)
    return parser.parse(doc, encoding=encoding)
def parseFragment(doc, container="div", treebuilder="etree", encoding=None,
                  namespaceHTMLElements=True):
    """Parse a string or file-like object into a tree fragment.

    container - name of the element the fragment is parsed as the
    innerHTML of (default "div").

    The optional encoding parameter must be a string that indicates the
    encoding; if specified, it overrides any BOM or in-document
    declaration.
    """
    tb = treebuilders.getTreeBuilder(treebuilder)
    p = HTMLParser(tb, namespaceHTMLElements=namespaceHTMLElements)
    return p.parseFragment(doc, container=container, encoding=encoding)
def method_decorator_metaclass(function):
    """Build a metaclass that applies `function` as a decorator to every
    plain function attribute of the classes it creates."""
    class Decorated(type):
        def __new__(meta, classname, bases, classDict):
            decorated = {}
            for name, value in classDict.items():
                if isinstance(value, types.FunctionType):
                    value = function(value)
                decorated[name] = value
            return type.__new__(meta, classname, bases, decorated)
    return Decorated
class HTMLParser(object):
    """HTML parser. Generates a tree structure from a stream of (possibly
    malformed) HTML"""

    def __init__(self, tree=None, tokenizer=tokenizer.HTMLTokenizer,
                 strict=False, namespaceHTMLElements=True, debug=False):
        """
        strict - raise an exception when a parse error is encountered

        tree - a treebuilder class controlling the type of tree that will be
        returned. Built in treebuilders can be accessed through
        html5lib.treebuilders.getTreeBuilder(treeType)

        tokenizer - a class that provides a stream of tokens to the treebuilder.
        This may be replaced for e.g. a sanitizer which converts some tags to
        text
        """
        # Raise an exception on the first error encountered
        self.strict = strict

        if tree is None:
            tree = treebuilders.getTreeBuilder("etree")
        self.tree = tree(namespaceHTMLElements)
        self.tokenizer_class = tokenizer
        self.errors = []

        # One phase object per insertion mode, each sharing this parser
        # and its tree builder.
        self.phases = dict([(name, cls(self, self.tree)) for name, cls in
                            getPhases(debug).items()])

    def _parse(self, stream, innerHTML=False, container="div",
               encoding=None, parseMeta=True, useChardet=True, **kwargs):
        # Shared driver for parse()/parseFragment(): set up the tokenizer,
        # then run the main loop, restarting from scratch whenever the
        # input stream requests a reparse (e.g. after an encoding change).
        self.innerHTMLMode = innerHTML
        self.container = container
        self.tokenizer = self.tokenizer_class(stream, encoding=encoding,
                                              parseMeta=parseMeta,
                                              useChardet=useChardet,
                                              parser=self, **kwargs)
        self.reset()

        while True:
            try:
                self.mainLoop()
                break
            except ReparseException:
                self.reset()

    def reset(self):
        # Restore all per-parse state; called before each (re)parse.
        self.tree.reset()
        self.firstStartTag = False
        self.errors = []
        self.log = []  # only used with debug mode
        # "quirks" / "limited quirks" / "no quirks"
        self.compatMode = "no quirks"

        if self.innerHTMLMode:
            self.innerHTML = self.container.lower()

            # Fragment parsing: pick the tokenizer start state implied by
            # the container element.
            if self.innerHTML in cdataElements:
                self.tokenizer.state = self.tokenizer.rcdataState
            elif self.innerHTML in rcdataElements:
                self.tokenizer.state = self.tokenizer.rawtextState
            elif self.innerHTML == 'plaintext':
                self.tokenizer.state = self.tokenizer.plaintextState
            else:
                # state already is data state
                # self.tokenizer.state = self.tokenizer.dataState
                pass
            self.phase = self.phases["beforeHtml"]
            self.phase.insertHtmlElement()
            self.resetInsertionMode()
        else:
            self.innerHTML = False
            self.phase = self.phases["initial"]

        self.lastPhase = None

        self.beforeRCDataPhase = None

        self.framesetOK = True

    def isHTMLIntegrationPoint(self, element):
        # Per the HTML5 spec: <annotation-xml> counts only with an HTML-ish
        # encoding attribute; otherwise consult the static table.
        if (element.name == "annotation-xml" and
                element.namespace == namespaces["mathml"]):
            return ("encoding" in element.attributes and
                    element.attributes["encoding"].translate(
                        asciiUpper2Lower) in
                    ("text/html", "application/xhtml+xml"))
        else:
            return (element.namespace, element.name) in htmlIntegrationPointElements

    def isMathMLTextIntegrationPoint(self, element):
        return (element.namespace, element.name) in mathmlTextIntegrationPointElements

    def mainLoop(self):
        CharactersToken = tokenTypes["Characters"]
        SpaceCharactersToken = tokenTypes["SpaceCharacters"]
        StartTagToken = tokenTypes["StartTag"]
        EndTagToken = tokenTypes["EndTag"]
        CommentToken = tokenTypes["Comment"]
        DoctypeToken = tokenTypes["Doctype"]
        ParseErrorToken = tokenTypes["ParseError"]

        for token in self.normalizedTokens():
            new_token = token
            # A phase may hand the token back (return it) to be reprocessed
            # by another phase; loop until it is consumed (None).
            while new_token is not None:
                currentNode = self.tree.openElements[-1] if self.tree.openElements else None
                currentNodeNamespace = currentNode.namespace if currentNode else None
                currentNodeName = currentNode.name if currentNode else None

                type = new_token["type"]

                if type == ParseErrorToken:
                    self.parseError(new_token["data"], new_token.get("datavars", {}))
                    new_token = None
                else:
                    # Decide between the current insertion mode and the
                    # "in foreign content" rules (SVG/MathML subtrees).
                    if (len(self.tree.openElements) == 0 or
                        currentNodeNamespace == self.tree.defaultNamespace or
                        (self.isMathMLTextIntegrationPoint(currentNode) and
                         ((type == StartTagToken and
                           token["name"] not in frozenset(["mglyph", "malignmark"])) or
                          type in (CharactersToken, SpaceCharactersToken))) or
                        (currentNodeNamespace == namespaces["mathml"] and
                         currentNodeName == "annotation-xml" and
                         token["name"] == "svg") or
                        (self.isHTMLIntegrationPoint(currentNode) and
                         type in (StartTagToken, CharactersToken, SpaceCharactersToken))):
                        phase = self.phase
                    else:
                        phase = self.phases["inForeignContent"]

                    if type == CharactersToken:
                        new_token = phase.processCharacters(new_token)
                    elif type == SpaceCharactersToken:
                        new_token = phase.processSpaceCharacters(new_token)
                    elif type == StartTagToken:
                        new_token = phase.processStartTag(new_token)
                    elif type == EndTagToken:
                        new_token = phase.processEndTag(new_token)
                    elif type == CommentToken:
                        new_token = phase.processComment(new_token)
                    elif type == DoctypeToken:
                        new_token = phase.processDoctype(new_token)

            if (type == StartTagToken and token["selfClosing"]
                    and not token["selfClosingAcknowledged"]):
                self.parseError("non-void-element-with-trailing-solidus",
                                {"name": token["name"]})

        # When the loop finishes it's EOF
        reprocess = True
        phases = []
        while reprocess:
            phases.append(self.phase)
            reprocess = self.phase.processEOF()
            if reprocess:
                assert self.phase not in phases

    def normalizedTokens(self):
        for token in self.tokenizer:
            yield self.normalizeToken(token)

    def parse(self, stream, encoding=None, parseMeta=True, useChardet=True):
        """Parse a HTML document into a well-formed tree

        stream - a filelike object or string containing the HTML to be parsed

        The optional encoding parameter must be a string that indicates
        the encoding.  If specified, that encoding will be used,
        regardless of any BOM or later declaration (such as in a meta
        element)
        """
        self._parse(stream, innerHTML=False, encoding=encoding,
                    parseMeta=parseMeta, useChardet=useChardet)
        return self.tree.getDocument()

    def parseFragment(self, stream, container="div", encoding=None,
                      parseMeta=False, useChardet=True):
        """Parse a HTML fragment into a well-formed tree fragment

        container - name of the element we're setting the innerHTML property
        if set to None, default to 'div'

        stream - a filelike object or string containing the HTML to be parsed

        The optional encoding parameter must be a string that indicates
        the encoding.  If specified, that encoding will be used,
        regardless of any BOM or later declaration (such as in a meta
        element)
        """
        self._parse(stream, True, container=container, encoding=encoding)
        return self.tree.getFragment()

    def parseError(self, errorcode="XXX-undefined-error", datavars={}):
        # XXX The idea is to make errorcode mandatory.
        self.errors.append((self.tokenizer.stream.position(), errorcode, datavars))
        if self.strict:
            # NOTE(review): ParseError is not among this module's visible
            # imports (only ReparseException is); confirm it is provided by
            # the constants module, otherwise strict mode raises NameError.
            raise ParseError

    def normalizeToken(self, token):
        """ HTML5 specific normalizations to the token stream """

        if token["type"] == tokenTypes["StartTag"]:
            # Reversing before dict() keeps the FIRST occurrence of a
            # duplicated attribute, as the spec requires.
            token["data"] = dict(token["data"][::-1])

        return token

    def adjustMathMLAttributes(self, token):
        replacements = {"definitionurl": "definitionURL"}
        for k, v in replacements.items():
            if k in token["data"]:
                token["data"][v] = token["data"][k]
                del token["data"][k]

    def adjustSVGAttributes(self, token):
        # Restore the spec-mandated mixed-case spellings of SVG attributes
        # that the tokenizer lowercased.
        replacements = {
            "attributename": "attributeName",
            "attributetype": "attributeType",
            "basefrequency": "baseFrequency",
            "baseprofile": "baseProfile",
            "calcmode": "calcMode",
            "clippathunits": "clipPathUnits",
            "contentscripttype": "contentScriptType",
            "contentstyletype": "contentStyleType",
            "diffuseconstant": "diffuseConstant",
            "edgemode": "edgeMode",
            "externalresourcesrequired": "externalResourcesRequired",
            "filterres": "filterRes",
            "filterunits": "filterUnits",
            "glyphref": "glyphRef",
            "gradienttransform": "gradientTransform",
            "gradientunits": "gradientUnits",
            "kernelmatrix": "kernelMatrix",
            "kernelunitlength": "kernelUnitLength",
            "keypoints": "keyPoints",
            "keysplines": "keySplines",
            "keytimes": "keyTimes",
            "lengthadjust": "lengthAdjust",
            "limitingconeangle": "limitingConeAngle",
            "markerheight": "markerHeight",
            "markerunits": "markerUnits",
            "markerwidth": "markerWidth",
            "maskcontentunits": "maskContentUnits",
            "maskunits": "maskUnits",
            "numoctaves": "numOctaves",
            "pathlength": "pathLength",
            "patterncontentunits": "patternContentUnits",
            "patterntransform": "patternTransform",
            "patternunits": "patternUnits",
            "pointsatx": "pointsAtX",
            "pointsaty": "pointsAtY",
            "pointsatz": "pointsAtZ",
            "preservealpha": "preserveAlpha",
            "preserveaspectratio": "preserveAspectRatio",
            "primitiveunits": "primitiveUnits",
            "refx": "refX",
            "refy": "refY",
            "repeatcount": "repeatCount",
            "repeatdur": "repeatDur",
            "requiredextensions": "requiredExtensions",
            "requiredfeatures": "requiredFeatures",
            "specularconstant": "specularConstant",
            "specularexponent": "specularExponent",
            "spreadmethod": "spreadMethod",
            "startoffset": "startOffset",
            "stddeviation": "stdDeviation",
            "stitchtiles": "stitchTiles",
            "surfacescale": "surfaceScale",
            "systemlanguage": "systemLanguage",
            "tablevalues": "tableValues",
            "targetx": "targetX",
            "targety": "targetY",
            "textlength": "textLength",
            "viewbox": "viewBox",
            "viewtarget": "viewTarget",
            "xchannelselector": "xChannelSelector",
            "ychannelselector": "yChannelSelector",
            "zoomandpan": "zoomAndPan"
        }
        for originalName in list(token["data"].keys()):
            if originalName in replacements:
                svgName = replacements[originalName]
                token["data"][svgName] = token["data"][originalName]
                del token["data"][originalName]

    def adjustForeignAttributes(self, token):
        replacements = adjustForeignAttributesMap
        # Iterate over a snapshot of the keys: the loop deletes entries
        # from token["data"], and deleting while iterating a live keys()
        # view raises RuntimeError on Python 3.  (adjustSVGAttributes
        # above already uses the same list() snapshot.)
        for originalName in list(token["data"].keys()):
            if originalName in replacements:
                foreignName = replacements[originalName]
                token["data"][foreignName] = token["data"][originalName]
                del token["data"][originalName]

    def reparseTokenNormal(self, token):
        # NOTE(review): appears to be dead code — HTMLParser instances have
        # no self.parser attribute, so calling this would raise
        # AttributeError; confirm before relying on it.
        self.parser.phase()

    def resetInsertionMode(self):
        # The name of this method is mostly historical. (It's also used in the
        # specification.)
        last = False
        newModes = {
            "select": "inSelect",
            "td": "inCell",
            "th": "inCell",
            "tr": "inRow",
            "tbody": "inTableBody",
            "thead": "inTableBody",
            "tfoot": "inTableBody",
            "caption": "inCaption",
            "colgroup": "inColumnGroup",
            "table": "inTable",
            "head": "inBody",
            "body": "inBody",
            "frameset": "inFrameset",
            "html": "beforeHead"
        }
        for node in self.tree.openElements[::-1]:
            nodeName = node.name
            new_phase = None
            if node == self.tree.openElements[0]:
                assert self.innerHTML
                last = True
                nodeName = self.innerHTML
            # Check for conditions that should only happen in the innerHTML
            # case
            if nodeName in ("select", "colgroup", "head", "html"):
                assert self.innerHTML

            if not last and node.namespace != self.tree.defaultNamespace:
                continue

            if nodeName in newModes:
                new_phase = self.phases[newModes[nodeName]]
                break
            elif last:
                new_phase = self.phases["inBody"]
                break

        self.phase = new_phase

    def parseRCDataRawtext(self, token, contentType):
        """Generic RCDATA/RAWTEXT Parsing algorithm
        contentType - RCDATA or RAWTEXT
        """
        assert contentType in ("RAWTEXT", "RCDATA")

        self.tree.insertElement(token)

        if contentType == "RAWTEXT":
            self.tokenizer.state = self.tokenizer.rawtextState
        else:
            self.tokenizer.state = self.tokenizer.rcdataState

        # Remember where to return after the "text" phase sees the end tag.
        self.originalPhase = self.phase

        self.phase = self.phases["text"]
def getPhases(debug):
def log(function):
"""Logger that records which phase processes each token"""
type_names = dict((value, key) for key, value in
constants.tokenTypes.items())
def wrapped(self, *args, **kwargs):
if function.__name__.startswith("process") and len(args) > 0:
token = args[0]
try:
info = {"type": type_names[token['type']]}
except:
raise
if token['type'] in constants.tagTokenTypes:
info["name"] = token['name']
self.parser.log.append((self.parser.tokenizer.state.__name__,
self.parser.phase.__class__.__name__,
self.__class__.__name__,
function.__name__,
info))
return function(self, *args, **kwargs)
else:
return function(self, *args, **kwargs)
return wrapped
def getMetaclass(use_metaclass, metaclass_func):
if use_metaclass:
return method_decorator_metaclass(metaclass_func)
else:
return type
class Phase(with_metaclass(getMetaclass(debug, log))):
"""Base class for helper object that implements each phase of processing
"""
def __init__(self, parser, tree):
self.parser = parser
self.tree = tree
def processEOF(self):
raise NotImplementedError
def processComment(self, token):
# For most phases the following is correct. Where it's not it will be
# overridden.
self.tree.insertComment(token, self.tree.openElements[-1])
def processDoctype(self, token):
self.parser.parseError("unexpected-doctype")
def processCharacters(self, token):
self.tree.insertText(token["data"])
def processSpaceCharacters(self, token):
self.tree.insertText(token["data"])
def processStartTag(self, token):
return self.startTagHandler[token["name"]](token)
def startTagHtml(self, token):
if not self.parser.firstStartTag and token["name"] == "html":
self.parser.parseError("non-html-root")
# XXX Need a check here to see if the first start tag token emitted is
# this token... If it's not, invoke self.parser.parseError().
for attr, value in token["data"].items():
if attr not in self.tree.openElements[0].attributes:
self.tree.openElements[0].attributes[attr] = value
self.parser.firstStartTag = False
def processEndTag(self, token):
return self.endTagHandler[token["name"]](token)
class InitialPhase(Phase):
def processSpaceCharacters(self, token):
pass
def processComment(self, token):
self.tree.insertComment(token, self.tree.document)
def processDoctype(self, token):
name = token["name"]
publicId = token["publicId"]
systemId = token["systemId"]
correct = token["correct"]
if (name != "html" or publicId is not None or
systemId is not None and systemId != "about:legacy-compat"):
self.parser.parseError("unknown-doctype")
if publicId is None:
publicId = ""
self.tree.insertDoctype(token)
if publicId != "":
publicId = publicId.translate(asciiUpper2Lower)
if (not correct or token["name"] != "html"
or publicId.startswith(
("+//silmaril//dtd html pro v0r11 19970101//",
"-//advasoft ltd//dtd html 3.0 aswedit + extensions//",
"-//as//dtd html 3.0 aswedit + extensions//",
"-//ietf//dtd html 2.0 level 1//",
"-//ietf//dtd html 2.0 level 2//",
"-//ietf//dtd html 2.0 strict level 1//",
"-//ietf//dtd html 2.0 strict level 2//",
"-//ietf//dtd html 2.0 strict//",
"-//ietf//dtd html 2.0//",
"-//ietf//dtd html 2.1e//",
"-//ietf//dtd html 3.0//",
"-//ietf//dtd html 3.2 final//",
"-//ietf//dtd html 3.2//",
"-//ietf//dtd html 3//",
"-//ietf//dtd html level 0//",
"-//ietf//dtd html level 1//",
"-//ietf//dtd html level 2//",
"-//ietf//dtd html level 3//",
"-//ietf//dtd html strict level 0//",
"-//ietf//dtd html strict level 1//",
"-//ietf//dtd html strict level 2//",
"-//ietf//dtd html strict level 3//",
"-//ietf//dtd html strict//",
"-//ietf//dtd html//",
"-//metrius//dtd metrius presentational//",
"-//microsoft//dtd internet explorer 2.0 html strict//",
"-//microsoft//dtd internet explorer 2.0 html//",
"-//microsoft//dtd internet explorer 2.0 tables//",
"-//microsoft//dtd internet explorer 3.0 html strict//",
"-//microsoft//dtd internet explorer 3.0 html//",
"-//microsoft//dtd internet explorer 3.0 tables//",
"-//netscape comm. corp.//dtd html//",
"-//netscape comm. corp.//dtd strict html//",
"-//o'reilly and associates//dtd html 2.0//",
"-//o'reilly and associates//dtd html extended 1.0//",
"-//o'reilly and associates//dtd html extended relaxed 1.0//",
"-//softquad software//dtd hotmetal pro 6.0::19990601::extensions to html 4.0//",
"-//softquad//dtd hotmetal pro 4.0::19971010::extensions to html 4.0//",
"-//spyglass//dtd html 2.0 extended//",
"-//sq//dtd html 2.0 hotmetal + extensions//",
"-//sun microsystems corp.//dtd hotjava html//",
"-//sun microsystems corp.//dtd hotjava strict html//",
"-//w3c//dtd html 3 1995-03-24//",
"-//w3c//dtd html 3.2 draft//",
"-//w3c//dtd html 3.2 final//",
"-//w3c//dtd html 3.2//",
"-//w3c//dtd html 3.2s draft//",
"-//w3c//dtd html 4.0 frameset//",
"-//w3c//dtd html 4.0 transitional//",
"-//w3c//dtd html experimental 19960712//",
"-//w3c//dtd html experimental 970421//",
"-//w3c//dtd w3 html//",
"-//w3o//dtd w3 html 3.0//",
"-//webtechs//dtd mozilla html 2.0//",
"-//webtechs//dtd mozilla html//"))
or publicId in
("-//w3o//dtd w3 html strict 3.0//en//",
"-/w3c/dtd html 4.0 transitional/en",
"html")
or publicId.startswith(
("-//w3c//dtd html 4.01 frameset//",
"-//w3c//dtd html 4.01 transitional//")) and
systemId is None
or systemId and systemId.lower() == "http://www.ibm.com/data/dtd/v11/ibmxhtml1-transitional.dtd"):
self.parser.compatMode = "quirks"
elif (publicId.startswith(
("-//w3c//dtd xhtml 1.0 frameset//",
"-//w3c//dtd xhtml 1.0 transitional//"))
or publicId.startswith(
("-//w3c//dtd html 4.01 frameset//",
"-//w3c//dtd html 4.01 transitional//")) and
systemId is not None):
self.parser.compatMode = "limited quirks"
self.parser.phase = self.parser.phases["beforeHtml"]
def anythingElse(self):
self.parser.compatMode = "quirks"
self.parser.phase = self.parser.phases["beforeHtml"]
def processCharacters(self, token):
self.parser.parseError("expected-doctype-but-got-chars")
self.anythingElse()
return token
def processStartTag(self, token):
self.parser.parseError("expected-doctype-but-got-start-tag",
{"name": token["name"]})
self.anythingElse()
return token
def processEndTag(self, token):
self.parser.parseError("expected-doctype-but-got-end-tag",
{"name": token["name"]})
self.anythingElse()
return token
def processEOF(self):
self.parser.parseError("expected-doctype-but-got-eof")
self.anythingElse()
return True
class BeforeHtmlPhase(Phase):
# helper methods
def insertHtmlElement(self):
self.tree.insertRoot(impliedTagToken("html", "StartTag"))
self.parser.phase = self.parser.phases["beforeHead"]
# other
def processEOF(self):
self.insertHtmlElement()
return True
def processComment(self, token):
self.tree.insertComment(token, self.tree.document)
def processSpaceCharacters(self, token):
pass
def processCharacters(self, token):
self.insertHtmlElement()
return token
def processStartTag(self, token):
if token["name"] == "html":
self.parser.firstStartTag = True
self.insertHtmlElement()
return token
def processEndTag(self, token):
if token["name"] not in ("head", "body", "html", "br"):
self.parser.parseError("unexpected-end-tag-before-html",
{"name": token["name"]})
else:
self.insertHtmlElement()
return token
class BeforeHeadPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("head", self.startTagHead)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
(("head", "body", "html", "br"), self.endTagImplyHead)
])
self.endTagHandler.default = self.endTagOther
def processEOF(self):
self.startTagHead(impliedTagToken("head", "StartTag"))
return True
def processSpaceCharacters(self, token):
pass
def processCharacters(self, token):
self.startTagHead(impliedTagToken("head", "StartTag"))
return token
def startTagHtml(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagHead(self, token):
self.tree.insertElement(token)
self.tree.headPointer = self.tree.openElements[-1]
self.parser.phase = self.parser.phases["inHead"]
def startTagOther(self, token):
self.startTagHead(impliedTagToken("head", "StartTag"))
return token
def endTagImplyHead(self, token):
self.startTagHead(impliedTagToken("head", "StartTag"))
return token
def endTagOther(self, token):
self.parser.parseError("end-tag-after-implied-root",
{"name": token["name"]})
class InHeadPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("title", self.startTagTitle),
(("noscript", "noframes", "style"), self.startTagNoScriptNoFramesStyle),
("script", self.startTagScript),
(("base", "basefont", "bgsound", "command", "link"),
self.startTagBaseLinkCommand),
("meta", self.startTagMeta),
("head", self.startTagHead)
])
self.startTagHandler.default = self.startTagOther
self. endTagHandler = utils.MethodDispatcher([
("head", self.endTagHead),
(("br", "html", "body"), self.endTagHtmlBodyBr)
])
self.endTagHandler.default = self.endTagOther
# the real thing
def processEOF(self):
self.anythingElse()
return True
def processCharacters(self, token):
self.anythingElse()
return token
def startTagHtml(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagHead(self, token):
self.parser.parseError("two-heads-are-not-better-than-one")
def startTagBaseLinkCommand(self, token):
self.tree.insertElement(token)
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
def startTagMeta(self, token):
self.tree.insertElement(token)
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
attributes = token["data"]
if self.parser.tokenizer.stream.charEncoding[1] == "tentative":
if "charset" in attributes:
self.parser.tokenizer.stream.changeEncoding(attributes["charset"])
elif ("content" in attributes and
"http-equiv" in attributes and
attributes["http-equiv"].lower() == "content-type"):
# Encoding it as UTF-8 here is a hack, as really we should pass
# the abstract Unicode string, and just use the
# ContentAttrParser on that, but using UTF-8 allows all chars
# to be encoded and as a ASCII-superset works.
data = inputstream.EncodingBytes(attributes["content"].encode("utf-8"))
parser = inputstream.ContentAttrParser(data)
codec = parser.parse()
self.parser.tokenizer.stream.changeEncoding(codec)
def startTagTitle(self, token):
self.parser.parseRCDataRawtext(token, "RCDATA")
def startTagNoScriptNoFramesStyle(self, token):
# Need to decide whether to implement the scripting-disabled case
self.parser.parseRCDataRawtext(token, "RAWTEXT")
def startTagScript(self, token):
self.tree.insertElement(token)
self.parser.tokenizer.state = self.parser.tokenizer.scriptDataState
self.parser.originalPhase = self.parser.phase
self.parser.phase = self.parser.phases["text"]
def startTagOther(self, token):
self.anythingElse()
return token
def endTagHead(self, token):
node = self.parser.tree.openElements.pop()
assert node.name == "head", "Expected head got %s" % node.name
self.parser.phase = self.parser.phases["afterHead"]
def endTagHtmlBodyBr(self, token):
self.anythingElse()
return token
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
def anythingElse(self):
self.endTagHead(impliedTagToken("head"))
# XXX If we implement a parser for which scripting is disabled we need to
# implement this phase.
#
# class InHeadNoScriptPhase(Phase):
class AfterHeadPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("body", self.startTagBody),
("frameset", self.startTagFrameset),
(("base", "basefont", "bgsound", "link", "meta", "noframes", "script",
"style", "title"),
self.startTagFromHead),
("head", self.startTagHead)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([(("body", "html", "br"),
self.endTagHtmlBodyBr)])
self.endTagHandler.default = self.endTagOther
def processEOF(self):
self.anythingElse()
return True
def processCharacters(self, token):
self.anythingElse()
return token
def startTagHtml(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagBody(self, token):
self.parser.framesetOK = False
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inBody"]
def startTagFrameset(self, token):
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inFrameset"]
def startTagFromHead(self, token):
self.parser.parseError("unexpected-start-tag-out-of-my-head",
{"name": token["name"]})
self.tree.openElements.append(self.tree.headPointer)
self.parser.phases["inHead"].processStartTag(token)
for node in self.tree.openElements[::-1]:
if node.name == "head":
self.tree.openElements.remove(node)
break
def startTagHead(self, token):
self.parser.parseError("unexpected-start-tag", {"name": token["name"]})
def startTagOther(self, token):
self.anythingElse()
return token
def endTagHtmlBodyBr(self, token):
self.anythingElse()
return token
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
def anythingElse(self):
self.tree.insertElement(impliedTagToken("body", "StartTag"))
self.parser.phase = self.parser.phases["inBody"]
self.parser.framesetOK = True
class InBodyPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#parsing-main-inbody
# the really-really-really-very crazy mode
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
# Keep a ref to this for special handling of whitespace in <pre>
self.processSpaceCharactersNonPre = self.processSpaceCharacters
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
(("base", "basefont", "bgsound", "command", "link", "meta",
"noframes", "script", "style", "title"),
self.startTagProcessInHead),
("body", self.startTagBody),
("frameset", self.startTagFrameset),
(("address", "article", "aside", "blockquote", "center", "details",
"details", "dir", "div", "dl", "fieldset", "figcaption", "figure",
"footer", "header", "hgroup", "main", "menu", "nav", "ol", "p",
"section", "summary", "ul"),
self.startTagCloseP),
(headingElements, self.startTagHeading),
(("pre", "listing"), self.startTagPreListing),
("form", self.startTagForm),
(("li", "dd", "dt"), self.startTagListItem),
("plaintext", self.startTagPlaintext),
("a", self.startTagA),
(("b", "big", "code", "em", "font", "i", "s", "small", "strike",
"strong", "tt", "u"), self.startTagFormatting),
("nobr", self.startTagNobr),
("button", self.startTagButton),
(("applet", "marquee", "object"), self.startTagAppletMarqueeObject),
("xmp", self.startTagXmp),
("table", self.startTagTable),
(("area", "br", "embed", "img", "keygen", "wbr"),
self.startTagVoidFormatting),
(("param", "source", "track"), self.startTagParamSource),
("input", self.startTagInput),
("hr", self.startTagHr),
("image", self.startTagImage),
("isindex", self.startTagIsIndex),
("textarea", self.startTagTextarea),
("iframe", self.startTagIFrame),
(("noembed", "noframes", "noscript"), self.startTagRawtext),
("select", self.startTagSelect),
(("rp", "rt"), self.startTagRpRt),
(("option", "optgroup"), self.startTagOpt),
(("math"), self.startTagMath),
(("svg"), self.startTagSvg),
(("caption", "col", "colgroup", "frame", "head",
"tbody", "td", "tfoot", "th", "thead",
"tr"), self.startTagMisplaced)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("body", self.endTagBody),
("html", self.endTagHtml),
(("address", "article", "aside", "blockquote", "button", "center",
"details", "dialog", "dir", "div", "dl", "fieldset", "figcaption", "figure",
"footer", "header", "hgroup", "listing", "main", "menu", "nav", "ol", "pre",
"section", "summary", "ul"), self.endTagBlock),
("form", self.endTagForm),
("p", self.endTagP),
(("dd", "dt", "li"), self.endTagListItem),
(headingElements, self.endTagHeading),
(("a", "b", "big", "code", "em", "font", "i", "nobr", "s", "small",
"strike", "strong", "tt", "u"), self.endTagFormatting),
(("applet", "marquee", "object"), self.endTagAppletMarqueeObject),
("br", self.endTagBr),
])
self.endTagHandler.default = self.endTagOther
def isMatchingFormattingElement(self, node1, node2):
if node1.name != node2.name or node1.namespace != node2.namespace:
return False
elif len(node1.attributes) != len(node2.attributes):
return False
else:
attributes1 = sorted(node1.attributes.items())
attributes2 = sorted(node2.attributes.items())
for attr1, attr2 in zip(attributes1, attributes2):
if attr1 != attr2:
return False
return True
        # Helpers for maintaining the list of active formatting elements
        def addFormattingElement(self, token):
            """Insert a formatting element and push it onto the list of
            active formatting elements, enforcing the "Noah's Ark" clause:
            at most three matching entries after the last scope marker.
            """
            self.tree.insertElement(token)
            element = self.tree.openElements[-1]
            matchingElements = []
            # Walk backwards to the most recent scope marker, collecting
            # entries that duplicate the new element (name/namespace/attrs).
            for node in self.tree.activeFormattingElements[::-1]:
                if node is Marker:
                    break
                elif self.isMatchingFormattingElement(node, element):
                    matchingElements.append(node)
            assert len(matchingElements) <= 3
            if len(matchingElements) == 3:
                # matchingElements is newest-first, so [-1] is the earliest
                # duplicate; evict it to keep at most three.
                self.tree.activeFormattingElements.remove(matchingElements[-1])
            self.tree.activeFormattingElements.append(element)
        # Token-processing handlers for the "in body" insertion mode
def processEOF(self):
allowed_elements = frozenset(("dd", "dt", "li", "p", "tbody", "td",
"tfoot", "th", "thead", "tr", "body",
"html"))
for node in self.tree.openElements[::-1]:
if node.name not in allowed_elements:
self.parser.parseError("expected-closing-tag-but-got-eof")
break
# Stop parsing
        def processSpaceCharactersDropNewline(self, token):
            """One-shot whitespace handler installed when a <pre>, <listing>,
            or <textarea> is opened: drops a single leading newline, then
            restores the normal whitespace handler.
            """
            # Sometimes (start of <pre>, <listing>, and <textarea> blocks) we
            # want to drop leading newlines
            data = token["data"]
            # NOTE(review): processSpaceCharactersNonPre is not defined in this
            # chunk; presumably it is bound elsewhere (e.g. in __init__) —
            # verify, since the plain handler below is named
            # processSpaceCharacters instead.
            self.processSpaceCharacters = self.processSpaceCharactersNonPre
            if (data.startswith("\n") and
                self.tree.openElements[-1].name in ("pre", "listing", "textarea")
                and not self.tree.openElements[-1].hasContent()):
                data = data[1:]
            if data:
                self.tree.reconstructActiveFormattingElements()
                self.tree.insertText(data)
def processCharacters(self, token):
if token["data"] == "\u0000":
# The tokenizer should always emit null on its own
return
self.tree.reconstructActiveFormattingElements()
self.tree.insertText(token["data"])
# This must be bad for performance
if (self.parser.framesetOK and
any([char not in spaceCharacters
for char in token["data"]])):
self.parser.framesetOK = False
        def processSpaceCharacters(self, token):
            """Default whitespace handler: reconstruct active formatting
            elements, then insert the space run as text.
            NOTE(review): processSpaceCharactersDropNewline restores a handler
            named processSpaceCharactersNonPre, not this name — confirm both
            bindings exist elsewhere in the class.
            """
            self.tree.reconstructActiveFormattingElements()
            self.tree.insertText(token["data"])
        def startTagProcessInHead(self, token):
            """Delegate start tags that still belong to the "in head" mode."""
            return self.parser.phases["inHead"].processStartTag(token)
        def startTagBody(self, token):
            """A second <body> start tag: parse error; merge any attributes
            not already present onto the existing body element."""
            self.parser.parseError("unexpected-start-tag", {"name": "body"})
            if (len(self.tree.openElements) == 1
                    or self.tree.openElements[1].name != "body"):
                # Only reachable when parsing a fragment (innerHTML case).
                assert self.parser.innerHTML
            else:
                self.parser.framesetOK = False
                for attr, value in token["data"].items():
                    # Existing attributes on <body> win over the new token's.
                    if attr not in self.tree.openElements[1].attributes:
                        self.tree.openElements[1].attributes[attr] = value
        def startTagFrameset(self, token):
            """<frameset> in body: parse error; if framesetOK still holds,
            discard the body element and switch to the "in frameset" mode."""
            self.parser.parseError("unexpected-start-tag", {"name": "frameset"})
            if (len(self.tree.openElements) == 1 or self.tree.openElements[1].name != "body"):
                # Fragment parsing (innerHTML) case.
                assert self.parser.innerHTML
            elif not self.parser.framesetOK:
                # Content already seen that forbids a frameset: ignore token.
                pass
            else:
                # Detach <body> and pop back to <html> before inserting.
                if self.tree.openElements[1].parent:
                    self.tree.openElements[1].parent.removeChild(self.tree.openElements[1])
                while self.tree.openElements[-1].name != "html":
                    self.tree.openElements.pop()
                self.tree.insertElement(token)
                self.parser.phase = self.parser.phases["inFrameset"]
def startTagCloseP(self, token):
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
self.tree.insertElement(token)
        def startTagPreListing(self, token):
            """<pre>/<listing>: close an open <p>, insert, and arm the
            drop-leading-newline whitespace handler."""
            if self.tree.elementInScope("p", variant="button"):
                self.endTagP(impliedTagToken("p"))
            self.tree.insertElement(token)
            self.parser.framesetOK = False
            # Next whitespace token may need its leading newline stripped.
            self.processSpaceCharacters = self.processSpaceCharactersDropNewline
        def startTagForm(self, token):
            """<form>: ignored (with a parse error) when a form is already
            open; otherwise insert it and record it as the form pointer."""
            if self.tree.formPointer:
                self.parser.parseError("unexpected-start-tag", {"name": "form"})
            else:
                if self.tree.elementInScope("p", variant="button"):
                    self.endTagP(impliedTagToken("p"))
                self.tree.insertElement(token)
                self.tree.formPointer = self.tree.openElements[-1]
        def startTagListItem(self, token):
            """<li>/<dd>/<dt>: implicitly close a matching open item, unless
            a special element (other than address/div/p) intervenes."""
            self.parser.framesetOK = False
            # Which open item names the new item closes: li closes li;
            # dd and dt each close either dd or dt.
            stopNamesMap = {"li": ["li"],
                            "dt": ["dt", "dd"],
                            "dd": ["dt", "dd"]}
            stopNames = stopNamesMap[token["name"]]
            for node in reversed(self.tree.openElements):
                if node.name in stopNames:
                    self.parser.phase.processEndTag(
                        impliedTagToken(node.name, "EndTag"))
                    break
                # A special element blocks the implicit close (address, div
                # and p are exempted by the spec).
                if (node.nameTuple in specialElements and
                        node.name not in ("address", "div", "p")):
                    break
            if self.tree.elementInScope("p", variant="button"):
                self.parser.phase.processEndTag(
                    impliedTagToken("p", "EndTag"))
            self.tree.insertElement(token)
        def startTagPlaintext(self, token):
            """<plaintext>: close an open <p>, insert, and switch the
            tokenizer to plaintext state (everything after is raw text)."""
            if self.tree.elementInScope("p", variant="button"):
                self.endTagP(impliedTagToken("p"))
            self.tree.insertElement(token)
            self.parser.tokenizer.state = self.parser.tokenizer.plaintextState
        def startTagHeading(self, token):
            """h1-h6: close an open <p>; a heading directly inside a heading
            is a parse error and implicitly closes the outer one."""
            if self.tree.elementInScope("p", variant="button"):
                self.endTagP(impliedTagToken("p"))
            if self.tree.openElements[-1].name in headingElements:
                self.parser.parseError("unexpected-start-tag", {"name": token["name"]})
                self.tree.openElements.pop()
            self.tree.insertElement(token)
        def startTagA(self, token):
            """<a>: an <a> already in the active formatting list is a parse
            error; run the adoption agency on it and purge leftovers before
            inserting the new one."""
            afeAElement = self.tree.elementInActiveFormattingElements("a")
            if afeAElement:
                self.parser.parseError("unexpected-start-tag-implies-end-tag",
                                       {"startName": "a", "endName": "a"})
                self.endTagFormatting(impliedTagToken("a"))
                # The adoption agency may not have removed the old <a> from
                # both lists; make sure it is gone from each.
                if afeAElement in self.tree.openElements:
                    self.tree.openElements.remove(afeAElement)
                if afeAElement in self.tree.activeFormattingElements:
                    self.tree.activeFormattingElements.remove(afeAElement)
            self.tree.reconstructActiveFormattingElements()
            self.addFormattingElement(token)
        def startTagFormatting(self, token):
            """Generic formatting start tag (<b>, <i>, <em>, ...): reconstruct
            active formatting elements, then insert and register the tag."""
            self.tree.reconstructActiveFormattingElements()
            self.addFormattingElement(token)
        def startTagNobr(self, token):
            """<nobr>: like other formatting tags, but a <nobr> already in
            scope is implicitly closed first (parse error)."""
            self.tree.reconstructActiveFormattingElements()
            if self.tree.elementInScope("nobr"):
                self.parser.parseError("unexpected-start-tag-implies-end-tag",
                                       {"startName": "nobr", "endName": "nobr"})
                self.processEndTag(impliedTagToken("nobr"))
                # XXX Need tests that trigger the following
                self.tree.reconstructActiveFormattingElements()
            self.addFormattingElement(token)
        def startTagButton(self, token):
            """<button>: a button in scope is implicitly closed first and the
            token is reprocessed (returned); otherwise insert normally."""
            if self.tree.elementInScope("button"):
                self.parser.parseError("unexpected-start-tag-implies-end-tag",
                                       {"startName": "button", "endName": "button"})
                self.processEndTag(impliedTagToken("button"))
                # Returning the token asks the parser to reprocess it.
                return token
            else:
                self.tree.reconstructActiveFormattingElements()
                self.tree.insertElement(token)
                self.parser.framesetOK = False
        def startTagAppletMarqueeObject(self, token):
            """<applet>/<marquee>/<object>: insert and push a scope marker
            onto the active formatting list (formatting does not leak in)."""
            self.tree.reconstructActiveFormattingElements()
            self.tree.insertElement(token)
            self.tree.activeFormattingElements.append(Marker)
            self.parser.framesetOK = False
        def startTagXmp(self, token):
            """<xmp>: close an open <p>, then parse the content as RAWTEXT."""
            if self.tree.elementInScope("p", variant="button"):
                self.endTagP(impliedTagToken("p"))
            self.tree.reconstructActiveFormattingElements()
            self.parser.framesetOK = False
            self.parser.parseRCDataRawtext(token, "RAWTEXT")
        def startTagTable(self, token):
            """<table>: close an open <p> (unless in quirks mode), insert,
            and switch to the "in table" insertion mode."""
            if self.parser.compatMode != "quirks":
                if self.tree.elementInScope("p", variant="button"):
                    self.processEndTag(impliedTagToken("p"))
            self.tree.insertElement(token)
            self.parser.framesetOK = False
            self.parser.phase = self.parser.phases["inTable"]
        def startTagVoidFormatting(self, token):
            """Void elements (<br>, <img>, <embed>, ...): insert and
            immediately pop; self-closing flag is acknowledged."""
            self.tree.reconstructActiveFormattingElements()
            self.tree.insertElement(token)
            self.tree.openElements.pop()
            token["selfClosingAcknowledged"] = True
            self.parser.framesetOK = False
        def startTagInput(self, token):
            """<input>: handled as a void element, except that
            type=hidden must not clear framesetOK, so it is restored."""
            framesetOK = self.parser.framesetOK
            self.startTagVoidFormatting(token)
            if ("type" in token["data"] and
                token["data"]["type"].translate(asciiUpper2Lower) == "hidden"):
                # input type=hidden doesn't change framesetOK
                self.parser.framesetOK = framesetOK
        def startTagParamSource(self, token):
            """<param>/<source>/<track>: void elements, inserted and popped
            without touching the formatting list or framesetOK."""
            self.tree.insertElement(token)
            self.tree.openElements.pop()
            token["selfClosingAcknowledged"] = True
        def startTagHr(self, token):
            """<hr>: close an open <p>, insert as a void element."""
            if self.tree.elementInScope("p", variant="button"):
                self.endTagP(impliedTagToken("p"))
            self.tree.insertElement(token)
            self.tree.openElements.pop()
            token["selfClosingAcknowledged"] = True
            self.parser.framesetOK = False
        def startTagImage(self, token):
            """<image>: spec-mandated compatibility hack — reprocess the
            token as <img> with the same attributes."""
            # No really...
            self.parser.parseError("unexpected-start-tag-treated-as",
                                   {"originalName": "image", "newName": "img"})
            self.processStartTag(impliedTagToken("img", "StartTag",
                                                 attributes=token["data"],
                                                 selfClosing=token["selfClosing"]))
        def startTagIsIndex(self, token):
            """<isindex>: deprecated tag, expanded into the equivalent
            form/hr/label/input/hr structure by synthesising tokens.
            Ignored entirely when a form is already open."""
            self.parser.parseError("deprecated-tag", {"name": "isindex"})
            if self.tree.formPointer:
                return
            form_attrs = {}
            if "action" in token["data"]:
                # The isindex action attribute moves onto the generated form.
                form_attrs["action"] = token["data"]["action"]
            self.processStartTag(impliedTagToken("form", "StartTag",
                                                 attributes=form_attrs))
            self.processStartTag(impliedTagToken("hr", "StartTag"))
            self.processStartTag(impliedTagToken("label", "StartTag"))
            # XXX Localization ...
            if "prompt" in token["data"]:
                prompt = token["data"]["prompt"]
            else:
                prompt = "This is a searchable index. Enter search keywords: "
            self.processCharacters(
                {"type": tokenTypes["Characters"], "data": prompt})
            # Remaining attributes (minus action/prompt) go on the <input>.
            attributes = token["data"].copy()
            if "action" in attributes:
                del attributes["action"]
            if "prompt" in attributes:
                del attributes["prompt"]
            attributes["name"] = "isindex"
            self.processStartTag(impliedTagToken("input", "StartTag",
                                                 attributes=attributes,
                                                 selfClosing=
                                                 token["selfClosing"]))
            self.processEndTag(impliedTagToken("label"))
            self.processStartTag(impliedTagToken("hr", "StartTag"))
            self.processEndTag(impliedTagToken("form"))
        def startTagTextarea(self, token):
            """<textarea>: insert, switch tokenizer to RCDATA, and arm the
            drop-leading-newline whitespace handler."""
            self.tree.insertElement(token)
            self.parser.tokenizer.state = self.parser.tokenizer.rcdataState
            self.processSpaceCharacters = self.processSpaceCharactersDropNewline
            self.parser.framesetOK = False
        def startTagIFrame(self, token):
            """<iframe>: clears framesetOK, then treated as raw text."""
            self.parser.framesetOK = False
            self.startTagRawtext(token)
        def startTagRawtext(self, token):
            """iframe, noembed, noframes, noscript (if scripting enabled):
            parse the element content as RAWTEXT."""
            self.parser.parseRCDataRawtext(token, "RAWTEXT")
        def startTagOpt(self, token):
            """<option>/<optgroup>: an open <option> is implicitly closed
            before inserting the new element."""
            if self.tree.openElements[-1].name == "option":
                self.parser.phase.processEndTag(impliedTagToken("option"))
            self.tree.reconstructActiveFormattingElements()
            # NOTE(review): self.parser.tree is inconsistent with the
            # self.tree used everywhere else in this class — presumably the
            # same tree object; verify before "fixing".
            self.parser.tree.insertElement(token)
        def startTagSelect(self, token):
            """<select>: insert and switch to "in select", or to
            "in select in table" when opened inside table-related modes."""
            self.tree.reconstructActiveFormattingElements()
            self.tree.insertElement(token)
            self.parser.framesetOK = False
            if self.parser.phase in (self.parser.phases["inTable"],
                                     self.parser.phases["inCaption"],
                                     self.parser.phases["inColumnGroup"],
                                     self.parser.phases["inTableBody"],
                                     self.parser.phases["inRow"],
                                     self.parser.phases["inCell"]):
                self.parser.phase = self.parser.phases["inSelectInTable"]
            else:
                self.parser.phase = self.parser.phases["inSelect"]
        def startTagRpRt(self, token):
            """<rp>/<rt>: inside a ruby, generate implied end tags; if that
            does not leave <ruby> current, it is a parse error."""
            if self.tree.elementInScope("ruby"):
                self.tree.generateImpliedEndTags()
                if self.tree.openElements[-1].name != "ruby":
                    self.parser.parseError()
            self.tree.insertElement(token)
        def startTagMath(self, token):
            """<math>: adjust MathML/foreign attributes, tag the token with
            the MathML namespace, and insert (popping if self-closing)."""
            self.tree.reconstructActiveFormattingElements()
            self.parser.adjustMathMLAttributes(token)
            self.parser.adjustForeignAttributes(token)
            token["namespace"] = namespaces["mathml"]
            self.tree.insertElement(token)
            # Need to get the parse error right for the case where the token
            # has a namespace not equal to the xmlns attribute
            if token["selfClosing"]:
                self.tree.openElements.pop()
                token["selfClosingAcknowledged"] = True
        def startTagSvg(self, token):
            """<svg>: adjust SVG/foreign attributes, tag the token with the
            SVG namespace, and insert (popping if self-closing)."""
            self.tree.reconstructActiveFormattingElements()
            self.parser.adjustSVGAttributes(token)
            self.parser.adjustForeignAttributes(token)
            token["namespace"] = namespaces["svg"]
            self.tree.insertElement(token)
            # Need to get the parse error right for the case where the token
            # has a namespace not equal to the xmlns attribute
            if token["selfClosing"]:
                self.tree.openElements.pop()
                token["selfClosingAcknowledged"] = True
        def startTagMisplaced(self, token):
            """ Elements that should be children of other elements that have a
            different insertion mode; here they are ignored
            "caption", "col", "colgroup", "frame", "frameset", "head",
            "option", "optgroup", "tbody", "td", "tfoot", "th", "thead",
            "tr", "noscript"
            """
            self.parser.parseError("unexpected-start-tag-ignored", {"name": token["name"]})
        def startTagOther(self, token):
            """Any other start tag: reconstruct formatting and insert."""
            self.tree.reconstructActiveFormattingElements()
            self.tree.insertElement(token)
        def endTagP(self, token):
            """</p>: with no <p> in button scope, synthesise one (parse
            error) and retry; otherwise pop up to and including the <p>."""
            if not self.tree.elementInScope("p", variant="button"):
                self.startTagCloseP(impliedTagToken("p", "StartTag"))
                self.parser.parseError("unexpected-end-tag", {"name": "p"})
                self.endTagP(impliedTagToken("p", "EndTag"))
            else:
                self.tree.generateImpliedEndTags("p")
                if self.tree.openElements[-1].name != "p":
                    self.parser.parseError("unexpected-end-tag", {"name": "p"})
                node = self.tree.openElements.pop()
                while node.name != "p":
                    node = self.tree.openElements.pop()
        def endTagBody(self, token):
            """</body>: ignored (parse error) when no body is in scope;
            otherwise report any improperly-open element and switch to the
            "after body" insertion mode.  Note: nothing is popped."""
            if not self.tree.elementInScope("body"):
                self.parser.parseError()
                return
            elif self.tree.openElements[-1].name != "body":
                # Skip html and body ([2:]) when looking for leftovers.
                for node in self.tree.openElements[2:]:
                    if node.name not in frozenset(("dd", "dt", "li", "optgroup",
                                                   "option", "p", "rp", "rt",
                                                   "tbody", "td", "tfoot",
                                                   "th", "thead", "tr", "body",
                                                   "html")):
                        # Not sure this is the correct name for the parse error
                        self.parser.parseError(
                            "expected-one-end-tag-but-got-another",
                            {"expectedName": "body", "gotName": node.name})
                        break
            self.parser.phase = self.parser.phases["afterBody"]
        def endTagHtml(self, token):
            """</html>: acts as </body> then reprocesses the token, but only
            when the implied </body> would not itself be ignored."""
            # We repeat the test for the body end tag token being ignored here
            if self.tree.elementInScope("body"):
                self.endTagBody(impliedTagToken("body"))
                return token
        def endTagBlock(self, token):
            """Generic block end tag (div, ul, section, ...): pop up to and
            including the matching element if it is in scope."""
            # Put us back in the right whitespace handling mode
            if token["name"] == "pre":
                self.processSpaceCharacters = self.processSpaceCharactersNonPre
            inScope = self.tree.elementInScope(token["name"])
            if inScope:
                self.tree.generateImpliedEndTags()
            if self.tree.openElements[-1].name != token["name"]:
                self.parser.parseError("end-tag-too-early", {"name": token["name"]})
            if inScope:
                node = self.tree.openElements.pop()
                while node.name != token["name"]:
                    node = self.tree.openElements.pop()
        def endTagForm(self, token):
            """</form>: clears the form pointer; the form element is removed
            from the open-elements stack (not popped) if it is in scope."""
            node = self.tree.formPointer
            self.tree.formPointer = None
            if node is None or not self.tree.elementInScope(node):
                self.parser.parseError("unexpected-end-tag",
                                       {"name": "form"})
            else:
                self.tree.generateImpliedEndTags()
                if self.tree.openElements[-1] != node:
                    self.parser.parseError("end-tag-too-early-ignored",
                                           {"name": "form"})
                # remove, not pop: the form may not be the current node.
                self.tree.openElements.remove(node)
        def endTagListItem(self, token):
            """</li>/</dd>/</dt>: pop up to and including the item; <li>
            uses list-item scope, dd/dt use normal scope."""
            if token["name"] == "li":
                variant = "list"
            else:
                variant = None
            if not self.tree.elementInScope(token["name"], variant=variant):
                self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
            else:
                # The item itself is excluded from implied end tags.
                self.tree.generateImpliedEndTags(exclude=token["name"])
                if self.tree.openElements[-1].name != token["name"]:
                    self.parser.parseError(
                        "end-tag-too-early",
                        {"name": token["name"]})
                node = self.tree.openElements.pop()
                while node.name != token["name"]:
                    node = self.tree.openElements.pop()
        def endTagHeading(self, token):
            """h1-h6 end tag: any heading in scope closes it — the popped
            heading need not match the token's name."""
            for item in headingElements:
                if self.tree.elementInScope(item):
                    self.tree.generateImpliedEndTags()
                    break
            if self.tree.openElements[-1].name != token["name"]:
                self.parser.parseError("end-tag-too-early", {"name": token["name"]})
            for item in headingElements:
                if self.tree.elementInScope(item):
                    # Pop up to and including the nearest heading element.
                    item = self.tree.openElements.pop()
                    while item.name not in headingElements:
                        item = self.tree.openElements.pop()
                    break
        def endTagFormatting(self, token):
            """The much-feared adoption agency algorithm.

            Repairs mis-nested formatting elements (e.g. <b><p></b></p>) by
            cloning formatting elements and re-parenting content, keeping
            the active-formatting list and the open-elements stack in sync.
            Statement order here follows the spec steps exactly; do not
            reorder.
            """
            # http://svn.whatwg.org/webapps/complete.html#adoptionAgency revision 7867
            # XXX Better parseError messages appreciated.
            # Step 1
            outerLoopCounter = 0
            # Step 2
            while outerLoopCounter < 8:
                # Step 3
                outerLoopCounter += 1
                # Step 4:
                # Let the formatting element be the last element in
                # the list of active formatting elements that:
                # - is between the end of the list and the last scope
                # marker in the list, if any, or the start of the list
                # otherwise, and
                # - has the same tag name as the token.
                formattingElement = self.tree.elementInActiveFormattingElements(
                    token["name"])
                if (not formattingElement or
                    (formattingElement in self.tree.openElements and
                     not self.tree.elementInScope(formattingElement.name))):
                    # If there is no such node, then abort these steps
                    # and instead act as described in the "any other
                    # end tag" entry below.
                    self.endTagOther(token)
                    return
                # Otherwise, if there is such a node, but that node is
                # not in the stack of open elements, then this is a
                # parse error; remove the element from the list, and
                # abort these steps.
                elif formattingElement not in self.tree.openElements:
                    self.parser.parseError("adoption-agency-1.2", {"name": token["name"]})
                    self.tree.activeFormattingElements.remove(formattingElement)
                    return
                # Otherwise, if there is such a node, and that node is
                # also in the stack of open elements, but the element
                # is not in scope, then this is a parse error; ignore
                # the token, and abort these steps.
                elif not self.tree.elementInScope(formattingElement.name):
                    self.parser.parseError("adoption-agency-4.4", {"name": token["name"]})
                    return
                # Otherwise, there is a formatting element and that
                # element is in the stack and is in scope. If the
                # element is not the current node, this is a parse
                # error. In any case, proceed with the algorithm as
                # written in the following steps.
                else:
                    if formattingElement != self.tree.openElements[-1]:
                        self.parser.parseError("adoption-agency-1.3", {"name": token["name"]})
                # Step 5:
                # Let the furthest block be the topmost node in the
                # stack of open elements that is lower in the stack
                # than the formatting element, and is an element in
                # the special category. There might not be one.
                afeIndex = self.tree.openElements.index(formattingElement)
                furthestBlock = None
                for element in self.tree.openElements[afeIndex:]:
                    if element.nameTuple in specialElements:
                        furthestBlock = element
                        break
                # Step 6:
                # If there is no furthest block, then the UA must
                # first pop all the nodes from the bottom of the stack
                # of open elements, from the current node up to and
                # including the formatting element, then remove the
                # formatting element from the list of active
                # formatting elements, and finally abort these steps.
                if furthestBlock is None:
                    element = self.tree.openElements.pop()
                    while element != formattingElement:
                        element = self.tree.openElements.pop()
                    self.tree.activeFormattingElements.remove(element)
                    return
                # Step 7
                commonAncestor = self.tree.openElements[afeIndex - 1]
                # Step 8:
                # The bookmark is supposed to help us identify where to reinsert
                # nodes in step 15. We have to ensure that we reinsert nodes after
                # the node before the active formatting element. Note the bookmark
                # can move in step 9.7
                bookmark = self.tree.activeFormattingElements.index(formattingElement)
                # Step 9
                lastNode = node = furthestBlock
                innerLoopCounter = 0
                index = self.tree.openElements.index(node)
                while innerLoopCounter < 3:
                    innerLoopCounter += 1
                    # Node is element before node in open elements
                    index -= 1
                    node = self.tree.openElements[index]
                    if node not in self.tree.activeFormattingElements:
                        self.tree.openElements.remove(node)
                        continue
                    # Step 9.6
                    if node == formattingElement:
                        break
                    # Step 9.7
                    if lastNode == furthestBlock:
                        bookmark = self.tree.activeFormattingElements.index(node) + 1
                    # Step 9.8
                    clone = node.cloneNode()
                    # Replace node with clone
                    self.tree.activeFormattingElements[
                        self.tree.activeFormattingElements.index(node)] = clone
                    self.tree.openElements[
                        self.tree.openElements.index(node)] = clone
                    node = clone
                    # Step 9.9
                    # Remove lastNode from its parents, if any
                    if lastNode.parent:
                        lastNode.parent.removeChild(lastNode)
                    node.appendChild(lastNode)
                    # Step 9.10
                    lastNode = node
                # Step 10
                # Foster parent lastNode if commonAncestor is a
                # table, tbody, tfoot, thead, or tr we need to foster
                # parent the lastNode
                if lastNode.parent:
                    lastNode.parent.removeChild(lastNode)
                if commonAncestor.name in frozenset(("table", "tbody", "tfoot", "thead", "tr")):
                    parent, insertBefore = self.tree.getTableMisnestedNodePosition()
                    parent.insertBefore(lastNode, insertBefore)
                else:
                    commonAncestor.appendChild(lastNode)
                # Step 11
                clone = formattingElement.cloneNode()
                # Step 12
                furthestBlock.reparentChildren(clone)
                # Step 13
                furthestBlock.appendChild(clone)
                # Step 14
                self.tree.activeFormattingElements.remove(formattingElement)
                self.tree.activeFormattingElements.insert(bookmark, clone)
                # Step 15
                self.tree.openElements.remove(formattingElement)
                self.tree.openElements.insert(
                    self.tree.openElements.index(furthestBlock) + 1, clone)
        def endTagAppletMarqueeObject(self, token):
            """</applet>/</marquee>/</object>: pop up to and including the
            element, then clear formatting back to the scope marker."""
            if self.tree.elementInScope(token["name"]):
                self.tree.generateImpliedEndTags()
            if self.tree.openElements[-1].name != token["name"]:
                self.parser.parseError("end-tag-too-early", {"name": token["name"]})
            if self.tree.elementInScope(token["name"]):
                element = self.tree.openElements.pop()
                while element.name != token["name"]:
                    element = self.tree.openElements.pop()
                self.tree.clearActiveFormattingElements()
        def endTagBr(self, token):
            """</br>: parse error; treated as a <br> start tag (inserted and
            immediately popped)."""
            self.parser.parseError("unexpected-end-tag-treated-as",
                                   {"originalName": "br", "newName": "br element"})
            self.tree.reconstructActiveFormattingElements()
            self.tree.insertElement(impliedTagToken("br", "StartTag"))
            self.tree.openElements.pop()
        def endTagOther(self, token):
            """Any other end tag: walk the stack downwards; pop through the
            first matching element, but stop (ignoring the token) at any
            special element encountered first."""
            for node in self.tree.openElements[::-1]:
                if node.name == token["name"]:
                    self.tree.generateImpliedEndTags(exclude=token["name"])
                    if self.tree.openElements[-1].name != token["name"]:
                        self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
                    while self.tree.openElements.pop() != node:
                        pass
                    break
                else:
                    if node.nameTuple in specialElements:
                        self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
                        break
    class TextPhase(Phase):
        """Phase for RCDATA/RAWTEXT content (<script>, <style>, <textarea>,
        ...): characters are inserted verbatim until the element's end tag
        (or EOF) restores the saved original phase."""
        def __init__(self, parser, tree):
            Phase.__init__(self, parser, tree)
            self.startTagHandler = utils.MethodDispatcher([])
            self.startTagHandler.default = self.startTagOther
            self.endTagHandler = utils.MethodDispatcher([
                ("script", self.endTagScript)])
            self.endTagHandler.default = self.endTagOther
        def processCharacters(self, token):
            # Raw text: no formatting reconstruction, just insert.
            self.tree.insertText(token["data"])
        def processEOF(self):
            # EOF inside raw text: parse error; pop the element and return
            # control to the phase that started the raw-text run.
            self.parser.parseError("expected-named-closing-tag-but-got-eof",
                                   {"name": self.tree.openElements[-1].name})
            self.tree.openElements.pop()
            self.parser.phase = self.parser.originalPhase
            return True
        def startTagOther(self, token):
            # The tokenizer cannot produce start tags in these states.
            assert False, "Tried to process start tag %s in RCDATA/RAWTEXT mode" % token['name']
        def endTagScript(self, token):
            node = self.tree.openElements.pop()
            assert node.name == "script"
            self.parser.phase = self.parser.originalPhase
            # The rest of this method is all stuff that only happens if
            # document.write works
        def endTagOther(self, token):
            self.tree.openElements.pop()
            self.parser.phase = self.parser.originalPhase
    class InTablePhase(Phase):
        """The "in table" insertion mode: table-structure tags are handled
        directly; anything else is foster-parented via the "in body" phase
        ("table voodoo")."""
        # http://www.whatwg.org/specs/web-apps/current-work/#in-table
        def __init__(self, parser, tree):
            Phase.__init__(self, parser, tree)
            self.startTagHandler = utils.MethodDispatcher([
                ("html", self.startTagHtml),
                ("caption", self.startTagCaption),
                ("colgroup", self.startTagColgroup),
                ("col", self.startTagCol),
                (("tbody", "tfoot", "thead"), self.startTagRowGroup),
                (("td", "th", "tr"), self.startTagImplyTbody),
                ("table", self.startTagTable),
                (("style", "script"), self.startTagStyleScript),
                ("input", self.startTagInput),
                ("form", self.startTagForm)
            ])
            self.startTagHandler.default = self.startTagOther
            self.endTagHandler = utils.MethodDispatcher([
                ("table", self.endTagTable),
                (("body", "caption", "col", "colgroup", "html", "tbody", "td",
                  "tfoot", "th", "thead", "tr"), self.endTagIgnore)
            ])
            self.endTagHandler.default = self.endTagOther
        # helper methods
        def clearStackToTableContext(self):
            # "clear the stack back to a table context"
            while self.tree.openElements[-1].name not in ("table", "html"):
                # self.parser.parseError("unexpected-implied-end-tag-in-table",
                #  {"name":  self.tree.openElements[-1].name})
                self.tree.openElements.pop()
            # When the current node is <html> it's an innerHTML case
        # processing methods
        def processEOF(self):
            if self.tree.openElements[-1].name != "html":
                self.parser.parseError("eof-in-table")
            else:
                assert self.parser.innerHTML
            # Stop parsing
        def processSpaceCharacters(self, token):
            # Buffer character runs via the "in table text" phase.
            originalPhase = self.parser.phase
            self.parser.phase = self.parser.phases["inTableText"]
            self.parser.phase.originalPhase = originalPhase
            self.parser.phase.processSpaceCharacters(token)
        def processCharacters(self, token):
            # Buffer character runs via the "in table text" phase.
            originalPhase = self.parser.phase
            self.parser.phase = self.parser.phases["inTableText"]
            self.parser.phase.originalPhase = originalPhase
            self.parser.phase.processCharacters(token)
        def insertText(self, token):
            # If we get here there must be at least one non-whitespace character
            # Do the table magic!
            self.tree.insertFromTable = True
            self.parser.phases["inBody"].processCharacters(token)
            self.tree.insertFromTable = False
        def startTagCaption(self, token):
            self.clearStackToTableContext()
            # Scope marker so caption formatting stays inside the caption.
            self.tree.activeFormattingElements.append(Marker)
            self.tree.insertElement(token)
            self.parser.phase = self.parser.phases["inCaption"]
        def startTagColgroup(self, token):
            self.clearStackToTableContext()
            self.tree.insertElement(token)
            self.parser.phase = self.parser.phases["inColumnGroup"]
        def startTagCol(self, token):
            # <col> without <colgroup>: synthesise one and reprocess.
            self.startTagColgroup(impliedTagToken("colgroup", "StartTag"))
            return token
        def startTagRowGroup(self, token):
            self.clearStackToTableContext()
            self.tree.insertElement(token)
            self.parser.phase = self.parser.phases["inTableBody"]
        def startTagImplyTbody(self, token):
            # td/th/tr without <tbody>: synthesise one and reprocess.
            self.startTagRowGroup(impliedTagToken("tbody", "StartTag"))
            return token
        def startTagTable(self, token):
            # Nested <table>: close the current one, then reprocess.
            self.parser.parseError("unexpected-start-tag-implies-end-tag",
                                   {"startName": "table", "endName": "table"})
            self.parser.phase.processEndTag(impliedTagToken("table"))
            if not self.parser.innerHTML:
                return token
        def startTagStyleScript(self, token):
            return self.parser.phases["inHead"].processStartTag(token)
        def startTagInput(self, token):
            if ("type" in token["data"] and
                token["data"]["type"].translate(asciiUpper2Lower) == "hidden"):
                # Hidden inputs are allowed directly in the table.
                self.parser.parseError("unexpected-hidden-input-in-table")
                self.tree.insertElement(token)
                # XXX associate with form
                self.tree.openElements.pop()
            else:
                self.startTagOther(token)
        def startTagForm(self, token):
            # Inserted and immediately popped; only sets the form pointer.
            self.parser.parseError("unexpected-form-in-table")
            if self.tree.formPointer is None:
                self.tree.insertElement(token)
                self.tree.formPointer = self.tree.openElements[-1]
                self.tree.openElements.pop()
        def startTagOther(self, token):
            self.parser.parseError("unexpected-start-tag-implies-table-voodoo", {"name": token["name"]})
            # Do the table magic!
            self.tree.insertFromTable = True
            self.parser.phases["inBody"].processStartTag(token)
            self.tree.insertFromTable = False
        def endTagTable(self, token):
            if self.tree.elementInScope("table", variant="table"):
                self.tree.generateImpliedEndTags()
                if self.tree.openElements[-1].name != "table":
                    self.parser.parseError("end-tag-too-early-named",
                                           {"gotName": "table",
                                            "expectedName": self.tree.openElements[-1].name})
                while self.tree.openElements[-1].name != "table":
                    self.tree.openElements.pop()
                self.tree.openElements.pop()
                self.parser.resetInsertionMode()
            else:
                # innerHTML case
                assert self.parser.innerHTML
                self.parser.parseError()
        def endTagIgnore(self, token):
            self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
        def endTagOther(self, token):
            self.parser.parseError("unexpected-end-tag-implies-table-voodoo", {"name": token["name"]})
            # Do the table magic!
            self.tree.insertFromTable = True
            self.parser.phases["inBody"].processEndTag(token)
            self.tree.insertFromTable = False
class InTableTextPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.originalPhase = None
self.characterTokens = []
def flushCharacters(self):
data = "".join([item["data"] for item in self.characterTokens])
if any([item not in spaceCharacters for item in data]):
token = {"type": tokenTypes["Characters"], "data": data}
self.parser.phases["inTable"].insertText(token)
elif data:
self.tree.insertText(data)
self.characterTokens = []
def processComment(self, token):
self.flushCharacters()
self.parser.phase = self.originalPhase
return token
def processEOF(self):
self.flushCharacters()
self.parser.phase = self.originalPhase
return True
def processCharacters(self, token):
if token["data"] == "\u0000":
return
self.characterTokens.append(token)
def processSpaceCharacters(self, token):
# pretty sure we should never reach here
self.characterTokens.append(token)
# assert False
def processStartTag(self, token):
self.flushCharacters()
self.parser.phase = self.originalPhase
return token
def processEndTag(self, token):
self.flushCharacters()
self.parser.phase = self.originalPhase
return token
    class InCaptionPhase(Phase):
        """The "in caption" insertion mode: mostly delegates to "in body",
        but table-structure tags implicitly close the caption."""
        # http://www.whatwg.org/specs/web-apps/current-work/#in-caption
        def __init__(self, parser, tree):
            Phase.__init__(self, parser, tree)
            self.startTagHandler = utils.MethodDispatcher([
                ("html", self.startTagHtml),
                (("caption", "col", "colgroup", "tbody", "td", "tfoot", "th",
                  "thead", "tr"), self.startTagTableElement)
            ])
            self.startTagHandler.default = self.startTagOther
            self.endTagHandler = utils.MethodDispatcher([
                ("caption", self.endTagCaption),
                ("table", self.endTagTable),
                (("body", "col", "colgroup", "html", "tbody", "td", "tfoot", "th",
                  "thead", "tr"), self.endTagIgnore)
            ])
            self.endTagHandler.default = self.endTagOther
        def ignoreEndTagCaption(self):
            # True when </caption> would be ignored (no caption in scope).
            return not self.tree.elementInScope("caption", variant="table")
        def processEOF(self):
            self.parser.phases["inBody"].processEOF()
        def processCharacters(self, token):
            return self.parser.phases["inBody"].processCharacters(token)
        def startTagTableElement(self, token):
            self.parser.parseError()
            # XXX Have to duplicate logic here to find out if the tag is ignored
            ignoreEndTag = self.ignoreEndTagCaption()
            self.parser.phase.processEndTag(impliedTagToken("caption"))
            if not ignoreEndTag:
                # Reprocess the table-structure token after closing caption.
                return token
        def startTagOther(self, token):
            return self.parser.phases["inBody"].processStartTag(token)
        def endTagCaption(self, token):
            if not self.ignoreEndTagCaption():
                # AT this code is quite similar to endTagTable in "InTable"
                self.tree.generateImpliedEndTags()
                if self.tree.openElements[-1].name != "caption":
                    self.parser.parseError("expected-one-end-tag-but-got-another",
                                           {"gotName": "caption",
                                            "expectedName": self.tree.openElements[-1].name})
                while self.tree.openElements[-1].name != "caption":
                    self.tree.openElements.pop()
                self.tree.openElements.pop()
                # Drop formatting entries back to the caption's scope marker.
                self.tree.clearActiveFormattingElements()
                self.parser.phase = self.parser.phases["inTable"]
            else:
                # innerHTML case
                assert self.parser.innerHTML
                self.parser.parseError()
        def endTagTable(self, token):
            self.parser.parseError()
            ignoreEndTag = self.ignoreEndTagCaption()
            self.parser.phase.processEndTag(impliedTagToken("caption"))
            if not ignoreEndTag:
                return token
        def endTagIgnore(self, token):
            self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
        def endTagOther(self, token):
            return self.parser.phases["inBody"].processEndTag(token)
    class InColumnGroupPhase(Phase):
        """The "in column group" insertion mode: only <col> is accepted;
        everything else implicitly closes the colgroup and is reprocessed."""
        # http://www.whatwg.org/specs/web-apps/current-work/#in-column
        def __init__(self, parser, tree):
            Phase.__init__(self, parser, tree)
            self.startTagHandler = utils.MethodDispatcher([
                ("html", self.startTagHtml),
                ("col", self.startTagCol)
            ])
            self.startTagHandler.default = self.startTagOther
            self.endTagHandler = utils.MethodDispatcher([
                ("colgroup", self.endTagColgroup),
                ("col", self.endTagCol)
            ])
            self.endTagHandler.default = self.endTagOther
        def ignoreEndTagColgroup(self):
            # Current node is <html> only in the fragment (innerHTML) case.
            return self.tree.openElements[-1].name == "html"
        def processEOF(self):
            if self.tree.openElements[-1].name == "html":
                assert self.parser.innerHTML
                return
            else:
                ignoreEndTag = self.ignoreEndTagColgroup()
                self.endTagColgroup(impliedTagToken("colgroup"))
                if not ignoreEndTag:
                    return True
        def processCharacters(self, token):
            ignoreEndTag = self.ignoreEndTagColgroup()
            self.endTagColgroup(impliedTagToken("colgroup"))
            if not ignoreEndTag:
                return token
        def startTagCol(self, token):
            # <col> is a void element: insert and pop immediately.
            self.tree.insertElement(token)
            self.tree.openElements.pop()
        def startTagOther(self, token):
            ignoreEndTag = self.ignoreEndTagColgroup()
            self.endTagColgroup(impliedTagToken("colgroup"))
            if not ignoreEndTag:
                return token
        def endTagColgroup(self, token):
            if self.ignoreEndTagColgroup():
                # innerHTML case
                assert self.parser.innerHTML
                self.parser.parseError()
            else:
                self.tree.openElements.pop()
                self.parser.phase = self.parser.phases["inTable"]
        def endTagCol(self, token):
            self.parser.parseError("no-end-tag", {"name": "col"})
        def endTagOther(self, token):
            ignoreEndTag = self.ignoreEndTagColgroup()
            self.endTagColgroup(impliedTagToken("colgroup"))
            if not ignoreEndTag:
                return token
class InTableBodyPhase(Phase):
    """Insertion mode "in table body" (<tbody>/<thead>/<tfoot> is open).

    http://www.whatwg.org/specs/web-apps/current-work/#in-table0
    """
    def __init__(self, parser, tree):
        Phase.__init__(self, parser, tree)
        self.startTagHandler = utils.MethodDispatcher([
            ("html", self.startTagHtml),
            ("tr", self.startTagTr),
            (("td", "th"), self.startTagTableCell),
            (("caption", "col", "colgroup", "tbody", "tfoot", "thead"),
             self.startTagTableOther)
        ])
        self.startTagHandler.default = self.startTagOther
        self.endTagHandler = utils.MethodDispatcher([
            (("tbody", "tfoot", "thead"), self.endTagTableRowGroup),
            ("table", self.endTagTable),
            (("body", "caption", "col", "colgroup", "html", "td", "th",
              "tr"), self.endTagIgnore)
        ])
        self.endTagHandler.default = self.endTagOther

    # helper methods
    def clearStackToTableBodyContext(self):
        # Pop the stack until a row-group element (or <html> in the
        # fragment case) is the current node.
        while self.tree.openElements[-1].name not in ("tbody", "tfoot",
                                                      "thead", "html"):
            # self.parser.parseError("unexpected-implied-end-tag-in-table",
            #                        {"name": self.tree.openElements[-1].name})
            self.tree.openElements.pop()
        if self.tree.openElements[-1].name == "html":
            assert self.parser.innerHTML

    # the rest
    def processEOF(self):
        # EOF, whitespace and character handling are shared with "in table".
        self.parser.phases["inTable"].processEOF()

    def processSpaceCharacters(self, token):
        return self.parser.phases["inTable"].processSpaceCharacters(token)

    def processCharacters(self, token):
        return self.parser.phases["inTable"].processCharacters(token)

    def startTagTr(self, token):
        self.clearStackToTableBodyContext()
        self.tree.insertElement(token)
        self.parser.phase = self.parser.phases["inRow"]

    def startTagTableCell(self, token):
        # A cell with no <tr>: report, imply <tr>, then reprocess the cell.
        self.parser.parseError("unexpected-cell-in-table-body",
                               {"name": token["name"]})
        self.startTagTr(impliedTagToken("tr", "StartTag"))
        return token

    def startTagTableOther(self, token):
        # XXX AT Any ideas on how to share this with endTagTable?
        if (self.tree.elementInScope("tbody", variant="table") or
            self.tree.elementInScope("thead", variant="table") or
            self.tree.elementInScope("tfoot", variant="table")):
            # Close the open row group and reprocess this token.
            self.clearStackToTableBodyContext()
            self.endTagTableRowGroup(
                impliedTagToken(self.tree.openElements[-1].name))
            return token
        else:
            # innerHTML case
            assert self.parser.innerHTML
            self.parser.parseError()

    def startTagOther(self, token):
        return self.parser.phases["inTable"].processStartTag(token)

    def endTagTableRowGroup(self, token):
        if self.tree.elementInScope(token["name"], variant="table"):
            self.clearStackToTableBodyContext()
            self.tree.openElements.pop()
            self.parser.phase = self.parser.phases["inTable"]
        else:
            self.parser.parseError("unexpected-end-tag-in-table-body",
                                   {"name": token["name"]})

    def endTagTable(self, token):
        # </table> closes the open row group first, then is reprocessed.
        if (self.tree.elementInScope("tbody", variant="table") or
            self.tree.elementInScope("thead", variant="table") or
            self.tree.elementInScope("tfoot", variant="table")):
            self.clearStackToTableBodyContext()
            self.endTagTableRowGroup(
                impliedTagToken(self.tree.openElements[-1].name))
            return token
        else:
            # innerHTML case
            assert self.parser.innerHTML
            self.parser.parseError()

    def endTagIgnore(self, token):
        self.parser.parseError("unexpected-end-tag-in-table-body",
                               {"name": token["name"]})

    def endTagOther(self, token):
        return self.parser.phases["inTable"].processEndTag(token)
class InRowPhase(Phase):
    """Insertion mode "in row" (a <tr> is open).

    http://www.whatwg.org/specs/web-apps/current-work/#in-row
    """
    def __init__(self, parser, tree):
        Phase.__init__(self, parser, tree)
        self.startTagHandler = utils.MethodDispatcher([
            ("html", self.startTagHtml),
            (("td", "th"), self.startTagTableCell),
            (("caption", "col", "colgroup", "tbody", "tfoot", "thead",
              "tr"), self.startTagTableOther)
        ])
        self.startTagHandler.default = self.startTagOther
        self.endTagHandler = utils.MethodDispatcher([
            ("tr", self.endTagTr),
            ("table", self.endTagTable),
            (("tbody", "tfoot", "thead"), self.endTagTableRowGroup),
            (("body", "caption", "col", "colgroup", "html", "td", "th"),
             self.endTagIgnore)
        ])
        self.endTagHandler.default = self.endTagOther

    # helper methods (XXX unify this with other table helper methods)
    def clearStackToTableRowContext(self):
        # Pop until <tr> (or <html> in the fragment case) is current,
        # reporting each implicitly closed element.
        while self.tree.openElements[-1].name not in ("tr", "html"):
            self.parser.parseError("unexpected-implied-end-tag-in-table-row",
                                   {"name": self.tree.openElements[-1].name})
            self.tree.openElements.pop()

    def ignoreEndTagTr(self):
        # </tr> is ignored when no <tr> is in table scope (innerHTML case).
        return not self.tree.elementInScope("tr", variant="table")

    # the rest
    def processEOF(self):
        # EOF, whitespace and character handling are shared with "in table".
        self.parser.phases["inTable"].processEOF()

    def processSpaceCharacters(self, token):
        return self.parser.phases["inTable"].processSpaceCharacters(token)

    def processCharacters(self, token):
        return self.parser.phases["inTable"].processCharacters(token)

    def startTagTableCell(self, token):
        self.clearStackToTableRowContext()
        self.tree.insertElement(token)
        self.parser.phase = self.parser.phases["inCell"]
        # A Marker bounds the cell's active formatting elements.
        self.tree.activeFormattingElements.append(Marker)

    def startTagTableOther(self, token):
        # Any table-structure start tag implies </tr> and is reprocessed.
        ignoreEndTag = self.ignoreEndTagTr()
        self.endTagTr(impliedTagToken("tr"))
        # XXX how are we sure it's always ignored in the innerHTML case?
        if not ignoreEndTag:
            return token

    def startTagOther(self, token):
        return self.parser.phases["inTable"].processStartTag(token)

    def endTagTr(self, token):
        if not self.ignoreEndTagTr():
            self.clearStackToTableRowContext()
            self.tree.openElements.pop()
            self.parser.phase = self.parser.phases["inTableBody"]
        else:
            # innerHTML case
            assert self.parser.innerHTML
            self.parser.parseError()

    def endTagTable(self, token):
        ignoreEndTag = self.ignoreEndTagTr()
        self.endTagTr(impliedTagToken("tr"))
        # Reprocess the current tag if the tr end tag was not ignored
        # XXX how are we sure it's always ignored in the innerHTML case?
        if not ignoreEndTag:
            return token

    def endTagTableRowGroup(self, token):
        if self.tree.elementInScope(token["name"], variant="table"):
            # Close the row, then reprocess the row-group end tag.
            self.endTagTr(impliedTagToken("tr"))
            return token
        else:
            self.parser.parseError()

    def endTagIgnore(self, token):
        self.parser.parseError("unexpected-end-tag-in-table-row",
                               {"name": token["name"]})

    def endTagOther(self, token):
        return self.parser.phases["inTable"].processEndTag(token)
class InCellPhase(Phase):
    """Insertion mode "in cell" (a <td> or <th> is open).

    http://www.whatwg.org/specs/web-apps/current-work/#in-cell
    """
    def __init__(self, parser, tree):
        Phase.__init__(self, parser, tree)
        self.startTagHandler = utils.MethodDispatcher([
            ("html", self.startTagHtml),
            (("caption", "col", "colgroup", "tbody", "td", "tfoot", "th",
              "thead", "tr"), self.startTagTableOther)
        ])
        self.startTagHandler.default = self.startTagOther
        self.endTagHandler = utils.MethodDispatcher([
            (("td", "th"), self.endTagTableCell),
            (("body", "caption", "col", "colgroup", "html"), self.endTagIgnore),
            (("table", "tbody", "tfoot", "thead", "tr"), self.endTagImply)
        ])
        self.endTagHandler.default = self.endTagOther

    # helper
    def closeCell(self):
        # Emit an implied end tag for whichever cell type is in table scope.
        if self.tree.elementInScope("td", variant="table"):
            self.endTagTableCell(impliedTagToken("td"))
        elif self.tree.elementInScope("th", variant="table"):
            self.endTagTableCell(impliedTagToken("th"))

    # the rest
    def processEOF(self):
        # EOF and characters inside a cell are handled as "in body".
        self.parser.phases["inBody"].processEOF()

    def processCharacters(self, token):
        return self.parser.phases["inBody"].processCharacters(token)

    def startTagTableOther(self, token):
        if (self.tree.elementInScope("td", variant="table") or
            self.tree.elementInScope("th", variant="table")):
            # Close the current cell, then reprocess the new table token.
            self.closeCell()
            return token
        else:
            # innerHTML case
            assert self.parser.innerHTML
            self.parser.parseError()

    def startTagOther(self, token):
        return self.parser.phases["inBody"].processStartTag(token)

    def endTagTableCell(self, token):
        if self.tree.elementInScope(token["name"], variant="table"):
            self.tree.generateImpliedEndTags(token["name"])
            if self.tree.openElements[-1].name != token["name"]:
                # Unexpected current node: pop until the cell is found.
                self.parser.parseError("unexpected-cell-end-tag",
                                       {"name": token["name"]})
                while True:
                    node = self.tree.openElements.pop()
                    if node.name == token["name"]:
                        break
            else:
                self.tree.openElements.pop()
            self.tree.clearActiveFormattingElements()
            self.parser.phase = self.parser.phases["inRow"]
        else:
            self.parser.parseError("unexpected-end-tag", {"name": token["name"]})

    def endTagIgnore(self, token):
        self.parser.parseError("unexpected-end-tag", {"name": token["name"]})

    def endTagImply(self, token):
        # Table-structure end tags implicitly close the cell, then get
        # reprocessed by the new phase.
        if self.tree.elementInScope(token["name"], variant="table"):
            self.closeCell()
            return token
        else:
            # sometimes innerHTML case
            self.parser.parseError()

    def endTagOther(self, token):
        return self.parser.phases["inBody"].processEndTag(token)
class InSelectPhase(Phase):
    """Insertion mode "in select" (a <select> is open).

    http://www.whatwg.org/specs/web-apps/current-work/#in-select
    """
    def __init__(self, parser, tree):
        Phase.__init__(self, parser, tree)
        self.startTagHandler = utils.MethodDispatcher([
            ("html", self.startTagHtml),
            ("option", self.startTagOption),
            ("optgroup", self.startTagOptgroup),
            ("select", self.startTagSelect),
            (("input", "keygen", "textarea"), self.startTagInput),
            ("script", self.startTagScript)
        ])
        self.startTagHandler.default = self.startTagOther
        self.endTagHandler = utils.MethodDispatcher([
            ("option", self.endTagOption),
            ("optgroup", self.endTagOptgroup),
            ("select", self.endTagSelect)
        ])
        self.endTagHandler.default = self.endTagOther

    def processEOF(self):
        if self.tree.openElements[-1].name != "html":
            self.parser.parseError("eof-in-select")
        else:
            # innerHTML case: only the root remains, which is acceptable.
            assert self.parser.innerHTML

    def processCharacters(self, token):
        # U+0000 characters are dropped inside <select>.
        if token["data"] == "\u0000":
            return
        self.tree.insertText(token["data"])

    def startTagOption(self, token):
        # We need to imply </option> if <option> is the current node.
        if self.tree.openElements[-1].name == "option":
            self.tree.openElements.pop()
        self.tree.insertElement(token)

    def startTagOptgroup(self, token):
        # <optgroup> implicitly closes an open <option> and <optgroup>.
        if self.tree.openElements[-1].name == "option":
            self.tree.openElements.pop()
        if self.tree.openElements[-1].name == "optgroup":
            self.tree.openElements.pop()
        self.tree.insertElement(token)

    def startTagSelect(self, token):
        # A nested <select> start tag acts as </select>.
        self.parser.parseError("unexpected-select-in-select")
        self.endTagSelect(impliedTagToken("select"))

    def startTagInput(self, token):
        # <input>/<keygen>/<textarea> force the select closed, then the
        # token is reprocessed (except in the innerHTML case).
        self.parser.parseError("unexpected-input-in-select")
        if self.tree.elementInScope("select", variant="select"):
            self.endTagSelect(impliedTagToken("select"))
            return token
        else:
            assert self.parser.innerHTML

    def startTagScript(self, token):
        return self.parser.phases["inHead"].processStartTag(token)

    def startTagOther(self, token):
        self.parser.parseError("unexpected-start-tag-in-select",
                               {"name": token["name"]})

    def endTagOption(self, token):
        if self.tree.openElements[-1].name == "option":
            self.tree.openElements.pop()
        else:
            self.parser.parseError("unexpected-end-tag-in-select",
                                   {"name": "option"})

    def endTagOptgroup(self, token):
        # </optgroup> implicitly closes <option>
        if (self.tree.openElements[-1].name == "option" and
            self.tree.openElements[-2].name == "optgroup"):
            self.tree.openElements.pop()
        # It also closes </optgroup>
        if self.tree.openElements[-1].name == "optgroup":
            self.tree.openElements.pop()
        # But nothing else
        else:
            self.parser.parseError("unexpected-end-tag-in-select",
                                   {"name": "optgroup"})

    def endTagSelect(self, token):
        if self.tree.elementInScope("select", variant="select"):
            # Pop everything up to and including the <select>, then let
            # the parser pick the right phase for the remaining stack.
            node = self.tree.openElements.pop()
            while node.name != "select":
                node = self.tree.openElements.pop()
            self.parser.resetInsertionMode()
        else:
            # innerHTML case
            assert self.parser.innerHTML
            self.parser.parseError()

    def endTagOther(self, token):
        self.parser.parseError("unexpected-end-tag-in-select",
                               {"name": token["name"]})
class InSelectInTablePhase(Phase):
    """Insertion mode "in select in table".

    Behaves like "in select", except that table-structure tags force the
    open <select> closed first and are then reprocessed.
    """
    def __init__(self, parser, tree):
        Phase.__init__(self, parser, tree)
        # The same set of table-structure tags triggers breakout for both
        # start and end tags.
        tableTags = ("caption", "table", "tbody", "tfoot", "thead", "tr",
                     "td", "th")
        self.startTagHandler = utils.MethodDispatcher(
            [(tableTags, self.startTagTable)])
        self.startTagHandler.default = self.startTagOther
        self.endTagHandler = utils.MethodDispatcher(
            [(tableTags, self.endTagTable)])
        self.endTagHandler.default = self.endTagOther

    def processEOF(self):
        # Everything not table-related is delegated to "in select".
        self.parser.phases["inSelect"].processEOF()

    def processCharacters(self, token):
        return self.parser.phases["inSelect"].processCharacters(token)

    def startTagTable(self, token):
        # Close the <select> and reprocess this table token.
        self.parser.parseError("unexpected-table-element-start-tag-in-select-in-table", {"name": token["name"]})
        self.endTagOther(impliedTagToken("select"))
        return token

    def startTagOther(self, token):
        return self.parser.phases["inSelect"].processStartTag(token)

    def endTagTable(self, token):
        # Only close the <select> (and reprocess) when the named element
        # is actually in table scope.
        self.parser.parseError("unexpected-table-element-end-tag-in-select-in-table", {"name": token["name"]})
        if self.tree.elementInScope(token["name"], variant="table"):
            self.endTagOther(impliedTagToken("select"))
            return token

    def endTagOther(self, token):
        return self.parser.phases["inSelect"].processEndTag(token)
class InForeignContentPhase(Phase):
    """Token handling while the current node is in the SVG or MathML
    namespace ("in foreign content")."""
    # HTML start tags that break out of foreign content back into HTML.
    breakoutElements = frozenset(["b", "big", "blockquote", "body", "br",
                                  "center", "code", "dd", "div", "dl", "dt",
                                  "em", "embed", "h1", "h2", "h3",
                                  "h4", "h5", "h6", "head", "hr", "i", "img",
                                  "li", "listing", "menu", "meta", "nobr",
                                  "ol", "p", "pre", "ruby", "s", "small",
                                  "span", "strong", "strike", "sub", "sup",
                                  "table", "tt", "u", "ul", "var"])

    def __init__(self, parser, tree):
        Phase.__init__(self, parser, tree)

    def adjustSVGTagNames(self, token):
        # SVG element names are case-sensitive; restore the canonical
        # mixed-case forms for names the tokenizer lowercased.
        replacements = {"altglyph": "altGlyph",
                        "altglyphdef": "altGlyphDef",
                        "altglyphitem": "altGlyphItem",
                        "animatecolor": "animateColor",
                        "animatemotion": "animateMotion",
                        "animatetransform": "animateTransform",
                        "clippath": "clipPath",
                        "feblend": "feBlend",
                        "fecolormatrix": "feColorMatrix",
                        "fecomponenttransfer": "feComponentTransfer",
                        "fecomposite": "feComposite",
                        "feconvolvematrix": "feConvolveMatrix",
                        "fediffuselighting": "feDiffuseLighting",
                        "fedisplacementmap": "feDisplacementMap",
                        "fedistantlight": "feDistantLight",
                        "feflood": "feFlood",
                        "fefunca": "feFuncA",
                        "fefuncb": "feFuncB",
                        "fefuncg": "feFuncG",
                        "fefuncr": "feFuncR",
                        "fegaussianblur": "feGaussianBlur",
                        "feimage": "feImage",
                        "femerge": "feMerge",
                        "femergenode": "feMergeNode",
                        "femorphology": "feMorphology",
                        "feoffset": "feOffset",
                        "fepointlight": "fePointLight",
                        "fespecularlighting": "feSpecularLighting",
                        "fespotlight": "feSpotLight",
                        "fetile": "feTile",
                        "feturbulence": "feTurbulence",
                        "foreignobject": "foreignObject",
                        "glyphref": "glyphRef",
                        "lineargradient": "linearGradient",
                        "radialgradient": "radialGradient",
                        "textpath": "textPath"}
        if token["name"] in replacements:
            token["name"] = replacements[token["name"]]

    def processCharacters(self, token):
        if token["data"] == "\u0000":
            # U+0000 becomes U+FFFD in foreign content.
            token["data"] = "\uFFFD"
        elif (self.parser.framesetOK and
              any(char not in spaceCharacters for char in token["data"])):
            # Non-whitespace text means a frameset is no longer allowed.
            self.parser.framesetOK = False
        Phase.processCharacters(self, token)

    def processStartTag(self, token):
        currentNode = self.tree.openElements[-1]
        if (token["name"] in self.breakoutElements or
            (token["name"] == "font" and
             set(token["data"].keys()) & set(["color", "face", "size"]))):
            # Breakout: pop foreign nodes until HTML content or an
            # integration point is current, then reprocess the token.
            self.parser.parseError("unexpected-html-element-in-foreign-content",
                                   {"name": token["name"]})
            while (self.tree.openElements[-1].namespace !=
                   self.tree.defaultNamespace and
                   not self.parser.isHTMLIntegrationPoint(self.tree.openElements[-1]) and
                   not self.parser.isMathMLTextIntegrationPoint(self.tree.openElements[-1])):
                self.tree.openElements.pop()
            return token
        else:
            # Adjust names/attributes for the current foreign namespace,
            # then insert in that namespace.
            if currentNode.namespace == namespaces["mathml"]:
                self.parser.adjustMathMLAttributes(token)
            elif currentNode.namespace == namespaces["svg"]:
                self.adjustSVGTagNames(token)
                self.parser.adjustSVGAttributes(token)
            self.parser.adjustForeignAttributes(token)
            token["namespace"] = currentNode.namespace
            self.tree.insertElement(token)
            if token["selfClosing"]:
                self.tree.openElements.pop()
                token["selfClosingAcknowledged"] = True

    def processEndTag(self, token):
        # Walk the open-elements stack from the current node looking for
        # a case-insensitive name match; pop to it, or hand the token to
        # the current HTML phase once an HTML-namespace node is reached.
        nodeIndex = len(self.tree.openElements) - 1
        node = self.tree.openElements[-1]
        if node.name != token["name"]:
            self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
        while True:
            if node.name.translate(asciiUpper2Lower) == token["name"]:
                # XXX this isn't in the spec but it seems necessary
                if self.parser.phase == self.parser.phases["inTableText"]:
                    self.parser.phase.flushCharacters()
                    self.parser.phase = self.parser.phase.originalPhase
                while self.tree.openElements.pop() != node:
                    assert self.tree.openElements
                new_token = None
                break
            nodeIndex -= 1
            node = self.tree.openElements[nodeIndex]
            if node.namespace != self.tree.defaultNamespace:
                continue
            else:
                new_token = self.parser.phase.processEndTag(token)
                break
        return new_token
class AfterBodyPhase(Phase):
    """Insertion mode "after body" (</body> has been seen).

    Only comments and </html> are expected here; anything else is an
    error that sends the parser back to "in body" to reprocess the token.
    """
    def __init__(self, parser, tree):
        Phase.__init__(self, parser, tree)
        self.startTagHandler = utils.MethodDispatcher([
            ("html", self.startTagHtml)
        ])
        self.startTagHandler.default = self.startTagOther
        self.endTagHandler = utils.MethodDispatcher([("html", self.endTagHtml)])
        self.endTagHandler.default = self.endTagOther

    def processEOF(self):
        # Normal end of the document: stop parsing.
        pass

    def processComment(self, token):
        # This is needed because data is to be appended to the <html> element
        # here and not to whatever is currently open.
        self.tree.insertComment(token, self.tree.openElements[0])

    def processCharacters(self, token):
        # Characters after </body>: error, fall back to "in body".
        self.parser.parseError("unexpected-char-after-body")
        self.parser.phase = self.parser.phases["inBody"]
        return token

    def startTagHtml(self, token):
        return self.parser.phases["inBody"].processStartTag(token)

    def startTagOther(self, token):
        self.parser.parseError("unexpected-start-tag-after-body",
                               {"name": token["name"]})
        self.parser.phase = self.parser.phases["inBody"]
        return token

    def endTagHtml(self, token):
        # Parameter renamed from the misleading `name`: the dispatcher
        # passes a token dict here, as for every other handler.
        if self.parser.innerHTML:
            self.parser.parseError("unexpected-end-tag-after-body-innerhtml")
        else:
            self.parser.phase = self.parser.phases["afterAfterBody"]

    def endTagOther(self, token):
        self.parser.parseError("unexpected-end-tag-after-body",
                               {"name": token["name"]})
        self.parser.phase = self.parser.phases["inBody"]
        return token
class InFramesetPhase(Phase):
    """Insertion mode "in frameset" (a <frameset> is open).

    http://www.whatwg.org/specs/web-apps/current-work/#in-frameset
    """
    def __init__(self, parser, tree):
        Phase.__init__(self, parser, tree)
        self.startTagHandler = utils.MethodDispatcher([
            ("html", self.startTagHtml),
            ("frameset", self.startTagFrameset),
            ("frame", self.startTagFrame),
            ("noframes", self.startTagNoframes)
        ])
        self.startTagHandler.default = self.startTagOther
        self.endTagHandler = utils.MethodDispatcher([
            ("frameset", self.endTagFrameset)
        ])
        self.endTagHandler.default = self.endTagOther

    def processEOF(self):
        if self.tree.openElements[-1].name != "html":
            self.parser.parseError("eof-in-frameset")
        else:
            # innerHTML case: only the root remains, which is acceptable.
            assert self.parser.innerHTML

    def processCharacters(self, token):
        self.parser.parseError("unexpected-char-in-frameset")

    def startTagFrameset(self, token):
        self.tree.insertElement(token)

    def startTagFrame(self, token):
        # <frame> is a void element: insert it and pop it immediately.
        self.tree.insertElement(token)
        self.tree.openElements.pop()

    def startTagNoframes(self, token):
        return self.parser.phases["inBody"].processStartTag(token)

    def startTagOther(self, token):
        self.parser.parseError("unexpected-start-tag-in-frameset",
                               {"name": token["name"]})

    def endTagFrameset(self, token):
        if self.tree.openElements[-1].name == "html":
            # innerHTML case
            self.parser.parseError("unexpected-frameset-in-frameset-innerhtml")
        else:
            self.tree.openElements.pop()
        if (not self.parser.innerHTML and
            self.tree.openElements[-1].name != "frameset"):
            # If we're not in innerHTML mode and the current node is not a
            # "frameset" element (anymore) then switch.
            self.parser.phase = self.parser.phases["afterFrameset"]

    def endTagOther(self, token):
        self.parser.parseError("unexpected-end-tag-in-frameset",
                               {"name": token["name"]})
class AfterFramesetPhase(Phase):
    """Insertion mode "after frameset" (</frameset> has been seen).

    http://www.whatwg.org/specs/web-apps/current-work/#after3
    """
    def __init__(self, parser, tree):
        Phase.__init__(self, parser, tree)
        self.startTagHandler = utils.MethodDispatcher([
            ("html", self.startTagHtml),
            ("noframes", self.startTagNoframes)
        ])
        self.startTagHandler.default = self.startTagOther
        self.endTagHandler = utils.MethodDispatcher([
            ("html", self.endTagHtml)
        ])
        self.endTagHandler.default = self.endTagOther

    def processEOF(self):
        # Stop parsing
        pass

    def processCharacters(self, token):
        self.parser.parseError("unexpected-char-after-frameset")

    def startTagNoframes(self, token):
        return self.parser.phases["inHead"].processStartTag(token)

    def startTagOther(self, token):
        self.parser.parseError("unexpected-start-tag-after-frameset",
                               {"name": token["name"]})

    def endTagHtml(self, token):
        self.parser.phase = self.parser.phases["afterAfterFrameset"]

    def endTagOther(self, token):
        self.parser.parseError("unexpected-end-tag-after-frameset",
                               {"name": token["name"]})
class AfterAfterBodyPhase(Phase):
    """Insertion mode "after after body": only comments, whitespace and
    <html> are acceptable; anything else errors and is reprocessed in
    "in body"."""
    def __init__(self, parser, tree):
        Phase.__init__(self, parser, tree)
        self.startTagHandler = utils.MethodDispatcher([
            ("html", self.startTagHtml)
        ])
        self.startTagHandler.default = self.startTagOther

    def processEOF(self):
        # Normal end of the document.
        pass

    def processComment(self, token):
        # Comments at this point are appended to the Document node.
        self.tree.insertComment(token, self.tree.document)

    def processSpaceCharacters(self, token):
        return self.parser.phases["inBody"].processSpaceCharacters(token)

    def processCharacters(self, token):
        self.parser.parseError("expected-eof-but-got-char")
        self.parser.phase = self.parser.phases["inBody"]
        return token

    def startTagHtml(self, token):
        return self.parser.phases["inBody"].processStartTag(token)

    def startTagOther(self, token):
        self.parser.parseError("expected-eof-but-got-start-tag",
                               {"name": token["name"]})
        self.parser.phase = self.parser.phases["inBody"]
        return token

    def processEndTag(self, token):
        self.parser.parseError("expected-eof-but-got-end-tag",
                               {"name": token["name"]})
        self.parser.phase = self.parser.phases["inBody"]
        return token
class AfterAfterFramesetPhase(Phase):
    """Insertion mode "after after frameset": only comments, whitespace,
    <html> and <noframes> are acceptable; anything else is an error and
    the token is dropped."""
    def __init__(self, parser, tree):
        Phase.__init__(self, parser, tree)
        self.startTagHandler = utils.MethodDispatcher([
            ("html", self.startTagHtml),
            ("noframes", self.startTagNoFrames)
        ])
        self.startTagHandler.default = self.startTagOther

    def processEOF(self):
        # Normal end of the document.
        pass

    def processComment(self, token):
        # Comments at this point are appended to the Document node.
        self.tree.insertComment(token, self.tree.document)

    def processSpaceCharacters(self, token):
        return self.parser.phases["inBody"].processSpaceCharacters(token)

    def processCharacters(self, token):
        self.parser.parseError("expected-eof-but-got-char")

    def startTagHtml(self, token):
        return self.parser.phases["inBody"].processStartTag(token)

    def startTagNoFrames(self, token):
        return self.parser.phases["inHead"].processStartTag(token)

    def startTagOther(self, token):
        self.parser.parseError("expected-eof-but-got-start-tag",
                               {"name": token["name"]})

    def processEndTag(self, token):
        self.parser.parseError("expected-eof-but-got-end-tag",
                               {"name": token["name"]})
# Map each insertion-mode name to its Phase class.
return {
    "initial": InitialPhase,
    "beforeHtml": BeforeHtmlPhase,
    "beforeHead": BeforeHeadPhase,
    "inHead": InHeadPhase,
    # XXX "inHeadNoscript": InHeadNoScriptPhase,
    "afterHead": AfterHeadPhase,
    "inBody": InBodyPhase,
    "text": TextPhase,
    "inTable": InTablePhase,
    "inTableText": InTableTextPhase,
    "inCaption": InCaptionPhase,
    "inColumnGroup": InColumnGroupPhase,
    "inTableBody": InTableBodyPhase,
    "inRow": InRowPhase,
    "inCell": InCellPhase,
    "inSelect": InSelectPhase,
    "inSelectInTable": InSelectInTablePhase,
    "inForeignContent": InForeignContentPhase,
    "afterBody": AfterBodyPhase,
    "inFrameset": InFramesetPhase,
    "afterFrameset": AfterFramesetPhase,
    "afterAfterBody": AfterAfterBodyPhase,
    "afterAfterFrameset": AfterAfterFramesetPhase,
}
def impliedTagToken(name, type="EndTag", attributes=None,
                    selfClosing=False):
    """Build a synthetic (parser-generated) token for tag `name`.

    `type` is a key into `tokenTypes` (default "EndTag"); `attributes`
    defaults to an empty dict when not supplied.
    """
    return {
        "type": tokenTypes[type],
        "name": name,
        "data": {} if attributes is None else attributes,
        "selfClosing": selfClosing,
    }
class ParseError(Exception):
    """Error in parsed document"""
    # NOTE: stray dataset-extraction residue that was fused onto the end
    # of this definition has been removed; the class body is just `pass`.
    pass
////////////////////////////////////////////////////////////////////////////
//
// Copyright 2022 Realm Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
////////////////////////////////////////////////////////////////////////////
#import <Realm/RLMCollection.h>
RLM_HEADER_AUDIT_BEGIN(nullability, sendability)
@protocol RLMValue;
@class RLMResults<RLMObjectType>;
/**
A `RLMSectionedResultsChange` object encapsulates information about changes to sectioned
results that are reported by Realm notifications.
`RLMSectionedResultsChange` is passed to the notification blocks registered with
`-addNotificationBlock` on `RLMSectionedResults`, and reports what sections and rows in the
collection changed since the last time the notification block was called.
A complete example of updating a `UITableView` named `tv`:
[tv beginUpdates];
[tv deleteRowsAtIndexPaths:changes.deletions withRowAnimation:UITableViewRowAnimationAutomatic];
[tv insertRowsAtIndexPaths:changes.insertions withRowAnimation:UITableViewRowAnimationAutomatic];
[tv reloadRowsAtIndexPaths:changes.modifications withRowAnimation:UITableViewRowAnimationAutomatic];
[tv insertSections:changes.sectionsToInsert withRowAnimation:UITableViewRowAnimationAutomatic];
[tv deleteSections:changes.sectionsToRemove withRowAnimation:UITableViewRowAnimationAutomatic];
[tv endUpdates];
All of the arrays in an `RLMSectionedResultsChange` are always sorted in ascending order.
*/
@interface RLMSectionedResultsChange : NSObject
/// The index paths of objects in the previous version of the collection which have
/// been removed from this one.
@property (nonatomic, readonly) NSArray<NSIndexPath *> *deletions;
/// The index paths in the new version of the collection which were newly inserted.
@property (nonatomic, readonly) NSArray<NSIndexPath *> *insertions;
/// The index paths in the old version of the collection which were modified.
@property (nonatomic, readonly) NSArray<NSIndexPath *> *modifications;
/// The indices of the sections to be inserted.
@property (nonatomic, readonly) NSIndexSet *sectionsToInsert;
/// The indices of the sections to be removed.
@property (nonatomic, readonly) NSIndexSet *sectionsToRemove;
// Per-section accessors for the index-path properties above.
/// Returns the index paths of the deletion indices in the given section.
- (NSArray<NSIndexPath *> *)deletionsInSection:(NSUInteger)section;
/// Returns the index paths of the insertion indices in the given section.
- (NSArray<NSIndexPath *> *)insertionsInSection:(NSUInteger)section;
/// Returns the index paths of the modification indices in the given section.
- (NSArray<NSIndexPath *> *)modificationsInSection:(NSUInteger)section;
@end
/// The `RLMSectionedResult` protocol defines properties and methods common to both `RLMSectionedResults and RLMSection`
@protocol RLMSectionedResult <NSFastEnumeration, RLMThreadConfined>
#pragma mark - Object Access
/// The count of objects in the collection.
@property (nonatomic, readonly) NSUInteger count;
/// Returns the object for a given index in the collection.
- (id)objectAtIndexedSubscript:(NSUInteger)index;
/// Returns the object for a given index in the collection.
- (id)objectAtIndex:(NSUInteger)index;
#pragma mark - Freeze
/**
Returns a frozen (immutable) snapshot of this collection.
The frozen copy is an immutable collection which contains the same data as this
collection currently contains, but will not update when writes are made to the
containing Realm. Unlike live arrays, frozen collections can be accessed from any
thread.
@warning This method cannot be called during a write transaction, or when the
containing Realm is read-only.
@warning Holding onto a frozen collection for an extended period while performing
write transaction on the Realm may result in the Realm file growing
to large sizes. See `RLMRealmConfiguration.maximumNumberOfActiveVersions`
for more information.
*/
- (instancetype)freeze;
/**
Returns a live version of this frozen collection.
This method resolves a reference to a live copy of the same frozen collection.
If called on a live collection, will return itself.
*/
- (instancetype)thaw;
/**
Indicates if the underlying collection is frozen.
Frozen collections are immutable and can be accessed from any thread.
*/
@property (nonatomic, readonly, getter = isFrozen) BOOL frozen;
#pragma mark - Sectioned Results Notifications
/**
Registers a block to be called each time the collection changes.
The block will be asynchronously called with the initial sectioned results collection,
and then called again after each write transaction which changes either any
of the objects in the results, or which objects are in the results.
The `change` parameter will be `nil` the first time the block is called.
For each call after that, it will contain information about
which rows in the section were added, removed or modified. If a
write transaction did not modify any objects in the section,
the block is not called at all. See the `RLMSectionedResultsChange` documentation for
information on how the changes are reported and an example of updating a
`UITableView`.
At the time when the block is called, the `RLMSection` / `RLMSectionedResults` object will be fully
evaluated and up-to-date.
Notifications are delivered via the standard run loop, and so can't be
delivered while the run loop is blocked by other activity. When
notifications can't be delivered instantly, multiple notifications may be
coalesced into a single notification. This can include the notification
with the initial results. For example, the following code performs a write
transaction immediately after adding the notification block, so there is no
opportunity for the initial notification to be delivered first. As a
result, the initial notification will reflect the state of the Realm after
the write transaction.
RLMResults<Dog *> *results = [Dog allObjects];
RLMSectionedResults<Dog *> *sectionedResults = [results sectionedResultsUsingKeyPath:@"age" ascending:YES];
self.token = [sectionedResults addNotificationBlock:^(RLMSectionedResults *sectionedResults, RLMSectionedResultsChange *changes) {
// Only fired once for the example
NSLog(@"sectionedResults.count: %zu", sectionedResults.count); // => 1
}];
[realm transactionWithBlock:^{
Dog *dog = [[Dog alloc] init];
dog.name = @"Rex";
dog.age = 5;
[realm addObject:dog];
}];
// end of run loop execution context
You must retain the returned token for as long as you want updates to continue
to be sent to the block. To stop receiving updates, call `-invalidate` on the token.
@warning This method cannot be called during a write transaction, or when the
containing Realm is read-only.
@warning The queue must be a serial queue.
@param block The block to be called whenever a change occurs.
@return A token which must be held for as long as you want updates to be delivered.
*/
- (RLMNotificationToken *)addNotificationBlock:(void (^)(id<RLMSectionedResult>, RLMSectionedResultsChange *))block __attribute__((warn_unused_result));
/**
Registers a block to be called each time the collection changes.
The block will be asynchronously called with the initial sectioned results collection,
and then called again after each write transaction which changes either any
of the objects in the results, or which objects are in the results.
The `change` parameter will be `nil` the first time the block is called.
For each call after that, it will contain information about
which rows in the section were added, removed or modified. If a
write transaction did not modify any objects in the section,
the block is not called at all. See the `RLMSectionedResultsChange` documentation for
information on how the changes are reported and an example of updating a
`UITableView`.
At the time when the block is called, the `RLMSection` / `RLMSectionedResults` object will be fully
evaluated and up-to-date.
Notifications are delivered via the standard run loop, and so can't be
delivered while the run loop is blocked by other activity. When
notifications can't be delivered instantly, multiple notifications may be
coalesced into a single notification. This can include the notification
with the initial results. For example, the following code performs a write
transaction immediately after adding the notification block, so there is no
opportunity for the initial notification to be delivered first. As a
result, the initial notification will reflect the state of the Realm after
the write transaction.
RLMResults<Dog *> *results = [Dog allObjects];
RLMSectionedResults<Dog *> *sectionedResults = [results sectionedResultsUsingKeyPath:@"age" ascending:YES];
self.token = [sectionedResults addNotificationBlock:^(RLMSectionedResults *sectionedResults, RLMSectionedResultsChange *changes) {
// Only fired once for the example
NSLog(@"sectionedResults.count: %zu", sectionedResults.count); // => 1
}];
[realm transactionWithBlock:^{
Dog *dog = [[Dog alloc] init];
dog.name = @"Rex";
dog.age = 5;
[realm addObject:dog];
}];
// end of run loop execution context
You must retain the returned token for as long as you want updates to continue
to be sent to the block. To stop receiving updates, call `-invalidate` on the token.
@warning This method cannot be called during a write transaction, or when the
containing Realm is read-only.
@warning The queue must be a serial queue.
@param block The block to be called whenever a change occurs.
@param queue The serial queue to deliver notifications to.
@return A token which must be held for as long as you want updates to be delivered.
*/
- (RLMNotificationToken *)addNotificationBlock:(void (^)(id<RLMSectionedResult>, RLMSectionedResultsChange *))block
queue:(dispatch_queue_t)queue __attribute__((warn_unused_result));
/**
Registers a block to be called each time the collection changes.
The block will be asynchronously called with the initial sectioned results collection,
and then called again after each write transaction which changes either any
of the objects in the results, or which objects are in the results.
The `change` parameter will be `nil` the first time the block is called.
For each call after that, it will contain information about
which rows in the section were added, removed or modified. If a
write transaction did not modify any objects in the section,
the block is not called at all. See the `RLMSectionedResultsChange` documentation for
information on how the changes are reported and an example of updating a
`UITableView`.
At the time when the block is called, the `RLMSection` / `RLMSectionedResults` object will be fully
evaluated and up-to-date.
Notifications are delivered via the standard run loop, and so can't be
delivered while the run loop is blocked by other activity. When
notifications can't be delivered instantly, multiple notifications may be
coalesced into a single notification. This can include the notification
with the initial results. For example, the following code performs a write
transaction immediately after adding the notification block, so there is no
opportunity for the initial notification to be delivered first. As a
result, the initial notification will reflect the state of the Realm after
the write transaction.
RLMResults<Dog *> *results = [Dog allObjects];
RLMSectionedResults<Dog *> *sectionedResults = [results sectionedResultsUsingKeyPath:@"age" ascending:YES];
self.token = [sectionedResults addNotificationBlock:^(RLMSectionedResults *sectionedResults, RLMSectionedResultsChange *changes) {
// Only fired once for the example
NSLog(@"sectionedResults.count: %zu", sectionedResults.count); // => 1
}];
[realm transactionWithBlock:^{
Dog *dog = [[Dog alloc] init];
dog.name = @"Rex";
dog.age = 5;
[realm addObject:dog];
}];
// end of run loop execution context
You must retain the returned token for as long as you want updates to continue
to be sent to the block. To stop receiving updates, call `-invalidate` on the token.
@warning This method cannot be called during a write transaction, or when the
containing Realm is read-only.
@warning The queue must be a serial queue.
@param block The block to be called whenever a change occurs.
@param keyPaths The block will be called for changes occurring on these keypaths. If no
key paths are given, notifications are delivered for every property key path.
@return A token which must be held for as long as you want updates to be delivered.
*/
- (RLMNotificationToken *)addNotificationBlock:(void (^)(id<RLMSectionedResult>, RLMSectionedResultsChange *))block
keyPaths:(NSArray<NSString *> *)keyPaths __attribute__((warn_unused_result));
/**
Registers a block to be called each time the collection changes.
The block will be asynchronously called with the initial sectioned results collection,
and then called again after each write transaction which changes either any
of the objects in the results, or which objects are in the results.
The `change` parameter will be `nil` the first time the block is called.
For each call after that, it will contain information about
which rows in the section were added, removed or modified. If a
write transaction did not modify any objects in the section,
the block is not called at all. See the `RLMSectionedResultsChange` documentation for
information on how the changes are reported and an example of updating a
`UITableView`.
At the time when the block is called, the `RLMSection` / `RLMSectionedResults` object will be fully
evaluated and up-to-date.
Notifications are delivered via the standard run loop, and so can't be
delivered while the run loop is blocked by other activity. When
notifications can't be delivered instantly, multiple notifications may be
coalesced into a single notification. This can include the notification
with the initial results. For example, the following code performs a write
transaction immediately after adding the notification block, so there is no
opportunity for the initial notification to be delivered first. As a
result, the initial notification will reflect the state of the Realm after
the write transaction.
RLMResults<Dog *> *results = [Dog allObjects];
RLMSectionedResults<Dog *> *sectionedResults = [results sectionedResultsUsingKeyPath:@"age" ascending:YES];
self.token = [sectionedResults addNotificationBlock:^(RLMSectionedResults *sectionedResults, RLMSectionedResultsChange *changes) {
// Only fired once for the example
NSLog(@"sectionedResults.count: %zu", sectionedResults.count); // => 1
}];
[realm transactionWithBlock:^{
Dog *dog = [[Dog alloc] init];
dog.name = @"Rex";
dog.age = 5;
[realm addObject:dog];
}];
// end of run loop execution context
You must retain the returned token for as long as you want updates to continue
to be sent to the block. To stop receiving updates, call `-invalidate` on the token.
@warning This method cannot be called during a write transaction, or when the
containing Realm is read-only.
@warning The queue must be a serial queue.
@note When filtering with key paths a notification will be fired in the following scenarios:
- An object in the collection has been modified at the filtered properties.
- An object has been modified on the section key path property, and the result of that modification has changed its position in the section, or the object may need to move to another section.
- An object of the same observed type has been inserted or deleted from the Realm.
@param block The block to be called whenever a change occurs.
@param keyPaths The block will be called for changes occurring on these keypaths. If no
key paths are given, notifications are delivered for every property key path.
@param queue The serial queue to deliver notifications to.
@return A token which must be held for as long as you want updates to be delivered.
*/
- (RLMNotificationToken *)addNotificationBlock:(void (^)(id<RLMSectionedResult>, RLMSectionedResultsChange *))block
keyPaths:(nullable NSArray<NSString *> *)keyPaths
queue:(nullable dispatch_queue_t)queue __attribute__((warn_unused_result));
@end
/// An RLMSection contains the objects which belong to a specified section key.
@interface RLMSection<RLMKeyType: id<RLMValue>, RLMObjectType> : NSObject<RLMSectionedResult>
/// The value that represents the key in this section.
@property (nonatomic, readonly) RLMKeyType key;
/// The count of objects in the section.
@property (nonatomic, readonly) NSUInteger count;
/// Returns the object for a given index in the section.
- (RLMObjectType)objectAtIndexedSubscript:(NSUInteger)index;
/// Returns the object for a given index in the section.
- (RLMObjectType)objectAtIndex:(NSUInteger)index;
#pragma mark - Freeze
/**
Returns a frozen (immutable) snapshot of this section.
The frozen copy is an immutable section which contains the same data as this
section currently contains, but will not update when writes are made to the
containing Realm. Unlike live arrays, frozen collections can be accessed from any
thread.
@warning This method cannot be called during a write transaction, or when the
containing Realm is read-only.
@warning Holding onto a frozen section for an extended period while performing
write transactions on the Realm may result in the Realm file growing
to large sizes. See `RLMRealmConfiguration.maximumNumberOfActiveVersions`
for more information.
*/
- (instancetype)freeze;
/**
Returns a live version of this frozen section.
This method resolves a reference to a live copy of the same frozen section.
If called on a live section, will return itself.
*/
- (instancetype)thaw;
/**
Indicates if the underlying section is frozen.
Frozen sections are immutable and can be accessed from any thread.
*/
@property (nonatomic, readonly, getter = isFrozen) BOOL frozen;
#pragma mark - Section Notifications
/**
Registers a block to be called each time the section changes.
The block will be asynchronously called with the initial section,
and then called again after each write transaction which changes either any
of the objects in the results, or which objects are in the results.
The `change` parameter will be `nil` the first time the block is called.
For each call after that, it will contain information about
which rows in the section were added, removed or modified. If a
write transaction did not modify any objects in the section,
the block is not called at all. See the `RLMSectionedResultsChange` documentation for
information on how the changes are reported and an example of updating a
`UITableView`.
At the time when the block is called, the `RLMSection` object will be fully
evaluated and up-to-date.
Notifications are delivered via the standard run loop, and so can't be
delivered while the run loop is blocked by other activity. When
notifications can't be delivered instantly, multiple notifications may be
coalesced into a single notification. This can include the notification
with the initial results. For example, the following code performs a write
transaction immediately after adding the notification block, so there is no
opportunity for the initial notification to be delivered first. As a
result, the initial notification will reflect the state of the Realm after
the write transaction.
RLMResults<Dog *> *results = [Dog allObjects];
RLMSectionedResults<Dog *> *sectionedResults = [results sectionedResultsUsingKeyPath:@"age" ascending:YES];
RLMSection<NSNumber *, Dog *> *section = sectionedResults[0]; // section with dogs aged '5' already exists.
self.token = [section addNotificationBlock:^(RLMSection *section, RLMSectionedResultsChange *changes) {
// Only fired once for the example
NSLog(@"section.count: %zu", section.count); // => 2
}];
[realm transactionWithBlock:^{
Dog *dog = [[Dog alloc] init];
dog.name = @"Rex";
dog.age = 5;
[realm addObject:dog];
}];
// end of run loop execution context
You must retain the returned token for as long as you want updates to continue
to be sent to the block. To stop receiving updates, call `-invalidate` on the token.
@warning This method cannot be called during a write transaction, or when the
containing Realm is read-only.
@warning The queue must be a serial queue.
@param block The block to be called whenever a change occurs.
@return A token which must be held for as long as you want updates to be delivered.
*/
- (RLMNotificationToken *)addNotificationBlock:(void (^)(RLMSection<RLMKeyType, RLMObjectType> *, RLMSectionedResultsChange *))block __attribute__((warn_unused_result));
/**
Registers a block to be called each time the section changes.
The block will be asynchronously called with the initial section,
and then called again after each write transaction which changes either any
of the objects in the results, or which objects are in the results.
The `change` parameter will be `nil` the first time the block is called.
For each call after that, it will contain information about
which rows in the section were added, removed or modified. If a
write transaction did not modify any objects in the section,
the block is not called at all. See the `RLMSectionedResultsChange` documentation for
information on how the changes are reported and an example of updating a
`UITableView`.
At the time when the block is called, the `RLMSection` object will be fully
evaluated and up-to-date.
Notifications are delivered via the standard run loop, and so can't be
delivered while the run loop is blocked by other activity. When
notifications can't be delivered instantly, multiple notifications may be
coalesced into a single notification. This can include the notification
with the initial results. For example, the following code performs a write
transaction immediately after adding the notification block, so there is no
opportunity for the initial notification to be delivered first. As a
result, the initial notification will reflect the state of the Realm after
the write transaction.
RLMResults<Dog *> *results = [Dog allObjects];
RLMSectionedResults<Dog *> *sectionedResults = [results sectionedResultsUsingKeyPath:@"age" ascending:YES];
RLMSection<NSNumber *, Dog *> *section = sectionedResults[0]; // section with dogs aged '5' already exists.
self.token = [section addNotificationBlock:^(RLMSection *section, RLMSectionedResultsChange *changes) {
// Only fired once for the example
NSLog(@"section.count: %zu", section.count); // => 2
}];
[realm transactionWithBlock:^{
Dog *dog = [[Dog alloc] init];
dog.name = @"Rex";
dog.age = 5;
[realm addObject:dog];
}];
// end of run loop execution context
You must retain the returned token for as long as you want updates to continue
to be sent to the block. To stop receiving updates, call `-invalidate` on the token.
@warning This method cannot be called during a write transaction, or when the
containing Realm is read-only.
@warning The queue must be a serial queue.
@param block The block to be called whenever a change occurs.
@param queue The serial queue to deliver notifications to.
@return A token which must be held for as long as you want updates to be delivered.
*/
- (RLMNotificationToken *)addNotificationBlock:(void (^)(RLMSection<RLMKeyType, RLMObjectType> *, RLMSectionedResultsChange *))block
queue:(dispatch_queue_t)queue __attribute__((warn_unused_result));
/**
Registers a block to be called each time the section changes.
The block will be asynchronously called with the initial section,
and then called again after each write transaction which changes either any
of the objects in the results, or which objects are in the results.
The `change` parameter will be `nil` the first time the block is called.
For each call after that, it will contain information about
which rows in the section were added, removed or modified. If a
write transaction did not modify any objects in the section,
the block is not called at all. See the `RLMSectionedResultsChange` documentation for
information on how the changes are reported and an example of updating a
`UITableView`.
At the time when the block is called, the `RLMSection` object will be fully
evaluated and up-to-date.
Notifications are delivered via the standard run loop, and so can't be
delivered while the run loop is blocked by other activity. When
notifications can't be delivered instantly, multiple notifications may be
coalesced into a single notification. This can include the notification
with the initial results. For example, the following code performs a write
transaction immediately after adding the notification block, so there is no
opportunity for the initial notification to be delivered first. As a
result, the initial notification will reflect the state of the Realm after
the write transaction.
RLMResults<Dog *> *results = [Dog allObjects];
RLMSectionedResults<Dog *> *sectionedResults = [results sectionedResultsUsingKeyPath:@"age" ascending:YES];
RLMSection<NSNumber *, Dog *> *section = sectionedResults[0]; // section with dogs aged '5' already exists.
self.token = [section addNotificationBlock:^(RLMSection *section, RLMSectionedResultsChange *changes) {
// Only fired once for the example
NSLog(@"section.count: %zu", section.count); // => 2
}];
[realm transactionWithBlock:^{
Dog *dog = [[Dog alloc] init];
dog.name = @"Rex";
dog.age = 5;
[realm addObject:dog];
}];
// end of run loop execution context
You must retain the returned token for as long as you want updates to continue
to be sent to the block. To stop receiving updates, call `-invalidate` on the token.
@warning This method cannot be called during a write transaction, or when the
containing Realm is read-only.
@warning The queue must be a serial queue.
@note When filtering with key paths a notification will be fired in the following scenarios:
- An object in the collection has been modified at the filtered properties.
- An object has been modified on the section key path property, and the result of that modification has changed its position in the section, or the object may need to move to another section.
- An object of the same observed type has been inserted or deleted from the Realm.
@param block The block to be called whenever a change occurs.
@param keyPaths The block will be called for changes occurring on these keypaths. If no
key paths are given, notifications are delivered for every property key path.
@return A token which must be held for as long as you want updates to be delivered.
*/
- (RLMNotificationToken *)addNotificationBlock:(void (^)(RLMSection<RLMKeyType, RLMObjectType> *, RLMSectionedResultsChange *))block
keyPaths:(NSArray<NSString *> *)keyPaths __attribute__((warn_unused_result));
/**
Registers a block to be called each time the section changes.
The block will be asynchronously called with the initial section,
and then called again after each write transaction which changes either any
of the objects in the results, or which objects are in the results.
The `change` parameter will be `nil` the first time the block is called.
For each call after that, it will contain information about
which rows in the section were added, removed or modified. If a
write transaction did not modify any objects in the section,
the block is not called at all. See the `RLMSectionedResultsChange` documentation for
information on how the changes are reported and an example of updating a
`UITableView`.
At the time when the block is called, the `RLMSection` object will be fully
evaluated and up-to-date.
Notifications are delivered via the standard run loop, and so can't be
delivered while the run loop is blocked by other activity. When
notifications can't be delivered instantly, multiple notifications may be
coalesced into a single notification. This can include the notification
with the initial results. For example, the following code performs a write
transaction immediately after adding the notification block, so there is no
opportunity for the initial notification to be delivered first. As a
result, the initial notification will reflect the state of the Realm after
the write transaction.
RLMResults<Dog *> *results = [Dog allObjects];
RLMSectionedResults<Dog *> *sectionedResults = [results sectionedResultsUsingKeyPath:@"age" ascending:YES];
RLMSection<NSNumber *, Dog *> *section = sectionedResults[0]; // section with dogs aged '5' already exists.
self.token = [section addNotificationBlock:^(RLMSection *section, RLMSectionedResultsChange *changes) {
// Only fired once for the example
NSLog(@"section.count: %zu", section.count); // => 2
}];
[realm transactionWithBlock:^{
Dog *dog = [[Dog alloc] init];
dog.name = @"Rex";
dog.age = 5;
[realm addObject:dog];
}];
// end of run loop execution context
You must retain the returned token for as long as you want updates to continue
to be sent to the block. To stop receiving updates, call `-invalidate` on the token.
@warning This method cannot be called during a write transaction, or when the
containing Realm is read-only.
@warning The queue must be a serial queue.
@note When filtering with key paths a notification will be fired in the following scenarios:
- An object in the collection has been modified at the filtered properties.
- An object has been modified on the section key path property, and the result of that modification has changed its position in the section, or the object may need to move to another section.
- An object of the same observed type has been inserted or deleted from the Realm.
@param block The block to be called whenever a change occurs.
@param keyPaths The block will be called for changes occurring on these keypaths. If no
key paths are given, notifications are delivered for every property key path.
@param queue The serial queue to deliver notifications to.
@return A token which must be held for as long as you want updates to be delivered.
*/
- (RLMNotificationToken *)addNotificationBlock:(void (^)(RLMSection<RLMKeyType, RLMObjectType> *, RLMSectionedResultsChange *))block
keyPaths:(nullable NSArray<NSString *> *)keyPaths
queue:(nullable dispatch_queue_t)queue __attribute__((warn_unused_result));
@end
/// A lazily evaluated collection that holds elements in sections determined by a section key.
@interface RLMSectionedResults<RLMKeyType: id<RLMValue>, RLMObjectType: id<RLMValue>> : NSObject<RLMSectionedResult>
/// An array of all keys in the sectioned results collection.
@property (nonatomic) NSArray<RLMKeyType> *allKeys;
/// The total number of sections in this collection.
@property (nonatomic, readonly, assign) NSUInteger count;
/// Returns the section at a given index.
- (RLMSection<RLMKeyType, RLMObjectType> *)objectAtIndexedSubscript:(NSUInteger)index;
/// Returns the section at a given index.
- (RLMSection<RLMKeyType, RLMObjectType> *)objectAtIndex:(NSUInteger)index;
#pragma mark - Freeze
/**
Returns a frozen (immutable) snapshot of this sectioned results collection.
The frozen copy is an immutable sectioned results collection which contains the same data as this
sectioned results collection currently contains, but will not update when writes are made to the
containing Realm. Unlike live sectioned results collections, frozen sectioned results collection
can be accessed from any thread.
@warning This method cannot be called during a write transaction, or when the
containing Realm is read-only.
@warning Holding onto a frozen sectioned results collection for an extended period while performing
write transactions on the Realm may result in the Realm file growing
to large sizes. See `RLMRealmConfiguration.maximumNumberOfActiveVersions`
for more information.
*/
- (instancetype)freeze;
/**
Returns a live version of this frozen sectioned results collection.
This method resolves a reference to a live copy of the same frozen sectioned results collection.
If called on a live section, will return itself.
*/
- (instancetype)thaw;
/**
Indicates if the underlying sectioned results collection is frozen.
Frozen sectioned results collections are immutable and can be accessed from any thread.
*/
@property (nonatomic, readonly, getter = isFrozen) BOOL frozen;
#pragma mark - Sectioned Results Notifications
/**
Registers a block to be called each time the sectioned results collection changes.
The block will be asynchronously called with the initial sectioned results collection,
and then called again after each write transaction which changes either any
of the objects in the results, or which objects are in the results.
The `change` parameter will be `nil` the first time the block is called.
For each call after that, it will contain information about
which rows in the section were added, removed or modified. If a
write transaction did not modify any objects in the section,
the block is not called at all. See the `RLMSectionedResultsChange` documentation for
information on how the changes are reported and an example of updating a
`UITableView`.
At the time when the block is called, the `RLMSectionedResults` object will be fully
evaluated and up-to-date.
Notifications are delivered via the standard run loop, and so can't be
delivered while the run loop is blocked by other activity. When
notifications can't be delivered instantly, multiple notifications may be
coalesced into a single notification. This can include the notification
with the initial results. For example, the following code performs a write
transaction immediately after adding the notification block, so there is no
opportunity for the initial notification to be delivered first. As a
result, the initial notification will reflect the state of the Realm after
the write transaction.
RLMResults<Dog *> *results = [Dog allObjects];
RLMSectionedResults<Dog *> *sectionedResults = [results sectionedResultsUsingKeyPath:@"age" ascending:YES];
self.token = [sectionedResults addNotificationBlock:^(RLMSectionedResults *sectionedResults, RLMSectionedResultsChange *changes) {
// Only fired once for the example
NSLog(@"sectionedResults.count: %zu", sectionedResults.count); // => 1
}];
[realm transactionWithBlock:^{
Dog *dog = [[Dog alloc] init];
dog.name = @"Rex";
dog.age = 5;
[realm addObject:dog];
}];
// end of run loop execution context
You must retain the returned token for as long as you want updates to continue
to be sent to the block. To stop receiving updates, call `-invalidate` on the token.
@warning This method cannot be called during a write transaction, or when the
containing Realm is read-only.
@warning The queue must be a serial queue.
@param block The block to be called whenever a change occurs.
@return A token which must be held for as long as you want updates to be delivered.
*/
- (RLMNotificationToken *)addNotificationBlock:(void (^)(RLMSectionedResults<RLMKeyType, RLMObjectType> *, RLMSectionedResultsChange *))block __attribute__((warn_unused_result));
/**
Registers a block to be called each time the sectioned results collection changes.
The block will be asynchronously called with the initial sectioned results collection,
and then called again after each write transaction which changes either any
of the objects in the results, or which objects are in the results.
The `change` parameter will be `nil` the first time the block is called.
For each call after that, it will contain information about
which rows in the section were added, removed or modified. If a
write transaction did not modify any objects in the section,
the block is not called at all. See the `RLMSectionedResultsChange` documentation for
information on how the changes are reported and an example of updating a
`UITableView`.
At the time when the block is called, the `RLMSectionedResults` object will be fully
evaluated and up-to-date.
Notifications are delivered via the standard run loop, and so can't be
delivered while the run loop is blocked by other activity. When
notifications can't be delivered instantly, multiple notifications may be
coalesced into a single notification. This can include the notification
with the initial results. For example, the following code performs a write
transaction immediately after adding the notification block, so there is no
opportunity for the initial notification to be delivered first. As a
result, the initial notification will reflect the state of the Realm after
the write transaction.
RLMResults<Dog *> *results = [Dog allObjects];
RLMSectionedResults<Dog *> *sectionedResults = [results sectionedResultsUsingKeyPath:@"age" ascending:YES];
self.token = [sectionedResults addNotificationBlock:^(RLMSectionedResults *sectionedResults, RLMSectionedResultsChange *changes) {
// Only fired once for the example
NSLog(@"sectionedResults.count: %zu", sectionedResults.count); // => 1
}];
[realm transactionWithBlock:^{
Dog *dog = [[Dog alloc] init];
dog.name = @"Rex";
dog.age = 5;
[realm addObject:dog];
}];
// end of run loop execution context
You must retain the returned token for as long as you want updates to continue
to be sent to the block. To stop receiving updates, call `-invalidate` on the token.
@warning This method cannot be called during a write transaction, or when the
containing Realm is read-only.
@warning The queue must be a serial queue.
@param block The block to be called whenever a change occurs.
@param queue The serial queue to deliver notifications to.
@return A token which must be held for as long as you want updates to be delivered.
*/
- (RLMNotificationToken *)addNotificationBlock:(void (^)(RLMSectionedResults<RLMKeyType, RLMObjectType> *, RLMSectionedResultsChange *))block
queue:(dispatch_queue_t)queue __attribute__((warn_unused_result));
/**
Registers a block to be called each time the sectioned results collection changes.
The block will be asynchronously called with the initial sectioned results collection,
and then called again after each write transaction which changes either any
of the objects in the results, or which objects are in the results.
The `change` parameter will be `nil` the first time the block is called.
For each call after that, it will contain information about
which rows in the section were added, removed or modified. If a
write transaction did not modify any objects in the section,
the block is not called at all. See the `RLMSectionedResultsChange` documentation for
information on how the changes are reported and an example of updating a
`UITableView`.
At the time when the block is called, the `RLMSectionedResults` object will be fully
evaluated and up-to-date.
Notifications are delivered via the standard run loop, and so can't be
delivered while the run loop is blocked by other activity. When
notifications can't be delivered instantly, multiple notifications may be
coalesced into a single notification. This can include the notification
with the initial results. For example, the following code performs a write
transaction immediately after adding the notification block, so there is no
opportunity for the initial notification to be delivered first. As a
result, the initial notification will reflect the state of the Realm after
the write transaction.
RLMResults<Dog *> *results = [Dog allObjects];
RLMSectionedResults<Dog *> *sectionedResults = [results sectionedResultsUsingKeyPath:@"age" ascending:YES];
self.token = [sectionedResults addNotificationBlock:^(RLMSectionedResults *sectionedResults, RLMSectionedResultsChange *changes) {
// Only fired once for the example
NSLog(@"sectionedResults.count: %zu", sectionedResults.count); // => 1
}];
[realm transactionWithBlock:^{
Dog *dog = [[Dog alloc] init];
dog.name = @"Rex";
dog.age = 5;
[realm addObject:dog];
}];
// end of run loop execution context
You must retain the returned token for as long as you want updates to continue
to be sent to the block. To stop receiving updates, call `-invalidate` on the token.
@warning This method cannot be called during a write transaction, or when the
containing Realm is read-only.
@warning The queue must be a serial queue.
@note When filtering with key paths a notification will be fired in the following scenarios:
- An object in the collection has been modified at the filtered properties.
- An object has been modified on the section key path property, and the result of that modification has changed its position in the section, or the object may need to move to another section.
- An object of the same observed type has been inserted or deleted from the Realm.
@param block The block to be called whenever a change occurs.
@param keyPaths The block will be called for changes occurring on these keypaths. If no
key paths are given, notifications are delivered for every property key path.
@return A token which must be held for as long as you want updates to be delivered.
*/
- (RLMNotificationToken *)addNotificationBlock:(void (^)(RLMSectionedResults<RLMKeyType, RLMObjectType> *, RLMSectionedResultsChange *))block
keyPaths:(NSArray<NSString *> *)keyPaths __attribute__((warn_unused_result));
/**
Registers a block to be called each time the sectioned results collection changes.
The block will be asynchronously called with the initial sectioned results collection,
and then called again after each write transaction which changes either any
of the objects in the results, or which objects are in the results.
The `change` parameter will be `nil` the first time the block is called.
For each call after that, it will contain information about
which rows in the section were added, removed or modified. If a
write transaction did not modify any objects in the section,
the block is not called at all. See the `RLMSectionedResultsChange` documentation for
information on how the changes are reported and an example of updating a
`UITableView`.
At the time when the block is called, the `RLMSectionedResults` object will be fully
evaluated and up-to-date.
Notifications are delivered via the standard run loop, and so can't be
delivered while the run loop is blocked by other activity. When
notifications can't be delivered instantly, multiple notifications may be
coalesced into a single notification. This can include the notification
with the initial results. For example, the following code performs a write
transaction immediately after adding the notification block, so there is no
opportunity for the initial notification to be delivered first. As a
result, the initial notification will reflect the state of the Realm after
the write transaction.
RLMResults<Dog *> *results = [Dog allObjects];
RLMSectionedResults<Dog *> *sectionedResults = [results sectionedResultsUsingKeyPath:@"age" ascending:YES];
self.token = [sectionedResults addNotificationBlock:^(RLMSectionedResults *sectionedResults, RLMSectionedResultsChange *changes) {
// Only fired once for the example
NSLog(@"sectionedResults.count: %zu", sectionedResults.count); // => 1
}];
[realm transactionWithBlock:^{
Dog *dog = [[Dog alloc] init];
dog.name = @"Rex";
dog.age = 5;
[realm addObject:dog];
}];
// end of run loop execution context
You must retain the returned token for as long as you want updates to continue
to be sent to the block. To stop receiving updates, call `-invalidate` on the token.
@warning This method cannot be called during a write transaction, or when the
containing Realm is read-only.
@warning The queue must be a serial queue.
@note When filtering with key paths a notification will be fired in the following scenarios:
- An object in the collection has been modified at the filtered properties.
- An object has been modified on the section key path property, and the result of that modification has changed it's position in the section, or the object may need to move to another section.
- An object of the same observed type has been inserted or deleted from the Realm.
@param block The block to be called whenever a change occurs.
@param keyPaths The block will be called for changes occurring on these keypaths. If no
key paths are given, notifications are delivered for every property key path.
@param queue The serial queue to deliver notifications to.
@return A token which must be held for as long as you want updates to be delivered.
*/
- (RLMNotificationToken *)addNotificationBlock:(void (^)(RLMSectionedResults<RLMKeyType, RLMObjectType> *, RLMSectionedResultsChange *))block
keyPaths:(nullable NSArray<NSString *> *)keyPaths
queue:(nullable dispatch_queue_t)queue __attribute__((warn_unused_result));
@end
RLM_HEADER_AUDIT_END(nullability, sendability) | c | github | https://github.com/realm/realm-swift | Realm/RLMSectionedResults.h |
//===--- PrintClangClassType.cpp - Print class types in C/C++ ---*- C++ -*-===//
//
// This source file is part of the Swift.org open source project
//
// Copyright (c) 2014 - 2022 Apple Inc. and the Swift project authors
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See https://swift.org/LICENSE.txt for license information
// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
//
//===----------------------------------------------------------------------===//
#include "PrintClangClassType.h"
#include "ClangSyntaxPrinter.h"
#include "DeclAndTypePrinter.h"
#include "PrintClangValueType.h"
#include "swift/AST/Decl.h"
#include "swift/IRGen/Linking.h"
using namespace swift;
void ClangClassTypePrinter::printClassTypeDecl(
const ClassDecl *typeDecl, llvm::function_ref<void(void)> bodyPrinter,
DeclAndTypePrinter &declAndTypePrinter) {
auto printCxxImplClassName = ClangValueTypePrinter::printCxxImplClassName;
ClangSyntaxPrinter printer(typeDecl->getASTContext(), os);
auto typeMetadataFunc = irgen::LinkEntity::forTypeMetadataAccessFunction(
typeDecl->getDeclaredType()->getCanonicalType());
std::string typeMetadataFuncName = typeMetadataFunc.mangleAsString(typeDecl->getASTContext());
printer.printParentNamespaceForNestedTypes(typeDecl, [&](raw_ostream &os) {
// Print out a forward declaration of the "hidden" _impl class.
printer.printNamespace(cxx_synthesis::getCxxImplNamespaceName(),
[&](raw_ostream &os) {
os << "class";
declAndTypePrinter.printAvailability(os, typeDecl);
os << ' ';
printCxxImplClassName(os, typeDecl);
os << ";\n";
// Print out special functions, like functions that
// access type metadata.
printer.printCTypeMetadataTypeFunction(
typeDecl, typeMetadataFuncName, {});
});
std::string baseClassName;
std::string baseClassQualifiedName;
if (auto *parentClass = typeDecl->getSuperclassDecl()) {
llvm::raw_string_ostream baseNameOS(baseClassName);
ClangSyntaxPrinter(typeDecl->getASTContext(), baseNameOS).printBaseName(parentClass);
llvm::raw_string_ostream baseQualNameOS(baseClassQualifiedName);
ClangSyntaxPrinter(typeDecl->getASTContext(), baseQualNameOS)
.printModuleNamespaceQualifiersIfNeeded(
parentClass->getModuleContext(), typeDecl->getModuleContext());
baseQualNameOS << baseNameOS.str();
} else {
baseClassName = "RefCountedClass";
baseClassQualifiedName = "swift::_impl::RefCountedClass";
}
os << "class";
declAndTypePrinter.printAvailability(os, typeDecl);
ClangSyntaxPrinter(typeDecl->getASTContext(), os).printSymbolUSRAttribute(typeDecl);
os << ' ';
printer.printBaseName(typeDecl);
if (typeDecl->isFinal())
os << " final";
os << " : public " << baseClassQualifiedName;
os << " {\n";
os << "public:\n";
os << " using " << baseClassName << "::" << baseClassName << ";\n";
os << " using " << baseClassName << "::operator=;\n";
bodyPrinter();
os << "protected:\n";
os << " ";
printer.printInlineForThunk();
printer.printBaseName(typeDecl);
os << "(void * _Nonnull ptr) noexcept : " << baseClassName << "(ptr) {}\n";
os << "private:\n";
os << " friend class " << cxx_synthesis::getCxxImplNamespaceName() << "::";
printCxxImplClassName(os, typeDecl);
os << ";\n";
printer.printSwiftMangledNameForDebugger(typeDecl);
os << "};\n\n";
// Print out the "hidden" _impl class.
printer.printNamespace(
cxx_synthesis::getCxxImplNamespaceName(), [&](raw_ostream &os) {
os << "class";
declAndTypePrinter.printAvailability(os, typeDecl);
os << ' ';
printCxxImplClassName(os, typeDecl);
os << " {\n";
os << "public:\n";
os << "static ";
printer.printInlineForThunk();
printer.printBaseName(typeDecl);
os << " makeRetained(void * _Nonnull ptr) noexcept { return ";
printer.printBaseName(typeDecl);
os << "(ptr); }\n";
os << "};\n";
});
});
ClangValueTypePrinter::printTypeGenericTraits(
os, typeDecl, typeMetadataFuncName, /*genericRequirements=*/{},
typeDecl->getModuleContext(), declAndTypePrinter);
}
void ClangClassTypePrinter::printClassTypeReturnScaffold(
raw_ostream &os, const ClassDecl *type, const ModuleDecl *moduleContext,
llvm::function_ref<void(void)> bodyPrinter) {
ClangSyntaxPrinter printer(type->getASTContext(), os);
os << " return ";
printer.printModuleNamespaceQualifiersIfNeeded(type->getModuleContext(),
moduleContext);
if (!printer.printNestedTypeNamespaceQualifiers(type))
os << "::";
os << cxx_synthesis::getCxxImplNamespaceName() << "::";
ClangValueTypePrinter::printCxxImplClassName(os, type);
os << "::makeRetained(";
bodyPrinter();
os << ");\n";
}
void ClangClassTypePrinter::printParameterCxxtoCUseScaffold(
raw_ostream &os, const ClassDecl *type, const ModuleDecl *moduleContext,
llvm::function_ref<void(void)> bodyPrinter, bool isInOut) {
if (isInOut)
os << '&';
os << "::swift::" << cxx_synthesis::getCxxImplNamespaceName()
<< "::_impl_RefCountedClass"
<< "::getOpaquePointer";
if (isInOut)
os << "Ref";
os << '(';
bodyPrinter();
os << ')';
} | cpp | github | https://github.com/apple/swift | lib/PrintAsClang/PrintClangClassType.cpp |
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package time_test
import (
"errors"
"fmt"
"internal/testenv"
"math/rand"
"runtime"
"strings"
"sync"
"sync/atomic"
"testing"
. "time"
_ "unsafe" // for go:linkname
)
// newTimerFunc simulates NewTimer using AfterFunc,
// but this version will not hit the special cases for channels
// that are used when calling NewTimer.
// This makes it easy to test both paths.
func newTimerFunc(d Duration) *Timer {
c := make(chan Time, 1)
t := AfterFunc(d, func() { c <- Now() })
t.C = c
return t
}
// haveHighResSleep is true if the system supports at least ~1ms sleeps.
//
//go:linkname haveHighResSleep runtime.haveHighResSleep
var haveHighResSleep bool
// adjustDelay returns an adjusted delay based on the system sleep resolution.
// Go runtime uses different Windows timers for time.Now and sleeping.
// These can tick at different frequencies and can arrive out of sync.
// The effect can be seen, for example, as time.Sleep(100ms) is actually
// shorter then 100ms when measured as difference between time.Now before and
// after time.Sleep call. This was observed on Windows XP SP3 (windows/386).
func adjustDelay(t *testing.T, delay Duration) Duration {
if haveHighResSleep {
return delay
}
t.Log("adjusting delay for low resolution sleep")
switch runtime.GOOS {
case "windows":
return delay - 17*Millisecond
default:
t.Fatal("adjustDelay unimplemented on " + runtime.GOOS)
return 0
}
}
func TestSleep(t *testing.T) {
const delay = 100 * Millisecond
go func() {
Sleep(delay / 2)
Interrupt()
}()
start := Now()
Sleep(delay)
delayadj := adjustDelay(t, delay)
duration := Since(start)
if duration < delayadj {
t.Fatalf("Sleep(%s) slept for only %s", delay, duration)
}
}
// Test the basic function calling behavior. Correct queuing
// behavior is tested elsewhere, since After and AfterFunc share
// the same code.
func TestAfterFunc(t *testing.T) {
i := 10
c := make(chan bool)
var f func()
f = func() {
i--
if i >= 0 {
AfterFunc(0, f)
Sleep(1 * Second)
} else {
c <- true
}
}
AfterFunc(0, f)
<-c
}
func TestTickerStress(t *testing.T) {
var stop atomic.Bool
go func() {
for !stop.Load() {
runtime.GC()
// Yield so that the OS can wake up the timer thread,
// so that it can generate channel sends for the main goroutine,
// which will eventually set stop = 1 for us.
Sleep(Nanosecond)
}
}()
ticker := NewTicker(1)
for i := 0; i < 100; i++ {
<-ticker.C
}
ticker.Stop()
stop.Store(true)
}
func TestTickerConcurrentStress(t *testing.T) {
var stop atomic.Bool
go func() {
for !stop.Load() {
runtime.GC()
// Yield so that the OS can wake up the timer thread,
// so that it can generate channel sends for the main goroutine,
// which will eventually set stop = 1 for us.
Sleep(Nanosecond)
}
}()
ticker := NewTicker(1)
var wg sync.WaitGroup
for i := 0; i < 10; i++ {
wg.Add(1)
go func() {
defer wg.Done()
for i := 0; i < 100; i++ {
<-ticker.C
}
}()
}
wg.Wait()
ticker.Stop()
stop.Store(true)
}
func TestAfterFuncStarvation(t *testing.T) {
// Start two goroutines ping-ponging on a channel send.
// At any given time, at least one of these goroutines is runnable:
// if the channel buffer is full, the receiver is runnable,
// and if it is not full, the sender is runnable.
//
// In addition, the AfterFunc callback should become runnable after
// the indicated delay.
//
// Even if GOMAXPROCS=1, we expect the runtime to eventually schedule
// the AfterFunc goroutine instead of the runnable channel goroutine.
// However, in https://go.dev/issue/65178 this was observed to live-lock
// on wasip1/wasm and js/wasm after <10000 runs.
defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1))
var (
wg sync.WaitGroup
stop atomic.Bool
c = make(chan bool, 1)
)
wg.Add(2)
go func() {
for !stop.Load() {
c <- true
}
close(c)
wg.Done()
}()
go func() {
for range c {
}
wg.Done()
}()
AfterFunc(1*Microsecond, func() { stop.Store(true) })
wg.Wait()
}
func benchmark(b *testing.B, bench func(*testing.PB)) {
// Create equal number of garbage timers on each P before starting
// the benchmark.
var wg sync.WaitGroup
garbageAll := make([][]*Timer, runtime.GOMAXPROCS(0))
for i := range garbageAll {
wg.Add(1)
go func(i int) {
defer wg.Done()
garbage := make([]*Timer, 1<<15)
for j := range garbage {
garbage[j] = AfterFunc(Hour, nil)
}
garbageAll[i] = garbage
}(i)
}
wg.Wait()
b.ResetTimer()
b.RunParallel(bench)
b.StopTimer()
for _, garbage := range garbageAll {
for _, t := range garbage {
t.Stop()
}
}
}
func BenchmarkAfterFunc1000(b *testing.B) {
benchmark(b, func(pb *testing.PB) {
for pb.Next() {
n := 1000
c := make(chan bool)
var f func()
f = func() {
n--
if n >= 0 {
AfterFunc(0, f)
} else {
c <- true
}
}
AfterFunc(0, f)
<-c
}
})
}
func BenchmarkAfter(b *testing.B) {
benchmark(b, func(pb *testing.PB) {
for pb.Next() {
<-After(1)
}
})
}
func BenchmarkStop(b *testing.B) {
b.Run("impl=chan", func(b *testing.B) {
benchmark(b, func(pb *testing.PB) {
for pb.Next() {
NewTimer(1 * Second).Stop()
}
})
})
b.Run("impl=func", func(b *testing.B) {
benchmark(b, func(pb *testing.PB) {
for pb.Next() {
newTimerFunc(1 * Second).Stop()
}
})
})
}
func BenchmarkSimultaneousAfterFunc1000(b *testing.B) {
benchmark(b, func(pb *testing.PB) {
for pb.Next() {
n := 1000
var wg sync.WaitGroup
wg.Add(n)
for range n {
AfterFunc(0, wg.Done)
}
wg.Wait()
}
})
}
func BenchmarkStartStop1000(b *testing.B) {
benchmark(b, func(pb *testing.PB) {
for pb.Next() {
const N = 1000
timers := make([]*Timer, N)
for i := range timers {
timers[i] = AfterFunc(Hour, nil)
}
for i := range timers {
timers[i].Stop()
}
}
})
}
func BenchmarkReset(b *testing.B) {
b.Run("impl=chan", func(b *testing.B) {
benchmark(b, func(pb *testing.PB) {
t := NewTimer(Hour)
for pb.Next() {
t.Reset(Hour)
}
t.Stop()
})
})
b.Run("impl=func", func(b *testing.B) {
benchmark(b, func(pb *testing.PB) {
t := newTimerFunc(Hour)
for pb.Next() {
t.Reset(Hour)
}
t.Stop()
})
})
}
func BenchmarkSleep1000(b *testing.B) {
benchmark(b, func(pb *testing.PB) {
for pb.Next() {
const N = 1000
var wg sync.WaitGroup
wg.Add(N)
for range N {
go func() {
Sleep(Nanosecond)
wg.Done()
}()
}
wg.Wait()
}
})
}
func TestAfter(t *testing.T) {
const delay = 100 * Millisecond
start := Now()
end := <-After(delay)
delayadj := adjustDelay(t, delay)
if duration := Since(start); duration < delayadj {
t.Fatalf("After(%s) slept for only %d ns", delay, duration)
}
if min := start.Add(delayadj); end.Before(min) {
t.Fatalf("After(%s) expect >= %s, got %s", delay, min, end)
}
}
func TestAfterTick(t *testing.T) {
t.Parallel()
const Count = 10
Delta := 100 * Millisecond
if testing.Short() {
Delta = 10 * Millisecond
}
t0 := Now()
for i := 0; i < Count; i++ {
<-After(Delta)
}
t1 := Now()
d := t1.Sub(t0)
target := Delta * Count
if d < target*9/10 {
t.Fatalf("%d ticks of %s too fast: took %s, expected %s", Count, Delta, d, target)
}
if !testing.Short() && d > target*30/10 {
t.Fatalf("%d ticks of %s too slow: took %s, expected %s", Count, Delta, d, target)
}
}
func TestAfterStop(t *testing.T) {
t.Run("impl=chan", func(t *testing.T) {
testAfterStop(t, NewTimer)
})
t.Run("impl=func", func(t *testing.T) {
testAfterStop(t, newTimerFunc)
})
}
func testAfterStop(t *testing.T, newTimer func(Duration) *Timer) {
// We want to test that we stop a timer before it runs.
// We also want to test that it didn't run after a longer timer.
// Since we don't want the test to run for too long, we don't
// want to use lengthy times. That makes the test inherently flaky.
// So only report an error if it fails five times in a row.
var errs []string
logErrs := func() {
for _, e := range errs {
t.Log(e)
}
}
for i := 0; i < 5; i++ {
AfterFunc(100*Millisecond, func() {})
t0 := newTimer(50 * Millisecond)
c1 := make(chan bool, 1)
t1 := AfterFunc(150*Millisecond, func() { c1 <- true })
c2 := After(200 * Millisecond)
if !t0.Stop() {
errs = append(errs, "failed to stop event 0")
continue
}
if !t1.Stop() {
errs = append(errs, "failed to stop event 1")
continue
}
<-c2
select {
case <-t0.C:
errs = append(errs, "event 0 was not stopped")
continue
case <-c1:
errs = append(errs, "event 1 was not stopped")
continue
default:
}
if t1.Stop() {
errs = append(errs, "Stop returned true twice")
continue
}
// Test passed, so all done.
if len(errs) > 0 {
t.Logf("saw %d errors, ignoring to avoid flakiness", len(errs))
logErrs()
}
return
}
t.Errorf("saw %d errors", len(errs))
logErrs()
}
func TestAfterQueuing(t *testing.T) {
t.Run("impl=chan", func(t *testing.T) {
testAfterQueuing(t, After)
})
t.Run("impl=func", func(t *testing.T) {
testAfterQueuing(t, func(d Duration) <-chan Time { return newTimerFunc(d).C })
})
}
func testAfterQueuing(t *testing.T, after func(Duration) <-chan Time) {
// This test flakes out on some systems,
// so we'll try it a few times before declaring it a failure.
const attempts = 5
err := errors.New("!=nil")
for i := 0; i < attempts && err != nil; i++ {
delta := Duration(20+i*50) * Millisecond
if err = testAfterQueuing1(delta, after); err != nil {
t.Logf("attempt %v failed: %v", i, err)
}
}
if err != nil {
t.Fatal(err)
}
}
var slots = []int{5, 3, 6, 6, 6, 1, 1, 2, 7, 9, 4, 8, 0}
type afterResult struct {
slot int
t Time
}
func await(slot int, result chan<- afterResult, ac <-chan Time) {
result <- afterResult{slot, <-ac}
}
func testAfterQueuing1(delta Duration, after func(Duration) <-chan Time) error {
// make the result channel buffered because we don't want
// to depend on channel queuing semantics that might
// possibly change in the future.
result := make(chan afterResult, len(slots))
t0 := Now()
for _, slot := range slots {
go await(slot, result, After(Duration(slot)*delta))
}
var order []int
var times []Time
for range slots {
r := <-result
order = append(order, r.slot)
times = append(times, r.t)
}
for i := range order {
if i > 0 && order[i] < order[i-1] {
return fmt.Errorf("After calls returned out of order: %v", order)
}
}
for i, t := range times {
dt := t.Sub(t0)
target := Duration(order[i]) * delta
if dt < target-delta/2 || dt > target+delta*10 {
return fmt.Errorf("After(%s) arrived at %s, expected [%s,%s]", target, dt, target-delta/2, target+delta*10)
}
}
return nil
}
func TestTimerStopStress(t *testing.T) {
if testing.Short() {
return
}
t.Parallel()
for i := 0; i < 100; i++ {
go func(i int) {
timer := AfterFunc(2*Second, func() {
t.Errorf("timer %d was not stopped", i)
})
Sleep(1 * Second)
timer.Stop()
}(i)
}
Sleep(3 * Second)
}
func TestSleepZeroDeadlock(t *testing.T) {
// Sleep(0) used to hang, the sequence of events was as follows.
// Sleep(0) sets G's status to Gwaiting, but then immediately returns leaving the status.
// Then the goroutine calls e.g. new and falls down into the scheduler due to pending GC.
// After the GC nobody wakes up the goroutine from Gwaiting status.
defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4))
c := make(chan bool)
go func() {
for i := 0; i < 100; i++ {
runtime.GC()
}
c <- true
}()
for i := 0; i < 100; i++ {
Sleep(0)
tmp := make(chan bool, 1)
tmp <- true
<-tmp
}
<-c
}
func testReset(d Duration) error {
t0 := NewTimer(2 * d)
Sleep(d)
if !t0.Reset(3 * d) {
return errors.New("resetting unfired timer returned false")
}
Sleep(2 * d)
select {
case <-t0.C:
return errors.New("timer fired early")
default:
}
Sleep(2 * d)
select {
case <-t0.C:
default:
return errors.New("reset timer did not fire")
}
if t0.Reset(50 * Millisecond) {
return errors.New("resetting expired timer returned true")
}
return nil
}
func TestReset(t *testing.T) {
// We try to run this test with increasingly larger multiples
// until one works so slow, loaded hardware isn't as flaky,
// but without slowing down fast machines unnecessarily.
//
// (maxDuration is several orders of magnitude longer than we
// expect this test to actually take on a fast, unloaded machine.)
d := 1 * Millisecond
const maxDuration = 10 * Second
for {
err := testReset(d)
if err == nil {
break
}
d *= 2
if d > maxDuration {
t.Error(err)
}
t.Logf("%v; trying duration %v", err, d)
}
}
// Test that sleeping (via Sleep or Timer) for an interval so large it
// overflows does not result in a short sleep duration. Nor does it interfere
// with execution of other timers. If it does, timers in this or subsequent
// tests may not fire.
func TestOverflowSleep(t *testing.T) {
const big = Duration(int64(1<<63 - 1))
go func() {
Sleep(big)
// On failure, this may return after the test has completed, so
// we need to panic instead.
panic("big sleep returned")
}()
select {
case <-After(big):
t.Fatalf("big timeout fired")
case <-After(25 * Millisecond):
// OK
}
const neg = Duration(-1 << 63)
Sleep(neg) // Returns immediately.
select {
case <-After(neg):
// OK
case <-After(1 * Second):
t.Fatalf("negative timeout didn't fire")
}
}
// Test that a panic while deleting a timer does not leave
// the timers mutex held, deadlocking a ticker.Stop in a defer.
func TestIssue5745(t *testing.T) {
ticker := NewTicker(Hour)
defer func() {
// would deadlock here before the fix due to
// lock taken before the segfault.
ticker.Stop()
if r := recover(); r == nil {
t.Error("Expected panic, but none happened.")
}
}()
// cause a panic due to a segfault
var timer *Timer
timer.Stop()
t.Error("Should be unreachable.")
}
func TestOverflowPeriodRuntimeTimer(t *testing.T) {
// This may hang forever if timers are broken. See comment near
// the end of CheckRuntimeTimerOverflow in internal_test.go.
CheckRuntimeTimerPeriodOverflow()
}
func checkZeroPanicString(t *testing.T) {
e := recover()
s, _ := e.(string)
if want := "called on uninitialized Timer"; !strings.Contains(s, want) {
t.Errorf("panic = %v; want substring %q", e, want)
}
}
func TestZeroTimerResetPanics(t *testing.T) {
defer checkZeroPanicString(t)
var tr Timer
tr.Reset(1)
}
func TestZeroTimerStopPanics(t *testing.T) {
defer checkZeroPanicString(t)
var tr Timer
tr.Stop()
}
// Test that zero duration timers aren't missed by the scheduler. Regression test for issue 44868.
func TestZeroTimer(t *testing.T) {
t.Run("impl=chan", func(t *testing.T) {
testZeroTimer(t, NewTimer)
})
t.Run("impl=func", func(t *testing.T) {
testZeroTimer(t, newTimerFunc)
})
t.Run("impl=cache", func(t *testing.T) {
timer := newTimerFunc(Hour)
testZeroTimer(t, func(d Duration) *Timer {
timer.Reset(d)
return timer
})
})
}
func testZeroTimer(t *testing.T, newTimer func(Duration) *Timer) {
if testing.Short() {
t.Skip("-short")
}
for i := 0; i < 1000000; i++ {
s := Now()
ti := newTimer(0)
<-ti.C
if diff := Since(s); diff > 2*Second {
t.Errorf("Expected time to get value from Timer channel in less than 2 sec, took %v", diff)
}
}
}
// Test that rapidly moving a timer earlier doesn't cause it to get dropped.
// Issue 47329.
func TestTimerModifiedEarlier(t *testing.T) {
if runtime.GOOS == "plan9" && runtime.GOARCH == "arm" {
testenv.SkipFlaky(t, 50470)
}
past := Until(Unix(0, 0))
count := 1000
fail := 0
for i := 0; i < count; i++ {
timer := newTimerFunc(Hour)
for j := 0; j < 10; j++ {
if !timer.Stop() {
<-timer.C
}
timer.Reset(past)
}
deadline := NewTimer(10 * Second)
defer deadline.Stop()
now := Now()
select {
case <-timer.C:
if since := Since(now); since > 8*Second {
t.Errorf("timer took too long (%v)", since)
fail++
}
case <-deadline.C:
t.Error("deadline expired")
}
}
if fail > 0 {
t.Errorf("%d failures", fail)
}
}
// Test that rapidly moving timers earlier and later doesn't cause
// some of the sleep times to be lost.
// Issue 47762
func TestAdjustTimers(t *testing.T) {
var rnd = rand.New(rand.NewSource(Now().UnixNano()))
timers := make([]*Timer, 100)
states := make([]int, len(timers))
indices := rnd.Perm(len(timers))
for len(indices) != 0 {
var ii = rnd.Intn(len(indices))
var i = indices[ii]
var timer = timers[i]
var state = states[i]
states[i]++
switch state {
case 0:
timers[i] = newTimerFunc(0)
case 1:
<-timer.C // Timer is now idle.
// Reset to various long durations, which we'll cancel.
case 2:
if timer.Reset(1 * Minute) {
panic("shouldn't be active (1)")
}
case 4:
if timer.Reset(3 * Minute) {
panic("shouldn't be active (3)")
}
case 6:
if timer.Reset(2 * Minute) {
panic("shouldn't be active (2)")
}
// Stop and drain a long-duration timer.
case 3, 5, 7:
if !timer.Stop() {
t.Logf("timer %d state %d Stop returned false", i, state)
<-timer.C
}
// Start a short-duration timer we expect to select without blocking.
case 8:
if timer.Reset(0) {
t.Fatal("timer.Reset returned true")
}
case 9:
now := Now()
<-timer.C
dur := Since(now)
if dur > 750*Millisecond {
t.Errorf("timer %d took %v to complete", i, dur)
}
// Timer is done. Swap with tail and remove.
case 10:
indices[ii] = indices[len(indices)-1]
indices = indices[:len(indices)-1]
}
}
}
func TestStopResult(t *testing.T) {
testStopResetResult(t, true)
}
func TestResetResult(t *testing.T) {
testStopResetResult(t, false)
}
// Test that when racing between running a timer and stopping a timer Stop
// consistently indicates whether a value can be read from the channel.
// Issue #69312.
func testStopResetResult(t *testing.T, testStop bool) {
for _, name := range []string{"0", "1", "2"} {
t.Run("asynctimerchan="+name, func(t *testing.T) {
testStopResetResultGODEBUG(t, testStop, name)
})
}
}
func testStopResetResultGODEBUG(t *testing.T, testStop bool, godebug string) {
t.Setenv("GODEBUG", "asynctimerchan="+godebug)
stopOrReset := func(timer *Timer) bool {
if testStop {
return timer.Stop()
} else {
return timer.Reset(1 * Hour)
}
}
start := make(chan struct{})
var wg sync.WaitGroup
const N = 1000
wg.Add(N)
for range N {
go func() {
defer wg.Done()
<-start
for j := 0; j < 100; j++ {
timer1 := NewTimer(1 * Millisecond)
timer2 := NewTimer(1 * Millisecond)
select {
case <-timer1.C:
if !stopOrReset(timer2) {
// The test fails if this
// channel read times out.
<-timer2.C
}
case <-timer2.C:
if !stopOrReset(timer1) {
// The test fails if this
// channel read times out.
<-timer1.C
}
}
}
}()
}
close(start)
wg.Wait()
}
// Test having a large number of goroutines wake up a ticker simultaneously.
// This used to trigger a crash when run under x/tools/cmd/stress.
func TestMultiWakeupTicker(t *testing.T) {
if testing.Short() {
t.Skip("-short")
}
goroutines := runtime.GOMAXPROCS(0)
timer := NewTicker(Microsecond)
var wg sync.WaitGroup
wg.Add(goroutines)
for range goroutines {
go func() {
defer wg.Done()
for range 100000 {
select {
case <-timer.C:
case <-After(Millisecond):
}
}
}()
}
wg.Wait()
}
// Test having a large number of goroutines wake up a timer simultaneously.
// This used to trigger a crash when run under x/tools/cmd/stress.
func TestMultiWakeupTimer(t *testing.T) {
if testing.Short() {
t.Skip("-short")
}
goroutines := runtime.GOMAXPROCS(0)
timer := NewTimer(Nanosecond)
var wg sync.WaitGroup
wg.Add(goroutines)
for range goroutines {
go func() {
defer wg.Done()
for range 10000 {
select {
case <-timer.C:
default:
}
timer.Reset(Nanosecond)
}
}()
}
wg.Wait()
}
// Benchmark timer latency when the thread that creates the timer is busy with
// other work and the timers must be serviced by other threads.
// https://golang.org/issue/38860
func BenchmarkParallelTimerLatency(b *testing.B) {
gmp := runtime.GOMAXPROCS(0)
if gmp < 2 || runtime.NumCPU() < gmp {
b.Skip("skipping with GOMAXPROCS < 2 or NumCPU < GOMAXPROCS")
}
// allocate memory now to avoid GC interference later.
timerCount := gmp - 1
stats := make([]struct {
sum float64
max Duration
count int64
_ [5]int64 // cache line padding
}, timerCount)
// Ensure the time to start new threads to service timers will not pollute
// the results.
warmupScheduler(gmp)
// Note that other than the AfterFunc calls this benchmark is measuring it
// avoids using any other timers. In particular, the main goroutine uses
// doWork to spin for some durations because up through Go 1.15 if all
// threads are idle sysmon could leave deep sleep when we wake.
// Ensure sysmon is in deep sleep.
doWork(30 * Millisecond)
b.ResetTimer()
const delay = Millisecond
var wg sync.WaitGroup
var count int32
for i := 0; i < b.N; i++ {
wg.Add(timerCount)
atomic.StoreInt32(&count, 0)
for j := 0; j < timerCount; j++ {
expectedWakeup := Now().Add(delay)
AfterFunc(delay, func() {
late := Since(expectedWakeup)
if late < 0 {
late = 0
}
stats[j].count++
stats[j].sum += float64(late.Nanoseconds())
if late > stats[j].max {
stats[j].max = late
}
atomic.AddInt32(&count, 1)
for atomic.LoadInt32(&count) < int32(timerCount) {
// spin until all timers fired
}
wg.Done()
})
}
for atomic.LoadInt32(&count) < int32(timerCount) {
// spin until all timers fired
}
wg.Wait()
// Spin for a bit to let the other scheduler threads go idle before the
// next round.
doWork(Millisecond)
}
var total float64
var samples float64
maximum := Duration(0)
for _, s := range stats {
maximum = max(maximum, s.max)
total += s.sum
samples += float64(s.count)
}
b.ReportMetric(0, "ns/op")
b.ReportMetric(total/samples, "avg-late-ns")
b.ReportMetric(float64(maximum.Nanoseconds()), "max-late-ns")
}
// Benchmark timer latency with staggered wakeup times and varying CPU bound
// workloads. https://golang.org/issue/38860
func BenchmarkStaggeredTickerLatency(b *testing.B) {
gmp := runtime.GOMAXPROCS(0)
if gmp < 2 || runtime.NumCPU() < gmp {
b.Skip("skipping with GOMAXPROCS < 2 or NumCPU < GOMAXPROCS")
}
const delay = 3 * Millisecond
for _, dur := range []Duration{300 * Microsecond, 2 * Millisecond} {
b.Run(fmt.Sprintf("work-dur=%s", dur), func(b *testing.B) {
for tickersPerP := 1; tickersPerP < int(delay/dur)+1; tickersPerP++ {
tickerCount := gmp * tickersPerP
b.Run(fmt.Sprintf("tickers-per-P=%d", tickersPerP), func(b *testing.B) {
// allocate memory now to avoid GC interference later.
stats := make([]struct {
sum float64
max Duration
count int64
_ [5]int64 // cache line padding
}, tickerCount)
// Ensure the time to start new threads to service timers
// will not pollute the results.
warmupScheduler(gmp)
b.ResetTimer()
var wg sync.WaitGroup
wg.Add(tickerCount)
for j := 0; j < tickerCount; j++ {
doWork(delay / Duration(gmp))
expectedWakeup := Now().Add(delay)
ticker := NewTicker(delay)
go func(c int, ticker *Ticker, firstWake Time) {
defer ticker.Stop()
for ; c > 0; c-- {
<-ticker.C
late := Since(expectedWakeup)
if late < 0 {
late = 0
}
stats[j].count++
stats[j].sum += float64(late.Nanoseconds())
if late > stats[j].max {
stats[j].max = late
}
expectedWakeup = expectedWakeup.Add(delay)
doWork(dur)
}
wg.Done()
}(b.N, ticker, expectedWakeup)
}
wg.Wait()
var total float64
var samples float64
max := Duration(0)
for _, s := range stats {
if s.max > max {
max = s.max
}
total += s.sum
samples += float64(s.count)
}
b.ReportMetric(0, "ns/op")
b.ReportMetric(total/samples, "avg-late-ns")
b.ReportMetric(float64(max.Nanoseconds()), "max-late-ns")
})
}
})
}
}
// warmupScheduler ensures the scheduler has at least targetThreadCount threads
// in its thread pool.
func warmupScheduler(targetThreadCount int) {
var wg sync.WaitGroup
var count int32
for i := 0; i < targetThreadCount; i++ {
wg.Add(1)
go func() {
atomic.AddInt32(&count, 1)
for atomic.LoadInt32(&count) < int32(targetThreadCount) {
// spin until all threads started
}
// spin a bit more to ensure they are all running on separate CPUs.
doWork(Millisecond)
wg.Done()
}()
}
wg.Wait()
}
func doWork(dur Duration) {
start := Now()
for Since(start) < dur {
}
}
func BenchmarkAdjustTimers10000(b *testing.B) {
benchmark(b, func(pb *testing.PB) {
for pb.Next() {
const n = 10000
timers := make([]*Timer, 0, n)
for range n {
t := AfterFunc(Hour, func() {})
timers = append(timers, t)
}
timers[n-1].Reset(Nanosecond)
Sleep(Microsecond)
for _, t := range timers {
t.Stop()
}
}
})
} | go | github | https://github.com/golang/go | src/time/sleep_test.go |
""" Fantasm: A taskqueue-based Finite State Machine for App Engine Python
Docs and examples: http://code.google.com/p/fantasm/
Copyright 2010 VendAsta Technologies Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Main module for fantasm implementation.
This module should be specified as a handler for fantasm URLs in app.yaml:
handlers:
- url: /fantasm/.*
login: admin
script: fantasm/main.py
"""
from google.appengine.ext import webapp
from google.appengine.ext.webapp import util
from fantasm import handlers, console
def createApplication():
"""Create new WSGIApplication and register all handlers.
Returns:
an instance of webapp.WSGIApplication with all fantasm handlers registered.
"""
return webapp.WSGIApplication([
(r"^/[^\/]+/fsm/.+", handlers.FSMHandler),
(r"^/[^\/]+/cleanup/", handlers.FSMFanInCleanupHandler),
(r"^/[^\/]+/graphviz/.+", handlers.FSMGraphvizHandler),
(r"^/[^\/]+/log/", handlers.FSMLogHandler),
(r"^/[^\/]+/?", console.Dashboard),
],
debug=True)
APP = createApplication()
def main():
""" Main entry point. """
import os
if os.environ.get('SERVER_SOFTWARE') == 'Development/1.0':
# this seems to be a dev_appserver.py bug. causes unicode errors when trying to process the request
os.environ['QUERY_STRING'] = str(os.environ['QUERY_STRING'])
util.run_wsgi_app(APP)
if __name__ == "__main__":
main() | unknown | codeparrot/codeparrot-clean | ||
# Copyright (c) 2015 Kurt Yoder
# See the file LICENSE for copying permission.
import logging
import os
import importlib
import yaml
from . import primitives
from .role_dirs import defaults_dir, files_dir, handlers_dir, meta_dir, tasks_dir, templates_dir, vars_dir
# TODO: split this into role and playbook
class Ansible(object):
'''This object encapsulates all knowledge about how to build Ansible
playbooks. Roles are structured according to Ansible best practices. See:
http://docs.ansible.com/ansible/playbooks_best_practices.html'''
top_dirs = ['tasks', 'handlers', 'templates', 'files', 'vars', 'defaults',
'meta']
# human-friendly description of the code this conf mgmt system generates:
descriptor = 'Ansible role'
def __init__(self, path, facts):
self.path = path
self.facts = facts
self.logger = logging.getLogger(__name__ + '.' + type(self).__name__)
assert os.path.isdir(self.path)
self.directory_handlers = {}
for dir_name in Ansible.top_dirs:
self.set_dir_handlers(dir_name)
def set_dir_handlers(self, dir_name):
'''Dynamically load directory name as an object of the correct
class.'''
# tasks => tasks_dir
path_name = dir_name+'_dir'
module_name = globals()[path_name]
# tasks => Tasks
class_name = dir_name.capitalize()
# == tasks_dir.Tasks
DirModule = getattr(module_name, class_name)
self.directory_handlers[dir_name] = DirModule(self.path)
def translate_set(self, set_obj):
'''First iteration: assume set info equates to action. For example: for
the set with info 'add|packages|apache2', assume the following: by
installing package apache2, this set's diffs are generated.'''
PrimitiveClass = primitives.get_class(set_obj.info.system)
primitive = PrimitiveClass(self.facts)
primitive.update_directives(set_obj.info.name, self.directory_handlers)
def write(self):
'''Write the role files using my handlers.'''
for handler_name, handler in self.directory_handlers.items():
self.logger.debug('writing handler: %s',handler_name)
handler.write()
def get_report(self):
'''Show what we did.'''
out_str = ''
for handler_name, handler in self.directory_handlers.items():
output = handler.get_report()
if output == '': continue
out_str += '- %s'%output
return out_str | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/env python2
import re
from psparser import PSLiteral
from glyphlist import glyphname2unicode
from latin_enc import ENCODING
## name2unicode
##
STRIP_NAME = re.compile(r'[0-9]+')
def name2unicode(name):
"""Converts Adobe glyph names to Unicode numbers."""
if name in glyphname2unicode:
return glyphname2unicode[name]
m = STRIP_NAME.search(name)
if not m: raise KeyError(name)
return unichr(int(m.group(0)))
## EncodingDB
##
class EncodingDB(object):
std2unicode = {}
mac2unicode = {}
win2unicode = {}
pdf2unicode = {}
for (name,std,mac,win,pdf) in ENCODING:
c = name2unicode(name)
if std: std2unicode[std] = c
if mac: mac2unicode[mac] = c
if win: win2unicode[win] = c
if pdf: pdf2unicode[pdf] = c
encodings = {
'StandardEncoding': std2unicode,
'MacRomanEncoding': mac2unicode,
'WinAnsiEncoding': win2unicode,
'PDFDocEncoding': pdf2unicode,
}
@classmethod
def get_encoding(klass, name, diff=None):
cid2unicode = klass.encodings.get(name, klass.std2unicode)
if diff:
cid2unicode = cid2unicode.copy()
cid = 0
for x in diff:
if isinstance(x, int):
cid = x
elif isinstance(x, PSLiteral):
try:
cid2unicode[cid] = name2unicode(x.name)
except KeyError:
pass
cid += 1
return cid2unicode | unknown | codeparrot/codeparrot-clean | ||
"""setuptools.command.egg_info
Create a distribution's .egg-info directory and contents"""
from distutils.filelist import FileList as _FileList
from distutils.util import convert_path
from distutils import log
import distutils.errors
import distutils.filelist
import os
import re
import sys
import io
import warnings
import time
import collections
from setuptools.extern import six
from setuptools.extern.six.moves import map
from setuptools import Command
from setuptools.command.sdist import sdist
from setuptools.command.sdist import walk_revctrl
from setuptools.command.setopt import edit_config
from setuptools.command import bdist_egg
from pkg_resources import (
parse_requirements, safe_name, parse_version,
safe_version, yield_lines, EntryPoint, iter_entry_points, to_filename)
import setuptools.unicode_utils as unicode_utils
from pkg_resources.extern import packaging
try:
from setuptools_svn import svn_utils
except ImportError:
pass
class egg_info(Command):
description = "create a distribution's .egg-info directory"
user_options = [
('egg-base=', 'e', "directory containing .egg-info directories"
" (default: top of the source tree)"),
('tag-svn-revision', 'r',
"Add subversion revision ID to version number"),
('tag-date', 'd', "Add date stamp (e.g. 20050528) to version number"),
('tag-build=', 'b', "Specify explicit tag to add to version number"),
('no-svn-revision', 'R',
"Don't add subversion revision ID [default]"),
('no-date', 'D', "Don't include date stamp [default]"),
]
boolean_options = ['tag-date', 'tag-svn-revision']
negative_opt = {'no-svn-revision': 'tag-svn-revision',
'no-date': 'tag-date'}
def initialize_options(self):
self.egg_name = None
self.egg_version = None
self.egg_base = None
self.egg_info = None
self.tag_build = None
self.tag_svn_revision = 0
self.tag_date = 0
self.broken_egg_info = False
self.vtags = None
def save_version_info(self, filename):
"""
Materialize the values of svn_revision and date into the
build tag. Install these keys in a deterministic order
to avoid arbitrary reordering on subsequent builds.
"""
# python 2.6 compatibility
odict = getattr(collections, 'OrderedDict', dict)
egg_info = odict()
# follow the order these keys would have been added
# when PYTHONHASHSEED=0
egg_info['tag_build'] = self.tags()
egg_info['tag_date'] = 0
egg_info['tag_svn_revision'] = 0
edit_config(filename, dict(egg_info=egg_info))
def finalize_options(self):
self.egg_name = safe_name(self.distribution.get_name())
self.vtags = self.tags()
self.egg_version = self.tagged_version()
parsed_version = parse_version(self.egg_version)
try:
is_version = isinstance(parsed_version, packaging.version.Version)
spec = (
"%s==%s" if is_version else "%s===%s"
)
list(
parse_requirements(spec % (self.egg_name, self.egg_version))
)
except ValueError:
raise distutils.errors.DistutilsOptionError(
"Invalid distribution name or version syntax: %s-%s" %
(self.egg_name, self.egg_version)
)
if self.egg_base is None:
dirs = self.distribution.package_dir
self.egg_base = (dirs or {}).get('', os.curdir)
self.ensure_dirname('egg_base')
self.egg_info = to_filename(self.egg_name) + '.egg-info'
if self.egg_base != os.curdir:
self.egg_info = os.path.join(self.egg_base, self.egg_info)
if '-' in self.egg_name:
self.check_broken_egg_info()
# Set package version for the benefit of dumber commands
# (e.g. sdist, bdist_wininst, etc.)
#
self.distribution.metadata.version = self.egg_version
# If we bootstrapped around the lack of a PKG-INFO, as might be the
# case in a fresh checkout, make sure that any special tags get added
# to the version info
#
pd = self.distribution._patched_dist
if pd is not None and pd.key == self.egg_name.lower():
pd._version = self.egg_version
pd._parsed_version = parse_version(self.egg_version)
self.distribution._patched_dist = None
def write_or_delete_file(self, what, filename, data, force=False):
"""Write `data` to `filename` or delete if empty
If `data` is non-empty, this routine is the same as ``write_file()``.
If `data` is empty but not ``None``, this is the same as calling
``delete_file(filename)`. If `data` is ``None``, then this is a no-op
unless `filename` exists, in which case a warning is issued about the
orphaned file (if `force` is false), or deleted (if `force` is true).
"""
if data:
self.write_file(what, filename, data)
elif os.path.exists(filename):
if data is None and not force:
log.warn(
"%s not set in setup(), but %s exists", what, filename
)
return
else:
self.delete_file(filename)
def write_file(self, what, filename, data):
"""Write `data` to `filename` (if not a dry run) after announcing it
`what` is used in a log message to identify what is being written
to the file.
"""
log.info("writing %s to %s", what, filename)
if six.PY3:
data = data.encode("utf-8")
if not self.dry_run:
f = open(filename, 'wb')
f.write(data)
f.close()
def delete_file(self, filename):
"""Delete `filename` (if not a dry run) after announcing it"""
log.info("deleting %s", filename)
if not self.dry_run:
os.unlink(filename)
def tagged_version(self):
version = self.distribution.get_version()
# egg_info may be called more than once for a distribution,
# in which case the version string already contains all tags.
if self.vtags and version.endswith(self.vtags):
return safe_version(version)
return safe_version(version + self.vtags)
def run(self):
self.mkpath(self.egg_info)
installer = self.distribution.fetch_build_egg
for ep in iter_entry_points('egg_info.writers'):
ep.require(installer=installer)
writer = ep.resolve()
writer(self, ep.name, os.path.join(self.egg_info, ep.name))
# Get rid of native_libs.txt if it was put there by older bdist_egg
nl = os.path.join(self.egg_info, "native_libs.txt")
if os.path.exists(nl):
self.delete_file(nl)
self.find_sources()
def tags(self):
version = ''
if self.tag_build:
version += self.tag_build
if self.tag_svn_revision:
version += '-r%s' % self.get_svn_revision()
if self.tag_date:
version += time.strftime("-%Y%m%d")
return version
@staticmethod
def get_svn_revision():
if 'svn_utils' not in globals():
return "0"
return str(svn_utils.SvnInfo.load(os.curdir).get_revision())
def find_sources(self):
"""Generate SOURCES.txt manifest file"""
manifest_filename = os.path.join(self.egg_info, "SOURCES.txt")
mm = manifest_maker(self.distribution)
mm.manifest = manifest_filename
mm.run()
self.filelist = mm.filelist
def check_broken_egg_info(self):
bei = self.egg_name + '.egg-info'
if self.egg_base != os.curdir:
bei = os.path.join(self.egg_base, bei)
if os.path.exists(bei):
log.warn(
"-" * 78 + '\n'
"Note: Your current .egg-info directory has a '-' in its name;"
'\nthis will not work correctly with "setup.py develop".\n\n'
'Please rename %s to %s to correct this problem.\n' + '-' * 78,
bei, self.egg_info
)
self.broken_egg_info = self.egg_info
self.egg_info = bei # make it work for now
class FileList(_FileList):
"""File list that accepts only existing, platform-independent paths"""
def append(self, item):
if item.endswith('\r'): # Fix older sdists built on Windows
item = item[:-1]
path = convert_path(item)
if self._safe_path(path):
self.files.append(path)
def extend(self, paths):
self.files.extend(filter(self._safe_path, paths))
def _repair(self):
"""
Replace self.files with only safe paths
Because some owners of FileList manipulate the underlying
``files`` attribute directly, this method must be called to
repair those paths.
"""
self.files = list(filter(self._safe_path, self.files))
def _safe_path(self, path):
enc_warn = "'%s' not %s encodable -- skipping"
# To avoid accidental trans-codings errors, first to unicode
u_path = unicode_utils.filesys_decode(path)
if u_path is None:
log.warn("'%s' in unexpected encoding -- skipping" % path)
return False
# Must ensure utf-8 encodability
utf8_path = unicode_utils.try_encode(u_path, "utf-8")
if utf8_path is None:
log.warn(enc_warn, path, 'utf-8')
return False
try:
# accept is either way checks out
if os.path.exists(u_path) or os.path.exists(utf8_path):
return True
# this will catch any encode errors decoding u_path
except UnicodeEncodeError:
log.warn(enc_warn, path, sys.getfilesystemencoding())
class manifest_maker(sdist):
template = "MANIFEST.in"
def initialize_options(self):
self.use_defaults = 1
self.prune = 1
self.manifest_only = 1
self.force_manifest = 1
def finalize_options(self):
pass
def run(self):
self.filelist = FileList()
if not os.path.exists(self.manifest):
self.write_manifest() # it must exist so it'll get in the list
self.filelist.findall()
self.add_defaults()
if os.path.exists(self.template):
self.read_template()
self.prune_file_list()
self.filelist.sort()
self.filelist.remove_duplicates()
self.write_manifest()
def _manifest_normalize(self, path):
path = unicode_utils.filesys_decode(path)
return path.replace(os.sep, '/')
def write_manifest(self):
"""
Write the file list in 'self.filelist' to the manifest file
named by 'self.manifest'.
"""
self.filelist._repair()
# Now _repairs should encodability, but not unicode
files = [self._manifest_normalize(f) for f in self.filelist.files]
msg = "writing manifest file '%s'" % self.manifest
self.execute(write_file, (self.manifest, files), msg)
def warn(self, msg): # suppress missing-file warnings from sdist
if not msg.startswith("standard file not found:"):
sdist.warn(self, msg)
def add_defaults(self):
sdist.add_defaults(self)
self.filelist.append(self.template)
self.filelist.append(self.manifest)
rcfiles = list(walk_revctrl())
if rcfiles:
self.filelist.extend(rcfiles)
elif os.path.exists(self.manifest):
self.read_manifest()
ei_cmd = self.get_finalized_command('egg_info')
self._add_egg_info(cmd=ei_cmd)
self.filelist.include_pattern("*", prefix=ei_cmd.egg_info)
def _add_egg_info(self, cmd):
"""
Add paths for egg-info files for an external egg-base.
The egg-info files are written to egg-base. If egg-base is
outside the current working directory, this method
searchs the egg-base directory for files to include
in the manifest. Uses distutils.filelist.findall (which is
really the version monkeypatched in by setuptools/__init__.py)
to perform the search.
Since findall records relative paths, prefix the returned
paths with cmd.egg_base, so add_default's include_pattern call
(which is looking for the absolute cmd.egg_info) will match
them.
"""
if cmd.egg_base == os.curdir:
# egg-info files were already added by something else
return
discovered = distutils.filelist.findall(cmd.egg_base)
resolved = (os.path.join(cmd.egg_base, path) for path in discovered)
self.filelist.allfiles.extend(resolved)
def prune_file_list(self):
build = self.get_finalized_command('build')
base_dir = self.distribution.get_fullname()
self.filelist.exclude_pattern(None, prefix=build.build_base)
self.filelist.exclude_pattern(None, prefix=base_dir)
sep = re.escape(os.sep)
self.filelist.exclude_pattern(r'(^|' + sep + r')(RCS|CVS|\.svn)' + sep,
is_regex=1)
def write_file(filename, contents):
"""Create a file with the specified name and write 'contents' (a
sequence of strings without line terminators) to it.
"""
contents = "\n".join(contents)
# assuming the contents has been vetted for utf-8 encoding
contents = contents.encode("utf-8")
with open(filename, "wb") as f: # always write POSIX-style manifest
f.write(contents)
def write_pkg_info(cmd, basename, filename):
log.info("writing %s", filename)
if not cmd.dry_run:
metadata = cmd.distribution.metadata
metadata.version, oldver = cmd.egg_version, metadata.version
metadata.name, oldname = cmd.egg_name, metadata.name
try:
# write unescaped data to PKG-INFO, so older pkg_resources
# can still parse it
metadata.write_pkg_info(cmd.egg_info)
finally:
metadata.name, metadata.version = oldname, oldver
safe = getattr(cmd.distribution, 'zip_safe', None)
bdist_egg.write_safety_flag(cmd.egg_info, safe)
def warn_depends_obsolete(cmd, basename, filename):
if os.path.exists(filename):
log.warn(
"WARNING: 'depends.txt' is not used by setuptools 0.6!\n"
"Use the install_requires/extras_require setup() args instead."
)
def _write_requirements(stream, reqs):
lines = yield_lines(reqs or ())
append_cr = lambda line: line + '\n'
lines = map(append_cr, lines)
stream.writelines(lines)
def write_requirements(cmd, basename, filename):
dist = cmd.distribution
data = six.StringIO()
_write_requirements(data, dist.install_requires)
extras_require = dist.extras_require or {}
for extra in sorted(extras_require):
data.write('\n[{extra}]\n'.format(**vars()))
_write_requirements(data, extras_require[extra])
cmd.write_or_delete_file("requirements", filename, data.getvalue())
def write_setup_requirements(cmd, basename, filename):
data = StringIO()
_write_requirements(data, cmd.distribution.setup_requires)
cmd.write_or_delete_file("setup-requirements", filename, data.getvalue())
def write_toplevel_names(cmd, basename, filename):
pkgs = dict.fromkeys(
[
k.split('.', 1)[0]
for k in cmd.distribution.iter_distribution_names()
]
)
cmd.write_file("top-level names", filename, '\n'.join(sorted(pkgs)) + '\n')
def overwrite_arg(cmd, basename, filename):
write_arg(cmd, basename, filename, True)
def write_arg(cmd, basename, filename, force=False):
argname = os.path.splitext(basename)[0]
value = getattr(cmd.distribution, argname, None)
if value is not None:
value = '\n'.join(value) + '\n'
cmd.write_or_delete_file(argname, filename, value, force)
def write_entries(cmd, basename, filename):
ep = cmd.distribution.entry_points
if isinstance(ep, six.string_types) or ep is None:
data = ep
elif ep is not None:
data = []
for section, contents in sorted(ep.items()):
if not isinstance(contents, six.string_types):
contents = EntryPoint.parse_group(section, contents)
contents = '\n'.join(sorted(map(str, contents.values())))
data.append('[%s]\n%s\n\n' % (section, contents))
data = ''.join(data)
cmd.write_or_delete_file('entry points', filename, data, True)
def get_pkg_info_revision():
"""
Get a -r### off of PKG-INFO Version in case this is an sdist of
a subversion revision.
"""
warnings.warn("get_pkg_info_revision is deprecated.", DeprecationWarning)
if os.path.exists('PKG-INFO'):
with io.open('PKG-INFO') as f:
for line in f:
match = re.match(r"Version:.*-r(\d+)\s*$", line)
if match:
return int(match.group(1))
return 0 | unknown | codeparrot/codeparrot-clean | ||
import gio
from plugin_base.provider import Mode
class File:
"""This is a wrapper class that provides file-like object but
uses gio.File for actual operations."""
def __init__(self, path, mode):
if mode == Mode.READ:
self._resource = gio.File(path).read()
elif mode == Mode.WRITE:
if gio.File(path).query_exists():
gio.File(path).delete()
self._resource = gio.File(path).create()
elif mode == Mode.APPEND:
self._resource = gio.File(path).append_to()
def close(self):
"""Close file"""
self._resource.close()
def closed(self):
"""If file is closed"""
self._resource.is_closed()
def flush(self):
"""Flush internal buffer"""
if hasattr(self._resource, 'flush'):
self._resource.flush()
def read(self, size=-1):
"""Read at most _size_ bytes from the file"""
result = self._resource.read(size)
if result is True:
result = ""
return result
def seek(self, offset, whence=0):
"""Set the file's current position"""
relative = (1, 0, 2)[whence]
if self._resource.can_seek():
self._resource.seek(offset, relative)
def tell(self):
"""Return file's current position"""
return self._resource.tell()
def truncate(self, size=None):
"""Truncate the file's size"""
if size is None:
size = self.tell()
if self._resource.can_truncate():
self._resource.truncate(size)
def write(self, buff):
"""Write string to the file"""
self._resource.write(buff) | unknown | codeparrot/codeparrot-clean | ||
##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2013 Stanford University and the Authors
#
# Authors: Robert McGibbon
# Contributors:
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
##############################################################################
# Imports
##############################################################################
from __future__ import print_function, division
import sys
import types
import warnings
import pkgutil
import importlib
from inspect import (isclass, ismodule, isfunction, ismethod,
getmembers, getdoc, getmodule, getargs, isbuiltin)
from mdtraj.testing.docscrape import NumpyDocString
from mdtraj.utils.six import get_function_code, get_function_closure, PY2
__all__ = ['docstring_verifiers', 'import_all_modules']
##############################################################################
# functions
##############################################################################
def docstring_verifiers(module, error_on_none=False):
"""Yield an iterable of tests that verify the docstrings (for
adherance to the NumpyDoc format) of all functions/classes defined
in a module.
Parameters
----------
module : module
The module to test
error_on_none : bool, default=False
Throw an error if no docstring is defined
"""
# These are the types that we want to check
# currently, the docstring on classes are not being checked, since
# isclass() is not in the list
acceptors = [isfunction, ismethod, isbuiltin]
accept = lambda f: any([acc(f) for acc in acceptors])
functions = [f for f in walk(module) if accept(f)]
def format(f):
"""
Format a method/function/class as a string
Parameters
----------
f : function, method, class
Returns
-------
repr : string
A string represntation
"""
if ismethod(f):
if PY2:
return '.'.join([getmodule(f).__name__, f.im_class.__name__, f.__name__])
else:
return '.'.join([getmodule(f).__name__, f.__self__.__class__.__name__, f.__name__])
if isfunction(f) or isbuiltin(f):
return '.'.join([getmodule(f).__name__, f.__name__])
if isclass(f):
return f.__name__
return 'Error'
def check_docstring(f):
"""
Ensure the docstring of `f` is in accordance with the numpy standard
Currently, only the Parameters section of the docstring is checked.
Parameters
----------
f : function, method, class
Returns
-------
repr : string
A string represntation
"""
doc = getdoc(f)
if doc is None:
if error_on_none:
raise ValueError('no docstring for %s' % format(f))
else:
with warnings.catch_warnings():
warnings.simplefilter('error')
parsed = NumpyDocString(doc)
param_names = {e[0] for e in parsed['Parameters']}
if isbuiltin(f):
# You can't get the arglist from a builtin, which
# is how cython functions turn up
# but you can, hackily, get the number of arguments it wants
# by parseing the error hen you supply too many
import re
try:
f(*list(range(100)))
except TypeError as e:
m = re.search('takes at most (\d+) positional arguments', str(e))
if not m:
return
n_args = int(m.group(1))
if len(param_names) != n_args:
raise ValueError("In %s, number of arguments, %d, doesn't "
" match the length of the Parameters in the "
"docstring, %d" % (format(f), n_args, len(param_names)))
return
args = set(getargs(get_function_code(f)).args)
if 'self' in args:
args.remove('self')
if 'cls' in args:
args.remove('cls')
if args != param_names:
raise ValueError("In %s, arguments %s don't "
"match Parameters list %s" % (format(f),
list(args), list(param_names)))
for f in functions:
qq = lambda: check_docstring(f)
qq.description = 'NumpyDoc: %s.%s' % (module.__name__, f.__name__)
qq.fname = f.__name__
yield qq
def ispackage(obj):
"""
Check if obj is a package. Simply look for whether its a module whose
filename is __init__.py(c)
Parameters
----------
obj : module
"""
if ismodule(obj):
return obj.__file__.endswith("__init__.pyc") or \
obj.__file__.endswith("__init__.py")
return False
def walk(module):
"""
Get all of the functions, classes and their methods defined within
a python module
Parameters
----------
module : module
Returns
-------
funcs : list
List of functions, classes and methods
"""
assert ismodule(module)
if ispackage(module):
raise ValueError('Sorry, you need to supply me a module, not a package')
def is_valid(obj):
if getmodule(obj) == module:
# cython specific stuff
if module.__file__.endswith('.so'):
if isbuiltin(obj):
return not obj.__name__.startswith('_')
if ismethod(obj) or isfunction(obj):
return not obj.__name__.startswith('_')
if isclass(obj):
return True
return False
instack = [v for k, v in getmembers(module) if is_valid(v)]
outstack = []
while True:
try:
item = instack.pop()
except IndexError:
break
outstack.append(item)
if isclass(item):
instack.extend([v for k, v in getmembers(item) if is_valid(v)])
return outstack
def import_all_modules(pkg):
result = []
for _, modname, ispkg in pkgutil.iter_modules(pkg.__path__):
c = '%s.%s' % (pkg.__name__, modname)
if modname.startswith('test_'):
continue
try:
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=DeprecationWarning)
mod = importlib.import_module(c)
if ispkg:
result.extend(import_all_modules(mod))
else:
result.append(mod)
except ImportError as e:
print('e', e)
continue
return result | unknown | codeparrot/codeparrot-clean | ||
# frozen_string_literal: true
module Kernel
###
# An alias for Psych.dump_stream meant to be used with IRB.
def y *objects
puts Psych.dump_stream(*objects)
end
private :y
end | ruby | github | https://github.com/ruby/ruby | ext/psych/lib/psych/y.rb |
"""
==============================
Probability Calibration curves
==============================
When performing classification one often wants to predict not only the class
label, but also the associated probability. This probability gives some
kind of confidence on the prediction. This example demonstrates how to display
how well calibrated the predicted probabilities are and how to calibrate an
uncalibrated classifier.
The experiment is performed on an artificial dataset for binary classification
with 100.000 samples (1.000 of them are used for model fitting) with 20
features. Of the 20 features, only 2 are informative and 10 are redundant. The
first figure shows the estimated probabilities obtained with logistic
regression, Gaussian naive Bayes, and Gaussian naive Bayes with both isotonic
calibration and sigmoid calibration. The calibration performance is evaluated
with Brier score, reported in the legend (the smaller the better). One can
observe here that logistic regression is well calibrated while raw Gaussian
naive Bayes performs very badly. This is because of the redundant features
which violate the assumption of feature-independence and result in an overly
confident classifier, which is indicated by the typical transposed-sigmoid
curve.
Calibration of the probabilities of Gaussian naive Bayes with isotonic
regression can fix this issue as can be seen from the nearly diagonal
calibration curve. Sigmoid calibration also improves the brier score slightly,
albeit not as strongly as the non-parametric isotonic regression. This can be
attributed to the fact that we have plenty of calibration data such that the
greater flexibility of the non-parametric model can be exploited.
The second figure shows the calibration curve of a linear support-vector
classifier (LinearSVC). LinearSVC shows the opposite behavior as Gaussian
naive Bayes: the calibration curve has a sigmoid curve, which is typical for
an under-confident classifier. In the case of LinearSVC, this is caused by the
margin property of the hinge loss, which lets the model focus on hard samples
that are close to the decision boundary (the support vectors).
Both kinds of calibration can fix this issue and yield nearly identical
results. This shows that sigmoid calibration can deal with situations where
the calibration curve of the base classifier is sigmoid (e.g., for LinearSVC)
but not where it is transposed-sigmoid (e.g., Gaussian naive Bayes).
"""
print(__doc__)
# Author: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD Style.
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import LinearSVC
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import (brier_score_loss, precision_score, recall_score,
f1_score)
from sklearn.calibration import CalibratedClassifierCV, calibration_curve
from sklearn.cross_validation import train_test_split
# Create dataset of classification task with many redundant and few
# informative features
X, y = datasets.make_classification(n_samples=100000, n_features=20,
n_informative=2, n_redundant=10,
random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.99,
random_state=42)
def plot_calibration_curve(est, name, fig_index):
"""Plot calibration curve for est w/o and with calibration. """
# Calibrated with isotonic calibration
isotonic = CalibratedClassifierCV(est, cv=2, method='isotonic')
# Calibrated with sigmoid calibration
sigmoid = CalibratedClassifierCV(est, cv=2, method='sigmoid')
# Logistic regression with no calibration as baseline
lr = LogisticRegression(C=1., solver='lbfgs')
fig = plt.figure(fig_index, figsize=(10, 10))
ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
ax2 = plt.subplot2grid((3, 1), (2, 0))
ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")
for clf, name in [(lr, 'Logistic'),
(est, name),
(isotonic, name + ' + Isotonic'),
(sigmoid, name + ' + Sigmoid')]:
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
if hasattr(clf, "predict_proba"):
prob_pos = clf.predict_proba(X_test)[:, 1]
else: # use decision function
prob_pos = clf.decision_function(X_test)
prob_pos = \
(prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())
clf_score = brier_score_loss(y_test, prob_pos, pos_label=y.max())
print("%s:" % name)
print("\tBrier: %1.3f" % (clf_score))
print("\tPrecision: %1.3f" % precision_score(y_test, y_pred))
print("\tRecall: %1.3f" % recall_score(y_test, y_pred))
print("\tF1: %1.3f\n" % f1_score(y_test, y_pred))
fraction_of_positives, mean_predicted_value = \
calibration_curve(y_test, prob_pos, n_bins=10)
ax1.plot(mean_predicted_value, fraction_of_positives, "s-",
label="%s (%1.3f)" % (name, clf_score))
ax2.hist(prob_pos, range=(0, 1), bins=10, label=name,
histtype="step", lw=2)
ax1.set_ylabel("Fraction of positives")
ax1.set_ylim([-0.05, 1.05])
ax1.legend(loc="lower right")
ax1.set_title('Calibration plots (reliability curve)')
ax2.set_xlabel("Mean predicted value")
ax2.set_ylabel("Count")
ax2.legend(loc="upper center", ncol=2)
plt.tight_layout()
# Plot calibration cuve for Gaussian Naive Bayes
plot_calibration_curve(GaussianNB(), "Naive Bayes", 1)
# Plot calibration cuve for Linear SVC
plot_calibration_curve(LinearSVC(), "SVC", 2)
plt.show() | unknown | codeparrot/codeparrot-clean | ||
import unittest
import vivisect.symboliks.analysis as vsym_analysis
class MockVw(object):
    """Minimal stand-in for a vivisect workspace.

    Only provides ``psize`` (pointer size in bytes); presumably that is
    all SymbolikFunctionEmulator reads from it here — confirm if the
    emulator API grows.
    """
    def __init__(self, *args, **kwargs):
        # 4-byte pointers, i.e. a 32-bit workspace.
        self.psize = 4
class MockVar(object):
    """Fake symbolik variable whose solve() yields a fixed address."""

    def __init__(self, va):
        # Address this mock reports when solved.
        self._address = va

    def solve(self, *args, **kwargs):
        """Return the canned address, ignoring all solver arguments."""
        return self._address
def nop(*args, **kwargs):
    """Do nothing; used to stub out instance methods in tests."""
    return None
class AnalysisTests(unittest.TestCase):
    """Tests for SymbolikFunctionEmulator stack-offset resolution."""

    def setUp(self):
        self.sfe = vsym_analysis.SymbolikFunctionEmulator(MockVw())
        # Disable stack-counter setup; these tests only exercise offsets.
        self.sfe.setStackCounter = nop

    def test_getStackOffset_above(self, addr=0xbfbff000, size=16384):
        # An address just above the stack base is not on the stack.
        self.sfe.setStackBase(addr, size)
        offset = self.sfe.getStackOffset(MockVar(addr + 1))
        self.assertIsNone(offset)

    def test_getStackOffset_inside(self, addr=0xbfbff000, size=16384):
        # One byte below the base resolves to offset -1.
        self.sfe.setStackBase(addr, size)
        offset = self.sfe.getStackOffset(MockVar(addr - 1))
        # Compare by value: the original assertIs(int(offset), -1) tested
        # object identity, which only passed thanks to CPython's small-int
        # cache and is not a correctness guarantee.
        self.assertEqual(int(offset), -1)

    def test_getStackOffset_below(self, addr=0xbfbff000, size=16384):
        # An address a full stack-size below the base is out of range.
        self.sfe.setStackBase(addr, size)
        offset = self.sfe.getStackOffset(MockVar(addr - size))
        self.assertIsNone(offset)
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import re
import urlparse
import flask
from sqlalchemy.event import listen
from sqlalchemy.schema import FetchedValue
from sqlalchemy.dialects import postgresql as pg
from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.ext.mutable import MutableDict
from sqlalchemy.orm import relationship
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy.sql.expression import text
from warehouse import db
from warehouse.database.mixins import UUIDPrimaryKeyMixin, TimeStampedMixin
from warehouse.database.schema import TableDDL
from warehouse.database.types import Enum
from warehouse.database.utils import table_args
from warehouse.utils import get_storage
# Collapses each run of characters other than A-Z, a-z, 0-9 and "." into a
# single "-" when normalizing project names (see Project.get / Project.rename;
# the normalize_name() DDL trigger applies the same regexp server-side).
_normalize_regex = re.compile(r"[^A-Za-z0-9.]+")
# Association table for the many-to-many link between versions and trove
# classifiers; rows are removed automatically via ON DELETE CASCADE.
classifiers = db.Table("version_classifiers",  # pylint: disable=C0103
    db.Column("classifier_id",
        pg.UUID(as_uuid=True),
        db.ForeignKey("classifiers.id",
            onupdate="CASCADE",
            ondelete="CASCADE"
        ),
        primary_key=True,
    ),
    db.Column("version_id",
        pg.UUID(as_uuid=True),
        db.ForeignKey("versions.id",
            onupdate="CASCADE",
            ondelete="CASCADE"
        ),
        primary_key=True,
    ),
)
class Classifier(UUIDPrimaryKeyMixin, db.Model):
    """A single trove classifier string shared across versions."""

    __tablename__ = "classifiers"

    trove = db.Column(db.UnicodeText, unique=True, nullable=False)

    def __init__(self, trove):
        self.trove = trove

    def __repr__(self):
        return "<Classifier: %s>" % (self.trove,)

    @classmethod
    def get_or_create(cls, trove):
        """Return the row matching ``trove``, or a new unsaved instance."""
        query = cls.query.filter_by(trove=trove)
        try:
            instance = query.one()
        except NoResultFound:
            instance = cls(trove)
        return instance
class Project(UUIDPrimaryKeyMixin, TimeStampedMixin, db.Model):
    """A named project owning versions and links.

    Server-side triggers keep ``normalized`` derived from ``name`` and
    forbid flipping ``yanked`` back from TRUE to FALSE.
    """

    __tablename__ = "projects"
    __table_args__ = declared_attr(table_args((
        # Trigger: recompute the lower-cased, dash-normalized name on every
        # INSERT/UPDATE (same regexp as _normalize_regex above).
        TableDDL("""
            CREATE OR REPLACE FUNCTION normalize_name()
            RETURNS trigger AS $$
            BEGIN
                NEW.normalized = lower(
                    regexp_replace(new.name, '[^A-Za-z0-9.]+', '-', 'g'));
                return NEW;
            END;
            $$ LANGUAGE plpgsql;

            CREATE TRIGGER %(table)s_normalize_name
            BEFORE INSERT OR UPDATE
            ON %(table)s
            FOR EACH ROW
            EXECUTE PROCEDURE normalize_name();
        """),
        # Constraint trigger: un-yanking raises an exception (see the shared
        # cannot_unyank() function installed at metadata creation time).
        TableDDL("""
            CREATE CONSTRAINT TRIGGER cannot_unyank_projects
            AFTER UPDATE OF yanked ON projects
            FOR EACH ROW
            WHEN (OLD.yanked = TRUE AND NEW.yanked = FALSE)
            EXECUTE PROCEDURE cannot_unyank();
        """),
    )))

    yanked = db.Column(db.Boolean,
                nullable=False,
                server_default=text("FALSE")
            )

    name = db.Column(db.UnicodeText, unique=True, nullable=False)
    # Maintained by the normalize_name() trigger; never written by the ORM
    # except in rename(), which mirrors the trigger's computation.
    normalized = db.Column(db.UnicodeText,
                    unique=True,
                    nullable=False,
                    server_default=FetchedValue(),
                    server_onupdate=FetchedValue()
                )

    versions = relationship("Version",
                    cascade="all,delete,delete-orphan",
                    backref="project",
                )

    links = relationship("ProjectLink",
                cascade="all,delete,delete-orphan",
                backref="project",
            )

    def __init__(self, name):
        self.name = name

    def __repr__(self):
        return "<Project: {name}>".format(name=self.name)

    @classmethod
    def get(cls, name):
        # Look up by the normalized form so callers may pass any spelling.
        normalized = _normalize_regex.sub("-", name).lower()
        return cls.query.filter_by(normalized=normalized).one()

    @classmethod
    def yank(cls, name, synchronize=None):
        # Bulk UPDATE; DDL rules cascade the yank to versions and files.
        kwargs = {}
        if synchronize:
            kwargs["synchronize_session"] = synchronize
        cls.query.filter_by(name=name).update({"yanked": True}, **kwargs)

    def rename(self, name):
        # Keep the Python-side normalized value in sync with the trigger.
        self.name = name
        self.normalized = _normalize_regex.sub("-", name).lower()
        return self
class Version(UUIDPrimaryKeyMixin, TimeStampedMixin, db.Model):
    """A single released version of a Project, with metadata and files.

    Database-level rules cascade yanking down from the project, forbid
    un-yanking, and pull the owning project's ``created`` timestamp back
    when an older version row appears.
    """

    __tablename__ = "versions"
    __table_args__ = declared_attr(table_args((
        db.Index("idx_project_version", "project_id", "version", unique=True),
        # Yanking a project yanks all of its versions.
        TableDDL("""
            CREATE OR REPLACE RULE yank_versions_from_projects
            AS ON UPDATE TO projects
            WHERE NEW.yanked = TRUE
            DO ALSO
                UPDATE versions SET yanked = TRUE
                WHERE project_id = NEW.id;
        """),
        # A yanked version may never be un-yanked.
        TableDDL("""
            CREATE CONSTRAINT TRIGGER cannot_unyank_versions
            AFTER UPDATE OF yanked ON versions
            FOR EACH ROW
            WHEN (OLD.yanked = TRUE AND NEW.yanked = FALSE)
            EXECUTE PROCEDURE cannot_unyank();
        """),
        # Keep projects.created at the minimum of its versions' created.
        TableDDL("""
            CREATE OR REPLACE FUNCTION update_projects_created_from_versions()
            RETURNS trigger as $$
            BEGIN
                UPDATE projects
                SET created = NEW.created
                WHERE id = NEW.project_id AND created > NEW.created;
                return NULL;
            END;
            $$ LANGUAGE plpgsql;

            CREATE TRIGGER %(table)s_insert_projects_created
            AFTER INSERT
            ON %(table)s
            FOR EACH ROW
            EXECUTE PROCEDURE update_projects_created_from_versions();

            CREATE TRIGGER %(table)s_update_projects_created
            AFTER UPDATE OF created
            ON %(table)s
            FOR EACH ROW
            WHEN (NEW.created < OLD.created)
            EXECUTE PROCEDURE update_projects_created_from_versions();
        """),
    )))

    yanked = db.Column(db.Boolean,
                nullable=False,
                server_default=text("FALSE")
            )

    project_id = db.Column(pg.UUID(as_uuid=True),
                    db.ForeignKey("projects.id", ondelete="CASCADE"),
                    nullable=False
                )
    version = db.Column(db.UnicodeText, nullable=False)

    summary = db.Column(db.UnicodeText, nullable=False, server_default="")
    description = db.Column(db.UnicodeText, nullable=False, server_default="")

    keywords = db.Column(pg.ARRAY(db.UnicodeText, dimensions=1),
                    nullable=False,
                    server_default="{}"
                )

    author = db.Column(db.UnicodeText, nullable=False, server_default="")
    author_email = db.Column(db.UnicodeText, nullable=False, server_default="")

    maintainer = db.Column(db.UnicodeText, nullable=False, server_default="")
    maintainer_email = db.Column(db.UnicodeText,
                            nullable=False,
                            server_default=""
                        )

    license = db.Column(db.UnicodeText, nullable=False, server_default="")

    # URIs
    uris = db.Column(MutableDict.as_mutable(pg.HSTORE),
                nullable=False,
                server_default=text("''::hstore")
            )
    download_uri = db.Column(db.UnicodeText, nullable=False, server_default="")

    # Requirements
    requires_python = db.Column(db.UnicodeText,
                        nullable=False,
                        server_default=""
                    )
    requires_external = db.Column(pg.ARRAY(db.UnicodeText, dimensions=1),
                            nullable=False,
                            server_default="{}"
                        )

    requirements = relationship("Requirement",
                        cascade="all,delete,delete-orphan",
                        backref="version",
                        lazy="joined",
                    )
    provides = relationship("Provide",
                    cascade="all,delete,delete-orphan",
                    backref="version",
                    lazy="joined",
                )
    obsoletes = relationship("Obsolete",
                    cascade="all,delete,delete-orphan",
                    backref="version",
                    lazy="joined",
                )

    # Plain-text dependency arrays; the "_old" suffix suggests a legacy
    # metadata format kept alongside the structured rows above — confirm.
    requires_old = db.Column(pg.ARRAY(db.UnicodeText, dimensions=1),
                        nullable=False,
                        server_default="{}",
                    )
    provides_old = db.Column(pg.ARRAY(db.UnicodeText, dimensions=1),
                        nullable=False,
                        server_default="{}",
                    )
    obsoletes_old = db.Column(pg.ARRAY(db.UnicodeText, dimensions=1),
                        nullable=False,
                        server_default="{}",
                    )

    # Classifiers
    _classifiers = relationship("Classifier",
                        secondary=classifiers,
                        backref=db.backref("versions", lazy='dynamic')
                    )
    # Expose classifiers as plain trove strings; assignment goes through
    # Classifier.get_or_create so existing rows are reused.
    classifiers = association_proxy("_classifiers", "trove",
                        creator=Classifier.get_or_create
                    )

    files = relationship("File",
                cascade="all,delete,delete-orphan",
                backref="version",
            )

    def __repr__(self):
        ctx = {"name": self.project.name, "version": self.version}
        return "<Version: {name} {version}>".format(**ctx)
class Requirement(UUIDPrimaryKeyMixin, db.Model):
    """A single dependency entry attached to a Version."""

    __tablename__ = "requires"

    version_id = db.Column(pg.UUID(as_uuid=True),
                    db.ForeignKey("versions.id", ondelete="CASCADE"),
                    nullable=False
                )
    name = db.Column(db.UnicodeText, nullable=False)
    # Version specifiers for this requirement, stored as a text array.
    versions = db.Column(pg.ARRAY(db.UnicodeText, dimensions=1),
                    nullable=False,
                    server_default="{}"
                )
    environment = db.Column(db.UnicodeText, nullable=False, server_default="")
    approximate = db.Column(db.Boolean,
                    nullable=False,
                    server_default=text("FALSE"),
                )
class Provide(UUIDPrimaryKeyMixin, db.Model):
    """A single "provides" entry attached to a Version."""

    __tablename__ = "provides"

    version_id = db.Column(pg.UUID(as_uuid=True),
                    db.ForeignKey("versions.id", ondelete="CASCADE"),
                    nullable=False
                )
    name = db.Column(db.UnicodeText, nullable=False)
    versions = db.Column(pg.ARRAY(db.UnicodeText, dimensions=1),
                    nullable=False,
                    server_default="{}"
                )
    environment = db.Column(db.UnicodeText, nullable=False, server_default="")
class Obsolete(UUIDPrimaryKeyMixin, db.Model):
    """A single "obsoletes" entry attached to a Version."""

    __tablename__ = "obsoletes"

    version_id = db.Column(pg.UUID(as_uuid=True),
                    db.ForeignKey("versions.id", ondelete="CASCADE"),
                    nullable=False
                )
    name = db.Column(db.UnicodeText, nullable=False)
    versions = db.Column(pg.ARRAY(db.UnicodeText, dimensions=1),
                    nullable=False,
                    server_default="{}"
                )
    environment = db.Column(db.UnicodeText, nullable=False, server_default="")
class FileType(Enum):
    """Distribution file types as (metadata value, human-readable label)."""

    source = "sdist", "Source"
    egg = "bdist_egg", "Egg"
    msi = "bdist_msi", "MSI"
    dmg = "bdist_dmg", "DMG"
    # Stray trailing comma removed: it was harmless (a 2-tuple either way)
    # but inconsistent with every other member and an easy bug magnet.
    rpm = "bdist_rpm", "RPM"
    dumb = "bdist_dumb", "Dumb Binary Distribution"
    windows_installer = "bdist_wininst", "Windows Installer"
    wheel = "bdist_wheel", "Wheel"
class File(UUIDPrimaryKeyMixin, TimeStampedMixin, db.Model):
    """An uploaded distribution file belonging to a Version.

    DDL rules cascade yanking from versions, forbid un-yanking, and pull a
    version's ``created`` back when an older file row appears.
    """

    __tablename__ = "files"
    __table_args__ = declared_attr(table_args((
        # Yanking a version yanks all of its files.
        TableDDL("""
            CREATE OR REPLACE RULE yank_files_from_versions
            AS ON UPDATE TO versions
            WHERE NEW.yanked = TRUE
            DO ALSO
                UPDATE files SET yanked = TRUE WHERE version_id = NEW.id;
        """),
        # A yanked file may never be un-yanked.
        TableDDL("""
            CREATE CONSTRAINT TRIGGER cannot_unyank_files
            AFTER UPDATE OF yanked ON files
            FOR EACH ROW
            WHEN (OLD.yanked = TRUE AND NEW.yanked = FALSE)
            EXECUTE PROCEDURE cannot_unyank();
        """),
        # Keep versions.created at the minimum of its files' created.
        TableDDL("""
            CREATE OR REPLACE FUNCTION update_versions_created_from_files()
            RETURNS trigger as $$
            BEGIN
                UPDATE versions
                SET created = NEW.created
                WHERE id = NEW.version_id AND created > NEW.created;
                return NULL;
            END;
            $$ LANGUAGE plpgsql;

            CREATE TRIGGER %(table)s_insert_version_created
            AFTER INSERT
            ON %(table)s
            FOR EACH ROW
            EXECUTE PROCEDURE update_versions_created_from_files();

            CREATE TRIGGER %(table)s_update_version_created
            AFTER UPDATE OF created
            ON %(table)s
            FOR EACH ROW
            WHEN (NEW.created < OLD.created)
            EXECUTE PROCEDURE update_versions_created_from_files();
        """),
    )))

    yanked = db.Column(db.Boolean,
                nullable=False,
                server_default=text("FALSE")
            )

    version_id = db.Column(pg.UUID(as_uuid=True),
                    db.ForeignKey("versions.id", ondelete="CASCADE"),
                    nullable=False
                )

    # Storage key for the file's contents; resolved to a URL via get_storage().
    file = db.Column(db.UnicodeText, nullable=False, unique=True)
    filename = db.Column(db.UnicodeText, nullable=False, unique=True)
    filesize = db.Column(db.Integer, nullable=False)

    type = db.Column(FileType.db_type(), nullable=False)
    python_version = db.Column(db.UnicodeText,
                        nullable=False,
                        server_default=""
                    )

    comment = db.Column(db.UnicodeText, nullable=False, server_default="")

    # Mapping of hash-algorithm name -> hex digest for this file.
    hashes = db.Column(MutableDict.as_mutable(pg.HSTORE),
                nullable=False,
                server_default=text("''::hstore")
            )

    @property
    def uri(self):
        # URL of this file as served by the configured storage backend.
        storage = get_storage()
        return storage.url(self.file)

    @property
    def hashed_uri(self):
        # uri plus a "#<algorithm>=<digest>" fragment when the configured
        # FILE_URI_HASH algorithm has a recorded digest for this file;
        # otherwise the plain uri.
        algorithm = flask.current_app.config.get("FILE_URI_HASH")
        digest = self.hashes.get(algorithm)

        if algorithm is not None and digest is not None:
            parsed = urlparse.urlparse(self.uri)
            fragment = "=".join([algorithm, digest])
            # Replace only the fragment component (index 5) of the URL tuple.
            return urlparse.urlunparse(parsed[:5] + (fragment,))
        else:
            return self.uri
# Install the shared cannot_unyank() trigger function before any tables are
# created; the per-table constraint triggers above all EXECUTE it.
listen(db.metadata, "before_create",
    db.DDL("""
        CREATE OR REPLACE FUNCTION cannot_unyank()
        RETURNS trigger AS $$
        BEGIN
            -- Check if unyanking is being attempted
            IF OLD.yanked = TRUE AND NEW.yanked = FALSE THEN
                RAISE EXCEPTION '%% cannot be unyanked.', TG_TABLE_NAME;
            END IF;

            RETURN NULL;
        END;
        $$ LANGUAGE plpgsql;
    """)
)
//===--- OutputFileMap.h - Map of inputs to multiple outputs ----*- C++ -*-===//
//
// This source file is part of the Swift.org open source project
//
// Copyright (c) 2014 - 2018 Apple Inc. and the Swift project authors
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See https://swift.org/LICENSE.txt for license information
// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
//
//===----------------------------------------------------------------------===//
#ifndef SWIFT_BASIC_OUTPUTFILEMAP_H
#define SWIFT_BASIC_OUTPUTFILEMAP_H
#include "swift/Basic/FileTypes.h"
#include "swift/Basic/LLVM.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/YAMLParser.h"
#include <memory>
#include <string>
namespace swift {
using TypeToPathMap = llvm::DenseMap<file_types::ID, std::string>;
/// A two-tiered map used to specify paths for multiple output files associated
/// with each input file in a compilation job.
///
/// The structure is a map from input paths to sub-maps, each of which maps
/// file types to output paths.
class OutputFileMap {
private:
llvm::StringMap<TypeToPathMap> InputToOutputsMap;
public:
OutputFileMap() {}
~OutputFileMap() = default;
/// Loads an OutputFileMap from the given \p Path into the receiver, if
/// possible.
static llvm::Expected<OutputFileMap>
loadFromPath(StringRef Path, StringRef workingDirector);
static llvm::Expected<OutputFileMap>
loadFromBuffer(StringRef Data, StringRef workingDirectory);
/// Loads an OutputFileMap from the given \p Buffer, taking ownership
/// of the buffer in the process.
///
/// When non-empty, \p workingDirectory is used to resolve relative paths in
/// the output file map.
static llvm::Expected<OutputFileMap>
loadFromBuffer(std::unique_ptr<llvm::MemoryBuffer> Buffer,
StringRef workingDirectory);
/// Get the map of outputs for the given \p Input, if present in the
/// OutputFileMap. (If not present, returns nullptr.)
const TypeToPathMap *getOutputMapForInput(StringRef Input) const;
/// Get a map of outputs for the given \p Input, creating it in
/// the OutputFileMap if not already present.
TypeToPathMap &getOrCreateOutputMapForInput(StringRef Input);
/// Get the map of outputs for a single compile product.
const TypeToPathMap *getOutputMapForSingleOutput() const;
/// Get or create the map of outputs for a single compile product.
TypeToPathMap &getOrCreateOutputMapForSingleOutput();
/// Dump the OutputFileMap to the given \p os.
void dump(llvm::raw_ostream &os, bool Sort = false) const;
/// Write the OutputFileMap for the \p inputs so it can be parsed.
///
/// It is not an error if the map does not contain an entry for a particular
/// input. Instead, an empty sub-map will be written into the output.
void write(llvm::raw_ostream &os, ArrayRef<StringRef> inputs) const;
private:
/// Parses the given \p Buffer and returns either an OutputFileMap or
/// error, taking ownership of \p Buffer in the process.
static llvm::Expected<OutputFileMap>
parse(std::unique_ptr<llvm::MemoryBuffer> Buffer, StringRef workingDirectory);
};
} // end namespace swift
#endif | c | github | https://github.com/apple/swift | include/swift/Basic/OutputFileMap.h |
# -*- coding: utf-8 -*-
"""
========================================
A Minimal CSV writer for data collection
========================================
Problem
-------
Write (a subset of) the data to a CSV file during data collection.
Approach
--------
Write a callback function that integrates Python's built-in csv module with
bluesky.
Example Solution
----------------
"""
###############################################################################
# Boiler plate imports and configuration
import path
import os
import bluesky as bs
import bluesky.plans as bp
import bluesky.callbacks as bc
import csv
from ophyd.sim import motor, det
import matplotlib.pyplot as plt
# Do this if running the example interactively;
# skip it when building the documentation.
import os
if 'BUILDING_DOCS' not in os.environ:
from bluesky.utils import install_qt_kicker # for notebooks, qt -> nb
install_qt_kicker()
plt.ion()
det.exposure_time = .1 # simulate detector exposure time
RE = bs.RunEngine({})
###############################################################################
# Define a callback class which writes out a CSV file
class CSVWriter(bc.CallbackBase):
def __init__(self, fields, fname_format, fpath):
self._path = path.Path(fpath)
os.makedirs(self._path, exist_ok=True)
self._fname_fomat = fname_format
self._fields = fields
self._writer = None
self._fout = None
def close(self):
if self._fout is not None:
self._fout.close()
self._fout = None
self._writer = None
def start(self, doc):
self.close()
fname = self._path / self._fname_fomat.format(**doc)
self._fout = open(fname, 'xt')
self._writer = csv.writer(self._fout)
def descriptor(self, doc):
if self._writer is not None:
self._writer.writerow(self._fields)
def event(self, doc):
data = doc['data']
if self._writer is not None:
self._writer.writerow(data[k] for k in self._fields)
def stop(self, doc):
self.close()
###############################################################################
# Set up some callbacks
def create_cbs():
return [bc.LiveTable([motor, det]), bc.LivePlot('det', 'motor')]
fmt = '{user}_{uid:.6s}.csv'
export_path = '/tmp/export_demo'
csv_writer = CSVWriter(('motor', 'det'), fmt, export_path)
# send all documents to the CSV writer
RE.subscribe('all', csv_writer)
###############################################################################
# run the scan
uid, = RE(bp.scan([det], motor, -5, 5, 11),
create_cbs(), user='tcaswell')
###############################################################################
# check file
fname = os.path.join(export_path,
'{user}_{uid:.6s}.csv'.format(user='tcaswell', uid=uid))
print("--- {} ---".format(fname))
with open(fname, 'r') as fin:
for ln in fin:
print(ln.strip()) | unknown | codeparrot/codeparrot-clean | ||
import os
from enigma import eEPGCache, getBestPlayableServiceReference, \
eServiceReference, iRecordableService, quitMainloop, eActionMap, setPreferredTuner
from Components.config import config
from Components.UsageConfig import defaultMoviePath
from Components.TimerSanityCheck import TimerSanityCheck
from Screens.MessageBox import MessageBox
import Screens.Standby
import Screens.InfoBar
from Tools import Directories, Notifications, ASCIItranslit, Trashcan
from Tools.XMLTools import stringToXML
import timer
import xml.etree.cElementTree
import NavigationInstance
from ServiceReference import ServiceReference
from time import localtime, strftime, ctime, time
from bisect import insort
from sys import maxint
# ok, for descriptions etc we have:
# service reference (to get the service name)
# name (title)
# description (description)
# event data (ONLY for time adjustments etc.)
# parses an event, and gives out a (begin, end, name, duration, eit)-tuple.
# begin and end will be corrected
def parseEvent(ev, description = True):
if description:
name = ev.getEventName()
description = ev.getShortDescription()
if description == "":
description = ev.getExtendedDescription()
else:
name = ""
description = ""
begin = ev.getBeginTime()
end = begin + ev.getDuration()
eit = ev.getEventId()
begin -= config.recording.margin_before.value * 60
end += config.recording.margin_after.value * 60
return (begin, end, name, description, eit)
class AFTEREVENT:
NONE = 0
STANDBY = 1
DEEPSTANDBY = 2
AUTO = 3
def findSafeRecordPath(dirname):
if not dirname:
return None
from Components import Harddisk
dirname = os.path.realpath(dirname)
mountpoint = Harddisk.findMountPoint(dirname)
if mountpoint in ('/', '/media'):
print '[RecordTimer] media is not mounted:', dirname
return None
if not os.path.isdir(dirname):
try:
os.makedirs(dirname)
except Exception, ex:
print '[RecordTimer] Failed to create dir "%s":' % dirname, ex
return None
return dirname
def checkForRecordings():
if NavigationInstance.instance.getRecordings():
return True
rec_time = NavigationInstance.instance.RecordTimer.getNextTimerTime(isWakeup=True)
return rec_time > 0 and (rec_time - time()) < 360
# please do not translate log messages
class RecordTimerEntry(timer.TimerEntry, object):
######### the following static methods and members are only in use when the box is in (soft) standby
wasInStandby = False
wasInDeepStandby = False
receiveRecordEvents = False
@staticmethod
def keypress(key=None, flag=1):
if flag and (RecordTimerEntry.wasInStandby or RecordTimerEntry.wasInDeepStandby):
RecordTimerEntry.wasInStandby = False
RecordTimerEntry.wasInDeepStandby = False
eActionMap.getInstance().unbindAction('', RecordTimerEntry.keypress)
@staticmethod
def setWasInDeepStandby():
RecordTimerEntry.wasInDeepStandby = True
eActionMap.getInstance().bindAction('', -maxint - 1, RecordTimerEntry.keypress)
@staticmethod
def setWasInStandby():
if not RecordTimerEntry.wasInStandby:
if not RecordTimerEntry.wasInDeepStandby:
eActionMap.getInstance().bindAction('', -maxint - 1, RecordTimerEntry.keypress)
RecordTimerEntry.wasInDeepStandby = False
RecordTimerEntry.wasInStandby = True
@staticmethod
def shutdown():
quitMainloop(1)
@staticmethod
def staticGotRecordEvent(recservice, event):
if event == iRecordableService.evEnd:
print "RecordTimer.staticGotRecordEvent(iRecordableService.evEnd)"
if not checkForRecordings():
print "No recordings busy of sceduled within 6 minutes so shutdown"
RecordTimerEntry.shutdown() # immediate shutdown
elif event == iRecordableService.evStart:
print "RecordTimer.staticGotRecordEvent(iRecordableService.evStart)"
@staticmethod
def stopTryQuitMainloop():
print "RecordTimer.stopTryQuitMainloop"
NavigationInstance.instance.record_event.remove(RecordTimerEntry.staticGotRecordEvent)
RecordTimerEntry.receiveRecordEvents = False
@staticmethod
def TryQuitMainloop():
if not RecordTimerEntry.receiveRecordEvents and Screens.Standby.inStandby:
print "RecordTimer.TryQuitMainloop"
NavigationInstance.instance.record_event.append(RecordTimerEntry.staticGotRecordEvent)
RecordTimerEntry.receiveRecordEvents = True
# send fake event.. to check if another recordings are running or
# other timers start in a few seconds
RecordTimerEntry.staticGotRecordEvent(None, iRecordableService.evEnd)
#################################################################
def __init__(self, serviceref, begin, end, name, description, eit, disabled = False, justplay = False, afterEvent = AFTEREVENT.AUTO, checkOldTimers = False, dirname = None, tags = None, descramble = True, record_ecm = False, always_zap = False, zap_wakeup = "always", rename_repeat = True):
timer.TimerEntry.__init__(self, int(begin), int(end))
if checkOldTimers == True:
if self.begin < time() - 1209600:
self.begin = int(time())
if self.end < self.begin:
self.end = self.begin
assert isinstance(serviceref, ServiceReference)
if serviceref and serviceref.isRecordable():
self.service_ref = serviceref
else:
self.service_ref = ServiceReference(None)
self.eit = eit
self.dontSave = False
self.name = name
self.description = description
self.disabled = disabled
self.timer = None
self.__record_service = None
self.start_prepare = 0
self.justplay = justplay
self.always_zap = always_zap
self.zap_wakeup = zap_wakeup
self.afterEvent = afterEvent
self.dirname = dirname
self.dirnameHadToFallback = False
self.autoincrease = False
self.autoincreasetime = 3600 * 24 # 1 day
self.tags = tags or []
self.descramble = descramble
self.record_ecm = record_ecm
self.rename_repeat = rename_repeat
self.needChangePriorityFrontend = config.usage.recording_frontend_priority.value != "-2" and config.usage.recording_frontend_priority.value != config.usage.frontend_priority.value
self.change_frontend = False
self.log_entries = []
self.resetState()
def __repr__(self):
return "RecordTimerEntry(name=%s, begin=%s, serviceref=%s, justplay=%s)" % (self.name, ctime(self.begin), self.service_ref, self.justplay)
def log(self, code, msg):
self.log_entries.append((int(time()), code, msg))
print "[TIMER]", msg
def calculateFilename(self, name=None):
service_name = self.service_ref.getServiceName()
begin_date = strftime("%Y%m%d %H%M", localtime(self.begin))
name = name or self.name
filename = begin_date + " - " + service_name
if name:
if config.recording.filename_composition.value == "short":
filename = strftime("%Y%m%d", localtime(self.begin)) + " - " + name
elif config.recording.filename_composition.value == "long":
filename += " - " + name + " - " + self.description
else:
filename += " - " + name # standard
if config.recording.ascii_filenames.value:
filename = ASCIItranslit.legacyEncode(filename)
if not self.dirname:
dirname = findSafeRecordPath(defaultMoviePath())
else:
dirname = findSafeRecordPath(self.dirname)
if dirname is None:
dirname = findSafeRecordPath(defaultMoviePath())
self.dirnameHadToFallback = True
if not dirname:
return None
self.Filename = Directories.getRecordingFilename(filename, dirname)
self.log(0, "Filename calculated as: '%s'" % self.Filename)
return self.Filename
def tryPrepare(self):
if self.justplay:
return True
else:
if not self.calculateFilename():
self.do_backoff()
self.start_prepare = time() + self.backoff
return False
rec_ref = self.service_ref and self.service_ref.ref
if rec_ref and rec_ref.flags & eServiceReference.isGroup:
rec_ref = getBestPlayableServiceReference(rec_ref, eServiceReference())
if not rec_ref:
self.log(1, "'get best playable service for group... record' failed")
return False
self.setRecordingPreferredTuner()
self.record_service = rec_ref and NavigationInstance.instance.recordService(rec_ref)
if not self.record_service:
self.log(1, "'record service' failed")
self.setRecordingPreferredTuner(setdefault=True)
return False
name = self.name
description = self.description
if self.repeated:
epgcache = eEPGCache.getInstance()
queryTime=self.begin+(self.end-self.begin)/2
evt = epgcache.lookupEventTime(rec_ref, queryTime)
if evt:
if self.rename_repeat:
event_description = evt.getShortDescription()
if not event_description:
event_description = evt.getExtendedDescription()
if event_description and event_description != description:
description = event_description
event_name = evt.getEventName()
if event_name and event_name != name:
name = event_name
if not self.calculateFilename(event_name):
self.do_backoff()
self.start_prepare = time() + self.backoff
return False
event_id = evt.getEventId()
else:
event_id = -1
else:
event_id = self.eit
if event_id is None:
event_id = -1
prep_res=self.record_service.prepare(self.Filename + ".ts", self.begin, self.end, event_id, name.replace("\n", ""), description.replace("\n", ""), ' '.join(self.tags), bool(self.descramble), bool(self.record_ecm))
if prep_res:
if prep_res == -255:
self.log(4, "failed to write meta information")
else:
self.log(2, "'prepare' failed: error %d" % prep_res)
# we must calc nur start time before stopRecordService call because in Screens/Standby.py TryQuitMainloop tries to get
# the next start time in evEnd event handler...
self.do_backoff()
self.start_prepare = time() + self.backoff
NavigationInstance.instance.stopRecordService(self.record_service)
self.record_service = None
self.setRecordingPreferredTuner(setdefault=True)
return False
return True
def do_backoff(self):
if self.backoff == 0:
self.backoff = 5
else:
self.backoff *= 2
if self.backoff > 100:
self.backoff = 100
self.log(10, "backoff: retry in %d seconds" % self.backoff)
def activate(self):
next_state = self.state + 1
self.log(5, "activating state %d" % next_state)
if next_state == 1:
if self.always_zap:
if Screens.Standby.inStandby:
self.log(5, "wakeup and zap to recording service")
RecordTimerEntry.setWasInStandby()
#set service to zap after standby
Screens.Standby.inStandby.prev_running_service = self.service_ref.ref
Screens.Standby.inStandby.paused_service = None
#wakeup standby
Screens.Standby.inStandby.Power()
else:
if RecordTimerEntry.wasInDeepStandby:
RecordTimerEntry.setWasInStandby()
cur_zap_ref = NavigationInstance.instance.getCurrentlyPlayingServiceReference()
if cur_zap_ref and not cur_zap_ref.getPath():# we do not zap away if it is no live service
Notifications.AddNotification(MessageBox, _("In order to record a timer, the TV was switched to the recording service!\n"), type=MessageBox.TYPE_INFO, timeout=20)
self.setRecordingPreferredTuner()
self.failureCB(True)
self.log(5, "zap to recording service")
if next_state == self.StatePrepared:
if self.tryPrepare():
self.log(6, "prepare ok, waiting for begin")
# create file to "reserve" the filename
# because another recording at the same time on another service can try to record the same event
# i.e. cable / sat.. then the second recording needs an own extension... when we create the file
# here than calculateFilename is happy
if not self.justplay:
open(self.Filename + ".ts", "w").close()
# Give the Trashcan a chance to clean up
try:
Trashcan.instance.cleanIfIdle(self.Filename)
except Exception, e:
print "[TIMER] Failed to call Trashcan.instance.cleanIfIdle()"
print "[TIMER] Error:", e
# fine. it worked, resources are allocated.
self.next_activation = self.begin
self.backoff = 0
return True
self.log(7, "prepare failed")
if self.first_try_prepare:
self.first_try_prepare = False
cur_ref = NavigationInstance.instance.getCurrentlyPlayingServiceReference()
if cur_ref and not cur_ref.getPath():
if Screens.Standby.inStandby:
self.setRecordingPreferredTuner()
self.failureCB(True)
elif not config.recording.asktozap.value:
self.log(8, "asking user to zap away")
Notifications.AddNotificationWithCallback(self.failureCB, MessageBox, _("A timer failed to record!\nDisable TV and try again?\n"), timeout=20, default=True)
else: # zap without asking
self.log(9, "zap without asking")
Notifications.AddNotification(MessageBox, _("In order to record a timer, the TV was switched to the recording service!\n"), type=MessageBox.TYPE_INFO, timeout=20)
self.setRecordingPreferredTuner()
self.failureCB(True)
elif cur_ref:
self.log(8, "currently running service is not a live service.. so stop it makes no sense")
else:
self.log(8, "currently no service running... so we dont need to stop it")
return False
elif next_state == self.StateRunning:
# if this timer has been cancelled, just go to "end" state.
if self.cancelled:
return True
if self.justplay:
if Screens.Standby.inStandby:
if RecordTimerEntry.wasInDeepStandby and self.zap_wakeup in ("always", "from_deep_standby") or self.zap_wakeup in ("always", "from_standby"):
self.log(11, "wakeup and zap")
RecordTimerEntry.setWasInStandby()
#set service to zap after standby
Screens.Standby.inStandby.prev_running_service = self.service_ref.ref
Screens.Standby.inStandby.paused_service = None
#wakeup standby
Screens.Standby.inStandby.Power()
else:
if RecordTimerEntry.wasInDeepStandby:
RecordTimerEntry.setWasInStandby()
self.log(11, "zapping")
NavigationInstance.instance.playService(self.service_ref.ref)
return True
else:
self.log(11, "start recording")
if RecordTimerEntry.wasInDeepStandby:
RecordTimerEntry.keypress()
if Screens.Standby.inStandby: #In case some plugin did put the receiver already in standby
config.misc.standbyCounter.value = 0
else:
Notifications.AddNotification(Screens.Standby.Standby, StandbyCounterIncrease=False)
record_res = self.record_service.start()
self.setRecordingPreferredTuner(setdefault=True)
if record_res:
self.log(13, "start record returned %d" % record_res)
self.do_backoff()
# retry
self.begin = time() + self.backoff
return False
# Tell the trashcan we started recording. The trashcan gets events,
# but cannot tell what the associated path is.
Trashcan.instance.markDirty(self.Filename)
return True
elif next_state == self.StateEnded:
old_end = self.end
if self.setAutoincreaseEnd():
self.log(12, "autoincrase recording %d minute(s)" % int((self.end - old_end)/60))
self.state -= 1
return True
self.log(12, "stop recording")
if not self.justplay:
NavigationInstance.instance.stopRecordService(self.record_service)
self.record_service = None
if not checkForRecordings():
if self.afterEvent == AFTEREVENT.DEEPSTANDBY or self.afterEvent == AFTEREVENT.AUTO and (Screens.Standby.inStandby or RecordTimerEntry.wasInStandby) and not config.misc.standbyCounter.value:
if not Screens.Standby.inTryQuitMainloop:
if Screens.Standby.inStandby:
RecordTimerEntry.TryQuitMainloop()
else:
Notifications.AddNotificationWithCallback(self.sendTryQuitMainloopNotification, MessageBox, _("A finished record timer wants to shut down\nyour receiver. Shutdown now?"), timeout=20, default=True)
elif self.afterEvent == AFTEREVENT.STANDBY or self.afterEvent == AFTEREVENT.AUTO and RecordTimerEntry.wasInStandby:
if not Screens.Standby.inStandby:
Notifications.AddNotificationWithCallback(self.sendStandbyNotification, MessageBox, _("A finished record timer wants to set your\nreceiver to standby. Do that now?"), timeout=20, default=True)
else:
RecordTimerEntry.keypress()
return True
	def setAutoincreaseEnd(self, entry = None):
		# Try to extend this autoincrease-enabled timer's end time: either up
		# to self.autoincreasetime seconds from now (entry is None), or up to
		# 30 seconds before the given following timer entry starts.
		# A dummy entry is run through TimerSanityCheck so the extension never
		# creates a conflict; on conflict the end is clamped to 30s before the
		# first conflicting timer. Returns True when self.end was moved.
		if not self.autoincrease:
			return False
		if entry is None:
			new_end = int(time()) + self.autoincreasetime
		else:
			new_end = entry.begin - 30
		dummyentry = RecordTimerEntry(self.service_ref, self.begin, new_end, self.name, self.description, self.eit, disabled=True, justplay = self.justplay, afterEvent = self.afterEvent, dirname = self.dirname, tags = self.tags)
		dummyentry.disabled = self.disabled
		timersanitycheck = TimerSanityCheck(NavigationInstance.instance.RecordTimer.timer_list, dummyentry)
		if not timersanitycheck.check():
			simulTimerList = timersanitycheck.getSimulTimerList()
			if simulTimerList is not None and len(simulTimerList) > 1:
				new_end = simulTimerList[1].begin
				new_end -= 30 # leave 30 seconds of prepare time
		if new_end <= time():
			return False
		self.end = new_end
		return True
def setRecordingPreferredTuner(self, setdefault=False):
if self.needChangePriorityFrontend:
elem = None
if not self.change_frontend and not setdefault:
elem = config.usage.recording_frontend_priority.value
self.change_frontend = True
elif self.change_frontend and setdefault:
elem = config.usage.frontend_priority.value
self.change_frontend = False
if elem is not None:
setPreferredTuner(int(elem))
def sendStandbyNotification(self, answer):
RecordTimerEntry.keypress()
if answer:
Notifications.AddNotification(Screens.Standby.Standby)
def sendTryQuitMainloopNotification(self, answer):
RecordTimerEntry.keypress()
if answer:
Notifications.AddNotification(Screens.Standby.TryQuitMainloop, 1)
def getNextActivation(self):
if self.state == self.StateEnded:
return self.end
next_state = self.state + 1
return {self.StatePrepared: self.start_prepare,
self.StateRunning: self.begin,
self.StateEnded: self.end }[next_state]
def failureCB(self, answer):
if answer == True:
self.log(13, "ok, zapped away")
#NavigationInstance.instance.stopUserServices()
NavigationInstance.instance.playService(self.service_ref.ref)
else:
self.log(14, "user didn't want to zap away, record will probably fail")
def timeChanged(self):
old_prepare = self.start_prepare
self.start_prepare = self.begin - self.prepare_time
self.backoff = 0
if int(old_prepare) != int(self.start_prepare):
self.log(15, "record time changed, start prepare is now: %s" % ctime(self.start_prepare))
	def gotRecordEvent(self, record, event):
		# Handle iRecordableService events for our own record service:
		# write errors pop up a (deduplicated) error message, a started
		# record may pop up an info message, an aborted record removes
		# this entry from the timer list.
		# TODO: this is not working (never true), please fix. (comparing two swig wrapped ePtrs)
		if self.__record_service.__deref__() != record.__deref__():
			return
		self.log(16, "record event %d" % event)
		if event == iRecordableService.evRecordWriteError:
			print "WRITE ERROR on recording, disk full?"
			# show notification. the 'id' will make sure that it will be
			# displayed only once, even if more timers are failing at the
			# same time. (which is very likely in case of disk fullness)
			Notifications.AddPopup(text = _("Write error while recording. Disk full?\n"), type = MessageBox.TYPE_ERROR, timeout = 0, id = "DiskFullMessage")
			# ok, the recording has been stopped. we need to properly note
			# that in our state, with also keeping the possibility to re-try.
			# TODO: this has to be done.
		elif event == iRecordableService.evStart:
			text = _("A record has been started:\n%s") % self.name
			# only notify when not in standby and the infobar is actually up
			notify = config.usage.show_message_when_recording_starts.value and \
				not Screens.Standby.inStandby and \
				Screens.InfoBar.InfoBar.instance and \
				Screens.InfoBar.InfoBar.instance.execing
			if self.dirnameHadToFallback:
				text = '\n'.join((text, _("Please note that the previously selected media could not be accessed and therefore the default directory is being used instead.")))
				notify = True
			if notify:
				Notifications.AddPopup(text = text, type = MessageBox.TYPE_INFO, timeout = 3)
		elif event == iRecordableService.evRecordAborted:
			NavigationInstance.instance.RecordTimer.removeEntry(self)
	# we have record_service as property to automatically subscribe to record service events
	def setRecordService(self, service):
		# Swap the underlying record service: unsubscribe gotRecordEvent from
		# the old service's event stream, then subscribe it for the new one.
		if self.__record_service is not None:
			print "[remove callback]"
			NavigationInstance.instance.record_event.remove(self.gotRecordEvent)
		self.__record_service = service
		if self.__record_service is not None:
			print "[add callback]"
			NavigationInstance.instance.record_event.append(self.gotRecordEvent)
	record_service = property(lambda self: self.__record_service, setRecordService)
def createTimer(xml):
	# Build a RecordTimerEntry from one <timer> element of timers.xml.
	# Missing optional attributes fall back to their defaults; "None" string
	# values are treated like absent attributes.
	begin = int(xml.get("begin"))
	end = int(xml.get("end"))
	serviceref = ServiceReference(xml.get("serviceref").encode("utf-8"))
	description = xml.get("description").encode("utf-8")
	repeated = xml.get("repeated").encode("utf-8")
	rename_repeat = long(xml.get("rename_repeat") or "1")
	disabled = long(xml.get("disabled") or "0")
	justplay = long(xml.get("justplay") or "0")
	always_zap = long(xml.get("always_zap") or "0")
	zap_wakeup = str(xml.get("zap_wakeup") or "always")
	afterevent = str(xml.get("afterevent") or "nothing")
	# map the textual afterevent attribute onto the AFTEREVENT constants
	afterevent = {
		"nothing": AFTEREVENT.NONE,
		"standby": AFTEREVENT.STANDBY,
		"deepstandby": AFTEREVENT.DEEPSTANDBY,
		"auto": AFTEREVENT.AUTO
		}[afterevent]
	eit = xml.get("eit")
	if eit and eit != "None":
		eit = long(eit)
	else:
		eit = None
	location = xml.get("location")
	if location and location != "None":
		location = location.encode("utf-8")
	else:
		location = None
	tags = xml.get("tags")
	if tags and tags != "None":
		tags = tags.encode("utf-8").split(' ')
	else:
		tags = None
	descramble = int(xml.get("descramble") or "1")
	record_ecm = int(xml.get("record_ecm") or "0")
	name = xml.get("name").encode("utf-8")
	#filename = xml.get("filename").encode("utf-8")
	entry = RecordTimerEntry(serviceref, begin, end, name, description, eit, disabled, justplay, afterevent, dirname = location, tags = tags, descramble = descramble, record_ecm = record_ecm, always_zap = always_zap, zap_wakeup = zap_wakeup, rename_repeat = rename_repeat)
	entry.repeated = int(repeated)
	# restore the persisted log entries as (time, code, message) tuples
	for l in xml.findall("log"):
		time = int(l.get("time"))
		code = int(l.get("code"))
		msg = l.text.strip().encode("utf-8")
		entry.log_entries.append((time, code, msg))
	return entry
class RecordTimer(timer.Timer):
	"""Timer queue specialised for recordings, persisted in timers.xml."""
	def __init__(self):
		timer.Timer.__init__(self)
		# persistent store for all timers (config dir / timers.xml)
		self.Filename = Directories.resolveFilename(Directories.SCOPE_CONFIG, "timers.xml")
		try:
			self.loadTimer()
		except IOError:
			print "unable to load timers from file!"
	def doActivate(self, w):
		# Drive one state transition of timer entry w and re-sort it into
		# the active list, or move it to processed_timers when finished
		# (re-adding repeat timers for their next occurrence).
		# when activating a timer which has already passed,
		# simply abort the timer. don't run through all the stages.
		if w.shouldSkip():
			w.state = RecordTimerEntry.StateEnded
		else:
			# when activate returns true, this means "accepted".
			# otherwise, the current state is kept.
			# the timer entry itself will fix up the delay then.
			if w.activate():
				w.state += 1
		self.timer_list.remove(w)
		# did this timer reach the last state?
		if w.state < RecordTimerEntry.StateEnded:
			# no, sort it into active list
			insort(self.timer_list, w)
		else:
			# yes. Process repeated, and re-add.
			if w.repeated:
				w.processRepeated()
				w.state = RecordTimerEntry.StateWaiting
				w.first_try_prepare = True
				self.addTimerEntry(w)
			else:
				# Remove old timers as set in config
				self.cleanupDaily(config.recording.keep_timers.value)
				insort(self.processed_timers, w)
		self.stateChanged(w)
def isRecording(self):
for timer in self.timer_list:
if timer.isRunning() and not timer.justplay:
return True
return False
	def loadTimer(self):
		# Parse timers.xml and register every stored timer. A corrupt file
		# pops up an error, is renamed to *_old and otherwise ignored; timer
		# conflicts found while re-adding are reported once.
		# TODO: PATH!
		if not Directories.fileExists(self.Filename):
			return
		try:
			doc = xml.etree.cElementTree.parse(self.Filename)
		except SyntaxError:
			from Tools.Notifications import AddPopup
			from Screens.MessageBox import MessageBox
			AddPopup(_("The timer file (timers.xml) is corrupt and could not be loaded."), type = MessageBox.TYPE_ERROR, timeout = 0, id = "TimerLoadFailed")
			print "timers.xml failed to load!"
			try:
				import os
				os.rename(self.Filename, self.Filename + "_old")
			except (IOError, OSError):
				print "renaming broken timer failed"
			return
		except IOError:
			print "timers.xml not found!"
			return
		root = doc.getroot()
		# put out a message when at least one timer overlaps
		checkit = True
		for timer in root.findall("timer"):
			newTimer = createTimer(timer)
			if (self.record(newTimer, True, dosave=False) is not None) and (checkit == True):
				from Tools.Notifications import AddPopup
				from Screens.MessageBox import MessageBox
				AddPopup(_("Timer overlap in timers.xml detected!\nPlease recheck it!"), type = MessageBox.TYPE_ERROR, timeout = 0, id = "TimerLoadFailed")
				checkit = False # at moment it is enough when the message is displayed one time
def saveTimer(self):
#root_element = xml.etree.cElementTree.Element('timers')
#root_element.text = "\n"
#for timer in self.timer_list + self.processed_timers:
# some timers (instant records) don't want to be saved.
# skip them
#if timer.dontSave:
#continue
#t = xml.etree.cElementTree.SubElement(root_element, 'timers')
#t.set("begin", str(int(timer.begin)))
#t.set("end", str(int(timer.end)))
#t.set("serviceref", str(timer.service_ref))
#t.set("repeated", str(timer.repeated))
#t.set("name", timer.name)
#t.set("description", timer.description)
#t.set("afterevent", str({
# AFTEREVENT.NONE: "nothing",
# AFTEREVENT.STANDBY: "standby",
# AFTEREVENT.DEEPSTANDBY: "deepstandby",
# AFTEREVENT.AUTO: "auto"}))
#if timer.eit is not None:
# t.set("eit", str(timer.eit))
#if timer.dirname is not None:
# t.set("location", str(timer.dirname))
#t.set("disabled", str(int(timer.disabled)))
#t.set("justplay", str(int(timer.justplay)))
#t.text = "\n"
#t.tail = "\n"
#for time, code, msg in timer.log_entries:
#l = xml.etree.cElementTree.SubElement(t, 'log')
#l.set("time", str(time))
#l.set("code", str(code))
#l.text = str(msg)
#l.tail = "\n"
#doc = xml.etree.cElementTree.ElementTree(root_element)
#doc.write(self.Filename)
list = []
list.append('<?xml version="1.0" ?>\n')
list.append('<timers>\n')
for timer in self.timer_list + self.processed_timers:
if timer.dontSave:
continue
list.append('<timer')
list.append(' begin="' + str(int(timer.begin)) + '"')
list.append(' end="' + str(int(timer.end)) + '"')
list.append(' serviceref="' + stringToXML(str(timer.service_ref)) + '"')
list.append(' repeated="' + str(int(timer.repeated)) + '"')
list.append(' name="' + str(stringToXML(timer.name)) + '"')
list.append(' description="' + str(stringToXML(timer.description)) + '"')
list.append(' afterevent="' + str(stringToXML({
AFTEREVENT.NONE: "nothing",
AFTEREVENT.STANDBY: "standby",
AFTEREVENT.DEEPSTANDBY: "deepstandby",
AFTEREVENT.AUTO: "auto"
}[timer.afterEvent])) + '"')
if timer.eit is not None:
list.append(' eit="' + str(timer.eit) + '"')
if timer.dirname is not None:
list.append(' location="' + str(stringToXML(timer.dirname)) + '"')
if timer.tags is not None:
list.append(' tags="' + str(stringToXML(' '.join(timer.tags))) + '"')
list.append(' disabled="' + str(int(timer.disabled)) + '"')
list.append(' justplay="' + str(int(timer.justplay)) + '"')
list.append(' always_zap="' + str(int(timer.always_zap)) + '"')
list.append(' zap_wakeup="' + str(timer.zap_wakeup) + '"')
list.append(' rename_repeat="' + str(int(timer.rename_repeat)) + '"')
list.append(' descramble="' + str(int(timer.descramble)) + '"')
list.append(' record_ecm="' + str(int(timer.record_ecm)) + '"')
list.append('>\n')
if config.recording.debug.value:
for time, code, msg in timer.log_entries:
list.append('<log')
list.append(' code="' + str(code) + '"')
list.append(' time="' + str(time) + '"')
list.append('>')
list.append(str(stringToXML(msg)))
list.append('</log>\n')
list.append('</timer>\n')
list.append('</timers>\n')
file = open(self.Filename + ".writing", "w")
for x in list:
file.write(x)
file.flush()
import os
os.fsync(file.fileno())
file.close()
os.rename(self.Filename + ".writing", self.Filename)
def getNextZapTime(self, isWakeup=False):
now = time()
for timer in self.timer_list:
if not timer.justplay or timer.begin < now or isWakeup and timer.zap_wakeup in ("from_standby", "never"):
continue
return timer.begin
return -1
def getNextRecordingTime(self):
now = time()
for timer in self.timer_list:
next_act = timer.getNextActivation()
if timer.justplay or next_act < now:
continue
return next_act
return -1
def getNextTimerTime(self, isWakeup=False):
now = time()
for timer in self.timer_list:
next_act = timer.getNextActivation()
if next_act < now or isWakeup and timer.justplay and timer.zap_wakeup in ("from_standby", "never"):
continue
return next_act
return -1
	def isNextRecordAfterEventActionAuto(self):
		# Whether an upcoming recording at the earliest pending begin time
		# has afterEvent == AUTO.
		# NOTE(review): 't is None or t.begin == timer.begin' only considers
		# timers sharing the first candidate's begin time; presumably this
		# relies on timer_list being sorted by begin — confirm.
		now = time()
		t = None
		for timer in self.timer_list:
			if timer.justplay or timer.begin < now:
				continue
			if t is None or t.begin == timer.begin:
				t = timer
				if t.afterEvent == AFTEREVENT.AUTO:
					return True
		return False
	def record(self, entry, ignoreTSC=False, dosave=True): # called by loadTimer with dosave=False
		# Add a timer entry after running the sanity check.
		# Returns the conflict list on collision (unless ignoreTSC), or None
		# on success / ignored double timer.
		timersanitycheck = TimerSanityCheck(self.timer_list,entry)
		if not timersanitycheck.check():
			if ignoreTSC != True:
				print "timer conflict detected!"
				print timersanitycheck.getSimulTimerList()
				return timersanitycheck.getSimulTimerList()
			else:
				print "ignore timer conflict"
		elif timersanitycheck.doubleCheck():
			print "ignore double timer"
			return None
		entry.timeChanged()
		print "[Timer] Record " + str(entry)
		entry.Timer = self
		self.addTimerEntry(entry)
		if dosave:
			self.saveTimer()
		return None
	def isInRepeatTimer(self, timer, event):
		# Check how a repeat timer overlaps the given EPG event.
		# Times are folded onto a minutes-since-midnight scale (offset by
		# 1440 to keep day-wrap arithmetic positive); offset_day handles
		# timers that span midnight into the event's weekday.
		# Returns truthy when the overlap exists and is considered editable.
		time_match = 0
		is_editable = False
		begin = event.getBeginTime()
		duration = event.getDuration()
		end = begin + duration
		timer_end = timer.end
		# a disabled but still running repeat timer only counts while the
		# current occurrence overlaps the event
		if timer.disabled and timer.isRunning():
			if begin < timer.begin <= end or timer.begin <= begin <= timer_end:
				return True
			else:
				return False
		if timer.justplay and (timer_end - timer.begin) <= 1:
			timer_end += 60
		bt = localtime(begin)
		bday = bt.tm_wday
		begin2 = 1440 + bt.tm_hour * 60 + bt.tm_min
		end2 = begin2 + duration / 60
		xbt = localtime(timer.begin)
		xet = localtime(timer_end)
		offset_day = False
		checking_time = timer.begin < begin or begin <= timer.begin <= end
		if xbt.tm_yday != xet.tm_yday:
			oday = bday - 1
			if oday == -1: oday = 6
			offset_day = timer.repeated & (1 << oday)
		xbegin = 1440 + xbt.tm_hour * 60 + xbt.tm_min
		xend = xbegin + ((timer_end - timer.begin) / 60)
		if xend < xbegin:
			xend += 1440
		if timer.repeated & (1 << bday) and checking_time:
			if begin2 < xbegin <= end2:
				if xend < end2:
					# recording within event
					time_match = (xend - xbegin) * 60
					is_editable = True
				else:
					# recording last part of event
					time_match = (end2 - xbegin) * 60
					summary_end = (xend - end2) * 60
					is_editable = not summary_end and True or time_match >= summary_end
			elif xbegin <= begin2 <= xend:
				if xend < end2:
					# recording first part of event
					time_match = (xend - begin2) * 60
					summary_end = (begin2 - xbegin) * 60
					is_editable = not summary_end and True or time_match >= summary_end
				else:
					# recording whole event
					time_match = (end2 - begin2) * 60
					is_editable = True
			elif offset_day:
				xbegin -= 1440
				xend -= 1440
				if begin2 < xbegin <= end2:
					if xend < end2:
						# recording within event
						time_match = (xend - xbegin) * 60
						is_editable = True
					else:
						# recording last part of event
						time_match = (end2 - xbegin) * 60
						summary_end = (xend - end2) * 60
						is_editable = not summary_end and True or time_match >= summary_end
				elif xbegin <= begin2 <= xend:
					if xend < end2:
						# recording first part of event
						time_match = (xend - begin2) * 60
						summary_end = (begin2 - xbegin) * 60
						is_editable = not summary_end and True or time_match >= summary_end
					else:
						# recording whole event
						time_match = (end2 - begin2) * 60
						is_editable = True
		elif offset_day and checking_time:
			xbegin -= 1440
			xend -= 1440
			if begin2 < xbegin <= end2:
				if xend < end2:
					# recording within event
					time_match = (xend - xbegin) * 60
					is_editable = True
				else:
					# recording last part of event
					time_match = (end2 - xbegin) * 60
					summary_end = (xend - end2) * 60
					is_editable = not summary_end and True or time_match >= summary_end
			elif xbegin <= begin2 <= xend:
				if xend < end2:
					# recording first part of event
					time_match = (xend - begin2) * 60
					summary_end = (begin2 - xbegin) * 60
					is_editable = not summary_end and True or time_match >= summary_end
				else:
					# recording whole event
					time_match = (end2 - begin2) * 60
					is_editable = True
		return time_match and is_editable
	def isInTimer(self, eventid, begin, duration, service):
		# Determine whether the given event (eventid/begin/duration on
		# 'service') is covered by any timer. Returns None or a tuple
		# (matched_seconds, [type codes]); the type code encodes overlap
		# shape (1..4: last part / whole / within / first part) plus offsets
		# for justplay (+5), always_zap (+10) and repeat variants (+15/+30).
		returnValue = None
		type = 0
		time_match = 0
		bt = None
		check_offset_time = not config.recording.margin_before.value and not config.recording.margin_after.value
		end = begin + duration
		# compare only the first 11 service reference fields
		refstr = ':'.join(service.split(':')[:11])
		for x in self.timer_list:
			check = ':'.join(x.service_ref.ref.toString().split(':')[:11]) == refstr
			if not check:
				sref = x.service_ref.ref
				parent_sid = sref.getUnsignedData(5)
				parent_tsid = sref.getUnsignedData(6)
				if parent_sid and parent_tsid:
					# check for subservice
					# temporarily rewrite sref to the parent service, test the
					# linkage services, then restore the original fields
					sid = sref.getUnsignedData(1)
					tsid = sref.getUnsignedData(2)
					sref.setUnsignedData(1, parent_sid)
					sref.setUnsignedData(2, parent_tsid)
					sref.setUnsignedData(5, 0)
					sref.setUnsignedData(6, 0)
					check = sref.toCompareString() == refstr
					num = 0
					if check:
						check = False
						event = eEPGCache.getInstance().lookupEventId(sref, eventid)
						num = event and event.getNumOfLinkageServices() or 0
					sref.setUnsignedData(1, sid)
					sref.setUnsignedData(2, tsid)
					sref.setUnsignedData(5, parent_sid)
					sref.setUnsignedData(6, parent_tsid)
					for cnt in range(num):
						subservice = event.getLinkageService(sref, cnt)
						if sref.toCompareString() == subservice.toCompareString():
							check = True
							break
			if check:
				timer_end = x.end
				timer_begin = x.begin
				type_offset = 0
				# without recording margins, snap timers within 59s of the
				# event boundaries onto them
				if not x.repeated and check_offset_time:
					if 0 < end - timer_end <= 59:
						timer_end = end
					elif 0 < timer_begin - begin <= 59:
						timer_begin = begin
				if x.justplay:
					type_offset = 5
					if (timer_end - x.begin) <= 1:
						timer_end += 60
				if x.always_zap:
					type_offset = 10
				timer_repeat = x.repeated
				# if set 'don't stop current event but disable coming events' for repeat timer
				running_only_curevent = x.disabled and x.isRunning() and timer_repeat
				if running_only_curevent:
					timer_repeat = 0
					type_offset += 15
				if timer_repeat != 0:
					type_offset += 15
					if bt is None:
						bt = localtime(begin)
						bday = bt.tm_wday
						begin2 = 1440 + bt.tm_hour * 60 + bt.tm_min
						end2 = begin2 + duration / 60
					xbt = localtime(x.begin)
					xet = localtime(timer_end)
					offset_day = False
					checking_time = x.begin < begin or begin <= x.begin <= end
					if xbt.tm_yday != xet.tm_yday:
						oday = bday - 1
						if oday == -1: oday = 6
						offset_day = x.repeated & (1 << oday)
					xbegin = 1440 + xbt.tm_hour * 60 + xbt.tm_min
					xend = xbegin + ((timer_end - x.begin) / 60)
					if xend < xbegin:
						xend += 1440
					if x.repeated & (1 << bday) and checking_time:
						if begin2 < xbegin <= end2:
							if xend < end2:
								# recording within event
								time_match = (xend - xbegin) * 60
								type = type_offset + 3
							else:
								# recording last part of event
								time_match = (end2 - xbegin) * 60
								type = type_offset + 1
						elif xbegin <= begin2 <= xend:
							if xend < end2:
								# recording first part of event
								time_match = (xend - begin2) * 60
								type = type_offset + 4
							else:
								# recording whole event
								time_match = (end2 - begin2) * 60
								type = type_offset + 2
						elif offset_day:
							xbegin -= 1440
							xend -= 1440
							if begin2 < xbegin <= end2:
								if xend < end2:
									# recording within event
									time_match = (xend - xbegin) * 60
									type = type_offset + 3
								else:
									# recording last part of event
									time_match = (end2 - xbegin) * 60
									type = type_offset + 1
							elif xbegin <= begin2 <= xend:
								if xend < end2:
									# recording first part of event
									time_match = (xend - begin2) * 60
									type = type_offset + 4
								else:
									# recording whole event
									time_match = (end2 - begin2) * 60
									type = type_offset + 2
					elif offset_day and checking_time:
						xbegin -= 1440
						xend -= 1440
						if begin2 < xbegin <= end2:
							if xend < end2:
								# recording within event
								time_match = (xend - xbegin) * 60
								type = type_offset + 3
							else:
								# recording last part of event
								time_match = (end2 - xbegin) * 60
								type = type_offset + 1
						elif xbegin <= begin2 <= xend:
							if xend < end2:
								# recording first part of event
								time_match = (xend - begin2) * 60
								type = type_offset + 4
							else:
								# recording whole event
								time_match = (end2 - begin2) * 60
								type = type_offset + 2
				else:
					if begin < timer_begin <= end:
						if timer_end < end:
							# recording within event
							time_match = timer_end - timer_begin
							type = type_offset + 3
						else:
							# recording last part of event
							time_match = end - timer_begin
							type = type_offset + 1
					elif timer_begin <= begin <= timer_end:
						if timer_end < end:
							# recording first part of event
							time_match = timer_end - begin
							type = type_offset + 4
						else:
							# recording whole event
							time_match = end - begin
							type = type_offset + 2
				if time_match:
					if type in (2,7,12,17,22,27):
						# When full recording do not look further
						returnValue = (time_match, [type])
						break
					elif returnValue:
						if type not in returnValue[1]:
							returnValue[1].append(type)
					else:
						returnValue = (time_match, [type])
		return returnValue
	def removeEntry(self, entry):
		# Abort a timer entry, let other autoincrease timers reclaim the
		# freed time, drop the entry from processed_timers and persist.
		print "[Timer] Remove " + str(entry)
		# avoid re-enqueuing
		entry.repeated = False
		# abort timer.
		# this sets the end time to current time, so timer will be stopped.
		entry.autoincrease = False
		entry.abort()
		if entry.state != entry.StateEnded:
			self.timeChanged(entry)
		print "state: ", entry.state
		print "in processed: ", entry in self.processed_timers
		print "in running: ", entry in self.timer_list
		# autoincrease instanttimer if possible
		if not entry.dontSave:
			for x in self.timer_list:
				if x.setAutoincreaseEnd():
					self.timeChanged(x)
		# now the timer should be in the processed_timers list. remove it from there.
		self.processed_timers.remove(entry)
		self.saveTimer()
def shutdown(self):
self.saveTimer() | unknown | codeparrot/codeparrot-clean | ||
<?php declare(strict_types=1);
/*
* This file is part of Composer.
*
* (c) Nils Adermann <naderman@naderman.de>
* Jordi Boggiano <j.boggiano@seld.be>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
namespace Composer;
use Composer\Config\JsonConfigSource;
use Composer\Json\JsonFile;
use Composer\IO\IOInterface;
use Composer\Package\Archiver;
use Composer\Package\Version\VersionGuesser;
use Composer\Package\RootPackageInterface;
use Composer\Repository\FilesystemRepository;
use Composer\Repository\RepositoryManager;
use Composer\Repository\RepositoryFactory;
use Composer\Util\Filesystem;
use Composer\Util\Platform;
use Composer\Util\ProcessExecutor;
use Composer\Util\HttpDownloader;
use Composer\Util\Loop;
use Composer\Util\Silencer;
use Composer\Plugin\PluginEvents;
use Composer\EventDispatcher\Event;
use Phar;
use Symfony\Component\Console\Formatter\OutputFormatter;
use Symfony\Component\Console\Formatter\OutputFormatterStyle;
use Symfony\Component\Console\Output\ConsoleOutput;
use Composer\EventDispatcher\EventDispatcher;
use Composer\Autoload\AutoloadGenerator;
use Composer\Package\Version\VersionParser;
use Composer\Downloader\TransportException;
use Composer\Json\JsonValidationException;
use Composer\Repository\InstalledRepositoryInterface;
use UnexpectedValueException;
use ZipArchive;
/**
* Creates a configured instance of composer.
*
* @author Ryan Weaver <ryan@knplabs.com>
* @author Jordi Boggiano <j.boggiano@seld.be>
* @author Igor Wiedler <igor@wiedler.ch>
* @author Nils Adermann <naderman@naderman.de>
*/
class Factory
{
/**
* @throws \RuntimeException
*/
protected static function getHomeDir(): string
{
$home = Platform::getEnv('COMPOSER_HOME');
if ($home) {
return $home;
}
if (Platform::isWindows()) {
if (!Platform::getEnv('APPDATA')) {
throw new \RuntimeException('The APPDATA or COMPOSER_HOME environment variable must be set for composer to run correctly');
}
return rtrim(strtr(Platform::getEnv('APPDATA'), '\\', '/'), '/') . '/Composer';
}
$userDir = self::getUserDir();
$dirs = [];
if (self::useXdg()) {
// XDG Base Directory Specifications
$xdgConfig = Platform::getEnv('XDG_CONFIG_HOME');
if (!$xdgConfig) {
$xdgConfig = $userDir . '/.config';
}
$dirs[] = $xdgConfig . '/composer';
}
$dirs[] = $userDir . '/.composer';
// select first dir which exists of: $XDG_CONFIG_HOME/composer or ~/.composer
foreach ($dirs as $dir) {
if (Silencer::call('is_dir', $dir)) {
return $dir;
}
}
// if none exists, we default to first defined one (XDG one if system uses it, or ~/.composer otherwise)
return $dirs[0];
}
    /**
     * Determines the composer cache directory, honouring COMPOSER_CACHE_DIR,
     * COMPOSER_HOME, Windows LOCALAPPDATA, the macOS Library/Caches location
     * (migrating an old $home/cache if present) and XDG_CACHE_HOME.
     */
    protected static function getCacheDir(string $home): string
    {
        $cacheDir = Platform::getEnv('COMPOSER_CACHE_DIR');
        if ($cacheDir) {
            return $cacheDir;
        }
        // when COMPOSER_HOME is set explicitly, the cache lives inside it
        $homeEnv = Platform::getEnv('COMPOSER_HOME');
        if ($homeEnv) {
            return $homeEnv . '/cache';
        }
        if (Platform::isWindows()) {
            if ($cacheDir = Platform::getEnv('LOCALAPPDATA')) {
                $cacheDir .= '/Composer';
            } else {
                $cacheDir = $home . '/cache';
            }
            return rtrim(strtr($cacheDir, '\\', '/'), '/');
        }
        $userDir = self::getUserDir();
        if (PHP_OS === 'Darwin') {
            // Migrate existing cache dir in old location if present
            if (is_dir($home . '/cache') && !is_dir($userDir . '/Library/Caches/composer')) {
                Silencer::call('rename', $home . '/cache', $userDir . '/Library/Caches/composer');
            }
            return $userDir . '/Library/Caches/composer';
        }
        // keep using a legacy ~/.composer/cache if it already exists
        if ($home === $userDir . '/.composer' && is_dir($home . '/cache')) {
            return $home . '/cache';
        }
        if (self::useXdg()) {
            $xdgCache = Platform::getEnv('XDG_CACHE_HOME') ?: $userDir . '/.cache';
            return $xdgCache . '/composer';
        }
        return $home . '/cache';
    }
protected static function getDataDir(string $home): string
{
$homeEnv = Platform::getEnv('COMPOSER_HOME');
if ($homeEnv) {
return $homeEnv;
}
if (Platform::isWindows()) {
return strtr($home, '\\', '/');
}
$userDir = self::getUserDir();
if ($home !== $userDir . '/.composer' && self::useXdg()) {
$xdgData = Platform::getEnv('XDG_DATA_HOME') ?: $userDir . '/.local/share';
return $xdgData . '/composer';
}
return $home;
}
    /**
     * Builds the Config instance: default home/cache/data dirs, the global
     * config.json and auth.json, optional .htaccess protection of the
     * composer dirs, and COMPOSER_AUTH credentials from the environment.
     */
    public static function createConfig(?IOInterface $io = null, ?string $cwd = null): Config
    {
        $cwd = $cwd ?? Platform::getCwd(true);
        $config = new Config(true, $cwd);
        // determine and add main dirs to the config
        $home = self::getHomeDir();
        $config->merge([
            'config' => [
                'home' => $home,
                'cache-dir' => self::getCacheDir($home),
                'data-dir' => self::getDataDir($home),
            ],
        ], Config::SOURCE_DEFAULT);
        // load global config
        $file = new JsonFile($config->get('home').'/config.json');
        if ($file->exists()) {
            if ($io instanceof IOInterface) {
                $io->writeError('Loading config file ' . $file->getPath(), true, IOInterface::DEBUG);
            }
            self::validateJsonSchema($io, $file);
            $config->merge($file->read(), $file->getPath());
        }
        $config->setConfigSource(new JsonConfigSource($file));
        $htaccessProtect = $config->get('htaccess-protect');
        if ($htaccessProtect) {
            // Protect directory against web access. Since HOME could be
            // the www-data's user home and be web-accessible it is a
            // potential security risk
            $dirs = [$config->get('home'), $config->get('cache-dir'), $config->get('data-dir')];
            foreach ($dirs as $dir) {
                if (!file_exists($dir . '/.htaccess')) {
                    if (!is_dir($dir)) {
                        Silencer::call('mkdir', $dir, 0777, true);
                    }
                    Silencer::call('file_put_contents', $dir . '/.htaccess', 'Deny from all');
                }
            }
        }
        // load global auth file
        $file = new JsonFile($config->get('home').'/auth.json');
        if ($file->exists()) {
            if ($io instanceof IOInterface) {
                $io->writeError('Loading config file ' . $file->getPath(), true, IOInterface::DEBUG);
            }
            self::validateJsonSchema($io, $file, JsonFile::AUTH_SCHEMA);
            $config->merge(['config' => $file->read()], $file->getPath());
        }
        $config->setAuthConfigSource(new JsonConfigSource($file, true));
        // environment credentials win over the auth.json contents
        self::loadComposerAuthEnv($config, $io);
        return $config;
    }
public static function getComposerFile(): string
{
$env = Platform::getEnv('COMPOSER');
if (is_string($env)) {
$env = trim($env);
if ('' !== $env) {
if (is_dir($env)) {
throw new \RuntimeException('The COMPOSER environment variable is set to '.$env.' which is a directory, this variable should point to a composer.json or be left unset.');
}
return $env;
}
}
return './composer.json';
}
public static function getLockFile(string $composerFile): string
{
return "json" === pathinfo($composerFile, PATHINFO_EXTENSION)
? substr($composerFile, 0, -4).'lock'
: $composerFile . '.lock';
}
/**
* @return array{highlight: OutputFormatterStyle, warning: OutputFormatterStyle}
*/
public static function createAdditionalStyles(): array
{
return [
'highlight' => new OutputFormatterStyle('red'),
'warning' => new OutputFormatterStyle('black', 'yellow'),
];
}
public static function createOutput(): ConsoleOutput
{
$styles = self::createAdditionalStyles();
$formatter = new OutputFormatter(false, $styles);
return new ConsoleOutput(ConsoleOutput::VERBOSITY_NORMAL, null, $formatter);
}
/**
* Creates a Composer instance
*
* @param IOInterface $io IO instance
* @param array<string, mixed>|string|null $localConfig either a configuration array or a filename to read from, if null it will
* read from the default filename
* @param bool|'local'|'global' $disablePlugins Whether plugins should not be loaded, can be set to local or global to only disable local/global plugins
* @param bool $disableScripts Whether scripts should not be run
* @param bool $fullLoad Whether to initialize everything or only main project stuff (used when loading the global composer)
* @throws \InvalidArgumentException
* @throws UnexpectedValueException
* @return Composer|PartialComposer Composer if $fullLoad is true, otherwise PartialComposer
* @phpstan-return ($fullLoad is true ? Composer : PartialComposer)
*/
public function createComposer(IOInterface $io, $localConfig = null, $disablePlugins = false, ?string $cwd = null, bool $fullLoad = true, bool $disableScripts = false)
{
// if a custom composer.json path is given, we change the default cwd to be that file's directory
if (is_string($localConfig) && is_file($localConfig) && null === $cwd) {
$cwd = dirname($localConfig);
}
$cwd = $cwd ?? Platform::getCwd(true);
// load Composer configuration
if (null === $localConfig) {
$localConfig = static::getComposerFile();
}
$localConfigSource = Config::SOURCE_UNKNOWN;
if (is_string($localConfig)) {
$composerFile = $localConfig;
$file = new JsonFile($localConfig, null, $io);
if (!$file->exists()) {
if ($localConfig === './composer.json' || $localConfig === 'composer.json') {
$message = 'Composer could not find a composer.json file in '.$cwd;
} else {
$message = 'Composer could not find the config file: '.$localConfig;
}
$instructions = $fullLoad ? 'To initialize a project, please create a composer.json file. See https://getcomposer.org/basic-usage' : '';
throw new \InvalidArgumentException($message.PHP_EOL.$instructions);
}
if (!Platform::isInputCompletionProcess()) {
try {
$file->validateSchema(JsonFile::LAX_SCHEMA);
} catch (JsonValidationException $e) {
$errors = ' - ' . implode(PHP_EOL . ' - ', $e->getErrors());
$message = $e->getMessage() . ':' . PHP_EOL . $errors;
throw new JsonValidationException($message);
}
}
$localConfig = $file->read();
$localConfigSource = $file->getPath();
}
// Load config and override with local config/auth config
$config = static::createConfig($io, $cwd);
$isGlobal = $localConfigSource !== Config::SOURCE_UNKNOWN && realpath($config->get('home')) === realpath(dirname($localConfigSource));
$config->merge($localConfig, $localConfigSource);
if (isset($composerFile)) {
$io->writeError('Loading config file ' . $composerFile .' ('.realpath($composerFile).')', true, IOInterface::DEBUG);
$config->setConfigSource(new JsonConfigSource(new JsonFile(realpath($composerFile), null, $io)));
$localAuthFile = new JsonFile(dirname(realpath($composerFile)) . '/auth.json', null, $io);
if ($localAuthFile->exists()) {
$io->writeError('Loading config file ' . $localAuthFile->getPath(), true, IOInterface::DEBUG);
self::validateJsonSchema($io, $localAuthFile, JsonFile::AUTH_SCHEMA);
$config->merge(['config' => $localAuthFile->read()], $localAuthFile->getPath());
$config->setLocalAuthConfigSource(new JsonConfigSource($localAuthFile, true));
}
}
// make sure we load the auth env again over the local auth.json + composer.json config
self::loadComposerAuthEnv($config, $io);
$vendorDir = $config->get('vendor-dir');
// initialize composer
$composer = $fullLoad ? new Composer() : new PartialComposer();
$composer->setConfig($config);
if ($isGlobal) {
$composer->setGlobal();
}
if ($fullLoad) {
// load auth configs into the IO instance
$io->loadConfiguration($config);
// load existing Composer\InstalledVersions instance if available and scripts/plugins are allowed, as they might need it
// we only load if the InstalledVersions class wasn't defined yet so that this is only loaded once
if (false === $disablePlugins && false === $disableScripts && !class_exists('Composer\InstalledVersions', false) && file_exists($installedVersionsPath = $config->get('vendor-dir').'/composer/installed.php')) {
// force loading the class at this point so it is loaded from the composer phar and not from the vendor dir
// as we cannot guarantee integrity of that file
if (class_exists('Composer\InstalledVersions')) {
FilesystemRepository::safelyLoadInstalledVersions($installedVersionsPath);
}
}
}
$httpDownloader = self::createHttpDownloader($io, $config);
$process = new ProcessExecutor($io);
$loop = new Loop($httpDownloader, $process);
$composer->setLoop($loop);
// initialize event dispatcher
$dispatcher = new EventDispatcher($composer, $io, $process);
$dispatcher->setRunScripts(!$disableScripts);
$composer->setEventDispatcher($dispatcher);
// initialize repository manager
$rm = RepositoryFactory::manager($io, $config, $httpDownloader, $dispatcher, $process);
$composer->setRepositoryManager($rm);
// force-set the version of the global package if not defined as
// guessing it adds no value and only takes time
if (!$fullLoad && !isset($localConfig['version'])) {
$localConfig['version'] = '1.0.0';
}
// load package
$parser = new VersionParser;
$guesser = new VersionGuesser($config, $process, $parser, $io);
$loader = $this->loadRootPackage($rm, $config, $parser, $guesser, $io);
$package = $loader->load($localConfig, 'Composer\Package\RootPackage', $cwd);
$composer->setPackage($package);
// load local repository
$this->addLocalRepository($io, $rm, $vendorDir, $package, $process);
// initialize installation manager
$im = $this->createInstallationManager($loop, $io, $dispatcher);
$composer->setInstallationManager($im);
if ($composer instanceof Composer) {
// initialize download manager
$dm = $this->createDownloadManager($io, $config, $httpDownloader, $process, $dispatcher);
$composer->setDownloadManager($dm);
// initialize autoload generator
$generator = new AutoloadGenerator($dispatcher, $io);
$composer->setAutoloadGenerator($generator);
// initialize archive manager
$am = $this->createArchiveManager($config, $dm, $loop);
$composer->setArchiveManager($am);
}
// add installers to the manager (must happen after download manager is created since they read it out of $composer)
$this->createDefaultInstallers($im, $composer, $io, $process);
// init locker if possible
if ($composer instanceof Composer && isset($composerFile)) {
$lockFile = self::getLockFile($composerFile);
if (!$config->get('lock') && file_exists($lockFile)) {
$io->writeError('<warning>'.$lockFile.' is present but ignored as the "lock" config option is disabled.</warning>');
}
$locker = new Package\Locker($io, new JsonFile($config->get('lock') ? $lockFile : Platform::getDevNull(), null, $io), $im, file_get_contents($composerFile), $process);
$composer->setLocker($locker);
} elseif ($composer instanceof Composer) {
$locker = new Package\Locker($io, new JsonFile(Platform::getDevNull(), null, $io), $im, JsonFile::encode($localConfig), $process);
$composer->setLocker($locker);
}
if ($composer instanceof Composer) {
$globalComposer = null;
if (!$composer->isGlobal()) {
$globalComposer = $this->createGlobalComposer($io, $config, $disablePlugins, $disableScripts);
}
$pm = $this->createPluginManager($io, $composer, $globalComposer, $disablePlugins);
$composer->setPluginManager($pm);
if ($composer->isGlobal()) {
$pm->setRunningInGlobalDir(true);
}
$pm->loadInstalledPlugins();
}
if ($fullLoad) {
$initEvent = new Event(PluginEvents::INIT);
$composer->getEventDispatcher()->dispatch($initEvent->getName(), $initEvent);
// once everything is initialized we can
// purge packages from local repos if they have been deleted on the filesystem
$this->purgePackages($rm->getLocalRepository(), $im);
}
return $composer;
}
/**
 * Creates a fully-loaded Composer instance for the global (COMPOSER_HOME) project, if any.
 *
 * @param bool $disablePlugins Whether plugins should not be loaded
 * @param bool $disableScripts Whether scripts should not be executed
 */
public static function createGlobal(IOInterface $io, bool $disablePlugins = false, bool $disableScripts = false): ?Composer
{
    return (new static())->createGlobalComposer($io, static::createConfig($io), $disablePlugins, $disableScripts, true);
}
/**
 * Registers the installed.json-backed local repository on the repository manager.
 *
 * A Filesystem helper is only attached when a process executor is available.
 */
protected function addLocalRepository(IOInterface $io, RepositoryManager $rm, string $vendorDir, RootPackageInterface $rootPackage, ?ProcessExecutor $process = null): void
{
    $filesystem = $process !== null ? new Filesystem($process) : null;

    $installedJson = new JsonFile($vendorDir.'/composer/installed.json', null, $io);
    $rm->setLocalRepository(new Repository\InstalledFilesystemRepository($installedJson, true, $rootPackage, $filesystem));
}
/**
 * @param bool|'local'|'global' $disablePlugins Whether plugins should not be loaded, can be set to local or global to only disable local/global plugins
 * @return PartialComposer|Composer|null By default PartialComposer, but Composer if $fullLoad is set to true
 * @phpstan-return ($fullLoad is true ? Composer|null : PartialComposer|null)
 */
protected function createGlobalComposer(IOInterface $io, Config $config, $disablePlugins, bool $disableScripts, bool $fullLoad = false): ?PartialComposer
{
    // for the global scope, 'local' means plugins stay enabled; only 'global' or true disables them
    $disablePlugins = in_array($disablePlugins, ['global', true], true);

    try {
        return $this->createComposer($io, $config->get('home') . '/composer.json', $disablePlugins, $config->get('home'), $fullLoad, $disableScripts);
    } catch (\Exception $e) {
        // a missing/broken global composer.json is not fatal; report at debug verbosity only
        $io->writeError('Failed to initialize global composer: '.$e->getMessage(), true, IOInterface::DEBUG);

        return null;
    }
}
/**
 * Builds a DownloadManager wired up with every downloader Composer supports.
 *
 * A files cache is only attached when the cache-files-ttl setting is positive.
 */
public function createDownloadManager(IOInterface $io, Config $config, HttpDownloader $httpDownloader, ProcessExecutor $process, ?EventDispatcher $eventDispatcher = null): Downloader\DownloadManager
{
    $cache = null;
    if ($config->get('cache-files-ttl') > 0) {
        $cache = new Cache($io, $config->get('cache-files-dir'), 'a-z0-9_./');
        $cache->setReadOnly($config->get('cache-read-only'));
    }

    $filesystem = new Filesystem($process);
    $downloadManager = new Downloader\DownloadManager($io, false, $filesystem);

    switch ($preferred = $config->get('preferred-install')) {
        case 'dist':
            $downloadManager->setPreferDist(true);
            break;
        case 'source':
            $downloadManager->setPreferSource(true);
            break;
        case 'auto':
        default:
            // noop
            break;
    }

    // per-package preferences may be configured as an array instead of a single mode
    if (is_array($preferred)) {
        $downloadManager->setPreferences($preferred);
    }

    // VCS downloaders all share the same constructor signature
    $vcsDownloaders = [
        'git' => Downloader\GitDownloader::class,
        'svn' => Downloader\SvnDownloader::class,
        'fossil' => Downloader\FossilDownloader::class,
        'hg' => Downloader\HgDownloader::class,
        'perforce' => Downloader\PerforceDownloader::class,
    ];
    foreach ($vcsDownloaders as $type => $class) {
        $downloadManager->setDownloader($type, new $class($io, $config, $process, $filesystem));
    }

    // archive/file downloaders also share a signature (http + cache aware)
    $archiveDownloaders = [
        'zip' => Downloader\ZipDownloader::class,
        'rar' => Downloader\RarDownloader::class,
        'tar' => Downloader\TarDownloader::class,
        'gzip' => Downloader\GzipDownloader::class,
        'xz' => Downloader\XzDownloader::class,
        'phar' => Downloader\PharDownloader::class,
        'file' => Downloader\FileDownloader::class,
        'path' => Downloader\PathDownloader::class,
    ];
    foreach ($archiveDownloaders as $type => $class) {
        $downloadManager->setDownloader($type, new $class($io, $config, $httpDownloader, $eventDispatcher, $cache, $filesystem, $process));
    }

    return $downloadManager;
}
/**
 * @param Config $config The configuration
 * @param Downloader\DownloadManager $dm Manager use to download sources
 * @return Archiver\ArchiveManager
 */
public function createArchiveManager(Config $config, Downloader\DownloadManager $dm, Loop $loop)
{
    $archiveManager = new Archiver\ArchiveManager($dm, $loop);

    // only register archivers whose backing PHP extension/class is available
    if (class_exists(ZipArchive::class)) {
        $archiveManager->addArchiver(new Archiver\ZipArchiver());
    }
    if (class_exists(Phar::class)) {
        $archiveManager->addArchiver(new Archiver\PharArchiver());
    }

    return $archiveManager;
}
/**
 * @param bool|'local'|'global' $disablePlugins Whether plugins should not be loaded, can be set to local or global to only disable local/global plugins
 */
protected function createPluginManager(IOInterface $io, Composer $composer, ?PartialComposer $globalComposer = null, $disablePlugins = false): Plugin\PluginManager
{
    $pluginManager = new Plugin\PluginManager($io, $composer, $globalComposer, $disablePlugins);

    return $pluginManager;
}
/**
 * Creates the installation manager responsible for (un)installing packages on disk.
 */
public function createInstallationManager(Loop $loop, IOInterface $io, ?EventDispatcher $eventDispatcher = null): Installer\InstallationManager
{
    $installationManager = new Installer\InstallationManager($loop, $io, $eventDispatcher);

    return $installationManager;
}
/**
 * Registers the default installers (library, plugin, metapackage) on the manager.
 */
protected function createDefaultInstallers(Installer\InstallationManager $im, PartialComposer $composer, IOInterface $io, ?ProcessExecutor $process = null): void
{
    $filesystem = new Filesystem($process);

    // bin/vendor dirs are normalized without a trailing slash
    $binDir = rtrim($composer->getConfig()->get('bin-dir'), '/');
    $binCompat = $composer->getConfig()->get('bin-compat');
    $vendorDir = rtrim($composer->getConfig()->get('vendor-dir'), '/');
    $binaryInstaller = new Installer\BinaryInstaller($io, $binDir, $binCompat, $filesystem, $vendorDir);

    $im->addInstaller(new Installer\LibraryInstaller($io, $composer, null, $filesystem, $binaryInstaller));
    $im->addInstaller(new Installer\PluginInstaller($io, $composer, $filesystem, $binaryInstaller));
    $im->addInstaller(new Installer\MetapackageInstaller($io));
}
/**
 * @param InstalledRepositoryInterface $repo repository to purge packages from
 * @param Installer\InstallationManager $im manager to check whether packages are still installed
 */
protected function purgePackages(InstalledRepositoryInterface $repo, Installer\InstallationManager $im): void
{
    foreach ($repo->getPackages() as $pkg) {
        if ($im->isPackageInstalled($repo, $pkg)) {
            continue;
        }

        // package was deleted from the filesystem; drop it from the repo as well
        $repo->removePackage($pkg);
    }
}
/**
 * Creates the loader used to turn the root composer.json data into a RootPackage.
 */
protected function loadRootPackage(RepositoryManager $rm, Config $config, VersionParser $parser, VersionGuesser $guesser, IOInterface $io): Package\Loader\RootPackageLoader
{
    $loader = new Package\Loader\RootPackageLoader($rm, $config, $parser, $guesser, $io);

    return $loader;
}
/**
 * @param IOInterface $io IO instance
 * @param mixed $config either a configuration array or a filename to read from, if null it will read from
 * the default filename
 * @param bool|'local'|'global' $disablePlugins Whether plugins should not be loaded, can be set to local or global to only disable local/global plugins
 * @param bool $disableScripts Whether scripts should not be run
 */
public static function create(IOInterface $io, $config = null, $disablePlugins = false, bool $disableScripts = false): Composer
{
    // BC behavior: when a config is passed in (array or a non-default path), local plugins
    // really should not be loaded from CWD, so they get disabled unless the caller already
    // chose a plugin policy. To avoid this, call createComposer directly with $cwd set to
    // the directory of the composer.json being loaded.
    if ($config !== null && $config !== self::getComposerFile() && $disablePlugins === false) {
        $disablePlugins = 'local';
    }

    return (new static())->createComposer($io, $config, $disablePlugins, null, true, $disableScripts);
}
/**
 * If you are calling this in a plugin, you probably should instead use $composer->getLoop()->getHttpDownloader()
 *
 * @param IOInterface $io IO instance
 * @param Config $config Config instance
 * @param mixed[] $options Array of options passed directly to HttpDownloader constructor
 */
public static function createHttpDownloader(IOInterface $io, Config $config, array $options = []): HttpDownloader
{
    // static so the disable-tls warning is only printed once per process
    static $warned = false;
    $disableTls = false;
    // allow running the config command if disable-tls is in the arg list, even if openssl is missing, to allow disabling it via the config command
    if (isset($_SERVER['argv']) && in_array('disable-tls', $_SERVER['argv']) && (in_array('conf', $_SERVER['argv']) || in_array('config', $_SERVER['argv']))) {
        $warned = true;
        $disableTls = !extension_loaded('openssl');
    } elseif ($config->get('disable-tls') === true) {
        if (!$warned) {
            $io->writeError('<warning>You are running Composer with SSL/TLS protection disabled.</warning>');
        }
        $warned = true;
        $disableTls = true;
    } elseif (!extension_loaded('openssl')) {
        // TLS is required by default but impossible without openssl: hard failure
        throw new Exception\NoSslException('The openssl extension is required for SSL/TLS protection but is not available. '
            . 'If you can not enable the openssl extension, you can disable this error, at your own risk, by setting the \'disable-tls\' option to true.');
    }
    $httpDownloaderOptions = [];
    if ($disableTls === false) {
        // CA file/path overrides only make sense while TLS is active
        if ('' !== $config->get('cafile')) {
            $httpDownloaderOptions['ssl']['cafile'] = $config->get('cafile');
        }
        if ('' !== $config->get('capath')) {
            $httpDownloaderOptions['ssl']['capath'] = $config->get('capath');
        }
        // NOTE(review): user-supplied $options are only merged in when TLS is enabled — confirm intended
        $httpDownloaderOptions = array_replace_recursive($httpDownloaderOptions, $options);
    }
    try {
        $httpDownloader = new HttpDownloader($io, $config, $httpDownloaderOptions, $disableTls);
    } catch (TransportException $e) {
        // give actionable hints for the common "cannot locate CA bundle" failure, then rethrow
        if (false !== strpos($e->getMessage(), 'cafile')) {
            $io->write('<error>Unable to locate a valid CA certificate file. You must set a valid \'cafile\' option.</error>');
            $io->write('<error>A valid CA certificate file is required for SSL/TLS protection.</error>');
            $io->write('<error>You can disable this error, at your own risk, by setting the \'disable-tls\' option to true.</error>');
        }
        throw $e;
    }
    return $httpDownloader;
}
/**
 * Merges auth configuration from the COMPOSER_AUTH env var into the given config.
 *
 * The variable is decoded twice on purpose: first without assoc mode (objects) for
 * JSON schema validation, then as an associative array for Config::merge().
 * Malformed JSON (or a literal "null") triggers an UnexpectedValueException.
 */
private static function loadComposerAuthEnv(Config $config, ?IOInterface $io): void
{
    $composerAuthEnv = Platform::getEnv('COMPOSER_AUTH');
    // unset or empty env var: nothing to merge
    if (false === $composerAuthEnv || '' === $composerAuthEnv) {
        return;
    }
    $authData = json_decode($composerAuthEnv);
    if (null === $authData) {
        throw new UnexpectedValueException('COMPOSER_AUTH environment variable is malformed, should be a valid JSON object');
    }
    if ($io instanceof IOInterface) {
        $io->writeError('Loading auth config from COMPOSER_AUTH', true, IOInterface::DEBUG);
    }
    // validate the object form against the auth schema before merging
    self::validateJsonSchema($io, $authData, JsonFile::AUTH_SCHEMA, 'COMPOSER_AUTH');
    $authData = json_decode($composerAuthEnv, true);
    if (null !== $authData) {
        $config->merge(['config' => $authData], 'COMPOSER_AUTH');
    }
}
/**
 * Detects whether an XDG base-directory environment is in effect.
 *
 * True when any XDG_* environment variable is set or /etc/xdg exists.
 */
private static function useXdg(): bool
{
    foreach (array_keys($_SERVER) as $envName) {
        if (0 === strpos((string) $envName, 'XDG_')) {
            return true;
        }
    }

    // is_dir is silenced to ignore permission/open_basedir warnings
    return (bool) Silencer::call('is_dir', '/etc/xdg');
}
/**
 * Returns the user's home directory with forward slashes and no trailing slash.
 *
 * @throws \RuntimeException when no home directory can be determined
 */
private static function getUserDir(): string
{
    $home = Platform::getEnv('HOME');
    if (!$home) {
        throw new \RuntimeException('The HOME or COMPOSER_HOME environment variable must be set for composer to run correctly');
    }

    // normalize Windows-style separators and strip any trailing slash
    return rtrim(str_replace('\\', '/', $home), '/');
}
/**
 * Validates a JsonFile or already-decoded JSON data against a composer schema.
 *
 * Schema violations are reported as a warning when an IO instance is available,
 * otherwise an UnexpectedValueException is thrown. Validation is skipped entirely
 * while running as a shell input-completion helper process.
 *
 * @param mixed $fileOrData
 * @param JsonFile::*_SCHEMA $schema
 * @param string|null $source label describing where $fileOrData came from; required unless it is a JsonFile
 */
private static function validateJsonSchema(?IOInterface $io, $fileOrData, int $schema = JsonFile::LAX_SCHEMA, ?string $source = null): void
{
    if (Platform::isInputCompletionProcess()) {
        return;
    }
    try {
        if ($fileOrData instanceof JsonFile) {
            $fileOrData->validateSchema($schema);
        } else {
            if (null === $source) {
                throw new \InvalidArgumentException('$source is required to be provided if $fileOrData is arbitrary data');
            }
            JsonFile::validateJsonSchema($source, $fileOrData, $schema);
        }
    } catch (JsonValidationException $e) {
        $msg = $e->getMessage().', this may result in errors and should be resolved:'.PHP_EOL.' - '.implode(PHP_EOL.' - ', $e->getErrors());
        if ($io instanceof IOInterface) {
            // non-fatal when we can warn: surface the problem but keep going
            $io->writeError('<warning>'.$msg.'</>');
        } else {
            throw new UnexpectedValueException($msg);
        }
    }
}
} | php | github | https://github.com/composer/composer | src/Composer/Factory.php |
# Demo driver: load a Seamless graph translated from a Snakemake workflow,
# bind input files into the graph's virtual file system, then compute it.
import seamless
from seamless.highlevel import Context
from silk.Silk import RichValue
import json
import numpy as np

print("Load graph...")
# graph file holds topology/checksums; the zip (added below) holds the buffers
graph = json.load(open("snakegraph.seamless"))
ctx = seamless.highlevel.load_graph(graph)
ctx.add_zip("snakegraph.zip")
ctx.translate()

print("Bind files...")
# (path, mode) pairs; "b" = binary (genome tarball), "t" = text (FASTQ samples)
files = [("data/genome.tgz", "b"),
         ("data/samples/A.fastq", "t"),
         ("data/samples/B.fastq", "t")
         ]
def bind(file, mode):
    """Read *file* and store its contents under the same path in ctx.fs.

    mode: "b" for binary (stored as a uint8 numpy array so Seamless keeps
    raw bytes), "t" for text (stored as a plain string).
    """
    # context manager ensures the handle is closed (the original leaked it)
    with open(file, "r" + mode) as fh:
        data = fh.read()
    if mode == "b":
        data = np.frombuffer(data, dtype=np.uint8)
    setattr(ctx.fs, file, data)
for file, mode in files:
    bind(file, mode)

print("Compute...")
# evaluate the whole dependency graph; blocks until all transformers finish
ctx.compute()
print()
print("File system contents:")
# The block below is intentionally disabled (kept as a string literal); it dumps
# the full fs cell contents for debugging.
"""
print(ctx.fs.status)
print(ctx.fs.exception)
fs = ctx.fs.value.unsilk
assert fs is not None
def print_file(f):
v = str(fs[f])
if len(v) > 80:
v = v[:35] + "." * 10 + v[-35:]
print(f, v)
print()
for f in sorted(list(fs.keys())):
print_file(f)
"""
# Report which fs cells produced a value, previewing plain-text contents
# (long values are elided to keep the output on one line).
finished = []
for fs_cellname in ctx.fs.get_children("cell"):
    fs_cell = getattr(ctx.fs, fs_cellname)
    value = fs_cell.value
    value2 = RichValue(value, need_form=True)
    if value2.value is None:
        # cell did not compute a result; skip it
        continue
    finished.append(fs_cellname)
    if value2.storage == "pure-plain":
        v = str(value2.value)
        if len(v) > 80:
            v = v[:35] + "." * 10 + v[-35:]
    else:
        v = "< Binary data, length %d >" % len(value)
    print(fs_cellname + ":", v)
print()

import os
# if the final workflow target was produced, write it back out to disk
if "calls/all.vcf" in finished:
    print("SUCCESS, calls/all.vcf created")
    os.system("mkdir -p calls")
    with open("calls/all.vcf", "w") as f:
        f.write(getattr(ctx.fs, "calls/all.vcf").value.unsilk)
//! Demonstrates construction and usage of a TLS-capable HTTP client.

extern crate tls_rustls_0_23 as rustls;

use std::{error::Error as StdError, sync::Arc};

use actix_tls::connect::rustls_0_23::webpki_roots_cert_store;
use rustls::ClientConfig;

#[actix_rt::main]
async fn main() -> Result<(), Box<dyn StdError>> {
    env_logger::init_from_env(env_logger::Env::new().default_filter_or("info"));

    // rustls client config trusting the bundled webpki roots, no client certificate
    let mut config = ClientConfig::builder()
        .with_root_certificates(webpki_roots_cert_store())
        .with_no_client_auth();

    // advertise HTTP/2 and HTTP/1.1 via ALPN so the server can pick either
    let protos = vec![b"h2".to_vec(), b"http/1.1".to_vec()];
    config.alpn_protocols = protos;

    // construct request builder with TLS support
    let client = awc::Client::builder()
        .connector(awc::Connector::new().rustls_0_23(Arc::new(config)))
        .finish();

    // configure request
    let request = client
        .get("https://www.rust-lang.org/")
        .append_header(("User-Agent", "awc/3.0"));

    println!("Request: {request:?}");

    let mut response = request.send().await?;

    // server response head
    println!("Response: {response:?}");

    // read response body
    let body = response.body().await?;
    println!("Downloaded: {:?} bytes", body.len());

    Ok(())
}
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
# Day-first formats with trailing dots, e.g. "25. October 2006."
DATE_FORMAT = 'j. F Y.'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = 'j. F Y. H:i'
YEAR_MONTH_FORMAT = 'F Y.'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'j.m.Y.'
SHORT_DATETIME_FORMAT = 'j.m.Y. H:i'
# 1 = Monday (Django's convention: 0 is Sunday)
FIRST_DAY_OF_WEEK = 1
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
    '%d.%m.%Y.', '%d.%m.%y.',  # '25.10.2006.', '25.10.06.'
    '%d. %m. %Y.', '%d. %m. %y.',  # '25. 10. 2006.', '25. 10. 06.'
    # '%d. %b %y.', '%d. %B %y.', # '25. Oct 06.', '25. October 06.'
    # '%d. %b \'%y.', '%d. %B \'%y.', # '25. Oct '06.', '25. October '06.'
    # '%d. %b %Y.', '%d. %B %Y.', # '25. Oct 2006.', '25. October 2006.'
)
DATETIME_INPUT_FORMATS = (
    '%d.%m.%Y. %H:%M:%S',  # '25.10.2006. 14:30:59'
    '%d.%m.%Y. %H:%M',  # '25.10.2006. 14:30'
    '%d.%m.%Y.',  # '25.10.2006.'
    '%d.%m.%y. %H:%M:%S',  # '25.10.06. 14:30:59'
    '%d.%m.%y. %H:%M',  # '25.10.06. 14:30'
    '%d.%m.%y.',  # '25.10.06.'
    '%d. %m. %Y. %H:%M:%S',  # '25. 10. 2006. 14:30:59'
    '%d. %m. %Y. %H:%M',  # '25. 10. 2006. 14:30'
    '%d. %m. %Y.',  # '25. 10. 2006.'
    '%d. %m. %y. %H:%M:%S',  # '25. 10. 06. 14:30:59'
    '%d. %m. %y. %H:%M',  # '25. 10. 06. 14:30'
    '%d. %m. %y.',  # '25. 10. 06.'
)
# continental European number formatting: comma decimals, dot thousands
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
# Contains code from https://github.com/MagicStack/uvloop/tree/v0.16.0
# SPDX-License-Identifier: PSF-2.0 AND (MIT OR Apache-2.0)
# SPDX-FileCopyrightText: Copyright (c) 2015-2021 MagicStack Inc.  http://magic.io

import enum

# After the connection is lost, log warnings after this many write()s.
LOG_THRESHOLD_FOR_CONNLOST_WRITES = 5

# Seconds to wait before retrying accept().
ACCEPT_RETRY_DELAY = 1

# Number of stack entries to capture in debug mode.
# The larger the number, the slower the operation in debug mode
# (see extract_stack() in format_helpers.py).
DEBUG_STACK_DEPTH = 10

# Number of seconds to wait for SSL handshake to complete
# The default timeout matches that of Nginx.
SSL_HANDSHAKE_TIMEOUT = 60.0

# Number of seconds to wait for SSL shutdown to complete
# The default timeout mimics lingering_time
SSL_SHUTDOWN_TIMEOUT = 30.0

# Used in sendfile fallback code.  We use fallback for platforms
# that don't support sendfile, or for TLS connections.
SENDFILE_FALLBACK_READBUFFER_SIZE = 1024 * 256

# Flow-control high-water marks for SSL transports, in KiB.
FLOW_CONTROL_HIGH_WATER_SSL_READ = 256  # KiB
FLOW_CONTROL_HIGH_WATER_SSL_WRITE = 512  # KiB

# Default timeout for joining the threads in the threadpool
THREAD_JOIN_TIMEOUT = 300

# The enum should be here to break circular dependencies between
# base_events and sslproto
class _SendfileMode(enum.Enum):
    # sendfile cannot be used for this transport/platform
    UNSUPPORTED = enum.auto()
    # attempt the OS-level sendfile() first
    TRY_NATIVE = enum.auto()
    # use the read/write fallback implementation
    FALLBACK = enum.auto()
# coding: utf-8
import os
import time
from flask import render_template, redirect, url_for, session, flash, request, Response
from qiniu import Auth, put_file
from PIL import Image
from . import upload
from .. import db
from ..models import UsersInfo, WebConfig, Images
from ..forms import UploadForm
from ..common import random_str
@upload.route('/main')
def main_view():
    """Render the upload page, enforcing login, qiniu credentials and quota.

    Redirects to the login page when not authenticated. When the user has no
    qiniu credentials configured, or has reached the system-wide upload limit
    while on default (system) credentials, the template is rendered with
    data['no_account'] = 1 so it can disable the upload form.
    """
    current_username = session.get('username')
    current_user_token = session.get('user_token')
    current_user_id = session.get('user_id')
    current_is_login = session.get('is_login')
    if current_is_login is None or not current_is_login:
        flash(u'请先登录!', 'danger')
        return redirect(url_for('user.login'))
    upload_form = UploadForm()
    data = {
        'title': 'Home',
        'current_username': current_username,
        'current_user_token': current_user_token,
        'current_user_id': current_user_id,
        'form': upload_form,
    }
    user_info = UsersInfo.query.filter_by(user_id=current_user_id).first()
    if user_info is None or not user_info:
        return redirect(url_for('error.e500'))
    # all four qiniu settings must be present and non-empty for uploads to work
    if user_info.qiniu_access_key == '' or user_info.qiniu_secret_key == '' or \
            user_info.qiniu_bucket_name == '' or user_info.qiniu_domain == '' or \
            user_info.qiniu_access_key is None or user_info.qiniu_secret_key is None or \
            user_info.qiniu_bucket_name is None or user_info.qiniu_domain is None:
        flash(u'您还未设置七牛密钥信息,请去个人中心中设置', 'danger')
        data['no_account'] = 1
        return render_template('upload/main.html', data=data)
    # quota check only counts images uploaded with the system's default config
    images = Images.query.filter_by(upload_user_id=current_user_id, use_default_config=0).all()
    max_upload_count = WebConfig.query.filter_by(config_name='default_upload_count').first()
    if len(images) >= int(max_upload_count.config_value):
        flash(u'您已上传的图片数量到达系统限制,使用自己的七牛密钥后可以上传更多图片,请到个人中心设置。', 'danger')
        data['no_account'] = 1
        return render_template('upload/main.html', data=data)
    return render_template('upload/main.html', data=data)
@upload.route('/upload', methods=['POST'])
def upload():
    # NOTE(review): this view function shadows the `upload` blueprint imported
    # above (from . import upload); any @upload.route decorator defined after
    # this point would target the function, not the blueprint — confirm intended.
    """Handle a POST upload: validate, push the file to qiniu, record it.

    Re-checks login, qiniu credentials and quota (same rules as main_view),
    stores the file under a random name, uploads it to the user's qiniu
    bucket, records an Images row, and writes a 128x128 thumbnail locally.
    """
    if session.get('is_login') is None or not session.get('is_login'):
        flash(u'请先登录!', 'danger')
        return redirect(url_for('user.login'))
    upload_form = UploadForm()
    # print dir(upload_form.file_upload.data)
    # print dir(upload_form.file_upload.data.stream)
    # print upload_form.file_upload.data.stream.read()
    if upload_form.validate_on_submit():
        # check access key and secret key and image limit
        current_user_id = session.get('user_id')
        user_info = UsersInfo.query.filter_by(user_id=current_user_id).first()
        if user_info is None or not user_info:
            return redirect(url_for('error.e500'))
        if user_info.qiniu_access_key == '' or user_info.qiniu_secret_key == '' or \
                user_info.qiniu_bucket_name == '' or user_info.qiniu_domain == '' or \
                user_info.qiniu_access_key is None or user_info.qiniu_secret_key is None or \
                user_info.qiniu_bucket_name is None or user_info.qiniu_domain is None:
            # flash(u'您还未设置七牛密钥信息,请去个人中心中设置', 'danger')
            return redirect(url_for('.main_view'))
        images = Images.query.filter_by(upload_user_id=current_user_id, use_default_config=0).all()
        max_upload_count = WebConfig.query.filter_by(config_name='default_upload_count').first()
        if len(images) >= int(max_upload_count.config_value):
            # flash(u'您已上传的图片数量到达系统限制,
            # 使用自己的七牛密钥后可以上传更多图片,请到个人中心设置。', 'danger')
            return redirect(url_for('.main_view'))
        upload_filename = upload_form.file_upload.data.filename
        ext = os.path.splitext(upload_filename)[1]
        # save file in tmp directory under a random name (keeps original extension)
        local_filename = random_str(32) + ext
        with open('tmp/'+local_filename, 'wb') as ff:
            ff.write(upload_form.file_upload.data.stream.read())
        # init qiniu
        current_user_id = session.get('user_id')
        users_info = UsersInfo.query.filter_by(user_id=current_user_id).first()
        access_key = users_info.qiniu_access_key
        secret_key = users_info.qiniu_secret_key
        bucket_name = users_info.qiniu_bucket_name
        domain = users_info.qiniu_domain
        q = Auth(access_key, secret_key)
        remote_filename = local_filename
        # token valid for one hour
        upload_token = q.upload_token(bucket_name, remote_filename, 3600)
        local_file = 'tmp/' + local_filename
        ret, info = put_file(upload_token, remote_filename, local_file)
        url = 'http://' + domain + '/' + ret['key']
        # url2 = request.host_url + session.get('username') + '/' + local_filename
        # insert into image table
        title = upload_form.title.data
        description = upload_form.description.data
        qiniu_have_account = session.get('qiniu_have_account')
        if qiniu_have_account is None:
            flash(u'系统错误!请稍后再试!', 'danger')
            return redirect(url_for('.main_view'))
        # use_default_config = 0: use system config
        # use_default_config = 1: use user's config
        images = Images(image_id='', title=title, description=description, filename=local_filename,
                        link=url, upload_time=str(int(time.time())), upload_user_id=current_user_id,
                        use_default_config=qiniu_have_account)
        db.session.add(images)
        db.session.commit()
        # write a 128x128 thumbnail under tmp/<username>/ for the gallery view
        user_dir = 'tmp/' + session.get('username') + '/'
        if not os.path.exists(user_dir):
            os.mkdir(user_dir)
        thumbnail_file = user_dir + local_filename
        im = Image.open(local_file)
        im.thumbnail((128, 128))
        im.save(thumbnail_file)
        # remove the full-size temp file once the thumbnail exists
        if os.path.exists(local_file):
            os.remove(local_file)
        flash(u'上传成功!链接为 '+url+',您可在个人中心中查看已上传的全部图片。', 'success')
        return redirect(url_for('.main_view'))
    return redirect(url_for('.main_view'))
#!/usr/bin/env python
'''Test that font.Text vertical alignment works.
Four labels will be aligned top, center, baseline and bottom.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
import unittest
from pyglet import gl
from pyglet import font
from . import base_text
class TEST_VALIGN(base_text.TextTestBase):
    """Visual test for font.Text vertical alignment.

    Renders one label per valign mode (top, center, baseline, bottom) side
    by side, plus a horizontal reference line at y=50 so the alignment of
    each label can be inspected by eye.
    """
    # empty string selects the default font
    font_name = ''
    window_size = 600, 200

    def render(self):
        """Create one label per alignment mode, laid out left to right."""
        fnt = font.load('', self.font_size)
        # full line height; currently unused but kept from the original
        h = fnt.ascent - fnt.descent
        w = self.window.width
        self.labels = []
        x = 0
        for align in 'top center baseline bottom'.split():
            # e.g. "TOPy" — the descender of 'y' makes baseline vs bottom visible
            label = align.upper() + 'y'
            self.labels.append(font.Text(fnt, label, x, 50, valign=align))
            x += self.labels[-1].width

    def draw(self):
        """Draw the y=50 reference line, then all labels on top of it."""
        gl.glColor3f(1, 1, 1)
        gl.glBegin(gl.GL_LINES)
        gl.glVertex2f(0, 50)
        gl.glVertex2f(self.window.width, 50)
        gl.glEnd()

        for label in self.labels:
            label.draw()
if __name__ == '__main__':
unittest.main() | unknown | codeparrot/codeparrot-clean | ||
{
"BITPOS": {
"summary": "Finds the first set (1) or clear (0) bit in a string.",
"complexity": "O(N)",
"group": "bitmap",
"since": "2.8.7",
"arity": -3,
"function": "bitposCommand",
"history": [
[
"7.0.0",
"Added the `BYTE|BIT` option."
]
],
"command_flags": [
"READONLY"
],
"acl_categories": [
"BITMAP"
],
"key_specs": [
{
"flags": [
"RO",
"ACCESS"
],
"begin_search": {
"index": {
"pos": 1
}
},
"find_keys": {
"range": {
"lastkey": 0,
"step": 1,
"limit": 0
}
}
}
],
"arguments": [
{
"name": "key",
"type": "key",
"key_spec_index": 0
},
{
"name": "bit",
"type": "integer"
},
{
"name": "range",
"type": "block",
"optional": true,
"arguments": [
{
"name": "start",
"type": "integer"
},
{
"name": "end-unit-block",
"type": "block",
"optional": true,
"arguments": [
{
"name": "end",
"type": "integer"
},
{
"name": "unit",
"type": "oneof",
"optional": true,
"since": "7.0.0",
"arguments": [
{
"name": "byte",
"type": "pure-token",
"token": "BYTE"
},
{
"name": "bit",
"type": "pure-token",
"token": "BIT"
}
]
}
]
}
]
}
],
"reply_schema": {
"oneOf": [
{
"description": "the position of the first bit set to 1 or 0 according to the request",
"type": "integer",
"minimum": 0
},
{
"description": "In case the `bit` argument is 1 and the string is empty or composed of just zero bytes",
"const": -1
}
]
}
}
} | json | github | https://github.com/redis/redis | src/commands/bitpos.json |
# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/interconnect/qcom,msm8996.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Qualcomm MSM8996 Network-On-Chip interconnect
maintainers:
- Konrad Dybcio <konradybcio@kernel.org>
description: |
The Qualcomm MSM8996 interconnect providers support adjusting the
bandwidth requirements between the various NoC fabrics.
properties:
compatible:
enum:
- qcom,msm8996-a0noc
- qcom,msm8996-a1noc
- qcom,msm8996-a2noc
- qcom,msm8996-bimc
- qcom,msm8996-cnoc
- qcom,msm8996-mnoc
- qcom,msm8996-pnoc
- qcom,msm8996-snoc
reg:
maxItems: 1
clock-names:
minItems: 1
maxItems: 3
clocks:
minItems: 1
maxItems: 3
power-domains:
maxItems: 1
required:
- compatible
- reg
unevaluatedProperties: false
allOf:
- $ref: qcom,rpm-common.yaml#
- if:
properties:
compatible:
const: qcom,msm8996-a0noc
then:
properties:
clocks:
items:
- description: Aggregate0 System NoC AXI Clock.
- description: Aggregate0 Config NoC AHB Clock.
- description: Aggregate0 NoC MPU Clock.
clock-names:
items:
- const: aggre0_snoc_axi
- const: aggre0_cnoc_ahb
- const: aggre0_noc_mpu_cfg
required:
- power-domains
- if:
properties:
compatible:
const: qcom,msm8996-mnoc
then:
properties:
clocks:
items:
- description: CPU-NoC High-performance Bus Clock.
clock-names:
const: iface
- if:
properties:
compatible:
const: qcom,msm8996-a2noc
then:
properties:
clocks:
items:
- description: Aggregate2 NoC UFS AXI Clock
- description: UFS AXI Clock
clock-names:
items:
- const: aggre2_ufs_axi
- const: ufs_axi
examples:
- |
#include <dt-bindings/clock/qcom,gcc-msm8996.h>
#include <dt-bindings/clock/qcom,mmcc-msm8996.h>
#include <dt-bindings/clock/qcom,rpmcc.h>
bimc: interconnect@408000 {
compatible = "qcom,msm8996-bimc";
reg = <0x00408000 0x5a000>;
#interconnect-cells = <1>;
};
a0noc: interconnect@543000 {
compatible = "qcom,msm8996-a0noc";
reg = <0x00543000 0x6000>;
#interconnect-cells = <1>;
clocks = <&gcc GCC_AGGRE0_SNOC_AXI_CLK>,
<&gcc GCC_AGGRE0_CNOC_AHB_CLK>,
<&gcc GCC_AGGRE0_NOC_MPU_CFG_AHB_CLK>;
clock-names = "aggre0_snoc_axi",
"aggre0_cnoc_ahb",
"aggre0_noc_mpu_cfg";
power-domains = <&gcc AGGRE0_NOC_GDSC>;
}; | unknown | github | https://github.com/torvalds/linux | Documentation/devicetree/bindings/interconnect/qcom,msm8996.yaml |
---
mapped_pages:
- https://www.elastic.co/guide/en/kibana/current/kuery-query.html
---
# {{kib}} Query Language [kuery-query]
:::{note}
This section provides detailed **reference information**.
Refer to [KQL overview](docs-content://explore-analyze/query-filter/languages/kql.md) in the **Explore and analyze** section for overview and conceptual information about the SQL query language.
:::
The {{kib}} Query Language (KQL) is a simple text-based query language for filtering data.
* KQL only filters data, and has no role in aggregating, transforming, or sorting data.
* KQL is not to be confused with the [Lucene query language](docs-content://explore-analyze/query-filter/languages/lucene-query-syntax.md), which has a different feature set.
Use KQL to filter documents where a value for a field exists, matches a given value, or is within a given range.
## Filter for documents where a field exists [_filter_for_documents_where_a_field_exists]
To filter documents for which an indexed value exists for a given field, use the `*` operator. For example, to filter for documents where the `http.request.method` field exists, use the following syntax:
```yaml
http.request.method: *
```
This checks for any indexed value, including an empty string.
## Filter for documents that match a value [_filter_for_documents_that_match_a_value]
Use KQL to filter for documents that match a specific number, text, date, or boolean value. For example, to filter for documents where the `http.request.method` is GET, use the following query:
```yaml
http.request.method: GET
```
The field parameter is optional. If not provided, all fields are searched for the given value. For example, to search all fields for “Hello”, use the following:
```yaml
Hello
```
When querying keyword, numeric, date, or boolean fields, the value must be an exact match, including punctuation and case. However, when querying text fields, {{es}} analyzes the value provided according to the [field’s mapping settings](docs-content://manage-data/data-store/text-analysis.md). For example, to search for documents where `http.request.body.content` (a `text` field) contains the text “null pointer”:
```yaml
http.request.body.content: null pointer
```
Because this is a `text` field, the order of these search terms does not matter, and even documents containing “pointer null” are returned. To search `text` fields where the terms are in the order provided, surround the value in quotation marks, as follows:
```yaml
http.request.body.content: "null pointer"
```
Certain characters must be escaped by a backslash (unless surrounded by quotes). For example, to search for documents where `http.request.referrer` is `https://<example-url>`, use either of the following queries:
```yaml
http.request.referrer: "https://<example-url>"
http.request.referrer: https\://<example-url>
```
You must escape following characters:
```yaml
\():<>"*
```
## Filter for documents within a range [_filter_for_documents_within_a_range]
To search documents that contain terms within a provided range, use KQL’s range syntax. For example, to search for all documents for which `http.response.bytes` is less than 10000, use the following syntax:
```yaml
http.response.bytes < 10000
```
To search for an inclusive range, combine multiple range queries. For example, to search for documents where `http.response.bytes` is greater than 10000 but less than or equal to 20000, use the following syntax:
```yaml
http.response.bytes > 10000 and http.response.bytes <= 20000
```
You can also use range syntax for string values, IP addresses, and timestamps. For example, to search for documents earlier than two weeks ago, use the following syntax:
```yaml
@timestamp < now-2w
```
For more examples on acceptable date formats, refer to [Date Math](/reference/elasticsearch/rest-apis/common-options.md#date-math).
## Filter for documents using wildcards [_filter_for_documents_using_wildcards]
To search for documents matching a pattern, use the wildcard syntax. For example, to find documents where `http.response.status_code` begins with a 4, use the following syntax:
```yaml
http.response.status_code: 4*
```
By default, leading wildcards are not allowed for performance reasons. You can modify this with the [`query:allowLeadingWildcards`](kibana://reference/advanced-settings.md#query-allowleadingwildcards) advanced setting.
::::{note}
Only `*` is currently supported. This matches zero or more characters.
::::
## Negating a query [_negating_a_query]
To negate or exclude a set of documents, use the `not` keyword (not case-sensitive). For example, to filter documents where the `http.request.method` is **not** GET, use the following query:
```yaml
NOT http.request.method: GET
```
## Combining multiple queries [_combining_multiple_queries]
To combine multiple queries, use the `and`/`or` keywords (not case-sensitive). For example, to find documents where the `http.request.method` is GET **or** the `http.response.status_code` is 400, use the following query:
```yaml
http.request.method: GET OR http.response.status_code: 400
```
Similarly, to find documents where the `http.request.method` is GET **and** the `http.response.status_code` is 400, use this query:
```yaml
http.request.method: GET AND http.response.status_code: 400
```
To specify precedence when combining multiple queries, use parentheses. For example, to find documents where the `http.request.method` is GET **and** the `http.response.status_code` is 200, **or** the `http.request.method` is POST **and** `http.response.status_code` is 400, use the following:
```yaml
(http.request.method: GET AND http.response.status_code: 200) OR
(http.request.method: POST AND http.response.status_code: 400)
```
You can also use parentheses for shorthand syntax when querying multiple values for the same field. For example, to find documents where the `http.request.method` is GET, POST, **or** DELETE, use the following:
```yaml
http.request.method: (GET OR POST OR DELETE)
```
## Matching multiple fields [_matching_multiple_fields]
Wildcards can also be used to query multiple fields. For example, to search for documents where any sub-field of `datastream` contains “logs”, use the following:
```yaml
datastream.*: logs
```
::::{note}
When using wildcards to query multiple fields, errors might occur if the fields are of different types. For example, if `datastream.*` matches both numeric and string fields, the above query will result in an error because numeric fields cannot be queried for string values.
::::
## Querying nested fields [_querying_nested_fields]
Querying [nested fields](/reference/elasticsearch/mapping-reference/nested.md) requires a special syntax. Consider the following document, where `user` is a nested field:
```yaml
{
"user" : [
{
"first" : "John",
"last" : "Smith"
},
{
"first" : "Alice",
"last" : "White"
}
]
}
```
To find documents where a single value inside the `user` array contains a first name of “Alice” and last name of “White”, use the following:
```yaml
user:{ first: "Alice" and last: "White" }
```
Because nested fields can be inside other nested fields, you must specify the full path of the nested field you want to query. For example, consider the following document where `user` and `names` are both nested fields:
```yaml
{
"user": [
{
"names": [
{
"first": "John",
"last": "Smith"
},
{
"first": "Alice",
"last": "White"
}
]
}
]
}
```
To find documents where a single value inside the `user.names` array contains a first name of “Alice” **and** last name of “White”, use the following:
```yaml
user.names:{ first: "Alice" and last: "White" }
``` | unknown | github | https://github.com/elastic/elasticsearch | docs/reference/query-languages/kql.md |
// Copyright 2025 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cache
import (
"fmt"
"testing"
"github.com/google/go-cmp/cmp"
"go.etcd.io/etcd/api/v3/mvccpb"
clientv3 "go.etcd.io/etcd/client/v3"
)
func TestPeekLatestAndOldest(t *testing.T) {
tests := []struct {
name string
capacity int
revs []int64
wantLatestRev int64
wantOldestRev int64
}{
{
name: "empty_buffer",
capacity: 4,
revs: nil,
wantLatestRev: 0,
wantOldestRev: 0,
},
{
name: "single_element",
capacity: 8,
revs: []int64{1},
wantLatestRev: 1,
wantOldestRev: 1,
},
{
name: "ascending_fill",
capacity: 4,
revs: []int64{1, 2, 3, 4},
wantLatestRev: 4,
wantOldestRev: 1,
},
{
name: "overwrite_when_full",
capacity: 3,
revs: []int64{5, 6, 7, 8},
wantLatestRev: 8,
wantOldestRev: 6,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
rb := newRingBuffer(tt.capacity, func(batch []*clientv3.Event) int64 { return batch[0].Kv.ModRevision })
for _, r := range tt.revs {
batch, err := makeEventBatch(r, "k", 1)
if err != nil {
t.Fatalf("makeEventBatch(%d, k, 1) failed: %v", r, err)
}
rb.Append(batch)
}
latestRev := rb.PeekLatest()
oldestRev := rb.PeekOldest()
gotLatestRev := latestRev
gotOldestRev := oldestRev
if tt.wantLatestRev != gotLatestRev {
t.Fatalf("PeekLatest()=%d, want=%d", gotLatestRev, tt.wantLatestRev)
}
if tt.wantOldestRev != gotOldestRev {
t.Fatalf("PeekOldest()=%d, want=%d", gotOldestRev, tt.wantOldestRev)
}
})
}
}
func TestIterationMethods(t *testing.T) {
type iterTestCase struct {
method iterMethod
pivot int64
wantIterRevisions []int64
}
tests := []struct {
name string
capacity int
setupRevisions []int64
cases []iterTestCase
}{
{
name: "empty_buffer",
capacity: 4,
setupRevisions: nil,
cases: []iterTestCase{
{ascendGTE, 0, []int64{}},
{ascendLT, 10, []int64{}},
{descendGT, 0, []int64{}},
{descendLTE, 10, []int64{}},
},
},
{
name: "basic_filtering",
capacity: 5,
setupRevisions: []int64{1, 2, 3},
cases: []iterTestCase{
{ascendGTE, 0, []int64{1, 2, 3}},
{ascendGTE, 2, []int64{2, 3}},
{ascendGTE, 100, []int64{}},
{ascendLT, 3, []int64{1, 2}},
{ascendLT, 1, []int64{}},
{ascendLT, 100, []int64{1, 2, 3}},
{descendGT, 1, []int64{3, 2}},
{descendGT, 3, []int64{}},
{descendGT, 0, []int64{3, 2, 1}},
{descendLTE, 2, []int64{2, 1}},
{descendLTE, 3, []int64{3, 2, 1}},
{descendLTE, 0, []int64{}},
},
},
{
name: "overflowed stores only entries within capacity",
capacity: 3,
setupRevisions: []int64{20, 21, 22, 23, 24}, // stored: 22, 23, 24
cases: []iterTestCase{
{ascendGTE, 23, []int64{23, 24}},
{ascendGTE, 0, []int64{22, 23, 24}},
{ascendLT, 23, []int64{22}},
{ascendLT, 25, []int64{22, 23, 24}},
{descendGT, 22, []int64{24, 23}},
{descendGT, 25, []int64{}},
{descendLTE, 23, []int64{23, 22}},
{descendLTE, 24, []int64{24, 23, 22}},
},
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
rb := setupRingBuffer(t, tt.capacity, tt.setupRevisions)
for _, tc := range tt.cases {
tc := tc
t.Run(fmt.Sprintf("%s_pivot_%d", tc.method, tc.pivot), func(t *testing.T) {
got := collectRevisions(rb, tc.method, tc.pivot)
if diff := cmp.Diff(tc.wantIterRevisions, got); diff != "" {
t.Fatalf("%s(%d) mismatch (-want +got):\n%s", tc.method, tc.pivot, diff)
}
})
}
})
}
}
func TestIterationWithBatching(t *testing.T) {
rb := newRingBuffer(6, func(batch []*clientv3.Event) int64 { return batch[0].Kv.ModRevision })
batchA := []*clientv3.Event{
{Kv: &mvccpb.KeyValue{Key: []byte("key-a"), ModRevision: 5}},
}
batchB := []*clientv3.Event{
{Kv: &mvccpb.KeyValue{Key: []byte("key-b-1"), ModRevision: 10}},
{Kv: &mvccpb.KeyValue{Key: []byte("key-b-2"), ModRevision: 10}},
{Kv: &mvccpb.KeyValue{Key: []byte("key-b-3"), ModRevision: 10}},
}
batchC := []*clientv3.Event{
{Kv: &mvccpb.KeyValue{Key: []byte("key-c"), ModRevision: 12}},
}
rb.Append(batchA)
rb.Append(batchB)
rb.Append(batchC)
tests := []struct {
name string
method iterMethod
pivot int64
want [][]*clientv3.Event
}{
{
name: "ascending_gte_includes_batched_revision",
method: ascendGTE,
pivot: 10,
want: [][]*clientv3.Event{
{
{Kv: &mvccpb.KeyValue{Key: []byte("key-b-1"), ModRevision: 10}},
{Kv: &mvccpb.KeyValue{Key: []byte("key-b-2"), ModRevision: 10}},
{Kv: &mvccpb.KeyValue{Key: []byte("key-b-3"), ModRevision: 10}},
},
{
{Kv: &mvccpb.KeyValue{Key: []byte("key-c"), ModRevision: 12}},
},
},
},
{
name: "ascending_lt_stops_before_batched_revision",
method: ascendLT,
pivot: 10,
want: [][]*clientv3.Event{
{
{Kv: &mvccpb.KeyValue{Key: []byte("key-a"), ModRevision: 5}},
},
},
},
{
name: "all_revisions_with_proper_batch_sizes",
method: ascendGTE,
pivot: 0,
want: [][]*clientv3.Event{
{
{Kv: &mvccpb.KeyValue{Key: []byte("key-a"), ModRevision: 5}},
},
{
{Kv: &mvccpb.KeyValue{Key: []byte("key-b-1"), ModRevision: 10}},
{Kv: &mvccpb.KeyValue{Key: []byte("key-b-2"), ModRevision: 10}},
{Kv: &mvccpb.KeyValue{Key: []byte("key-b-3"), ModRevision: 10}},
},
{
{Kv: &mvccpb.KeyValue{Key: []byte("key-c"), ModRevision: 12}},
},
},
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
var got [][]*clientv3.Event
rb.iterate(tt.method, tt.pivot, func(rev int64, events []*clientv3.Event) bool {
got = append(got, events)
return true
})
if diff := cmp.Diff(tt.want, got); diff != "" {
t.Fatalf("Events mismatch (-want +got):\n%s", diff)
}
})
}
}
func TestIterationEarlyStop(t *testing.T) {
rb := setupRingBuffer(t, 5, []int64{5, 10, 15, 20})
tests := []struct {
name string
method iterMethod
pivot int64
stopAfter int
want []int64
}{
{
name: "find_first_match_ascending",
method: ascendGTE,
pivot: 10,
stopAfter: 1,
want: []int64{10},
},
{
name: "find_first_two_ascending_lt",
method: ascendLT,
pivot: 20,
stopAfter: 2,
want: []int64{5, 10},
},
{
name: "find_first_two_descending_gt",
method: descendGT,
pivot: 5,
stopAfter: 2,
want: []int64{20, 15},
},
{
name: "find_first_match_descending_lte",
method: descendLTE,
pivot: 15,
stopAfter: 1,
want: []int64{15},
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
var collected []int64
callCount := 0
rb.iterate(tt.method, tt.pivot, func(rev int64, events []*clientv3.Event) bool {
collected = append(collected, rev)
callCount++
shouldContinue := callCount < tt.stopAfter
if !shouldContinue {
t.Logf("Stopping early after %d items (callback returned false)", callCount)
}
return shouldContinue
})
if diff := cmp.Diff(tt.want, collected); diff != "" {
t.Fatalf("Early stop failed.\nExpected: \nDiff (-want +got):\n%s", diff)
}
if callCount != tt.stopAfter {
t.Fatalf("Expected exactly %d callback calls, got %d", tt.stopAfter, callCount)
}
t.Logf("Successfully stopped early: collected %v after %d callbacks",
collected, callCount)
})
}
}
type iterMethod string
const (
ascendGTE iterMethod = "AscendGreaterOrEqual"
ascendLT iterMethod = "AscendLessThan"
descendGT iterMethod = "DescendGreaterThan"
descendLTE iterMethod = "DescendLessOrEqual"
)
func (r *ringBuffer[T]) iterate(method iterMethod, pivot int64, fn IterFunc[T]) {
switch method {
case ascendGTE:
r.AscendGreaterOrEqual(pivot, fn)
case ascendLT:
r.AscendLessThan(pivot, fn)
case descendGT:
r.DescendGreaterThan(pivot, fn)
case descendLTE:
r.DescendLessOrEqual(pivot, fn)
default:
panic(fmt.Sprintf("unknown iteration method: %s", method))
}
}
func TestAtomicOrdered(t *testing.T) {
tests := []struct {
name string
capacity int
inputs []struct {
rev int64
key string
size int
}
wantRev []int64
wantSize []int
}{
{
name: "unfiltered",
capacity: 5,
inputs: []struct {
rev int64
key string
size int
}{
{5, "a", 1},
{10, "b", 3},
{15, "c", 7},
{20, "d", 11},
},
wantRev: []int64{5, 10, 15, 20},
wantSize: []int{1, 3, 7, 11},
},
{
name: "across_wrap",
capacity: 3,
inputs: []struct {
rev int64
key string
size int
}{
{1, "a", 2},
{2, "b", 1},
{3, "c", 3},
{4, "d", 7},
},
wantRev: []int64{2, 3, 4},
wantSize: []int{1, 3, 7},
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
rb := newRingBuffer(tt.capacity, func(batch []*clientv3.Event) int64 { return batch[0].Kv.ModRevision })
for _, in := range tt.inputs {
batch, err := makeEventBatch(in.rev, in.key, in.size)
if err != nil {
t.Fatalf("makeEventBatch(%d, k, 1) failed: %v", in.rev, err)
}
rb.Append(batch)
}
gotRevs := []int64{}
var gotSizes []int
rb.AscendGreaterOrEqual(0, func(rev int64, events []*clientv3.Event) bool {
gotRevs = append(gotRevs, rev)
gotSizes = append(gotSizes, len(events))
return true
})
if len(gotRevs) != len(tt.wantRev) {
t.Fatalf("len(got) = %d, want %d", len(gotRevs), len(tt.wantRev))
}
for i := range gotRevs {
if gotRevs[i] != tt.wantRev[i] {
t.Errorf("at idx %d: rev = %d, want %d", i, gotRevs[i], tt.wantRev[i])
}
if gotSizes[i] != tt.wantSize[i] {
t.Errorf("at rev %d: events.len = %d, want %d", gotRevs[i], gotSizes[i], tt.wantSize[i])
}
}
})
}
}
func TestRebaseHistory(t *testing.T) {
tests := []struct {
name string
revs []int64
}{
{
name: "rebase_empty_buffer",
revs: nil,
},
{
name: "rebase_after_data",
revs: []int64{7, 8, 9},
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
rb := newRingBuffer(4, func(batch []*clientv3.Event) int64 { return batch[0].Kv.ModRevision })
for _, r := range tt.revs {
batch, err := makeEventBatch(r, "k", 1)
if err != nil {
t.Fatalf("makeEventBatch(%d, k, 1) failed: %v", r, err)
}
rb.Append(batch)
}
rb.RebaseHistory()
oldestRev := rb.PeekOldest()
latestRev := rb.PeekLatest()
if oldestRev != 0 {
t.Fatalf("PeekOldest()=%d, want=%d", oldestRev, 0)
}
if latestRev != 0 {
t.Fatalf("PeekLatest()=%d, want=%d", latestRev, 0)
}
gotRevs := []int64{}
rb.AscendGreaterOrEqual(0, func(rev int64, events []*clientv3.Event) bool {
gotRevs = append(gotRevs, rev)
return true
})
if len(gotRevs) != 0 {
t.Fatalf("AscendGreaterOrEqual() len(events)=%d, want=%d", len(gotRevs), 0)
}
})
}
}
func TestFull(t *testing.T) {
tests := []struct {
name string
capacity int
numAppends int
expectedFull bool
}{
{
name: "empty_buffer",
capacity: 3,
numAppends: 0,
expectedFull: false,
},
{
name: "partially_filled",
capacity: 5,
numAppends: 3,
expectedFull: false,
},
{
name: "exactly_at_capacity",
capacity: 3,
numAppends: 3,
expectedFull: true,
},
{
name: "beyond_capacity_wrapping",
capacity: 3,
numAppends: 5,
expectedFull: true,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
rb := newRingBuffer(tt.capacity, func(batch []*clientv3.Event) int64 { return batch[0].Kv.ModRevision })
for i := 1; i <= tt.numAppends; i++ {
batch, err := makeEventBatch(int64(i), "k", 1)
if err != nil {
t.Fatalf("makeEventBatch(%d, k, 1) failed: %v", i, err)
}
rb.Append(batch)
}
if got := rb.full(); got != tt.expectedFull {
t.Fatalf("full()=%t, want=%t (capacity=%d, appends=%d)",
got, tt.expectedFull, tt.capacity, tt.numAppends)
}
})
}
}
func setupRingBuffer(t *testing.T, capacity int, revs []int64) *ringBuffer[[]*clientv3.Event] {
rb := newRingBuffer(capacity, func(batch []*clientv3.Event) int64 { return batch[0].Kv.ModRevision })
for _, r := range revs {
batch, err := makeEventBatch(r, "key", 1)
if err != nil {
t.Fatalf("makeEventBatch(%d, %s, %d) failed: %v", r, "key", 1, err)
}
rb.Append(batch)
}
return rb
}
func collectRevisions(rb *ringBuffer[[]*clientv3.Event], method iterMethod, pivot int64) []int64 {
revs := []int64{}
rb.iterate(method, pivot, func(rev int64, events []*clientv3.Event) bool {
revs = append(revs, rev)
return true
})
return revs
}
func makeEventBatch(rev int64, key string, batchSize int) ([]*clientv3.Event, error) {
if batchSize < 0 {
return nil, fmt.Errorf("invalid batchSize %d", batchSize)
}
events := make([]*clientv3.Event, batchSize)
for i := range events {
events[i] = &clientv3.Event{
Kv: &mvccpb.KeyValue{
Key: []byte(fmt.Sprintf("%s-%d", key, i)),
ModRevision: rev,
},
}
}
return events, nil
} | go | github | https://github.com/etcd-io/etcd | cache/ringbuffer_test.go |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import functools
from typing import TYPE_CHECKING
import attrs
from airflow.models.asset import expand_alias_to_assets, resolve_ref_to_asset
from airflow.serialization.definitions.assets import (
SerializedAsset,
SerializedAssetAlias,
SerializedAssetBase,
SerializedAssetBooleanCondition,
SerializedAssetRef,
SerializedAssetUniqueKey,
)
if TYPE_CHECKING:
from sqlalchemy.orm import Session
@attrs.define
class AssetEvaluator:
"""Evaluates whether an asset-like object has been satisfied."""
_session: Session
def _resolve_asset_ref(self, o: SerializedAssetRef) -> SerializedAsset | None:
asset = resolve_ref_to_asset(**attrs.asdict(o), session=self._session)
return asset.to_serialized() if asset else None
def _resolve_asset_alias(self, o: SerializedAssetAlias) -> list[SerializedAsset]:
asset_models = expand_alias_to_assets(o.name, session=self._session)
return [m.to_serialized() for m in asset_models]
@functools.singledispatchmethod
def run(self, o: SerializedAssetBase, statuses: dict[SerializedAssetUniqueKey, bool]) -> bool:
raise NotImplementedError(f"can not evaluate {o!r}")
@run.register
def _(self, o: SerializedAsset, statuses: dict[SerializedAssetUniqueKey, bool]) -> bool:
return statuses.get(SerializedAssetUniqueKey.from_asset(o), False)
@run.register
def _(self, o: SerializedAssetRef, statuses: dict[SerializedAssetUniqueKey, bool]) -> bool:
if asset := self._resolve_asset_ref(o):
return self.run(asset, statuses)
return False
@run.register
def _(self, o: SerializedAssetAlias, statuses: dict[SerializedAssetUniqueKey, bool]) -> bool:
return any(self.run(x, statuses) for x in self._resolve_asset_alias(o))
@run.register
def _(self, o: SerializedAssetBooleanCondition, statuses: dict[SerializedAssetUniqueKey, bool]) -> bool:
return type(o).agg_func(self.run(x, statuses) for x in o.objects) | python | github | https://github.com/apache/airflow | airflow-core/src/airflow/assets/evaluation.py |
#!/usr/bin/python
# BSD LICENSE
#
# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# collection of static data
import sys
# keycode constants
CTRL_A = chr(1)
CTRL_B = chr(2)
CTRL_C = chr(3)
CTRL_D = chr(4)
CTRL_E = chr(5)
CTRL_F = chr(6)
CTRL_K = chr(11)
CTRL_L = chr(12)
CTRL_N = chr(14)
CTRL_P = chr(16)
CTRL_W = chr(23)
CTRL_Y = chr(25)
ALT_B = chr(27) + chr(98)
ALT_D = chr(27) + chr(100)
ALT_F = chr(27) + chr(102)
ALT_BKSPACE = chr(27) + chr(127)
DEL = chr(27) + chr(91) + chr(51) + chr(126)
TAB = chr(9)
HELP = chr(63)
BKSPACE = chr(127)
RIGHT = chr(27) + chr(91) + chr(67)
DOWN = chr(27) + chr(91) + chr(66)
LEFT = chr(27) + chr(91) + chr(68)
UP = chr(27) + chr(91) + chr(65)
ENTER2 = '\r'
ENTER = '\n'
# expected result constants
NOT_FOUND = "Command not found"
BAD_ARG = "Bad arguments"
AMBIG = "Ambiguous command"
CMD1 = "Command 1 parsed!"
CMD2 = "Command 2 parsed!"
SINGLE = "Single word command parsed!"
SINGLE_LONG = "Single long word command parsed!"
AUTO1 = "Autocomplete command 1 parsed!"
AUTO2 = "Autocomplete command 2 parsed!"
# misc defines
CMD_QUIT = "quit"
CMD_GET_BUFSIZE = "get_history_bufsize"
BUFSIZE_TEMPLATE = "History buffer size: "
PROMPT = "CMDLINE_TEST>>"
# test defines
# each test tests progressively diverse set of keys. this way for example
# if we want to use some key sequence in the test, we first need to test
# that it itself does what it is expected to do. Most of the tests are
# designed that way.
#
# example: "arrows & delete test 1". we enter a partially valid command,
# then move 3 chars left and use delete three times. this way we get to
# know that "delete", "left" and "ctrl+B" all work (because if any of
# them fails, the whole test will fail and next tests won't be run).
#
# each test consists of name, character sequence to send to child,
# and expected output (if any).
tests = [
# test basic commands
{"Name" : "command test 1",
"Sequence" : "ambiguous first" + ENTER,
"Result" : CMD1},
{"Name" : "command test 2",
"Sequence" : "ambiguous second" + ENTER,
"Result" : CMD2},
{"Name" : "command test 3",
"Sequence" : "ambiguous ambiguous" + ENTER,
"Result" : AMBIG},
{"Name" : "command test 4",
"Sequence" : "ambiguous ambiguous2" + ENTER,
"Result" : AMBIG},
{"Name" : "invalid command test 1",
"Sequence" : "ambiguous invalid" + ENTER,
"Result" : BAD_ARG},
# test invalid commands
{"Name" : "invalid command test 2",
"Sequence" : "invalid" + ENTER,
"Result" : NOT_FOUND},
{"Name" : "invalid command test 3",
"Sequence" : "ambiguousinvalid" + ENTER2,
"Result" : NOT_FOUND},
# test arrows and deletes
{"Name" : "arrows & delete test 1",
"Sequence" : "singlebad" + LEFT*2 + CTRL_B + DEL*3 + ENTER,
"Result" : SINGLE},
{"Name" : "arrows & delete test 2",
"Sequence" : "singlebad" + LEFT*5 + RIGHT + CTRL_F + DEL*3 + ENTER,
"Result" : SINGLE},
# test backspace
{"Name" : "backspace test",
"Sequence" : "singlebad" + BKSPACE*3 + ENTER,
"Result" : SINGLE},
# test goto left and goto right
{"Name" : "goto left test",
"Sequence" : "biguous first" + CTRL_A + "am" + ENTER,
"Result" : CMD1},
{"Name" : "goto right test",
"Sequence" : "biguous fir" + CTRL_A + "am" + CTRL_E + "st" + ENTER,
"Result" : CMD1},
# test goto words
{"Name" : "goto left word test",
"Sequence" : "ambiguous st" + ALT_B + "fir" + ENTER,
"Result" : CMD1},
{"Name" : "goto right word test",
"Sequence" : "ambig first" + CTRL_A + ALT_F + "uous" + ENTER,
"Result" : CMD1},
# test removing words
{"Name" : "remove left word 1",
"Sequence" : "single invalid" + CTRL_W + ENTER,
"Result" : SINGLE},
{"Name" : "remove left word 2",
"Sequence" : "single invalid" + ALT_BKSPACE + ENTER,
"Result" : SINGLE},
{"Name" : "remove right word",
"Sequence" : "single invalid" + ALT_B + ALT_D + ENTER,
"Result" : SINGLE},
# test kill buffer (copy and paste)
{"Name" : "killbuffer test 1",
"Sequence" : "ambiguous" + CTRL_A + CTRL_K + " first" + CTRL_A + CTRL_Y + ENTER,
"Result" : CMD1},
{"Name" : "killbuffer test 2",
"Sequence" : "ambiguous" + CTRL_A + CTRL_K + CTRL_Y*26 + ENTER,
"Result" : NOT_FOUND},
# test newline
{"Name" : "newline test",
"Sequence" : "invalid" + CTRL_C + "single" + ENTER,
"Result" : SINGLE},
# test redisplay (nothing should really happen)
{"Name" : "redisplay test",
"Sequence" : "single" + CTRL_L + ENTER,
"Result" : SINGLE},
# test autocomplete
{"Name" : "autocomplete test 1",
"Sequence" : "si" + TAB + ENTER,
"Result" : SINGLE},
{"Name" : "autocomplete test 2",
"Sequence" : "si" + TAB + "_" + TAB + ENTER,
"Result" : SINGLE_LONG},
{"Name" : "autocomplete test 3",
"Sequence" : "in" + TAB + ENTER,
"Result" : NOT_FOUND},
{"Name" : "autocomplete test 4",
"Sequence" : "am" + TAB + ENTER,
"Result" : BAD_ARG},
{"Name" : "autocomplete test 5",
"Sequence" : "am" + TAB + "fir" + TAB + ENTER,
"Result" : CMD1},
{"Name" : "autocomplete test 6",
"Sequence" : "am" + TAB + "fir" + TAB + TAB + ENTER,
"Result" : CMD1},
{"Name" : "autocomplete test 7",
"Sequence" : "am" + TAB + "fir" + TAB + " " + TAB + ENTER,
"Result" : CMD1},
{"Name" : "autocomplete test 8",
"Sequence" : "am" + TAB + " am" + TAB + " " + ENTER,
"Result" : AMBIG},
{"Name" : "autocomplete test 9",
"Sequence" : "am" + TAB + "inv" + TAB + ENTER,
"Result" : BAD_ARG},
{"Name" : "autocomplete test 10",
"Sequence" : "au" + TAB + ENTER,
"Result" : NOT_FOUND},
{"Name" : "autocomplete test 11",
"Sequence" : "au" + TAB + "1" + ENTER,
"Result" : AUTO1},
{"Name" : "autocomplete test 12",
"Sequence" : "au" + TAB + "2" + ENTER,
"Result" : AUTO2},
{"Name" : "autocomplete test 13",
"Sequence" : "au" + TAB + "2" + TAB + ENTER,
"Result" : AUTO2},
{"Name" : "autocomplete test 14",
"Sequence" : "au" + TAB + "2 " + TAB + ENTER,
"Result" : AUTO2},
{"Name" : "autocomplete test 15",
"Sequence" : "24" + TAB + ENTER,
"Result" : "24"},
# test history
{"Name" : "history test 1",
"Sequence" : "invalid" + ENTER + "single" + ENTER + "invalid" + ENTER + UP + CTRL_P + ENTER,
"Result" : SINGLE},
{"Name" : "history test 2",
"Sequence" : "invalid" + ENTER + "ambiguous first" + ENTER + "invalid" + ENTER + "single" + ENTER + UP * 3 + CTRL_N + DOWN + ENTER,
"Result" : SINGLE},
#
# tests that improve coverage
#
# empty space tests
{"Name" : "empty space test 1",
"Sequence" : RIGHT + LEFT + CTRL_B + CTRL_F + ENTER,
"Result" : PROMPT},
{"Name" : "empty space test 2",
"Sequence" : BKSPACE + ENTER,
"Result" : PROMPT},
{"Name" : "empty space test 3",
"Sequence" : CTRL_E*2 + CTRL_A*2 + ENTER,
"Result" : PROMPT},
{"Name" : "empty space test 4",
"Sequence" : ALT_F*2 + ALT_B*2 + ENTER,
"Result" : PROMPT},
{"Name" : "empty space test 5",
"Sequence" : " " + CTRL_E*2 + CTRL_A*2 + ENTER,
"Result" : PROMPT},
{"Name" : "empty space test 6",
"Sequence" : " " + CTRL_A + ALT_F*2 + ALT_B*2 + ENTER,
"Result" : PROMPT},
{"Name" : "empty space test 7",
"Sequence" : " " + CTRL_A + CTRL_D + CTRL_E + CTRL_D + ENTER,
"Result" : PROMPT},
{"Name" : "empty space test 8",
"Sequence" : " space" + CTRL_W*2 + ENTER,
"Result" : PROMPT},
{"Name" : "empty space test 9",
"Sequence" : " space" + ALT_BKSPACE*2 + ENTER,
"Result" : PROMPT},
{"Name" : "empty space test 10",
"Sequence" : " space " + CTRL_A + ALT_D*3 + ENTER,
"Result" : PROMPT},
# non-printable char tests
{"Name" : "non-printable test 1",
"Sequence" : chr(27) + chr(47) + ENTER,
"Result" : PROMPT},
{"Name" : "non-printable test 2",
"Sequence" : chr(27) + chr(128) + ENTER*7,
"Result" : PROMPT},
{"Name" : "non-printable test 3",
"Sequence" : chr(27) + chr(91) + chr(127) + ENTER*6,
"Result" : PROMPT},
# miscellaneous tests
{"Name" : "misc test 1",
"Sequence" : ENTER,
"Result" : PROMPT},
{"Name" : "misc test 2",
"Sequence" : "single #comment" + ENTER,
"Result" : SINGLE},
{"Name" : "misc test 3",
"Sequence" : "#empty line" + ENTER,
"Result" : PROMPT},
{"Name" : "misc test 4",
"Sequence" : " single " + ENTER,
"Result" : SINGLE},
{"Name" : "misc test 5",
"Sequence" : "single#" + ENTER,
"Result" : SINGLE},
{"Name" : "misc test 6",
"Sequence" : 'a' * 257 + ENTER,
"Result" : NOT_FOUND},
{"Name" : "misc test 7",
"Sequence" : "clear_history" + UP*5 + DOWN*5 + ENTER,
"Result" : PROMPT},
{"Name" : "misc test 8",
"Sequence" : "a" + HELP + CTRL_C,
"Result" : PROMPT},
{"Name" : "misc test 9",
"Sequence" : CTRL_D*3,
"Result" : None},
] | unknown | codeparrot/codeparrot-clean | ||
//===--- IDETypeChecking.cpp ----------------------------------------------===//
//
// This source file is part of the Swift.org open source project
//
// Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See https://swift.org/LICENSE.txt for license information
// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
//
//===----------------------------------------------------------------------===//
#include "swift/Sema/IDETypeChecking.h"
#include "ReadyForTypeCheckingCallback.h"
#include "swift/AST/ASTContext.h"
#include "swift/AST/ASTDemangler.h"
#include "swift/AST/ASTPrinter.h"
#include "swift/AST/Attr.h"
#include "swift/AST/ConformanceLookup.h"
#include "swift/AST/Decl.h"
#include "swift/AST/Expr.h"
#include "swift/AST/GenericEnvironment.h"
#include "swift/AST/GenericSignature.h"
#include "swift/AST/Identifier.h"
#include "swift/AST/Module.h"
#include "swift/AST/NameLookup.h"
#include "swift/AST/ProtocolConformance.h"
#include "swift/AST/Requirement.h"
#include "swift/AST/SourceFile.h"
#include "swift/AST/Types.h"
#include "swift/Basic/Assertions.h"
#include "swift/IDE/IDERequests.h"
#include "swift/IDE/SourceEntityWalker.h"
#include "swift/Parse/Lexer.h"
#include "swift/Sema/IDETypeCheckingRequests.h"
#include "swift/Subsystems.h"
#include "llvm/ADT/SmallVector.h"
using namespace swift;
using namespace ide;
/// Convenience overload: collect the top-level display decls of \p M by
/// delegating to the callback-based overload with a fetcher that simply asks
/// the module for its display decls (optionally recursing into submodules).
void swift::getTopLevelDeclsForDisplay(ModuleDecl *M,
                                       SmallVectorImpl<Decl *> &Results,
                                       bool Recursive) {
  getTopLevelDeclsForDisplay(
      M, Results,
      [Recursive](ModuleDecl *Mod, SmallVectorImpl<Decl *> &Out) {
        Mod->getDisplayDecls(Out, Recursive);
      });
}
/// Collect the top-level decls of \p M for display, forcing synthesis of any
/// Sendable-derived extensions first.
///
/// The decls are fetched twice on purpose: the first fetch is used to walk
/// the nominal types and trigger Sendable conformance lookup (which may
/// synthesize new extensions as a side effect); the results of that fetch are
/// then discarded and the module is queried again so the newly synthesized
/// extensions are included.
void swift::getTopLevelDeclsForDisplay(
    ModuleDecl *M, SmallVectorImpl<Decl *> &Results,
    llvm::function_ref<void(ModuleDecl *, SmallVectorImpl<Decl *> &)>
        getDisplayDeclsForModule) {
  // Remember how many entries the caller already had so we can roll back to
  // exactly that point before the second fetch.
  auto startingSize = Results.size();
  getDisplayDeclsForModule(M, Results);

  // Force Sendable on all public types, which might synthesize some extensions.
  // FIXME: We can remove this if @_nonSendable stops creating extensions.
  for (auto result : Results) {
    if (auto NTD = dyn_cast<NominalTypeDecl>(result)) {
      // Restrict this logic to public and package types. Non-public types
      // may refer to implementation details and fail at deserialization.
      auto accessScope = NTD->getFormalAccessScope();
      if (!M->isMainModule() && !accessScope.isPublic() &&
          !accessScope.isPackage())
        continue;
      auto proto = M->getASTContext().getProtocol(KnownProtocolKind::Sendable);
      if (proto)
        // The lookup is performed solely for its side effect (extension
        // synthesis); the resulting conformance is intentionally discarded.
        (void) lookupConformance(NTD->getDeclaredInterfaceType(), proto);
    }
  }

  // Remove what we fetched and fetch again, possibly now with additional
  // extensions.
  Results.resize(startingSize);
  getDisplayDeclsForModule(M, Results);
}
/// When printing a synthesized extension, decide whether \p D would actually
/// be the member chosen by overload resolution on the transform context's
/// base type. Returns true (i.e. "print it") in every case except when a
/// *different* best overload exists for the same name.
static bool shouldPrintAsFavorable(const Decl *D, const PrintOptions &Options) {
  // Only applies while printing a synthesized extension with a transform
  // context; everything else is always favorable.
  if (!Options.TransformContext ||
      !isa<ExtensionDecl>(D->getDeclContext()) ||
      !Options.TransformContext->isPrintingSynthesizedExtension())
    return true;
  auto DC = Options.TransformContext->getDeclContext();
  auto BaseTy = Options.TransformContext->getBaseType();
  const auto *FD = dyn_cast<FuncDecl>(D);
  if (!FD)
    return true;
  // Don't check overload choices for accessor decls.
  if (isa<AccessorDecl>(FD))
    return true;
  // Unfavorable only if resolution finds a best overload that is not D.
  ResolvedMemberResult Result =
      resolveValueMember(*DC, BaseTy, FD->getEffectiveFullName());
  return !(Result.hasBestOverload() && Result.getBestOverload() != D);
}
/// A printability checker used for module-interface printing that, in
/// addition to the base checks, hides function overloads that would lose
/// overload resolution against the transform context's base type
/// (see shouldPrintAsFavorable above).
class ModulePrinterPrintableChecker: public ShouldPrintChecker {
  bool shouldPrint(const Decl *D, const PrintOptions &Options) override {
    if (!shouldPrintAsFavorable(D, Options))
      return false;
    return ShouldPrintChecker::shouldPrint(D, Options);
  }
};
/// Printing options for a module interface: the standard interface options,
/// plus a checker that suppresses unfavored overloads in synthesized
/// extensions.
PrintOptions PrintOptions::printModuleInterface(bool printFullConvention) {
  auto opts = printInterface(printFullConvention);
  opts.CurrentPrintabilityChecker.reset(new ModulePrinterPrintableChecker());
  return opts;
}
/// Printing options for the interface of a specific type \p T. Extensions
/// coming from conforming protocols are printed, and extension contents are
/// folded into the type's own member list only when the extension actually
/// applies to \p T.
PrintOptions PrintOptions::printTypeInterface(Type T,
                                              bool printFullConvention) {
  PrintOptions result = printModuleInterface(printFullConvention);
  result.PrintExtensionFromConformingProtocols = true;
  result.TransformContext = TypeTransformContext(T);
  // Captures T by value; the applicability check runs lazily per extension.
  result.printExtensionContentAsMembers = [T](const ExtensionDecl *ED) {
    return isExtensionApplied(
        T->getNominalOrBoundGenericNominal()->getDeclContext(), T, ED);
  };
  result.CurrentPrintabilityChecker.reset(new ModulePrinterPrintableChecker());
  return result;
}
/// Printing options for documentation purposes: based on the module-interface
/// options, but including unavailable decls, omitting access control,
/// @available attributes and doc comments, and always printing both argument
/// and parameter names.
PrintOptions PrintOptions::printDocInterface() {
  auto opts = PrintOptions::printModuleInterface(/*printFullConvention*/ false);
  opts.SkipUnavailable = false;
  opts.PrintAccess = false;
  opts.PrintDocumentationComments = false;
  opts.ExcludeAttrList.push_back(DeclAttrKind::Available);
  opts.ArgAndParamPrinting = PrintOptions::ArgAndParamPrintingMode::BothAlways;
  opts.PrintFunctionRepresentationAttrs =
      PrintOptions::FunctionRepresentationMode::None;
  return opts;
}
/// Implementation state for SynthesizedExtensionAnalyzer: collects the
/// (real and synthesized) extensions that apply to a target nominal type and
/// partitions them into merge groups with identical generic requirements.
struct SynthesizedExtensionAnalyzer::Implementation {
  /// Returns false only when \p D is a function whose name resolves to a
  /// *different* best overload on \p Target (i.e. the member is shadowed).
  static bool isMemberFavored(const NominalTypeDecl* Target, const Decl* D) {
    DeclContext* DC = Target->getInnermostDeclContext();
    Type BaseTy = Target->getDeclaredTypeInContext();
    const auto *FD = dyn_cast<FuncDecl>(D);
    if (!FD)
      return true;
    ResolvedMemberResult Result = resolveValueMember(*DC, BaseTy,
                                                     FD->getEffectiveFullName());
    return !(Result.hasBestOverload() && Result.getBestOverload() != D);
  }

  /// An extension is favored if at least one of its members is favored.
  static bool isExtensionFavored(const NominalTypeDecl* Target,
                                 const ExtensionDecl *ED) {
    return std::find_if(ED->getMembers().begin(), ED->getMembers().end(),
                        [&](DeclIterator It) {
      return isMemberFavored(Target, *It);}) != ED->getMembers().end();
  }

  /// One applicable extension; Ext stays null when the extension turned out
  /// not to apply (operator bool reflects this).
  struct SynthesizedExtensionInfo {
    ExtensionDecl *Ext = nullptr;
    bool IsSynthesized;
    // The extension through which Ext applies (for protocol extensions made
    // applicable by a conditional conformance), if any.
    ExtensionDecl *EnablingExt = nullptr;
    operator bool() const { return Ext; }
    SynthesizedExtensionInfo(bool IsSynthesized = false,
                             ExtensionDecl *EnablingExt = nullptr)
        : IsSynthesized(IsSynthesized), EnablingExt(EnablingExt) {}
    bool operator< (const SynthesizedExtensionInfo& Rhs) const {
      // Synthesized are always after actual ones.
      if (IsSynthesized != Rhs.IsSynthesized)
        return !IsSynthesized;
      // If not from the same file, sort by file name.
      if (auto LFile = Ext->getSourceFileName()) {
        if (auto RFile = Rhs.Ext->getSourceFileName()) {
          int Result = LFile.value().compare(RFile.value());
          if (Result != 0)
            return Result < 0;
        }
      }
      // Otherwise, sort by source order.
      if (auto LeftOrder = Ext->getSourceOrder()) {
        if (auto RightOrder = Rhs.Ext->getSourceOrder()) {
          return LeftOrder.value() < RightOrder.value();
        }
      }
      return false;
    }
  };

  /// Everything that decides whether two extensions can be merged when
  /// printed: comments/attributes, inheritance clauses, and the set of
  /// generic requirements.
  struct ExtensionMergeInfo {
    struct Requirement {
      swift::Requirement Req;
      bool operator<(const Requirement& Rhs) const {
        // Compare kinds in *signed* arithmetic. The previous implementation
        // computed `unsigned(...) - unsigned(...)` and tested `result < 0`,
        // which is always false for an unsigned value, so requirements of
        // different kinds compared as mutually "not less" and std::set
        // silently collapsed them into one element.
        if (int result = int(Req.getKind()) - int(Rhs.Req.getKind())) {
          return result < 0;
        } else if (!Req.getFirstType()->isEqual(Rhs.Req.getFirstType())) {
          return (Req.getFirstType()->getCanonicalType() <
                  Rhs.Req.getFirstType()->getCanonicalType());
        } else if (Req.getKind() != RequirementKind::Layout) {
          // Layout requirements have no second type to compare.
          return (Req.getSecondType()->getCanonicalType() <
                  Rhs.Req.getSecondType()->getCanonicalType());
        }
        return false;
      }
      bool operator== (const Requirement& Rhs) const {
        return Req.getCanonical() == Rhs.Req.getCanonical();
      }
    };
    bool Unmergable;
    unsigned InheritsCount;
    std::set<Requirement> Requirements;
    void addRequirement(swift::Requirement Req) {
      Requirements.insert({Req});
    }
    bool operator== (const ExtensionMergeInfo& Another) const {
      // Trivially unmergeable.
      if (Unmergable || Another.Unmergable)
        return false;
      // Extensions with inheritance clauses never merge with anything.
      if (InheritsCount != 0 || Another.InheritsCount != 0)
        return false;
      return Requirements == Another.Requirements;
    }
    // Only a plain extension (no comment/@available, no inheritance, no
    // requirements) may be merged into the type definition itself.
    bool isMergeableWithTypeDef() {
      return !Unmergable && InheritsCount == 0 && Requirements.empty();
    }
  };

  using ExtensionInfoMap =
      llvm::MapVector<ExtensionDecl *, SynthesizedExtensionInfo>;
  using ExtensionMergeInfoMap =
      llvm::MapVector<ExtensionDecl *, ExtensionMergeInfo>;

  /// A group of extensions that share identical merge info and can be
  /// printed together.
  struct ExtensionMergeGroup {
    unsigned RequirementsCount;
    unsigned InheritanceCount;
    MergeGroupKind Kind;
    std::vector<SynthesizedExtensionInfo*> Members;
    ExtensionMergeGroup(SynthesizedExtensionInfo *Info,
                        unsigned RequirementsCount,
                        unsigned InheritanceCount,
                        bool MergeableWithType) :
      RequirementsCount(RequirementsCount),
      InheritanceCount(InheritanceCount),
      Kind(MergeableWithType ? MergeGroupKind::MergeableWithTypeDef :
           MergeGroupKind::UnmergeableWithTypeDef) {
      Members.push_back(Info);
    }
    /// Drop members whose extension would lose overload resolution on
    /// \p Target.
    void removeUnfavored(const NominalTypeDecl *Target) {
      Members.erase(std::remove_if(Members.begin(), Members.end(),
        [&](SynthesizedExtensionInfo *Info){
          return !isExtensionFavored(Target, Info->Ext);}), Members.end());
    }
    void sortMembers() {
      std::sort(Members.begin(), Members.end(),
                [](SynthesizedExtensionInfo *LHS, SynthesizedExtensionInfo *RHS) {
                  return (*LHS) < (*RHS);
                });
    }
    // Groups with fewer requirements (then fewer inherited entries) print
    // first.
    bool operator< (const ExtensionMergeGroup& Rhs) const {
      if (RequirementsCount == Rhs.RequirementsCount)
        return InheritanceCount < Rhs.InheritanceCount;
      return RequirementsCount < Rhs.RequirementsCount;
    }
  };

  using MergeGroupVector = std::vector<ExtensionMergeGroup>;

  NominalTypeDecl *Target;
  DeclContext *DC;
  bool IncludeUnconditional;
  PrintOptions Options;
  MergeGroupVector AllGroups;
  ExtensionInfoMap InfoMap;

  // Note: InfoMap's initializer runs the whole collection pass; member order
  // matters since it reads Target/Options and fills AllGroups.
  Implementation(NominalTypeDecl *Target, bool IncludeUnconditional,
                 PrintOptions &&Options)
      : Target(Target), DC(Target), IncludeUnconditional(IncludeUnconditional),
        Options(std::move(Options)), AllGroups(MergeGroupVector()),
        InfoMap(collectSynthesizedExtensionInfo(AllGroups)) {}

  /// Number of entries in \p ED's printed inheritance clause.
  unsigned countInherits(ExtensionDecl *ED) {
    SmallVector<InheritedEntry, 4> Results;
    getInheritedForPrinting(ED, Options, Results);
    return Results.size();
  }

  /// Decide whether \p Ext applies to the target and compute its merge info.
  /// If it applies, the returned SynthesizedExtensionInfo's Ext is non-null.
  std::pair<SynthesizedExtensionInfo, ExtensionMergeInfo>
  isApplicable(ExtensionDecl *Ext, bool IsSynthesized,
               ExtensionDecl *EnablingExt, NormalProtocolConformance *Conf) {
    SynthesizedExtensionInfo Result(IsSynthesized, EnablingExt);
    ExtensionMergeInfo MergeInfo;
    MergeInfo.Unmergable =
        !Ext->getRawComment().isEmpty() ||             // With comments
        Ext->getAttrs().hasAttribute<AvailableAttr>(); // With @available
    MergeInfo.InheritsCount = countInherits(Ext);

    // There's (up to) two extensions here: the extension with the items that we
    // might be merging, plus the "enabling extension", which is the route
    // through which \c Ext itself applies, e.g. extension SomeProtocol {}
    // extension SomeType: SomeProtocol where T: SomeProtocol {}. The former is
    // Ext and the latter is EnablingExt/Conf. Either of these can be
    // conditional in ways that need to be considered when merging.
    auto isConditionalEnablingExt =
        Conf && EnablingExt && !Conf->getConditionalRequirements().empty();
    if (!Ext->isConstrainedExtension() && !isConditionalEnablingExt) {
      if (IncludeUnconditional)
        Result.Ext = Ext;
      return {Result, MergeInfo};
    }

    // Check one set of requirements; returns true when the extension should
    // be rejected outright.
    auto handleRequirements = [&](ExtensionDecl *OwningExt,
                                  ArrayRef<Requirement> Reqs) {
      ProtocolDecl *BaseProto = OwningExt->getSelfProtocolDecl();
      // Substitute the base conforming type into a protocol's generic signature
      // if needed.
      SubstitutionMap subMap;
      if (Conf && BaseProto) {
        subMap = SubstitutionMap::getProtocolSubstitutions(
            ProtocolConformanceRef(Conf));
      }

      for (auto Req : Reqs) {
        // Skip protocol's Self : <Protocol> requirement.
        if (BaseProto &&
            Req.getKind() == RequirementKind::Conformance &&
            Req.getFirstType()->isEqual(BaseProto->getSelfInterfaceType()) &&
            Req.getProtocolDecl() == BaseProto)
          continue;

        if (subMap) {
          // Apply any substitutions we need to map the requirements from a
          // a protocol extension to an extension on the conforming type. We
          // need to lookup conformances outside of the substitution map since
          // the extension may introduce new conformance constraints.
          Req = Req.subst(QuerySubstitutionMap{subMap},
                          LookUpConformanceInModule());
          if (Req.hasError()) {
            // Substitution with interface type bases can only fail
            // if a concrete type fails to conform to a protocol.
            // In this case, just give up on the extension altogether.
            return true;
          }
        }

        assert(!Req.getFirstType()->hasArchetype());
        if (Req.getKind() != RequirementKind::Layout)
          assert(!Req.getSecondType()->hasArchetype());

        // FIXME: This doesn't correctly handle conformance requirements, e.g:
        //
        //   extension P where X: Q, X.Y == Int {}
        //
        // Since the archetype we have for `X` doesn't necessarily have a
        // conformance to `Q` in the conforming type's generic environment. This
        // results in a substitution failure for `X.Y`.
        // https://github.com/swiftlang/swift/issues/83564
        auto *env = Target->getGenericEnvironment();
        SmallVector<Requirement, 2> subReqs;
        subReqs.push_back(
            Req.subst(
              QueryInterfaceTypeSubstitutions(env),
              LookUpConformanceInModule(),
              SubstFlags::PreservePackExpansionLevel));
        while (!subReqs.empty()) {
          auto req = subReqs.pop_back_val();
          switch (req.checkRequirement(subReqs, /*allowMissing=*/false)) {
          case CheckRequirementResult::Success:
          case CheckRequirementResult::PackRequirement:
          case CheckRequirementResult::ConditionalConformance:
            break;

          case CheckRequirementResult::SubstitutionFailure:
            return true;

          case CheckRequirementResult::RequirementFailure:
            if (!req.canBeSatisfied())
              return true;

            // Satisfiable-but-not-satisfied requirements become part of the
            // printed `where` clause.
            MergeInfo.addRequirement(Req);
            break;
          }
        }
      }
      return false;
    };

    if (Ext->isConstrainedExtension()) {
      assert(Ext->getGenericSignature() && "No generic signature.");
      auto GenericSig = Ext->getGenericSignature();
      if (handleRequirements(Ext, GenericSig.getRequirements()))
        return {Result, MergeInfo};
    }

    if (isConditionalEnablingExt) {
      if (handleRequirements(EnablingExt, Conf->getConditionalRequirements()))
        return {Result, MergeInfo};
    }

    Result.Ext = Ext;
    return {Result, MergeInfo};
  }

  /// Bucket extensions into \p Results: extensions with equal merge info join
  /// an existing group, others start a new one.
  void populateMergeGroup(ExtensionInfoMap &InfoMap,
                          ExtensionMergeInfoMap &MergeInfoMap,
                          MergeGroupVector &Results,
                          bool AllowMergeWithDefBody) {
    for (auto &Pair : InfoMap) {
      ExtensionDecl *ED = Pair.first;
      ExtensionMergeInfo &MergeInfo = MergeInfoMap[ED];
      SynthesizedExtensionInfo &ExtInfo = InfoMap[ED];
      auto Found = std::find_if(Results.begin(), Results.end(),
        [&](ExtensionMergeGroup &Group) {
          return MergeInfo == MergeInfoMap[Group.Members.front()->Ext];
        });
      if (Found == Results.end()) {
        Results.push_back({&ExtInfo,
                           (unsigned)MergeInfo.Requirements.size(),
                           MergeInfo.InheritsCount,
                           AllowMergeWithDefBody && MergeInfo.isMergeableWithTypeDef()});
      } else {
        Found->Members.push_back(&ExtInfo);
      }
    }
  }

  /// Collection pass when the target is itself a protocol: only its own
  /// extensions are considered; merging with the type definition is disabled.
  ExtensionInfoMap
  collectSynthesizedExtensionInfoForProtocol(MergeGroupVector &AllGroups) {
    ExtensionInfoMap InfoMap;
    ExtensionMergeInfoMap MergeInfoMap;
    for (auto *E : Target->getExtensions()) {
      if (!Options.shouldPrint(E))
        continue;
      auto Pair = isApplicable(E, /*Synthesized*/ false,
                               /*EnablingExt*/ nullptr,
                               /*Conf*/ nullptr);
      if (Pair.first) {
        InfoMap.insert({E, Pair.first});
        MergeInfoMap.insert({E, Pair.second});
      }
    }
    populateMergeGroup(InfoMap, MergeInfoMap, AllGroups,
                       /*AllowMergeWithDefBody=*/false);
    std::sort(AllGroups.begin(), AllGroups.end());
    for (auto &Group : AllGroups) {
      Group.sortMembers();
    }
    return InfoMap;
  }

  /// Whether TL is \p D's raw type (only meaningful for enums).
  static bool isEnumRawType(const Decl* D, TypeLoc TL) {
    assert (TL.getType());
    if (auto ED = dyn_cast<EnumDecl>(D)) {
      return ED->hasRawType() && ED->getRawType()->isEqual(TL.getType());
    }
    return false;
  }

  /// Main collection pass: gathers the target's own extensions plus protocol
  /// extensions made applicable via (possibly conditional) conformances, then
  /// groups, filters and sorts them.
  ExtensionInfoMap
  collectSynthesizedExtensionInfo(MergeGroupVector &AllGroups) {
    if (isa<ProtocolDecl>(Target)) {
      return collectSynthesizedExtensionInfoForProtocol(AllGroups);
    }

    ExtensionInfoMap InfoMap;
    ExtensionMergeInfoMap MergeInfoMap;
    auto handleExtension = [&](ExtensionDecl *E, bool Synthesized,
                               ExtensionDecl *EnablingE,
                               NormalProtocolConformance *Conf) {
      PrintOptions::OverrideScope AdjustedOpts(Options);
      if (Synthesized) {
        // Members from underscored system protocols should still appear as
        // members of the target type, even if the protocols themselves are not
        // printed.
        OVERRIDE_PRINT_OPTION(AdjustedOpts, SkipUnderscoredSystemProtocols, false);
      }
      if (Options.shouldPrint(E)) {
        auto Pair = isApplicable(E, Synthesized, EnablingE, Conf);
        if (Pair.first) {
          InfoMap.insert({E, Pair.first});
          MergeInfoMap.insert({E, Pair.second});
        }
      }
    };

    // Protocol extensions applicable via the target's direct conformances.
    for (auto *LocalConf : Target->getLocalConformances()) {
      if (isa<InheritedProtocolConformance>(LocalConf))
        continue;
      auto RootConf = LocalConf->getRootConformance();
      auto *Conf = dyn_cast<NormalProtocolConformance>(RootConf);
      if (!Conf)
        continue;
      for (auto *E : Conf->getProtocol()->getExtensions())
        handleExtension(E, true, nullptr, Conf);
    }

    // Merge with actual extensions.
    for (auto *EnablingE : Target->getExtensions()) {
      handleExtension(EnablingE, false, nullptr, nullptr);
      for (auto *Conf : EnablingE->getLocalConformances()) {
        auto NormalConf =
            dyn_cast<NormalProtocolConformance>(Conf->getRootConformance());
        if (!NormalConf) continue;
        for (auto E : NormalConf->getProtocol()->getExtensions())
          handleExtension(E, true, EnablingE, NormalConf);
      }
    }

    populateMergeGroup(InfoMap, MergeInfoMap, AllGroups,
                       /*AllowMergeWithDefBody=*/true);

    std::sort(AllGroups.begin(), AllGroups.end());
    for (auto &Group : AllGroups) {
      Group.removeUnfavored(Target);
      Group.sortMembers();
    }
    // Filtering unfavored members may have emptied some groups entirely.
    AllGroups.erase(std::remove_if(AllGroups.begin(), AllGroups.end(),
      [](ExtensionMergeGroup &Group) { return Group.Members.empty(); }),
      AllGroups.end());

    return InfoMap;
  }
};
// The Implementation is heap-allocated here and held by reference; it is
// destroyed in ~SynthesizedExtensionAnalyzer via `delete &Impl`.
SynthesizedExtensionAnalyzer::SynthesizedExtensionAnalyzer(
    NominalTypeDecl *Target, PrintOptions &&Options, bool IncludeUnconditional)
    : Impl(*(new Implementation(Target, IncludeUnconditional, std::move(Options)))) {}
SynthesizedExtensionAnalyzer::~SynthesizedExtensionAnalyzer() {delete &Impl;}
/// Whether \p VD lives in an extension that this analyzer recorded as
/// synthesized for the target type.
bool SynthesizedExtensionAnalyzer::isInSynthesizedExtension(
    const ValueDecl *VD) {
  if (auto Ext = dyn_cast_or_null<ExtensionDecl>(VD->getDeclContext()->
                                                 getInnermostTypeContext())) {
    auto It = Impl.InfoMap.find(Ext);
    if (It != Impl.InfoMap.end() && It->second.IsSynthesized) {
      // A synthesized extension will only be created if the underlying type
      // is in the same module
      return VD->getModuleContext() == Impl.Target->getModuleContext();
    }
  }
  return false;
}
/// Invoke \p Fn once per merge group whose kind matches \p Kind
/// (MergeGroupKind::All matches every group), passing the group's members as
/// ExtensionInfo records.
void SynthesizedExtensionAnalyzer::
forEachExtensionMergeGroup(MergeGroupKind Kind, ExtensionGroupOperation Fn) {
  for (auto &Group : Impl.AllGroups) {
    if (Kind != MergeGroupKind::All && Kind != Group.Kind)
      continue;
    std::vector<ExtensionInfo> Infos;
    Infos.reserve(Group.Members.size());
    for (auto *Member : Group.Members)
      Infos.push_back({Member->Ext, Member->EnablingExt, Member->IsSynthesized});
    Fn(llvm::ArrayRef(Infos));
  }
}
/// Whether any merge group matches \p Kind. Note that MergeGroupKind::All
/// still requires at least one group to exist.
bool SynthesizedExtensionAnalyzer::hasMergeGroup(MergeGroupKind Kind) {
  for (auto &Group : Impl.AllGroups)
    if (Kind == MergeGroupKind::All || Kind == Group.Kind)
      return true;
  return false;
}
/// Build a map from default-implementation decls (declared in extensions of
/// \p PD) to the protocol requirement they implement, covering both \p PD's
/// own members and those of its inherited protocols.
void swift::
collectDefaultImplementationForProtocolMembers(ProtocolDecl *PD,
                    llvm::SmallDenseMap<ValueDecl*, ValueDecl*> &DefaultMap) {
  auto HandleMembers = [&](DeclRange Members) {
    for (Decl *D : Members) {
      auto *VD = dyn_cast<ValueDecl>(D);

      // Skip non-value decl.
      if (!VD)
        continue;

      // Skip decls with empty names, e.g. setter/getters for properties.
      if (VD->getBaseName().empty())
        continue;

      // A lookup hit counts as a default implementation only when it is
      // declared in an extension of this same protocol.
      for (auto *Default: PD->lookupDirect(VD->getName())) {
        if (Default->getDeclContext()->getExtendedProtocolDecl() == PD) {
          DefaultMap.insert({Default, VD});
        }
      }
    }
  };

  // Collect the default implementations for the members in this given protocol.
  HandleMembers(PD->getMembers());

  // Collect the default implementations for the members in the inherited
  // protocols.
  for (auto *IP : PD->getInheritedProtocols())
    HandleMembers(IP->getMembers());
}
/// This walker will traverse the AST and report types for every expression.
///
/// Printed type strings are appended NUL-terminated to a single shared output
/// stream; each reported ExpressionTypeInfo refers to its type by
/// (offset, length) into that stream, deduplicated via TypeOffsets.
class ExpressionTypeCollector: public SourceEntityWalker {
  SourceManager &SM;
  unsigned int BufferId;
  std::vector<ExpressionTypeInfo> &Results;

  // This is to where we print all types.
  llvm::raw_ostream &OS;

  // Map from a printed type to the offset in OS where the type starts.
  llvm::StringMap<uint32_t> TypeOffsets;

  // This keeps track of whether we have a type reported for a given
  // [offset, length].
  llvm::DenseMap<unsigned, llvm::DenseSet<unsigned>> AllPrintedTypes;

  // When non empty, we only print expression types that conform to any of
  // these protocols.
  llvm::MapVector<ProtocolDecl*, StringRef> &InterestedProtocols;

  // Specified by the client whether we should print fully qualified types
  const bool FullyQualified;

  // Specified by the client whether we should canonicalize types before printing
  const bool CanonicalType;

  /// Decide whether to report a type for the expression at [Offset, Length];
  /// on a positive answer, \p Conformances is filled with the names of the
  /// interested protocols the expression's type conforms to.
  bool shouldReport(unsigned Offset, unsigned Length, Expr *E,
                    std::vector<StringRef> &Conformances) {
    assert(Conformances.empty());
    // We shouldn't report null types.
    if (E->getType().isNull())
      return false;

    // We should not report a type for implicit expressions, except for
    // - `OptionalEvaluationExpr` to show the correct type when there is optional chaining
    // - `DotSyntaxCallExpr` to report the method type without the metatype
    if (E->isImplicit() &&
        !isa<OptionalEvaluationExpr>(E) &&
        !isa<DotSyntaxCallExpr>(E)) {
      return false;
    }

    // If we have already reported types for this source range, we shouldn't
    // report again. This makes sure we always report the outtermost type of
    // several overlapping expressions.
    auto &Bucket = AllPrintedTypes[Offset];
    if (Bucket.find(Length) != Bucket.end())
      return false;

    // We print every expression if the interested protocols are empty.
    if (InterestedProtocols.empty())
      return true;

    // Collecting protocols conformed by this expressions that are in the list.
    for (auto Proto: InterestedProtocols) {
      if (checkConformance(E->getType(), Proto.first)) {
        Conformances.push_back(Proto.second);
      }
    }

    // We only print the type of the expression if it conforms to any of the
    // interested protocols.
    return !Conformances.empty();
  }

  // Find an existing offset in the type buffer otherwise print the type to
  // the buffer.
  std::pair<uint32_t, uint32_t> getTypeOffsets(StringRef PrintedType) {
    auto It = TypeOffsets.find(PrintedType);
    if (It == TypeOffsets.end()) {
      TypeOffsets[PrintedType] = OS.tell();
      OS << PrintedType << '\0';
    }
    return {TypeOffsets[PrintedType], PrintedType.size()};
  }

public:
  ExpressionTypeCollector(
      SourceFile &SF,
      llvm::MapVector<ProtocolDecl *, StringRef> &InterestedProtocols,
      std::vector<ExpressionTypeInfo> &Results, bool FullyQualified,
      bool CanonicalType, llvm::raw_ostream &OS)
      : SM(SF.getASTContext().SourceMgr),
        BufferId(SF.getBufferID()), Results(Results), OS(OS),
        InterestedProtocols(InterestedProtocols),
        FullyQualified(FullyQualified), CanonicalType(CanonicalType) {}

  bool walkToExprPre(Expr *E) override {
    if (E->getSourceRange().isInvalid())
      return true;
    CharSourceRange Range =
      Lexer::getCharSourceRangeFromSourceRange(SM, E->getSourceRange());
    unsigned Offset = SM.getLocOffsetInBuffer(Range.getStart(), BufferId);
    unsigned Length = Range.getByteLength();
    std::vector<StringRef> Conformances;
    if (!shouldReport(Offset, Length, E, Conformances))
      return true;
    // Print the type to a temporary buffer.
    SmallString<64> Buffer;
    {
      llvm::raw_svector_ostream OS(Buffer);
      auto Ty = E->getType()->getRValueType();
      PrintOptions printOptions = PrintOptions();
      printOptions.FullyQualifiedTypes = FullyQualified;
      if (CanonicalType) {
        Ty->getCanonicalType()->print(OS, printOptions);
      } else {
        // Preserve (reconstituted) sugar for readability.
        Ty->reconstituteSugar(true)->print(OS, printOptions);
      }
    }
    auto Ty = getTypeOffsets(Buffer.str());
    // Add the type information to the result list.
    Results.push_back({Offset, Length, Ty.first, Ty.second, {}});

    // Adding all protocol names to the result.
    for(auto Con: Conformances) {
      auto Ty = getTypeOffsets(Con);
      Results.back().protocols.push_back({Ty.first, Ty.second});
    }

    // Keep track of that we have a type reported for this range.
    AllPrintedTypes[Offset].insert(Length);
    return true;
  }
};
/// Resolve \p name to a protocol declaration visible from \p dc, or nullptr
/// if no such protocol exists.
ProtocolDecl* swift::resolveProtocolName(DeclContext *dc, StringRef name) {
  auto &evaluator = dc->getASTContext().evaluator;
  ResolveProtocolNameRequest request(ProtocolNameOwner(dc, name));
  return evaluateOrDefault(evaluator, request, nullptr);
}
/// Collect the types of all expressions in \p SF into \p Scratch, optionally
/// restricted to expressions conforming to \p ExpectedProtocols. Printed type
/// strings go to \p OS; the returned entries index into that stream. Returns
/// an empty result if any expected protocol name fails to resolve.
ArrayRef<ExpressionTypeInfo> swift::collectExpressionType(
    SourceFile &SF, ArrayRef<const char *> ExpectedProtocols,
    std::vector<ExpressionTypeInfo> &Scratch, bool FullyQualified,
    bool CanonicalType, llvm::raw_ostream &OS) {
  llvm::MapVector<ProtocolDecl*, StringRef> InterestedProtocols;
  for (auto Name: ExpectedProtocols) {
    if (auto *pd = resolveProtocolName(&SF, Name)) {
      InterestedProtocols.insert({pd, Name});
    } else {
      // An unresolvable protocol name invalidates the whole request.
      return {};
    }
  }
  ExpressionTypeCollector Walker(SF, InterestedProtocols, Scratch,
                                 FullyQualified, CanonicalType, OS);
  Walker.walk(SF);
  return Scratch;
}
/// This walker will traverse the AST and report types for every variable
/// declaration.
class VariableTypeCollector : public SourceEntityWalker {
private:
  const SourceManager &SM;
  unsigned int BufferId;

  /// The range in which variable types are to be collected.
  SourceRange TotalRange;

  // Specified by the client whether we should print fully qualified types
  const bool FullyQualified;

  /// The output vector for VariableTypeInfos emitted during traversal.
  std::vector<VariableTypeInfo> &Results;

  /// We print all types into a single output stream (e.g. into a string buffer)
  /// and provide offsets into this string buffer to describe individual types,
  /// i.e. \c OS builds a string that contains all null-terminated printed type
  /// strings. When referring to one of these types, we can use the offsets at
  /// which it starts in the \c OS.
  llvm::raw_ostream &OS;

  /// Map from a printed type to the offset in \c OS where the type starts.
  llvm::StringMap<uint32_t> TypeOffsets;

  /// Returns the start offset of this string in \c OS. If \c PrintedType
  /// hasn't been printed to \c OS yet, this function will do so.
  uint32_t getTypeOffset(StringRef PrintedType) {
    auto It = TypeOffsets.find(PrintedType);
    if (It == TypeOffsets.end()) {
      TypeOffsets[PrintedType] = OS.tell();
      OS << PrintedType << '\0';
    }
    return TypeOffsets[PrintedType];
  }

  /// Checks whether the given range overlaps the total range in which we
  /// collect variable types.
  bool overlapsTotalRange(SourceRange Range) {
    // An invalid TotalRange means "collect everywhere".
    return TotalRange.isInvalid() || Range.overlaps(TotalRange);
  }

public:
  VariableTypeCollector(const SourceFile &SF, SourceRange Range,
                        bool FullyQualified,
                        std::vector<VariableTypeInfo> &Results,
                        llvm::raw_ostream &OS)
      : SM(SF.getASTContext().SourceMgr), BufferId(SF.getBufferID()),
        TotalRange(Range), FullyQualified(FullyQualified), Results(Results),
        OS(OS) {}

  bool walkToDeclPre(Decl *D, CharSourceRange DeclNameRange) override {
    if (DeclNameRange.isInvalid()) {
      return true;
    }
    // Skip this declaration and its subtree if outside the range
    if (!overlapsTotalRange(D->getSourceRange())) {
      return false;
    }
    if (auto VD = dyn_cast<VarDecl>(D)) {
      unsigned VarOffset =
          SM.getLocOffsetInBuffer(DeclNameRange.getStart(), BufferId);
      unsigned VarLength = DeclNameRange.getByteLength();
      // Print the type to a temporary buffer
      SmallString<64> Buffer;
      {
        llvm::raw_svector_ostream OS(Buffer);
        PrintOptions Options;
        Options.SynthesizeSugarOnTypes = true;
        Options.FullyQualifiedTypes = FullyQualified;
        auto Ty = VD->getInterfaceType();
        // Skip this declaration and its children if the type is an error type.
        if (Ty->is<ErrorType>()) {
          return false;
        }
        Ty->print(OS, Options);
      }
      // Transfer the type to `OS` if needed and get the offset of this string
      // in `OS`.
      auto TyOffset = getTypeOffset(Buffer.str());
      bool HasExplicitType =
          VD->getTypeReprOrParentPatternTypeRepr() != nullptr;
      // Add the type information to the result list.
      Results.emplace_back(VarOffset, VarLength, HasExplicitType, TyOffset);
    }
    return true;
  }

  bool walkToStmtPre(Stmt *S) override {
    // Skip this statement and its subtree if outside the range
    return overlapsTotalRange(S->getSourceRange());
  }

  bool walkToExprPre(Expr *E) override {
    // Skip this expression and its subtree if outside the range
    return overlapsTotalRange(E->getSourceRange());
  }

  bool walkToPatternPre(Pattern *P) override {
    // Skip this pattern and its subtree if outside the range
    return overlapsTotalRange(P->getSourceRange());
  }
};
/// Trivial aggregate constructor; TypeOffset refers to a position in the
/// shared type-string buffer produced by VariableTypeCollector.
VariableTypeInfo::VariableTypeInfo(uint32_t Offset, uint32_t Length,
                                   bool HasExplicitType, uint32_t TypeOffset)
    : Offset(Offset), Length(Length), HasExplicitType(HasExplicitType),
      TypeOffset(TypeOffset) {}
/// Collect the types of all variable declarations in \p SF that overlap
/// \p Range (or all of them when Range is invalid). Printed type strings are
/// appended to \p OS; the infos index into that stream.
void swift::collectVariableType(
    SourceFile &SF, SourceRange Range, bool FullyQualified,
    std::vector<VariableTypeInfo> &VariableTypeInfos, llvm::raw_ostream &OS) {
  VariableTypeCollector Collector(SF, Range, FullyQualified,
                                  VariableTypeInfos, OS);
  Collector.walk(SF);
}
/// Requirements for which \p VD can act as a default implementation,
/// computed through the request evaluator (empty on failure).
ArrayRef<ValueDecl*> swift::
canDeclProvideDefaultImplementationFor(ValueDecl* VD) {
  auto &Eval = VD->getASTContext().evaluator;
  return evaluateOrDefault(Eval, ProvideDefaultImplForRequest(VD),
                           ArrayRef<ValueDecl*>());
}
/// All decls that \p VD overrides (optionally including protocol requirements
/// and transitive overrides), computed through the request evaluator.
ArrayRef<ValueDecl*> swift::
collectAllOverriddenDecls(ValueDecl *VD, bool IncludeProtocolRequirements,
                          bool Transitive) {
  auto &Eval = VD->getASTContext().evaluator;
  OverridenDeclsOwner Owner(VD, IncludeProtocolRequirements, Transitive);
  return evaluateOrDefault(Eval, CollectOverriddenDeclsRequest(Owner),
                           ArrayRef<ValueDecl*>());
}
bool swift::isExtensionApplied(const DeclContext *DC, Type BaseTy,
const ExtensionDecl *ED) {
return evaluateOrDefault(DC->getASTContext().evaluator,
IsDeclApplicableRequest(DeclApplicabilityOwner(DC, BaseTy, ED)), false);
}
bool swift::isMemberDeclApplied(const DeclContext *DC, Type BaseTy,
const ValueDecl *VD) {
return evaluateOrDefault(DC->getASTContext().evaluator,
IsDeclApplicableRequest(DeclApplicabilityOwner(DC, BaseTy, VD)), false);
}
/// Try to pick a single base type for code-completion member lookup when two
/// candidate types are available. Returns null when the types cannot be
/// usefully merged.
Type swift::tryMergeBaseTypeForCompletionLookup(Type ty1, Type ty2,
                                                DeclContext *dc) {
  // Easy case, equivalent so just pick one.
  if (ty1->isEqual(ty2))
    return ty1;

  // Check to see if one is an optional of another. In that case, prefer the
  // optional since we can unwrap a single level when doing a lookup.
  {
    SmallVector<Type, 4> ty1Optionals;
    SmallVector<Type, 4> ty2Optionals;
    auto ty1Unwrapped = ty1->lookThroughAllOptionalTypes(ty1Optionals);
    auto ty2Unwrapped = ty2->lookThroughAllOptionalTypes(ty2Optionals);

    if (ty1Unwrapped->isEqual(ty2Unwrapped)) {
      // We currently only unwrap a single level of optional, so if the
      // difference is greater, don't merge.
      if (ty1Optionals.size() == 1 && ty2Optionals.empty())
        return ty1;
      if (ty2Optionals.size() == 1 && ty1Optionals.empty())
        return ty2;
    }
    // We don't want to consider subtyping for optional mismatches since
    // optional promotion is modelled as a subtype, which isn't useful for us
    // (i.e if we have T? and U, preferring U would miss members on T?).
    if (ty1Optionals.size() != ty2Optionals.size())
      return Type();
  }

  // In general we want to prefer a subtype over a supertype.
  if (isSubtypeOf(ty1, ty2, dc))
    return ty1;
  if (isSubtypeOf(ty2, ty1, dc))
    return ty2;

  // Incomparable, return null.
  return Type();
}
/// Whether \p T1 is convertible to \p T2 in \p DC; defaults to false if the
/// request fails.
bool swift::isConvertibleTo(Type T1, Type T2, bool openArchetypes,
                            DeclContext &DC) {
  TypeRelationCheckInput Input(&DC, T1, T2, TypeRelation::ConvertTo,
                               openArchetypes);
  return evaluateOrDefault(DC.getASTContext().evaluator,
                           TypeRelationCheckRequest(Input), false);
}
/// Whether \p T1 is a subtype of \p T2 in \p DC (archetypes are not opened);
/// defaults to false if the request fails.
bool swift::isSubtypeOf(Type T1, Type T2, DeclContext *DC) {
  TypeRelationCheckInput Input(DC, T1, T2, TypeRelation::SubtypeOf,
                               /*openArchetypes*/ false);
  return evaluateOrDefault(DC->getASTContext().evaluator,
                           TypeRelationCheckRequest(Input), false);
}
/// Root type of the key path accepted by dynamic-member subscript \p SD,
/// or null if it cannot be determined.
Type swift::getRootTypeOfKeypathDynamicMember(SubscriptDecl *SD) {
  auto &Ctx = SD->getASTContext();
  return evaluateOrDefault(Ctx.evaluator,
                           RootTypeOfKeypathDynamicMemberRequest{SD}, Type());
}
/// Result type of the key path accepted by dynamic-member subscript \p SD;
/// the request yields a (root, result) pair and we return its second half.
Type swift::getResultTypeOfKeypathDynamicMember(SubscriptDecl *SD) {
  auto Pair = evaluateOrDefault(
      SD->getASTContext().evaluator,
      RootAndResultTypeOfKeypathDynamicMemberRequest{SD}, TypePair());
  return Pair.SecondTy;
}
/// Collect (shadowing, shadowed) pairs for shorthand closure captures like
/// `[foo]`, where the captured variable silently shadows an outer `foo`.
SmallVector<std::pair<ValueDecl *, ValueDecl *>, 1>
swift::getShorthandShadows(CaptureListExpr *CaptureList, DeclContext *DC) {
  SmallVector<std::pair<ValueDecl *, ValueDecl *>, 1> Result;
  for (auto Capture : CaptureList->getCaptureList()) {
    if (Capture.PBD->getPatternList().size() != 1)
      continue;

    Expr *Init = Capture.PBD->getInit(0);
    if (!Init)
      continue;

    auto DeclaredVar = Capture.getVar();
    if (DeclaredVar->getLoc() != Init->getLoc()) {
      // Only shorthand captures like `[foo]` have the declared var and the
      // initializer expression at the same source location; skip explicit
      // captures such as `[foo = bar]`.
      continue;
    }

    if (auto UDRE = dyn_cast<UnresolvedDeclRefExpr>(Init)) {
      // Pre-typechecked AST: resolve the reference ourselves when possible.
      if (DC)
        Init = resolveDeclRefExpr(UDRE, DC);
    }
    auto *ReferencedVar = Init->getReferencedDecl().getDecl();
    if (!ReferencedVar)
      continue;

    Result.emplace_back(std::make_pair(DeclaredVar, ReferencedVar));
  }
  return Result;
}
/// Collect the (declared, referenced) variable pairs introduced by shorthand
/// condition bindings such as `if let foo {`, where the bound name shadows an
/// outer declaration. \p DC, if non-null, is used to resolve unresolved
/// references in the condition's initializer.
SmallVector<std::pair<ValueDecl *, ValueDecl *>, 1>
swift::getShorthandShadows(LabeledConditionalStmt *CondStmt, DeclContext *DC) {
  SmallVector<std::pair<ValueDecl *, ValueDecl *>, 1> Result;
  for (const StmtConditionElement &Cond : CondStmt->getCond()) {
    if (Cond.getKind() != StmtConditionElement::CK_PatternBinding)
      continue;

    Expr *Init = Cond.getInitializer();
    // Resolve the initializer to the outer declaration it refers to.
    if (auto UDRE = dyn_cast<UnresolvedDeclRefExpr>(Init)) {
      if (DC)
        Init = resolveDeclRefExpr(UDRE, DC);
    }
    auto ReferencedVar = Init->getReferencedDecl().getDecl();
    if (!ReferencedVar)
      continue;

    // For each variable the pattern binds, it is a shorthand shadow only if
    // the declared var and the initializer share the same source location.
    Cond.getPattern()->forEachVariable([&](VarDecl *DeclaredVar) {
      if (DeclaredVar->getLoc() != Init->getLoc())
        return;
      Result.emplace_back(std::make_pair(DeclaredVar, ReferencedVar));
    });
  }
  return Result;
}
/// Called once parsing of \p SrcFile finishes; prepares the file for
/// type checking.
void ReadyForTypeCheckingCallback::doneParsing(SourceFile *SrcFile) {
  // Import resolution will have already been done by IDEInspectionInstance,
  // we need to bind extensions here though since IDEInspectionSecondPassRequest
  // can mutate the AST.
  bindExtensions(*SrcFile->getParentModule());
  readyForTypeChecking(SrcFile);
} | cpp | github | https://github.com/apple/swift | lib/IDE/IDETypeChecking.cpp
import numpy as np
import scipy.misc
import scipy.special
from nose import SkipTest

import pymbar
from pymbar.utils_for_testing import eq
def test_logsumexp():
    """Check pymbar's logsumexp (numexpr and pure-numpy paths) against scipy
    along every axis of a random 3-D array."""
    a = np.random.normal(size=(200, 500, 5))

    for axis in range(a.ndim):
        ans_ne = pymbar.utils.logsumexp(a, axis=axis)
        ans_no_ne = pymbar.utils.logsumexp(a, axis=axis, use_numexpr=False)
        # scipy.misc.logsumexp was deprecated in scipy 0.19 and removed in
        # scipy 1.0; the canonical location is scipy.special.logsumexp.
        ans_scipy = scipy.special.logsumexp(a, axis=axis)
        eq(ans_ne, ans_no_ne)
        eq(ans_ne, ans_scipy)
def test_logsumexp_b():
    """Check pymbar's weighted logsumexp (b= scaling factors) against scipy
    along every axis, with non-negative weights."""
    a = np.random.normal(size=(200, 500, 5))
    b = np.random.normal(size=(200, 500, 5)) ** 2.

    for axis in range(a.ndim):
        ans_ne = pymbar.utils.logsumexp(a, b=b, axis=axis)
        ans_no_ne = pymbar.utils.logsumexp(a, b=b, axis=axis, use_numexpr=False)
        # scipy.misc.logsumexp was deprecated in scipy 0.19 and removed in
        # scipy 1.0; the canonical location is scipy.special.logsumexp.
        ans_scipy = scipy.special.logsumexp(a, b=b, axis=axis)
        eq(ans_ne, ans_no_ne)
        eq(ans_ne, ans_scipy)
def test_logsum():
    # Compare the public logsumexp against the internal _logsum helper on a
    # 1-D input; they should agree to ~12 decimal places.
    u = np.random.normal(size=(200))
    y1 = pymbar.utils.logsumexp(u)
    y2 = pymbar.utils._logsum(u)
    eq(y1, y2, decimal=12) | unknown | codeparrot/codeparrot-clean | |
<?php
/*
* This file is part of the Symfony package.
*
* (c) Fabien Potencier <fabien@symfony.com>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
namespace Symfony\Bundle\FrameworkBundle\Tests\Functional;
use Symfony\Bundle\FrameworkBundle\Tests\Functional\app\JsonStreamer\Dto\Dummy;
use Symfony\Component\Filesystem\Filesystem;
use Symfony\Component\JsonStreamer\StreamerDumper;
use Symfony\Component\JsonStreamer\StreamReaderInterface;
use Symfony\Component\JsonStreamer\StreamWriterInterface;
use Symfony\Component\TypeInfo\Type;
/**
* @author Mathias Arlaud <mathias.arlaud@gmail.com>
*/
class JsonStreamerTest extends AbstractWebTestCase
{
    protected function setUp(): void
    {
        // Boot the kernel with the JsonStreamer fixture configuration.
        static::bootKernel(['test_case' => 'JsonStreamer']);
    }

    /**
     * The stream writer should serialize a Dummy object, applying its
     * configured value transformers (uppercased "@name", "min..max" range).
     */
    public function testWrite()
    {
        /** @var StreamWriterInterface $writer */
        $writer = static::getContainer()->get('json_streamer.stream_writer.alias');

        $this->assertSame('{"@name":"DUMMY","range":"10..20"}', (string) $writer->write(new Dummy(), Type::object(Dummy::class)));
    }

    /**
     * The stream reader should deserialize JSON back into a Dummy object,
     * reversing the value transformations applied on write.
     */
    public function testRead()
    {
        /** @var StreamReaderInterface $reader */
        $reader = static::getContainer()->get('json_streamer.stream_reader.alias');

        $expected = new Dummy();
        $expected->name = 'dummy';
        $expected->range = [0, 1];

        $this->assertEquals($expected, $reader->read('{"@name": "DUMMY", "range": "0..1"}', Type::object(Dummy::class)));
    }

    /**
     * The cache warmer should pre-generate stream writer files for every
     * streamable class; the expected file layout depends on whether the
     * newer StreamerDumper component is installed.
     */
    public function testWarmupStreamableClasses()
    {
        /** @var Filesystem $fs */
        $fs = static::getContainer()->get('filesystem');
        $streamWritersDir = \sprintf('%s/json_streamer/stream_writer/', static::getContainer()->getParameter('kernel.cache_dir'));

        // clear already created stream writers
        if ($fs->exists($streamWritersDir)) {
            $fs->remove($streamWritersDir);
        }

        static::getContainer()->get('json_streamer.cache_warmer.streamer.alias')->warmUp(static::getContainer()->getParameter('kernel.cache_dir'));

        $this->assertFileExists($streamWritersDir);
        if (!class_exists(StreamerDumper::class)) {
            $this->assertCount(2, glob($streamWritersDir.'/*'));
        } else {
            // With StreamerDumper, each writer also gets .meta and .meta.json
            // companion files.
            $this->assertCount(2, glob($streamWritersDir.'/*.php'));
            $this->assertCount(2, glob($streamWritersDir.'/*.php.meta'));
            $this->assertCount(2, glob($streamWritersDir.'/*.php.meta.json'));
        }
    }
} | php | github | https://github.com/symfony/symfony | src/Symfony/Bundle/FrameworkBundle/Tests/Functional/JsonStreamerTest.php
from django.core.handlers.wsgi import WSGIRequest
from django.core.servers.basehttp import WSGIRequestHandler
from django.test import SimpleTestCase
from django.test.client import RequestFactory
from django.test.utils import captured_stderr
from django.utils.six import BytesIO
class Stub(object):
    """Minimal attribute bag: every keyword argument becomes an instance
    attribute, e.g. ``Stub(x=1).x == 1``."""

    def __init__(self, **attrs):
        for name, value in attrs.items():
            setattr(self, name, value)
class WSGIRequestHandlerTestCase(SimpleTestCase):
    def test_https(self):
        # A request whose first bytes look like a TLS handshake (\x16\x03)
        # should produce a helpful "HTTPS over HTTP server" warning on stderr.
        request = WSGIRequest(RequestFactory().get('/').environ)
        request.makefile = lambda *args, **kwargs: BytesIO()
        handler = WSGIRequestHandler(request, '192.168.0.2', None)

        with captured_stderr() as stderr:
            handler.log_message("GET %s %s", str('\x16\x03'), "4")
            self.assertIn(
                "You're accessing the development server over HTTPS, "
                "but it only supports HTTP.",
                stderr.getvalue()
            )

    def test_strips_underscore_headers(self):
        """WSGIRequestHandler ignores headers containing underscores.

        This follows the lead of nginx and Apache 2.4, and is to avoid
        ambiguity between dashes and underscores in mapping to WSGI environ,
        which can have security implications.
        """
        def test_app(environ, start_response):
            """A WSGI app that just reflects its HTTP environ."""
            start_response('200 OK', [])
            http_environ_items = sorted(
                '%s:%s' % (k, v) for k, v in environ.items()
                if k.startswith('HTTP_')
            )
            yield (','.join(http_environ_items)).encode('utf-8')

        # Raw HTTP request with one legitimate header and two that contain
        # underscores (which must be dropped).
        rfile = BytesIO()
        rfile.write(b"GET / HTTP/1.0\r\n")
        rfile.write(b"Some-Header: good\r\n")
        rfile.write(b"Some_Header: bad\r\n")
        rfile.write(b"Other_Header: bad\r\n")
        rfile.seek(0)

        # WSGIRequestHandler closes the output file; we need to make this a
        # no-op so we can still read its contents.
        class UnclosableBytesIO(BytesIO):
            def close(self):
                pass

        wfile = UnclosableBytesIO()

        def makefile(mode, *a, **kw):
            # Route the handler's socket reads to rfile and writes to wfile.
            if mode == 'rb':
                return rfile
            elif mode == 'wb':
                return wfile

        request = Stub(makefile=makefile)
        server = Stub(base_environ={}, get_app=lambda: test_app)

        # We don't need to check stderr, but we don't want it in test output
        with captured_stderr():
            # instantiating a handler runs the request as side effect
            WSGIRequestHandler(request, '192.168.0.2', server)

        wfile.seek(0)
        body = list(wfile.readlines())[-1]
        # Only the dash header survives into the WSGI environ.
        self.assertEqual(body, b'HTTP_SOME_HEADER:good') | unknown | codeparrot/codeparrot-clean | |
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/mm/memory_hotplug.c
*
* Copyright (C)
*/
#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/writeback.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/memory.h>
#include <linux/memremap.h>
#include <linux/memory_hotplug.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/migrate.h>
#include <linux/page-isolation.h>
#include <linux/pfn.h>
#include <linux/suspend.h>
#include <linux/mm_inline.h>
#include <linux/firmware-map.h>
#include <linux/stop_machine.h>
#include <linux/hugetlb.h>
#include <linux/memblock.h>
#include <linux/compaction.h>
#include <linux/rmap.h>
#include <linux/module.h>
#include <linux/node.h>
#include <asm/tlbflush.h>
#include "internal.h"
#include "shuffle.h"
/* Modes for the memory_hotplug.memmap_on_memory module parameter. */
enum {
	MEMMAP_ON_MEMORY_DISABLE = 0,	/* never place the memmap on hotplugged memory */
	MEMMAP_ON_MEMORY_ENABLE,	/* place it there when the layout allows */
	MEMMAP_ON_MEMORY_FORCE,		/* pad the vmemmap so it always fits */
};
static int memmap_mode __read_mostly = MEMMAP_ON_MEMORY_DISABLE;

/* Size, in bytes, of the "struct page" array covering one memory block. */
static inline unsigned long memory_block_memmap_size(void)
{
	return PHYS_PFN(memory_block_size_bytes()) * sizeof(struct page);
}
/*
 * Number of pages of a memory block that its own memmap (vmemmap) consumes
 * when placed on the hotplugged memory itself.
 */
static inline unsigned long memory_block_memmap_on_memory_pages(void)
{
	unsigned long nr_pages = PFN_UP(memory_block_memmap_size());

	/*
	 * In "forced" memmap_on_memory mode, we add extra pages to align the
	 * vmemmap size to cover full pageblocks. That way, we can add memory
	 * even if the vmemmap size is not properly aligned, however, we might waste
	 * memory.
	 */
	if (memmap_mode == MEMMAP_ON_MEMORY_FORCE)
		return pageblock_align(nr_pages);
	return nr_pages;
}
#ifdef CONFIG_MHP_MEMMAP_ON_MEMORY
/*
 * memory_hotplug.memmap_on_memory parameter
 */
/* Parse "Y"/"N"/"force" (case variants accepted) into a MEMMAP_ON_MEMORY_* mode. */
static int set_memmap_mode(const char *val, const struct kernel_param *kp)
{
	int ret, mode;
	bool enabled;

	if (sysfs_streq(val, "force") ||  sysfs_streq(val, "FORCE")) {
		mode = MEMMAP_ON_MEMORY_FORCE;
	} else {
		ret = kstrtobool(val, &enabled);
		if (ret < 0)
			return ret;
		if (enabled)
			mode = MEMMAP_ON_MEMORY_ENABLE;
		else
			mode = MEMMAP_ON_MEMORY_DISABLE;
	}
	*((int *)kp->arg) = mode;
	if (mode == MEMMAP_ON_MEMORY_FORCE) {
		unsigned long memmap_pages = memory_block_memmap_on_memory_pages();

		/* Warn (once) how many pages the pageblock padding wastes. */
		pr_info_once("Memory hotplug will waste %ld pages in each memory block\n",
			     memmap_pages - PFN_UP(memory_block_memmap_size()));
	}
	return 0;
}

/* Format the current mode back as "force", "Y" or "N" for sysfs. */
static int get_memmap_mode(char *buffer, const struct kernel_param *kp)
{
	int mode = *((int *)kp->arg);

	if (mode == MEMMAP_ON_MEMORY_FORCE)
		return sprintf(buffer, "force\n");
	return sprintf(buffer, "%c\n", mode ? 'Y' : 'N');
}

static const struct kernel_param_ops memmap_mode_ops = {
	.set = set_memmap_mode,
	.get = get_memmap_mode,
};
module_param_cb(memmap_on_memory, &memmap_mode_ops, &memmap_mode, 0444);
MODULE_PARM_DESC(memmap_on_memory, "Enable memmap on memory for memory hotplug\n"
		 "With value \"force\" it could result in memory wastage due "
		 "to memmap size limitations (Y/N/force)");

static inline bool mhp_memmap_on_memory(void)
{
	return memmap_mode != MEMMAP_ON_MEMORY_DISABLE;
}
#else
/* Without CONFIG_MHP_MEMMAP_ON_MEMORY the feature is compiled out. */
static inline bool mhp_memmap_on_memory(void)
{
	return false;
}
#endif
/* Policies for choosing a zone when onlining memory without an explicit zone. */
enum {
	ONLINE_POLICY_CONTIG_ZONES = 0,
	ONLINE_POLICY_AUTO_MOVABLE,
};

static const char * const online_policy_to_str[] = {
	[ONLINE_POLICY_CONTIG_ZONES] = "contig-zones",
	[ONLINE_POLICY_AUTO_MOVABLE] = "auto-movable",
};

/* Parse the policy string into an ONLINE_POLICY_* index. */
static int set_online_policy(const char *val, const struct kernel_param *kp)
{
	int ret = sysfs_match_string(online_policy_to_str, val);

	if (ret < 0)
		return ret;
	*((int *)kp->arg) = ret;
	return 0;
}

/* Format the current policy back as its string name for sysfs. */
static int get_online_policy(char *buffer, const struct kernel_param *kp)
{
	return sprintf(buffer, "%s\n", online_policy_to_str[*((int *)kp->arg)]);
}

/*
 * memory_hotplug.online_policy: configure online behavior when onlining without
 * specifying a zone (MMOP_ONLINE)
 *
 * "contig-zones": keep zone contiguous
 * "auto-movable": online memory to ZONE_MOVABLE if the configuration
 *		   (auto_movable_ratio, auto_movable_numa_aware) allows for it
 */
static int online_policy __read_mostly = ONLINE_POLICY_CONTIG_ZONES;
static const struct kernel_param_ops online_policy_ops = {
	.set = set_online_policy,
	.get = get_online_policy,
};
module_param_cb(online_policy, &online_policy_ops, &online_policy, 0644);
MODULE_PARM_DESC(online_policy,
		"Set the online policy (\"contig-zones\", \"auto-movable\") "
		"Default: \"contig-zones\"");
/*
 * memory_hotplug.auto_movable_ratio: specify maximum MOVABLE:KERNEL ratio
 *
 * The ratio represent an upper limit and the kernel might decide to not
 * online some memory to ZONE_MOVABLE -- e.g., because hotplugged KERNEL memory
 * doesn't allow for more MOVABLE memory.
 */
static unsigned int auto_movable_ratio __read_mostly = 301;
module_param(auto_movable_ratio, uint, 0644);
MODULE_PARM_DESC(auto_movable_ratio,
		"Set the maximum ratio of MOVABLE:KERNEL memory in the system "
		"in percent for \"auto-movable\" online policy. Default: 301");

/*
 * memory_hotplug.auto_movable_numa_aware: consider numa node stats
 */
#ifdef CONFIG_NUMA
static bool auto_movable_numa_aware __read_mostly = true;
module_param(auto_movable_numa_aware, bool, 0644);
MODULE_PARM_DESC(auto_movable_numa_aware,
		"Consider numa node stats in addition to global stats in "
		"\"auto-movable\" online policy. Default: true");
#endif /* CONFIG_NUMA */

/*
 * online_page_callback contains pointer to current page onlining function.
 * Initially it is generic_online_page(). If it is required it could be
 * changed by calling set_online_page_callback() for callback registration
 * and restore_online_page_callback() for generic callback restore.
 */
static online_page_callback_t online_page_callback = generic_online_page;
/* Serializes changes to online_page_callback. */
static DEFINE_MUTEX(online_page_callback_lock);

/* Read-mostly lock protecting memory hotplug state. */
DEFINE_STATIC_PERCPU_RWSEM(mem_hotplug_lock);
/* Block memory hot(un)plug while the caller inspects memory state. */
void get_online_mems(void)
{
	percpu_down_read(&mem_hotplug_lock);
}

/* Release the read-side hold taken by get_online_mems(). */
void put_online_mems(void)
{
	percpu_up_read(&mem_hotplug_lock);
}
bool movable_node_enabled = false;

/* -1 means "not yet initialized": fall back to the Kconfig default below. */
static int mhp_default_online_type = -1;

/*
 * Resolve the default online type for hotplugged memory, lazily picking
 * the Kconfig-selected default on first use.
 */
int mhp_get_default_online_type(void)
{
	if (mhp_default_online_type >= 0)
		return mhp_default_online_type;

	if (IS_ENABLED(CONFIG_MHP_DEFAULT_ONLINE_TYPE_OFFLINE))
		mhp_default_online_type = MMOP_OFFLINE;
	else if (IS_ENABLED(CONFIG_MHP_DEFAULT_ONLINE_TYPE_ONLINE_AUTO))
		mhp_default_online_type = MMOP_ONLINE;
	else if (IS_ENABLED(CONFIG_MHP_DEFAULT_ONLINE_TYPE_ONLINE_KERNEL))
		mhp_default_online_type = MMOP_ONLINE_KERNEL;
	else if (IS_ENABLED(CONFIG_MHP_DEFAULT_ONLINE_TYPE_ONLINE_MOVABLE))
		mhp_default_online_type = MMOP_ONLINE_MOVABLE;
	else
		mhp_default_online_type = MMOP_OFFLINE;

	return mhp_default_online_type;
}

/* Override the default online type (e.g. from sysfs). */
void mhp_set_default_online_type(int online_type)
{
	mhp_default_online_type = online_type;
}

/* Handle the "memhp_default_state=" boot parameter. */
static int __init setup_memhp_default_state(char *str)
{
	const int online_type = mhp_online_type_from_str(str);

	if (online_type >= 0)
		mhp_default_online_type = online_type;

	return 1;
}
__setup("memhp_default_state=", setup_memhp_default_state);
/*
 * Enter a memory hotplug operation: take the CPU hotplug read lock first,
 * then the hotplug writer lock (lock ordering must match mem_hotplug_done()).
 */
void mem_hotplug_begin(void)
{
	cpus_read_lock();
	percpu_down_write(&mem_hotplug_lock);
}

/* Leave a memory hotplug operation, releasing locks in reverse order. */
void mem_hotplug_done(void)
{
	percpu_up_write(&mem_hotplug_lock);
	cpus_read_unlock();
}
/* Upper bound on addable memory, adjustable via the "mem=" boot parameter. */
u64 max_mem_size = U64_MAX;

/* add this memory to iomem resource */
/*
 * Claim [start, start + size) in the iomem resource tree under
 * @resource_name. Any name other than "System RAM" marks the range as
 * driver-managed. Returns the new resource or an ERR_PTR() on failure.
 */
static struct resource *register_memory_resource(u64 start, u64 size,
						 const char *resource_name)
{
	struct resource *res;
	unsigned long flags =  IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

	if (strcmp(resource_name, "System RAM"))
		flags |= IORESOURCE_SYSRAM_DRIVER_MANAGED;

	if (!mhp_range_allowed(start, size, true))
		return ERR_PTR(-E2BIG);

	/*
	 * Make sure value parsed from 'mem=' only restricts memory adding
	 * while booting, so that memory hotplug won't be impacted. Please
	 * refer to document of 'mem=' in kernel-parameters.txt for more
	 * details.
	 */
	if (start + size > max_mem_size && system_state < SYSTEM_RUNNING)
		return ERR_PTR(-E2BIG);

	/*
	 * Request ownership of the new memory range. This might be
	 * a child of an existing resource that was present but
	 * not marked as busy.
	 */
	res = __request_region(&iomem_resource, start, size,
			       resource_name, flags);

	if (!res) {
		pr_debug("Unable to reserve System RAM region: %016llx->%016llx\n",
				start, start + size);
		return ERR_PTR(-EEXIST);
	}
	return res;
}
/* Undo register_memory_resource(): unhook the region and free it. */
static void release_memory_resource(struct resource *res)
{
	if (res) {
		release_resource(res);
		kfree(res);
	}
}
/* Validate that a pfn range has hotplug-capable alignment; 0 or -EINVAL. */
static int check_pfn_span(unsigned long pfn, unsigned long nr_pages)
{
	/*
	 * Disallow all operations smaller than a sub-section and only
	 * allow operations smaller than a section for
	 * SPARSEMEM_VMEMMAP. Note that check_hotplug_memory_range()
	 * enforces a larger memory_block_size_bytes() granularity for
	 * memory that will be marked online, so this check should only
	 * fire for direct arch_{add,remove}_memory() users outside of
	 * add_memory_resource().
	 */
	const unsigned long min_align = IS_ENABLED(CONFIG_SPARSEMEM_VMEMMAP) ?
					PAGES_PER_SUBSECTION : PAGES_PER_SECTION;

	return IS_ALIGNED(pfn | nr_pages, min_align) ? 0 : -EINVAL;
}
/*
 * Return page for the valid pfn only if the page is online. All pfn
 * walkers which rely on the fully initialized page->flags and others
 * should use this rather than pfn_valid && pfn_to_page
 */
struct page *pfn_to_online_page(unsigned long pfn)
{
	unsigned long nr = pfn_to_section_nr(pfn);
	struct dev_pagemap *pgmap;
	struct mem_section *ms;

	/* Out-of-range or offline sections never yield an online page. */
	if (nr >= NR_MEM_SECTIONS)
		return NULL;

	ms = __nr_to_section(nr);
	if (!online_section(ms))
		return NULL;

	/*
	 * Save some code text when online_section() +
	 * pfn_section_valid() are sufficient.
	 */
	if (IS_ENABLED(CONFIG_HAVE_ARCH_PFN_VALID) && !pfn_valid(pfn))
		return NULL;

	if (!pfn_section_valid(ms, pfn))
		return NULL;

	if (!online_device_section(ms))
		return pfn_to_page(pfn);

	/*
	 * Slowpath: when ZONE_DEVICE collides with
	 * ZONE_{NORMAL,MOVABLE} within the same section some pfns in
	 * the section may be 'offline' but 'valid'. Only
	 * get_dev_pagemap() can determine sub-section online status.
	 */
	pgmap = get_dev_pagemap(pfn);
	put_dev_pagemap(pgmap);

	/* The presence of a pgmap indicates ZONE_DEVICE offline pfn */
	if (pgmap)
		return NULL;

	return pfn_to_page(pfn);
}
EXPORT_SYMBOL_GPL(pfn_to_online_page);
/*
 * Add @nr_pages of memmap starting at @pfn to node @nid, one section
 * (or partial section) at a time. @params->altmap, if set, supplies the
 * backing for the vmemmap and must cover the whole request. Returns 0 on
 * success or a negative errno; on partial failure, already-added sections
 * are left in place.
 */
int __add_pages(int nid, unsigned long pfn, unsigned long nr_pages,
		struct mhp_params *params)
{
	const unsigned long end_pfn = pfn + nr_pages;
	unsigned long cur_nr_pages;
	int err;
	struct vmem_altmap *altmap = params->altmap;

	if (WARN_ON_ONCE(!pgprot_val(params->pgprot)))
		return -EINVAL;

	VM_BUG_ON(!mhp_range_allowed(PFN_PHYS(pfn), nr_pages * PAGE_SIZE, false));

	if (altmap) {
		/*
		 * Validate altmap is within bounds of the total request
		 */
		if (altmap->base_pfn != pfn
				|| vmem_altmap_offset(altmap) > nr_pages) {
			pr_warn_once("memory add fail, invalid altmap\n");
			return -EINVAL;
		}
		altmap->alloc = 0;
	}

	if (check_pfn_span(pfn, nr_pages)) {
		WARN(1, "Misaligned %s start: %#lx end: %#lx\n", __func__, pfn, pfn + nr_pages - 1);
		return -EINVAL;
	}

	for (; pfn < end_pfn; pfn += cur_nr_pages) {
		/* Select all remaining pages up to the next section boundary */
		cur_nr_pages = min(end_pfn - pfn,
				   SECTION_ALIGN_UP(pfn + 1) - pfn);
		err = sparse_add_section(nid, pfn, cur_nr_pages, altmap,
					 params->pgmap);
		if (err)
			break;
		cond_resched();
	}
	vmemmap_populate_print_last();
	return err;
}
/*
 * Find the smallest online pfn in [start_pfn, end_pfn) that belongs to
 * @nid and @zone, scanning a subsection at a time; 0 if none is found.
 */
static unsigned long find_smallest_section_pfn(int nid, struct zone *zone,
				     unsigned long start_pfn,
				     unsigned long end_pfn)
{
	unsigned long pfn;

	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SUBSECTION) {
		if (!pfn_to_online_page(pfn))
			continue;
		if (pfn_to_nid(pfn) == nid &&
		    page_zone(pfn_to_page(pfn)) == zone)
			return pfn;
	}

	return 0;
}
/*
 * Find the biggest online pfn in [start_pfn, end_pfn) that belongs to
 * @nid and @zone, scanning backwards a subsection at a time; 0 if none.
 */
static unsigned long find_biggest_section_pfn(int nid, struct zone *zone,
				    unsigned long start_pfn,
				    unsigned long end_pfn)
{
	unsigned long pfn;

	/* Start from the last pfn of the range and walk down. */
	for (pfn = end_pfn - 1; pfn >= start_pfn; pfn -= PAGES_PER_SUBSECTION) {
		if (!pfn_to_online_page(pfn))
			continue;
		if (pfn_to_nid(pfn) == nid &&
		    page_zone(pfn_to_page(pfn)) == zone)
			return pfn;
	}

	return 0;
}
/*
 * Shrink @zone's span after offlining [start_pfn, end_pfn): if the range
 * touches either end of the zone, move that end inward to the next/previous
 * online pfn still belonging to the zone, or empty the zone entirely.
 */
static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,
			     unsigned long end_pfn)
{
	unsigned long pfn;
	int nid = zone_to_nid(zone);

	if (zone->zone_start_pfn == start_pfn) {
		/*
		 * If the section is smallest section in the zone, it need
		 * shrink zone->zone_start_pfn and zone->zone_spanned_pages.
		 * In this case, we find second smallest valid mem_section
		 * for shrinking zone.
		 */
		pfn = find_smallest_section_pfn(nid, zone, end_pfn,
						zone_end_pfn(zone));
		if (pfn) {
			zone->spanned_pages = zone_end_pfn(zone) - pfn;
			zone->zone_start_pfn = pfn;
		} else {
			/* No online pages remain: the zone becomes empty. */
			zone->zone_start_pfn = 0;
			zone->spanned_pages = 0;
		}
	} else if (zone_end_pfn(zone) == end_pfn) {
		/*
		 * If the section is biggest section in the zone, it need
		 * shrink zone->spanned_pages.
		 * In this case, we find second biggest valid mem_section for
		 * shrinking zone.
		 */
		pfn = find_biggest_section_pfn(nid, zone, zone->zone_start_pfn,
					       start_pfn);
		if (pfn)
			zone->spanned_pages = pfn - zone->zone_start_pfn + 1;
		else {
			/* No online pages remain: the zone becomes empty. */
			zone->zone_start_pfn = 0;
			zone->spanned_pages = 0;
		}
	}
}
/*
 * Recompute the node's start pfn and spanned pages as the union of the
 * spans of all its non-empty zones.
 */
static void update_pgdat_span(struct pglist_data *pgdat)
{
	unsigned long node_start_pfn = 0, node_end_pfn = 0;
	struct zone *zone;

	for (zone = pgdat->node_zones;
	     zone < pgdat->node_zones + MAX_NR_ZONES; zone++) {
		unsigned long end_pfn = zone_end_pfn(zone);

		/* No need to lock the zones, they can't change. */
		if (!zone->spanned_pages)
			continue;
		if (!node_end_pfn) {
			/* First non-empty zone seeds the node span. */
			node_start_pfn = zone->zone_start_pfn;
			node_end_pfn = end_pfn;
			continue;
		}

		if (end_pfn > node_end_pfn)
			node_end_pfn = end_pfn;
		if (zone->zone_start_pfn < node_start_pfn)
			node_start_pfn = zone->zone_start_pfn;
	}

	pgdat->node_start_pfn = node_start_pfn;
	pgdat->node_spanned_pages = node_end_pfn - node_start_pfn;
}
/*
 * Disassociate [start_pfn, start_pfn + nr_pages) from @zone: poison the
 * now-stale struct pages and shrink the zone/node spans accordingly.
 * ZONE_DEVICE zones are only poisoned, never shrunk.
 */
void remove_pfn_range_from_zone(struct zone *zone,
				unsigned long start_pfn,
				unsigned long nr_pages)
{
	const unsigned long end_pfn = start_pfn + nr_pages;
	struct pglist_data *pgdat = zone->zone_pgdat;
	unsigned long pfn, cur_nr_pages;

	/* Poison struct pages because they are now uninitialized again. */
	for (pfn = start_pfn; pfn < end_pfn; pfn += cur_nr_pages) {
		cond_resched();

		/* Select all remaining pages up to the next section boundary */
		cur_nr_pages =
			min(end_pfn - pfn, SECTION_ALIGN_UP(pfn + 1) - pfn);
		page_init_poison(pfn_to_page(pfn),
				 sizeof(struct page) * cur_nr_pages);
	}

	/*
	 * Zone shrinking code cannot properly deal with ZONE_DEVICE. So
	 * we will not try to shrink the zones - which is okay as
	 * set_zone_contiguous() cannot deal with ZONE_DEVICE either way.
	 */
	if (zone_is_zone_device(zone))
		return;

	clear_zone_contiguous(zone);
	shrink_zone_span(zone, start_pfn, start_pfn + nr_pages);
	update_pgdat_span(pgdat);
	set_zone_contiguous(zone);
}
/**
 * __remove_pages() - remove sections of pages
 * @pfn: starting pageframe (must be aligned to start of a section)
 * @nr_pages: number of pages to remove (must be multiple of section size)
 * @altmap: alternative device page map or %NULL if default memmap is used
 *
 * Generic helper function to remove section mappings and sysfs entries
 * for the section of the memory we are removing. Caller needs to make
 * sure that pages are marked reserved and zones are adjusted properly by
 * calling offline_pages().
 */
void __remove_pages(unsigned long pfn, unsigned long nr_pages,
		    struct vmem_altmap *altmap)
{
	const unsigned long end_pfn = pfn + nr_pages;
	unsigned long cur_nr_pages;

	/* Misaligned requests are rejected outright (see check_pfn_span()). */
	if (check_pfn_span(pfn, nr_pages)) {
		WARN(1, "Misaligned %s start: %#lx end: %#lx\n", __func__, pfn, pfn + nr_pages - 1);
		return;
	}

	for (; pfn < end_pfn; pfn += cur_nr_pages) {
		cond_resched();
		/* Select all remaining pages up to the next section boundary */
		cur_nr_pages = min(end_pfn - pfn,
				   SECTION_ALIGN_UP(pfn + 1) - pfn);
		sparse_remove_section(pfn, cur_nr_pages, altmap);
	}
}
/*
 * Install a custom page-onlining callback. Succeeds only if the generic
 * callback is currently in place (i.e. no other callback is registered);
 * returns 0 on success, -EINVAL otherwise.
 */
int set_online_page_callback(online_page_callback_t callback)
{
	int rc = -EINVAL;

	get_online_mems();
	mutex_lock(&online_page_callback_lock);

	if (online_page_callback == generic_online_page) {
		online_page_callback = callback;
		rc = 0;
	}

	mutex_unlock(&online_page_callback_lock);
	put_online_mems();

	return rc;
}
EXPORT_SYMBOL_GPL(set_online_page_callback);
/*
 * Restore the generic page-onlining callback. Succeeds only if @callback
 * is the one currently installed; returns 0 on success, -EINVAL otherwise.
 */
int restore_online_page_callback(online_page_callback_t callback)
{
	int rc = -EINVAL;

	get_online_mems();
	mutex_lock(&online_page_callback_lock);

	if (online_page_callback == callback) {
		online_page_callback = generic_online_page;
		rc = 0;
	}

	mutex_unlock(&online_page_callback_lock);
	put_online_mems();

	return rc;
}
EXPORT_SYMBOL_GPL(restore_online_page_callback);
/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
/* Default onlining callback: hand a 2^order page chunk to the buddy. */
void generic_online_page(struct page *page, unsigned int order)
{
	__free_pages_core(page, order, MEMINIT_HOTPLUG);
}
EXPORT_SYMBOL_GPL(generic_online_page);
/*
 * Hand [start_pfn, start_pfn + nr_pages) to the current onlining callback
 * in the largest aligned chunks possible, then mark the covered sections
 * online.
 */
static void online_pages_range(unsigned long start_pfn, unsigned long nr_pages)
{
	const unsigned long end_pfn = start_pfn + nr_pages;
	unsigned long pfn;

	/*
	 * Online the pages in MAX_PAGE_ORDER aligned chunks. The callback might
	 * decide to not expose all pages to the buddy (e.g., expose them
	 * later). We account all pages as being online and belonging to this
	 * zone ("present").
	 * When using memmap_on_memory, the range might not be aligned to
	 * MAX_ORDER_NR_PAGES - 1, but pageblock aligned. __ffs() will detect
	 * this and the first chunk to online will be pageblock_nr_pages.
	 */
	for (pfn = start_pfn; pfn < end_pfn;) {
		struct page *page = pfn_to_page(pfn);
		int order;

		/*
		 * Free to online pages in the largest chunks alignment allows.
		 *
		 * __ffs() behaviour is undefined for 0. start == 0 is
		 * MAX_PAGE_ORDER-aligned, Set order to MAX_PAGE_ORDER for
		 * the case.
		 */
		if (pfn)
			order = min_t(int, MAX_PAGE_ORDER, __ffs(pfn));
		else
			order = MAX_PAGE_ORDER;

		/*
		 * Exposing the page to the buddy by freeing can cause
		 * issues with debug_pagealloc enabled: some archs don't
		 * like double-unmappings. So treat them like any pages that
		 * were allocated from the buddy.
		 */
		debug_pagealloc_map_pages(page, 1 << order);
		(*online_page_callback)(page, order);
		pfn += (1UL << order);
	}

	/* mark all involved sections as online */
	online_mem_sections(start_pfn, end_pfn);
}
/* Grow @zone (if needed) so that it spans [start_pfn, start_pfn + nr_pages). */
static void __meminit resize_zone_range(struct zone *zone, unsigned long start_pfn,
		unsigned long nr_pages)
{
	unsigned long old_end_pfn = zone_end_pfn(zone);

	/* An empty zone adopts the new start; otherwise only extend downward. */
	if (zone_is_empty(zone) || start_pfn < zone->zone_start_pfn)
		zone->zone_start_pfn = start_pfn;

	zone->spanned_pages = max(start_pfn + nr_pages, old_end_pfn) - zone->zone_start_pfn;
}
/* Grow @pgdat (if needed) so that it spans [start_pfn, start_pfn + nr_pages). */
static void __meminit resize_pgdat_range(struct pglist_data *pgdat, unsigned long start_pfn,
                                     unsigned long nr_pages)
{
	unsigned long old_end_pfn = pgdat_end_pfn(pgdat);

	/* An empty node adopts the new start; otherwise only extend downward. */
	if (!pgdat->node_spanned_pages || start_pfn < pgdat->node_start_pfn)
		pgdat->node_start_pfn = start_pfn;

	pgdat->node_spanned_pages = max(start_pfn + nr_pages, old_end_pfn) - pgdat->node_start_pfn;

}
#ifdef CONFIG_ZONE_DEVICE
/*
 * Flag @pfn's section as containing ZONE_DEVICE memory so that
 * pfn_to_online_page() takes the slow sub-section path for it.
 */
static void section_taint_zone_device(unsigned long pfn)
{
	struct mem_section *ms = __pfn_to_section(pfn);

	ms->section_mem_map |= SECTION_TAINT_ZONE_DEVICE;
}
#else
/* Without CONFIG_ZONE_DEVICE there is nothing to taint. */
static inline void section_taint_zone_device(unsigned long pfn)
{
}
#endif
/*
 * Associate the pfn range with the given zone, initializing the memmaps
 * and resizing the pgdat/zone data to span the added pages. After this
 * call, all affected pages are PageOffline().
 *
 * All aligned pageblocks are initialized to the specified migratetype
 * (usually MIGRATE_MOVABLE). Besides setting the migratetype, no related
 * zone stats (e.g., nr_isolate_pageblock) are touched.
 */
void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
			    unsigned long nr_pages,
			    struct vmem_altmap *altmap, int migratetype,
			    bool isolate_pageblock)
{
	struct pglist_data *pgdat = zone->zone_pgdat;
	int nid = pgdat->node_id;

	clear_zone_contiguous(zone);

	/* An empty zone must be initialized before its span can grow. */
	if (zone_is_empty(zone))
		init_currently_empty_zone(zone, start_pfn, nr_pages);
	resize_zone_range(zone, start_pfn, nr_pages);
	resize_pgdat_range(pgdat, start_pfn, nr_pages);

	/*
	 * Subsection population requires care in pfn_to_online_page().
	 * Set the taint to enable the slow path detection of
	 * ZONE_DEVICE pages in an otherwise  ZONE_{NORMAL,MOVABLE}
	 * section.
	 */
	if (zone_is_zone_device(zone)) {
		if (!IS_ALIGNED(start_pfn, PAGES_PER_SECTION))
			section_taint_zone_device(start_pfn);
		if (!IS_ALIGNED(start_pfn + nr_pages, PAGES_PER_SECTION))
			section_taint_zone_device(start_pfn + nr_pages);
	}

	/*
	 * TODO now we have a visible range of pages which are not associated
	 * with their zone properly. Not nice but set_pfnblock_migratetype()
	 * expects the zone spans the pfn range. All the pages in the range
	 * are reserved so nobody should be touching them so we should be safe
	 */
	memmap_init_range(nr_pages, nid, zone_idx(zone), start_pfn, 0,
			  MEMINIT_HOTPLUG, altmap, migratetype,
			  isolate_pageblock);

	set_zone_contiguous(zone);
}
/* Running totals used by the "auto-movable" online policy. */
struct auto_movable_stats {
	unsigned long kernel_early_pages;	/* boot-time pages in kernel zones */
	unsigned long movable_pages;		/* pages behaving like ZONE_MOVABLE */
};

/* Fold one zone's present pages into @stats, treating CMA as movable. */
static void auto_movable_stats_account_zone(struct auto_movable_stats *stats,
					    struct zone *zone)
{
	if (zone_idx(zone) == ZONE_MOVABLE) {
		stats->movable_pages += zone->present_pages;
	} else {
		stats->kernel_early_pages += zone->present_early_pages;
#ifdef CONFIG_CMA
		/*
		 * CMA pages (never on hotplugged memory) behave like
		 * ZONE_MOVABLE.
		 */
		stats->movable_pages += zone->cma_pages;
		stats->kernel_early_pages -= zone->cma_pages;
#endif /* CONFIG_CMA */
	}
}
/* Per-memory-group accumulation for walk_dynamic_memory_groups(). */
struct auto_movable_group_stats {
	unsigned long movable_pages;		/* movable pages across groups */
	unsigned long req_kernel_early_pages;	/* kernel pages the groups need */
};

/*
 * Callback: account one dynamic memory group's contribution, computing how
 * many early kernel pages it requires to keep its own MOVABLE:KERNEL ratio.
 */
static int auto_movable_stats_account_group(struct memory_group *group,
					   void *arg)
{
	const int ratio = READ_ONCE(auto_movable_ratio);
	struct auto_movable_group_stats *stats = arg;
	long pages;

	/*
	 * We don't support modifying the config while the auto-movable online
	 * policy is already enabled. Just avoid the division by zero below.
	 */
	if (!ratio)
		return 0;

	/*
	 * Calculate how many early kernel pages this group requires to
	 * satisfy the configured zone ratio.
	 */
	pages = group->present_movable_pages * 100 / ratio;
	pages -= group->present_kernel_pages;

	if (pages > 0)
		stats->req_kernel_early_pages += pages;
	stats->movable_pages += group->present_movable_pages;
	return 0;
}
/*
 * Decide whether @nr_pages may be onlined to ZONE_MOVABLE on node @nid
 * (or globally for NUMA_NO_NODE) without exceeding the configured
 * auto_movable_ratio, after discounting what other dynamic memory groups
 * need for themselves.
 */
static bool auto_movable_can_online_movable(int nid, struct memory_group *group,
					    unsigned long nr_pages)
{
	unsigned long kernel_early_pages, movable_pages;
	struct auto_movable_group_stats group_stats = {};
	struct auto_movable_stats stats = {};
	struct zone *zone;
	int i;

	/* Walk all relevant zones and collect MOVABLE vs. KERNEL stats. */
	if (nid == NUMA_NO_NODE) {
		/* TODO: cache values */
		for_each_populated_zone(zone)
			auto_movable_stats_account_zone(&stats, zone);
	} else {
		for (i = 0; i < MAX_NR_ZONES; i++) {
			pg_data_t *pgdat = NODE_DATA(nid);

			zone = pgdat->node_zones + i;
			if (populated_zone(zone))
				auto_movable_stats_account_zone(&stats, zone);
		}
	}

	kernel_early_pages = stats.kernel_early_pages;
	movable_pages = stats.movable_pages;

	/*
	 * Kernel memory inside dynamic memory group allows for more MOVABLE
	 * memory within the same group. Remove the effect of all but the
	 * current group from the stats.
	 */
	walk_dynamic_memory_groups(nid, auto_movable_stats_account_group,
				   group, &group_stats);
	if (kernel_early_pages <= group_stats.req_kernel_early_pages)
		return false;
	kernel_early_pages -= group_stats.req_kernel_early_pages;
	movable_pages -= group_stats.movable_pages;

	/* The current dynamic group may rely on its own kernel pages. */
	if (group && group->is_dynamic)
		kernel_early_pages += group->present_kernel_pages;

	/*
	 * Test if we could online the given number of pages to ZONE_MOVABLE
	 * and still stay in the configured ratio.
	 */
	movable_pages += nr_pages;
	return movable_pages <= (auto_movable_ratio * kernel_early_pages) / 100;
}
/*
* Returns a default kernel memory zone for the given pfn range.
* If no kernel zone covers this pfn range it will automatically go
* to the ZONE_NORMAL.
*/
static struct zone *default_kernel_zone_for_pfn(int nid, unsigned long start_pfn,
		unsigned long nr_pages)
{
	struct pglist_data *pgdat = NODE_DATA(nid);
	int zid;

	/* Pick the lowest kernel zone that already spans this pfn range. */
	for (zid = 0; zid < ZONE_NORMAL; zid++) {
		struct zone *candidate = &pgdat->node_zones[zid];

		if (zone_intersects(candidate, start_pfn, nr_pages))
			return candidate;
	}

	/* No intersecting kernel zone: fall back to ZONE_NORMAL. */
	return &pgdat->node_zones[ZONE_NORMAL];
}
/*
* Determine to which zone to online memory dynamically based on user
* configuration and system stats. We care about the following ratio:
*
* MOVABLE : KERNEL
*
* Whereby MOVABLE is memory in ZONE_MOVABLE and KERNEL is memory in
 * one of the kernel zones. CMA pages inside one of the kernel zones really
 * behave like ZONE_MOVABLE, so we treat them accordingly.
*
* We don't allow for hotplugged memory in a KERNEL zone to increase the
* amount of MOVABLE memory we can have, so we end up with:
*
* MOVABLE : KERNEL_EARLY
*
* Whereby KERNEL_EARLY is memory in one of the kernel zones, available since
* boot. We base our calculation on KERNEL_EARLY internally, because:
*
* a) Hotplugged memory in one of the kernel zones can sometimes still get
* hotunplugged, especially when hot(un)plugging individual memory blocks.
* There is no coordination across memory devices, therefore "automatic"
* hotunplugging, as implemented in hypervisors, could result in zone
* imbalances.
* b) Early/boot memory in one of the kernel zones can usually not get
* hotunplugged again (e.g., no firmware interface to unplug, fragmented
* with unmovable allocations). While there are corner cases where it might
* still work, it is barely relevant in practice.
*
* Exceptions are dynamic memory groups, which allow for more MOVABLE
* memory within the same memory group -- because in that case, there is
* coordination within the single memory device managed by a single driver.
*
* We rely on "present pages" instead of "managed pages", as the latter is
* highly unreliable and dynamic in virtualized environments, and does not
* consider boot time allocations. For example, memory ballooning adjusts the
* managed pages when inflating/deflating the balloon, and balloon page
* migration can even migrate inflated pages between zones.
*
* Using "present pages" is better but some things to keep in mind are:
*
* a) Some memblock allocations, such as for the crashkernel area, are
* effectively unused by the kernel, yet they account to "present pages".
* Fortunately, these allocations are comparatively small in relevant setups
* (e.g., fraction of system memory).
* b) Some hotplugged memory blocks in virtualized environments, especially
* hotplugged by virtio-mem, look like they are completely present, however,
* only parts of the memory block are actually currently usable.
* "present pages" is an upper limit that can get reached at runtime. As
* we base our calculations on KERNEL_EARLY, this is not an issue.
*/
/*
 * Select the zone to online the given pfn range to, implementing the
 * "auto-movable" online policy documented in the comment above.
 *
 * @nid: node the memory belongs to
 * @group: memory group of the memory block (may be NULL)
 * @pfn: first pfn of the range to online
 * @nr_pages: number of pages to online
 *
 * Returns ZONE_MOVABLE of @nid if the ratio allows it, otherwise the
 * default kernel zone for the range.
 */
static struct zone *auto_movable_zone_for_pfn(int nid,
					      struct memory_group *group,
					      unsigned long pfn,
					      unsigned long nr_pages)
{
	unsigned long online_pages = 0, max_pages, end_pfn;
	struct page *page;

	/* Ratio 0 disables the auto-movable policy entirely. */
	if (!auto_movable_ratio)
		goto kernel_zone;

	if (group && !group->is_dynamic) {
		max_pages = group->s.max_pages;
		online_pages = group->present_movable_pages;

		/* If anything is !MOVABLE online the rest !MOVABLE. */
		if (group->present_kernel_pages)
			goto kernel_zone;
	} else if (!group || group->d.unit_pages == nr_pages) {
		max_pages = nr_pages;
	} else {
		max_pages = group->d.unit_pages;
		/*
		 * Take a look at all online sections in the current unit.
		 * We can safely assume that all pages within a section belong
		 * to the same zone, because dynamic memory groups only deal
		 * with hotplugged memory.
		 */
		pfn = ALIGN_DOWN(pfn, group->d.unit_pages);
		end_pfn = pfn + group->d.unit_pages;
		for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
			page = pfn_to_online_page(pfn);
			if (!page)
				continue;
			/* If anything is !MOVABLE online the rest !MOVABLE. */
			if (!is_zone_movable_page(page))
				goto kernel_zone;
			online_pages += PAGES_PER_SECTION;
		}
	}

	/*
	 * Online MOVABLE if we could *currently* online all remaining parts
	 * MOVABLE. We expect to (add+) online them immediately next, so if
	 * nobody interferes, all will be MOVABLE if possible.
	 */
	nr_pages = max_pages - online_pages;
	/* Check the ratio system-wide first, then (optionally) per-node. */
	if (!auto_movable_can_online_movable(NUMA_NO_NODE, group, nr_pages))
		goto kernel_zone;
#ifdef CONFIG_NUMA
	if (auto_movable_numa_aware &&
	    !auto_movable_can_online_movable(nid, group, nr_pages))
		goto kernel_zone;
#endif /* CONFIG_NUMA */

	return &NODE_DATA(nid)->node_zones[ZONE_MOVABLE];
kernel_zone:
	return default_kernel_zone_for_pfn(nid, pfn, nr_pages);
}
static inline struct zone *default_zone_for_pfn(int nid, unsigned long start_pfn,
		unsigned long nr_pages)
{
	struct zone *kernel_zone = default_kernel_zone_for_pfn(nid, start_pfn,
							       nr_pages);
	struct zone *movable_zone = &NODE_DATA(nid)->node_zones[ZONE_MOVABLE];
	bool in_kernel = zone_intersects(kernel_zone, start_pfn, nr_pages);
	bool in_movable = zone_intersects(movable_zone, start_pfn, nr_pages);

	/*
	 * Simple case: exactly one zone already spans the range, so inherit
	 * that zone.
	 */
	if (in_kernel && !in_movable)
		return kernel_zone;
	if (in_movable && !in_kernel)
		return movable_zone;

	/*
	 * The range either falls into a hole or both zones overlap it. We
	 * default to a kernel zone unless movable_node was requested, in
	 * which case hotplugged memory goes to ZONE_MOVABLE.
	 */
	return movable_node_enabled ? movable_zone : kernel_zone;
}
struct zone *zone_for_pfn_range(int online_type, int nid,
		struct memory_group *group, unsigned long start_pfn,
		unsigned long nr_pages)
{
	/* Explicit user requests take precedence over any online policy. */
	switch (online_type) {
	case MMOP_ONLINE_KERNEL:
		return default_kernel_zone_for_pfn(nid, start_pfn, nr_pages);
	case MMOP_ONLINE_MOVABLE:
		return &NODE_DATA(nid)->node_zones[ZONE_MOVABLE];
	default:
		break;
	}

	/* MMOP_ONLINE: pick a zone according to the configured policy. */
	if (online_policy == ONLINE_POLICY_AUTO_MOVABLE)
		return auto_movable_zone_for_pfn(nid, group, start_pfn, nr_pages);
	return default_zone_for_pfn(nid, start_pfn, nr_pages);
}
/*
* This function should only be called by memory_block_{online,offline},
* and {online,offline}_pages.
*/
void adjust_present_page_count(struct page *page, struct memory_group *group,
			       long nr_pages)
{
	struct zone *zone = page_zone(page);
	const bool movable = zone_idx(zone) == ZONE_MOVABLE;

	/*
	 * We only support onlining/offlining/adding/removing of complete
	 * memory blocks; therefore, the pages are either all early or all
	 * hotplugged.
	 */
	if (early_section(__pfn_to_section(page_to_pfn(page))))
		zone->present_early_pages += nr_pages;
	zone->present_pages += nr_pages;
	zone->zone_pgdat->node_present_pages += nr_pages;

	/* Keep the per-group MOVABLE vs. KERNEL accounting in sync. */
	if (group) {
		if (movable)
			group->present_movable_pages += nr_pages;
		else
			group->present_kernel_pages += nr_pages;
	}
}
/*
 * Prepare the vmemmap ("memmap on memory") pages of a memory block: add
 * KASAN shadow, move the pfn range into @zone, and mark each page as a
 * self-hosted vmemmap page.
 *
 * Returns 0 on success, a negative errno if adding the KASAN shadow failed.
 */
int mhp_init_memmap_on_memory(unsigned long pfn, unsigned long nr_pages,
			      struct zone *zone)
{
	unsigned long end_pfn = pfn + nr_pages;
	int ret, i;

	ret = kasan_add_zero_shadow(__va(PFN_PHYS(pfn)), PFN_PHYS(nr_pages));
	if (ret)
		return ret;

	move_pfn_range_to_zone(zone, pfn, nr_pages, NULL, MIGRATE_UNMOVABLE,
			       false);

	for (i = 0; i < nr_pages; i++) {
		struct page *page = pfn_to_page(pfn + i);

		/* Vmemmap pages are used immediately; they are not offline. */
		__ClearPageOffline(page);
		SetPageVmemmapSelfHosted(page);
	}

	/*
	 * It might be that the vmemmap_pages fully span sections. If that is
	 * the case, mark those sections online here as otherwise they will be
	 * left offline.
	 */
	if (nr_pages >= PAGES_PER_SECTION)
	        online_mem_sections(pfn, ALIGN_DOWN(end_pfn, PAGES_PER_SECTION));

	return ret;
}
/*
 * Undo mhp_init_memmap_on_memory(): mark fully-spanned sections offline,
 * remove the pfn range from its zone and drop the KASAN shadow again.
 */
void mhp_deinit_memmap_on_memory(unsigned long pfn, unsigned long nr_pages)
{
	unsigned long end_pfn = pfn + nr_pages;

	/*
	 * It might be that the vmemmap_pages fully span sections. If that is
	 * the case, mark those sections offline here as otherwise they will be
	 * left online.
	 */
	if (nr_pages >= PAGES_PER_SECTION)
		offline_mem_sections(pfn, ALIGN_DOWN(end_pfn, PAGES_PER_SECTION));

	/*
	 * The pages associated with this vmemmap have been offlined, so
	 * we can reset its state here.
	 */
	remove_pfn_range_from_zone(page_zone(pfn_to_page(pfn)), pfn, nr_pages);
	kasan_remove_zero_shadow(__va(PFN_PHYS(pfn)), PFN_PHYS(nr_pages));
}
/*
* Must be called with mem_hotplug_lock in write mode.
*/
/*
 * Online the pfn range [pfn, pfn + nr_pages) into @zone: run the node and
 * memory notifiers, expose the pages to the buddy, and update zonelists,
 * watermarks and per-node daemons as needed.
 *
 * Returns 0 on success, a negative errno on (notifier) failure, in which
 * case the range is rolled back out of the zone again.
 */
int online_pages(unsigned long pfn, unsigned long nr_pages,
		 struct zone *zone, struct memory_group *group)
{
	struct memory_notify mem_arg = {
		.start_pfn = pfn,
		.nr_pages = nr_pages,
	};
	struct node_notify node_arg = {
		.nid = NUMA_NO_NODE,
	};
	const int nid = zone_to_nid(zone);
	int need_zonelists_rebuild = 0;
	unsigned long flags;
	int ret;

	/*
	 * {on,off}lining is constrained to full memory sections (or more
	 * precisely to memory blocks from the user space POV).
	 * memmap_on_memory is an exception because it reserves initial part
	 * of the physical memory space for vmemmaps. That space is pageblock
	 * aligned.
	 */
	if (WARN_ON_ONCE(!nr_pages || !pageblock_aligned(pfn) ||
			 !IS_ALIGNED(pfn + nr_pages, PAGES_PER_SECTION)))
		return -EINVAL;

	/* associate pfn range with the zone */
	move_pfn_range_to_zone(zone, pfn, nr_pages, NULL, MIGRATE_MOVABLE,
			       true);

	if (!node_state(nid, N_MEMORY)) {
		/* Adding memory to the node for the first time */
		node_arg.nid = nid;
		ret = node_notify(NODE_ADDING_FIRST_MEMORY, &node_arg);
		ret = notifier_to_errno(ret);
		if (ret)
			goto failed_addition;
	}

	ret = memory_notify(MEM_GOING_ONLINE, &mem_arg);
	ret = notifier_to_errno(ret);
	if (ret)
		goto failed_addition;

	/*
	 * Fixup the number of isolated pageblocks before marking the sections
	 * onlining, such that undo_isolate_page_range() works correctly.
	 */
	spin_lock_irqsave(&zone->lock, flags);
	zone->nr_isolate_pageblock += nr_pages / pageblock_nr_pages;
	spin_unlock_irqrestore(&zone->lock, flags);

	/*
	 * If this zone is not populated, then it is not in zonelist.
	 * This means the page allocator ignores this zone.
	 * So, zonelist must be updated after online.
	 */
	if (!populated_zone(zone)) {
		need_zonelists_rebuild = 1;
		setup_zone_pageset(zone);
	}

	online_pages_range(pfn, nr_pages);
	adjust_present_page_count(pfn_to_page(pfn), group, nr_pages);

	/* node_arg.nid was set above iff this is the node's first memory. */
	if (node_arg.nid >= 0)
		node_set_state(nid, N_MEMORY);
	if (need_zonelists_rebuild)
		build_all_zonelists(NULL);

	/* Basic onlining is complete, allow allocation of onlined pages. */
	undo_isolate_page_range(pfn, pfn + nr_pages);

	/*
	 * Freshly onlined pages aren't shuffled (e.g., all pages are placed to
	 * the tail of the freelist when undoing isolation). Shuffle the whole
	 * zone to make sure the just onlined pages are properly distributed
	 * across the whole freelist - to create an initial shuffle.
	 */
	shuffle_zone(zone);

	/* reinitialise watermarks and update pcp limits */
	init_per_zone_wmark_min();

	kswapd_run(nid);
	kcompactd_run(nid);

	if (node_arg.nid >= 0)
		/* First memory added successfully. Notify consumers. */
		node_notify(NODE_ADDED_FIRST_MEMORY, &node_arg);

	writeback_set_ratelimit();

	memory_notify(MEM_ONLINE, &mem_arg);
	return 0;

failed_addition:
	pr_debug("online_pages [mem %#010llx-%#010llx] failed\n",
		 (unsigned long long) pfn << PAGE_SHIFT,
		 (((unsigned long long) pfn + nr_pages) << PAGE_SHIFT) - 1);
	memory_notify(MEM_CANCEL_ONLINE, &mem_arg);
	if (node_arg.nid != NUMA_NO_NODE)
		node_notify(NODE_CANCEL_ADDING_FIRST_MEMORY, &node_arg);
	remove_pfn_range_from_zone(zone, pfn, nr_pages);
	return ret;
}
/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
/*
 * (Re)initialize the preallocated pgdat of an offline node so that memory
 * can be hot-added to it. Returns the node's pgdat.
 */
static pg_data_t *hotadd_init_pgdat(int nid)
{
	struct pglist_data *pgdat;

	/*
	 * NODE_DATA is preallocated (free_area_init) but its internal
	 * state is not allocated completely. Add missing pieces.
	 * Completely offline nodes stay around and they just need
	 * reinitialization.
	 */
	pgdat = NODE_DATA(nid);

	/* init node's zones as empty zones, we don't have any present pages.*/
	free_area_init_core_hotplug(pgdat);

	/*
	 * The node we allocated has no zone fallback lists. For avoiding
	 * to access not-initialized zonelist, build here.
	 */
	build_all_zonelists(pgdat);

	return pgdat;
}
/*
* __try_online_node - online a node if offlined
* @nid: the node ID
* @set_node_online: Whether we want to online the node
* called by cpu_up() to online a node without onlined memory.
*
* Returns:
* 1 -> a new node has been allocated
* 0 -> the node is already online
* -ENOMEM -> the node could not be allocated
*/
static int __try_online_node(int nid, bool set_node_online)
{
	pg_data_t *pgdat;
	int ret = 1;

	if (node_online(nid))
		return 0;

	pgdat = hotadd_init_pgdat(nid);
	if (!pgdat) {
		pr_err("Cannot online node %d due to NULL pgdat\n", nid);
		ret = -ENOMEM;
		goto out;
	}

	if (set_node_online) {
		node_set_online(nid);
		/*
		 * NOTE(review): on this path a successful register_node()
		 * makes us return 0 rather than 1 even though a new node was
		 * initialized; callers only distinguish < 0 from >= 0 here.
		 */
		ret = register_node(nid);
		BUG_ON(ret);
	}
out:
	return ret;
}
/*
* Users of this function always want to online/register the node
*/
int try_online_node(int nid)
{
	int rc;

	/* Serialize against other hotplug operations. */
	mem_hotplug_begin();
	rc = __try_online_node(nid, true);
	mem_hotplug_done();

	return rc;
}
static int check_hotplug_memory_range(u64 start, u64 size)
{
	const unsigned long block_sz = memory_block_size_bytes();

	/* memory range must be block size aligned */
	if (size && IS_ALIGNED(start, block_sz) && IS_ALIGNED(size, block_sz))
		return 0;

	pr_err("Block size [%#lx] unaligned hotplug range: start %#llx, size %#llx",
	       block_sz, start, size);
	return -EINVAL;
}
/*
 * walk_memory_blocks() callback: online one memory block using the
 * configured default online type.
 */
static int online_memory_block(struct memory_block *mem, void *arg)
{
	mem->online_type = mhp_get_default_online_type();
	return device_online(&mem->dev);
}
#ifndef arch_supports_memmap_on_memory
/*
 * Generic fallback used when the architecture does not provide its own
 * check for whether a memmap of @vmemmap_size bytes may be placed on the
 * hotplugged memory itself.
 */
static inline bool arch_supports_memmap_on_memory(unsigned long vmemmap_size)
{
	/*
	 * As default, we want the vmemmap to span a complete PMD such that we
	 * can map the vmemmap using a single PMD if supported by the
	 * architecture.
	 */
	return IS_ALIGNED(vmemmap_size, PMD_SIZE);
}
#endif
/*
 * Test whether the "memmap on memory" feature can be used for hotplugged
 * memory blocks on this system with the current configuration.
 */
bool mhp_supports_memmap_on_memory(void)
{
	unsigned long vmemmap_size = memory_block_memmap_size();
	unsigned long memmap_pages = memory_block_memmap_on_memory_pages();

	/*
	 * Besides having arch support and the feature enabled at runtime, we
	 * need a few more assumptions to hold true:
	 *
	 * a) The vmemmap pages span complete PMDs: We don't want vmemmap code
	 * to populate memory from the altmap for unrelated parts (i.e.,
	 * other memory blocks)
	 *
	 * b) The vmemmap pages (and thereby the pages that will be exposed to
	 * the buddy) have to cover full pageblocks: memory onlining/offlining
	 * code requires applicable ranges to be page-aligned, for example, to
	 * set the migratetypes properly.
	 *
	 * TODO: Although we have a check here to make sure that vmemmap pages
	 * fully populate a PMD, it is not the right place to check for
	 * this. A much better solution involves improving vmemmap code
	 * to fallback to base pages when trying to populate vmemmap using
	 * altmap as an alternative source of memory, and we do not exactly
	 * populate a single PMD.
	 */
	if (!mhp_memmap_on_memory())
		return false;

	/*
	 * Make sure the vmemmap allocation is fully contained
	 * so that we always allocate vmemmap memory from altmap area.
	 */
	if (!IS_ALIGNED(vmemmap_size, PAGE_SIZE))
		return false;

	/*
	 * start pfn should be pageblock_nr_pages aligned for correctly
	 * setting migrate types
	 */
	if (!pageblock_aligned(memmap_pages))
		return false;

	/* The memmap must not consume the whole memory block. */
	if (memmap_pages == PHYS_PFN(memory_block_size_bytes()))
		/* No effective hotplugged memory doesn't make sense. */
		return false;

	return arch_supports_memmap_on_memory(vmemmap_size);
}
EXPORT_SYMBOL_GPL(mhp_supports_memmap_on_memory);
/*
 * Tear down the memory block devices for [start, start + size) and free the
 * per-memblock altmaps that back their memmaps.
 */
static void remove_memory_blocks_and_altmaps(u64 start, u64 size)
{
	unsigned long memblock_size = memory_block_size_bytes();
	u64 cur_start;

	/*
	 * For memmap_on_memory, the altmaps were added on a per-memblock
	 * basis; we have to process each individual memory block.
	 */
	for (cur_start = start; cur_start < start + size;
	     cur_start += memblock_size) {
		struct vmem_altmap *altmap = NULL;
		struct memory_block *mem;

		mem = find_memory_block(pfn_to_section_nr(PFN_DOWN(cur_start)));
		if (WARN_ON_ONCE(!mem))
			continue;

		/* Detach the altmap from the block before removing the device. */
		altmap = mem->altmap;
		mem->altmap = NULL;

		remove_memory_block_devices(cur_start, memblock_size);

		arch_remove_memory(cur_start, memblock_size, altmap);

		/* Verify that all vmemmap pages have actually been freed. */
		WARN(altmap->alloc, "Altmap not fully unmapped");
		kfree(altmap);
	}
}
/*
 * Add the memory range [start, start + size) one memory block at a time,
 * placing each block's memmap (vmem_altmap) on the hotplugged memory itself.
 *
 * @nid: node to add the memory to
 * @group: memory group to associate the new memory blocks with (may be NULL)
 * @start: start address of the range
 * @size: size of the range, a multiple of the memory block size
 *
 * On failure, memory blocks (and altmaps) that were already created are torn
 * down again. Returns 0 on success, a negative errno otherwise.
 */
static int create_altmaps_and_memory_blocks(int nid, struct memory_group *group,
					    u64 start, u64 size)
{
	unsigned long memblock_size = memory_block_size_bytes();
	u64 cur_start;
	int ret;

	for (cur_start = start; cur_start < start + size;
	     cur_start += memblock_size) {
		struct mhp_params params = { .pgprot =
						     pgprot_mhp(PAGE_KERNEL) };
		struct vmem_altmap mhp_altmap = {
			.base_pfn = PHYS_PFN(cur_start),
			.end_pfn = PHYS_PFN(cur_start + memblock_size - 1),
		};

		mhp_altmap.free = memory_block_memmap_on_memory_pages();
		/* The altmap outlives this function; it is freed on removal. */
		params.altmap = kmemdup(&mhp_altmap, sizeof(struct vmem_altmap),
					GFP_KERNEL);
		if (!params.altmap) {
			ret = -ENOMEM;
			goto out;
		}

		/*
		 * call arch's memory hotadd
		 *
		 * Fix: "&params" was corrupted to the mojibake "¶ms"
		 * (HTML-entity damage), which does not compile.
		 */
		ret = arch_add_memory(nid, cur_start, memblock_size, &params);
		if (ret < 0) {
			kfree(params.altmap);
			goto out;
		}

		/* create memory block devices after memory was added */
		ret = create_memory_block_devices(cur_start, memblock_size, nid,
						  params.altmap, group);
		if (ret) {
			arch_remove_memory(cur_start, memblock_size, NULL);
			kfree(params.altmap);
			goto out;
		}
	}

	return 0;
out:
	if (ret && cur_start != start)
		remove_memory_blocks_and_altmaps(start, cur_start - start);
	return ret;
}
/*
* NOTE: The caller must call lock_device_hotplug() to serialize hotplug
* and online/offline operations (triggered e.g. by sysfs).
*
* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG
*/
/*
 * Hot-add the System RAM described by @res to node @nid (or to the memory
 * group identified by @nid when MHP_NID_IS_MGID is set): register it with
 * memblock, online the node if necessary, create memory block devices and
 * optionally online the blocks afterwards.
 *
 * Returns 0 on success, a negative errno otherwise.
 */
int add_memory_resource(int nid, struct resource *res, mhp_t mhp_flags)
{
	struct mhp_params params = { .pgprot = pgprot_mhp(PAGE_KERNEL) };
	enum memblock_flags memblock_flags = MEMBLOCK_NONE;
	struct memory_group *group = NULL;
	u64 start, size;
	bool new_node = false;
	int ret;

	start = res->start;
	size = resource_size(res);

	ret = check_hotplug_memory_range(start, size);
	if (ret)
		return ret;

	/* With MHP_NID_IS_MGID, @nid is really a memory group id. */
	if (mhp_flags & MHP_NID_IS_MGID) {
		group = memory_group_find_by_id(nid);
		if (!group)
			return -EINVAL;
		nid = group->nid;
	}

	if (!node_possible(nid)) {
		WARN(1, "node %d was absent from the node_possible_map\n", nid);
		return -EINVAL;
	}

	mem_hotplug_begin();

	if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK)) {
		if (res->flags & IORESOURCE_SYSRAM_DRIVER_MANAGED)
			memblock_flags = MEMBLOCK_DRIVER_MANAGED;
		ret = memblock_add_node(start, size, nid, memblock_flags);
		if (ret)
			goto error_mem_hotplug_end;
	}

	ret = __try_online_node(nid, false);
	if (ret < 0)
		goto error_memblock_remove;
	if (ret) {
		/* A new node was initialized: register and online it. */
		node_set_online(nid);
		ret = register_node(nid);
		if (WARN_ON(ret)) {
			node_set_offline(nid);
			goto error_memblock_remove;
		}
		new_node = true;
	}

	/*
	 * Self hosted memmap array
	 */
	if ((mhp_flags & MHP_MEMMAP_ON_MEMORY) &&
	    mhp_supports_memmap_on_memory()) {
		ret = create_altmaps_and_memory_blocks(nid, group, start, size);
		if (ret)
			goto error;
	} else {
		/*
		 * Fix: "&params" was corrupted to the mojibake "¶ms"
		 * (HTML-entity damage), which does not compile.
		 */
		ret = arch_add_memory(nid, start, size, &params);
		if (ret < 0)
			goto error;

		/* create memory block devices after memory was added */
		ret = create_memory_block_devices(start, size, nid, NULL, group);
		if (ret) {
			arch_remove_memory(start, size, params.altmap);
			goto error;
		}
	}

	register_memory_blocks_under_node_hotplug(nid, PFN_DOWN(start),
						  PFN_UP(start + size - 1));

	/* create new memmap entry */
	if (!strcmp(res->name, "System RAM"))
		firmware_map_add_hotplug(start, start + size, "System RAM");

	/* device_online() will take the lock when calling online_pages() */
	mem_hotplug_done();

	/*
	 * In case we're allowed to merge the resource, flag it and trigger
	 * merging now that adding succeeded.
	 */
	if (mhp_flags & MHP_MERGE_RESOURCE)
		merge_system_ram_resource(res);

	/* online pages if requested */
	if (mhp_get_default_online_type() != MMOP_OFFLINE)
		walk_memory_blocks(start, size, NULL, online_memory_block);

	return ret;
error:
	if (new_node) {
		node_set_offline(nid);
		unregister_node(nid);
	}
error_memblock_remove:
	if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK))
		memblock_remove(start, size);
error_mem_hotplug_end:
	mem_hotplug_done();
	return ret;
}
/* requires device_hotplug_lock, see add_memory_resource() */
/* requires device_hotplug_lock, see add_memory_resource() */
int __add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags)
{
	struct resource *res;
	int rc;

	/* Reserve the physical range before adding it as System RAM. */
	res = register_memory_resource(start, size, "System RAM");
	if (IS_ERR(res))
		return PTR_ERR(res);

	rc = add_memory_resource(nid, res, mhp_flags);
	if (rc < 0)
		release_memory_resource(res);
	return rc;
}
int add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags)
{
	int ret;

	/* __add_memory() requires the device hotplug lock. */
	lock_device_hotplug();
	ret = __add_memory(nid, start, size, mhp_flags);
	unlock_device_hotplug();

	return ret;
}
EXPORT_SYMBOL_GPL(add_memory);
/*
* Add special, driver-managed memory to the system as system RAM. Such
* memory is not exposed via the raw firmware-provided memmap as system
* RAM, instead, it is detected and added by a driver - during cold boot,
* after a reboot, and after kexec.
*
* Reasons why this memory should not be used for the initial memmap of a
* kexec kernel or for placing kexec images:
* - The booting kernel is in charge of determining how this memory will be
* used (e.g., use persistent memory as system RAM)
* - Coordination with a hypervisor is required before this memory
* can be used (e.g., inaccessible parts).
*
* For this memory, no entries in /sys/firmware/memmap ("raw firmware-provided
* memory map") are created. Also, the created memory resource is flagged
* with IORESOURCE_SYSRAM_DRIVER_MANAGED, so in-kernel users can special-case
* this memory as well (esp., not place kexec images onto it).
*
* The resource_name (visible via /proc/iomem) has to have the format
* "System RAM ($DRIVER)".
*/
int add_memory_driver_managed(int nid, u64 start, u64 size,
			      const char *resource_name, mhp_t mhp_flags)
{
	struct resource *res;
	int rc;

	/* Enforce the documented "System RAM ($DRIVER)" naming scheme. */
	if (!resource_name ||
	    strstr(resource_name, "System RAM (") != resource_name ||
	    resource_name[strlen(resource_name) - 1] != ')')
		return -EINVAL;

	lock_device_hotplug();

	res = register_memory_resource(start, size, resource_name);
	if (IS_ERR(res)) {
		rc = PTR_ERR(res);
	} else {
		rc = add_memory_resource(nid, res, mhp_flags);
		if (rc < 0)
			release_memory_resource(res);
	}

	unlock_device_hotplug();
	return rc;
}
EXPORT_SYMBOL_GPL(add_memory_driver_managed);
/*
* Platforms should define arch_get_mappable_range() that provides
* maximum possible addressable physical memory range for which the
* linear mapping could be created. The platform returned address
* range must adhere to these following semantics.
*
* - range.start <= range.end
* - Range includes both end points [range.start..range.end]
*
* There is also a fallback definition provided here, allowing the
* entire possible physical address range in case any platform does
* not define arch_get_mappable_range().
*/
struct range __weak arch_get_mappable_range(void)
{
	/* Fallback: the entire possible physical address range is mappable. */
	return (struct range) {
		.start = 0UL,
		.end = -1ULL,
	};
}
struct range mhp_get_pluggable_range(bool need_mapping)
{
	const u64 max_phys = DIRECT_MAP_PHYSMEM_END;
	struct range pluggable = { .start = 0, .end = max_phys };

	if (need_mapping) {
		/* Clamp the arch-provided mappable range to the direct map. */
		pluggable = arch_get_mappable_range();
		if (pluggable.start > max_phys) {
			/* Entirely above the direct map: empty range. */
			pluggable.start = 0;
			pluggable.end = 0;
		}
		pluggable.end = min_t(u64, pluggable.end, max_phys);
	}

	return pluggable;
}
EXPORT_SYMBOL_GPL(mhp_get_pluggable_range);
bool mhp_range_allowed(u64 start, u64 size, bool need_mapping)
{
	struct range mhp_range = mhp_get_pluggable_range(need_mapping);
	u64 end = start + size;

	/* Reject empty ranges and anything outside the pluggable range. */
	if (start >= end || start < mhp_range.start ||
	    (end - 1) > mhp_range.end) {
		pr_warn("Hotplug memory [%#llx-%#llx] exceeds maximum addressable range [%#llx-%#llx]\n",
			start, end, mhp_range.start, mhp_range.end);
		return false;
	}

	return true;
}
#ifdef CONFIG_MEMORY_HOTREMOVE
/*
* Scan pfn range [start,end) to find movable/migratable pages (LRU and
* hugetlb folio, movable_ops pages). Will skip over most unmovable
* pages (esp., pages that can be skipped when offlining), but bail out on
* definitely unmovable pages.
*
* Returns:
* 0 in case a movable page is found and movable_pfn was updated.
* -ENOENT in case no movable page was found.
* -EBUSY in case a definitely unmovable page was found.
*/
static int scan_movable_pages(unsigned long start, unsigned long end,
			      unsigned long *movable_pfn)
{
	unsigned long pfn;

	for_each_valid_pfn(pfn, start, end) {
		struct page *page;
		struct folio *folio;

		page = pfn_to_page(pfn);
		/* LRU and movable_ops pages are migratable. */
		if (PageLRU(page) || page_has_movable_ops(page))
			goto found;

		/*
		 * PageOffline() pages that do not have movable_ops and
		 * have a reference count > 0 (after MEM_GOING_OFFLINE) are
		 * definitely unmovable. If their reference count would be 0,
		 * they could at least be skipped when offlining memory.
		 */
		if (PageOffline(page) && page_count(page))
			return -EBUSY;

		if (!PageHuge(page))
			continue;
		folio = page_folio(page);
		/*
		 * This test is racy as we hold no reference or lock. The
		 * hugetlb page could have been free'ed and head is no longer
		 * a hugetlb page before the following check. In such unlikely
		 * cases false positives and negatives are possible. Calling
		 * code must deal with these scenarios.
		 */
		if (folio_test_hugetlb_migratable(folio))
			goto found;
		/* Skip the remaining tail pages of this hugetlb folio. */
		pfn |= folio_nr_pages(folio) - 1;
	}
	return -ENOENT;
found:
	*movable_pfn = pfn;
	return 0;
}
/*
 * Isolate and migrate all migratable folios in [start_pfn, end_pfn) off the
 * range being offlined. Migration failures are logged (ratelimited) and the
 * folios are put back; the caller retries via scan_movable_pages().
 */
static void do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
{
	struct folio *folio;
	unsigned long pfn;
	LIST_HEAD(source);
	static DEFINE_RATELIMIT_STATE(migrate_rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	for_each_valid_pfn(pfn, start_pfn, end_pfn) {
		struct page *page;

		page = pfn_to_page(pfn);
		folio = page_folio(page);

		/* Grab a reference so the folio cannot vanish under us. */
		if (!folio_try_get(folio))
			continue;

		/* The folio may have been split/freed since the lookup. */
		if (unlikely(page_folio(page) != folio))
			goto put_folio;

		/* Skip the remaining pages of a large folio in one step. */
		if (folio_test_large(folio))
			pfn = folio_pfn(folio) + folio_nr_pages(folio) - 1;

		if (folio_contain_hwpoisoned_page(folio)) {
			/*
			 * unmap_poisoned_folio() cannot handle large folios
			 * in all cases yet.
			 */
			if (folio_test_large(folio) && !folio_test_hugetlb(folio))
				goto put_folio;

			if (folio_test_lru(folio) && !folio_isolate_lru(folio))
				goto put_folio;

			if (folio_mapped(folio)) {
				folio_lock(folio);
				unmap_poisoned_folio(folio, pfn, false);
				folio_unlock(folio);
			}

			goto put_folio;
		}

		if (!isolate_folio_to_list(folio, &source)) {
			if (__ratelimit(&migrate_rs)) {
				pr_warn("failed to isolate pfn %lx\n",
					page_to_pfn(page));
				dump_page(page, "isolation failed");
			}
		}
put_folio:
		folio_put(folio);
	}
	if (!list_empty(&source)) {
		nodemask_t nmask = node_states[N_MEMORY];
		struct migration_target_control mtc = {
			.nmask = &nmask,
			.gfp_mask = GFP_KERNEL | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
			.reason = MR_MEMORY_HOTPLUG,
		};
		int ret;

		/*
		 * We have checked that migration range is on a single zone so
		 * we can use the nid of the first page to all the others.
		 */
		mtc.nid = folio_nid(list_first_entry(&source, struct folio, lru));

		/*
		 * try to allocate from a different node but reuse this node
		 * if there are no other online nodes to be used (e.g. we are
		 * offlining a part of the only existing node)
		 */
		node_clear(mtc.nid, nmask);
		if (nodes_empty(nmask))
			node_set(mtc.nid, nmask);

		ret = migrate_pages(&source, alloc_migration_target, NULL,
			(unsigned long)&mtc, MIGRATE_SYNC, MR_MEMORY_HOTPLUG, NULL);
		if (ret) {
			list_for_each_entry(folio, &source, lru) {
				if (__ratelimit(&migrate_rs)) {
					pr_warn("migrating pfn %lx failed ret:%d\n",
						folio_pfn(folio), ret);
					dump_page(&folio->page,
						  "migration failure");
				}
			}
			putback_movable_pages(&source);
		}
	}
}
/*
 * Handle the "movable_node" kernel command line parameter: prefer
 * ZONE_MOVABLE when onlining hotplugged memory by default.
 */
static int __init cmdline_parse_movable_node(char *p)
{
	movable_node_enabled = true;
	return 0;
}
early_param("movable_node", cmdline_parse_movable_node);
/* walk_system_ram_range() callback: sum page counts into *data. */
static int count_system_ram_pages_cb(unsigned long start_pfn,
				     unsigned long nr_pages, void *data)
{
	*(unsigned long *)data += nr_pages;
	return 0;
}
/*
* Must be called with mem_hotplug_lock in write mode.
*/
/*
 * Offline the pfn range [start_pfn, start_pfn + nr_pages): isolate it,
 * migrate all movable pages away, remove the pages from the buddy and mark
 * the sections offline, notifying node and memory notifiers along the way.
 *
 * Returns 0 on success, a negative errno on failure (the range is then
 * rolled back to its fully-online state).
 */
int offline_pages(unsigned long start_pfn, unsigned long nr_pages,
		  struct zone *zone, struct memory_group *group)
{
	unsigned long pfn, managed_pages, system_ram_pages = 0;
	const unsigned long end_pfn = start_pfn + nr_pages;
	struct pglist_data *pgdat = zone->zone_pgdat;
	const int node = zone_to_nid(zone);
	struct memory_notify mem_arg = {
		.start_pfn = start_pfn,
		.nr_pages = nr_pages,
	};
	struct node_notify node_arg = {
		.nid = NUMA_NO_NODE,
	};
	unsigned long flags;
	char *reason;
	int ret;

	/*
	 * {on,off}lining is constrained to full memory sections (or more
	 * precisely to memory blocks from the user space POV).
	 * memmap_on_memory is an exception because it reserves initial part
	 * of the physical memory space for vmemmaps. That space is pageblock
	 * aligned.
	 */
	if (WARN_ON_ONCE(!nr_pages || !pageblock_aligned(start_pfn) ||
			 !IS_ALIGNED(start_pfn + nr_pages, PAGES_PER_SECTION)))
		return -EINVAL;

	/*
	 * Don't allow to offline memory blocks that contain holes.
	 * Consequently, memory blocks with holes can never get onlined
	 * via the hotplug path - online_pages() - as hotplugged memory has
	 * no holes. This way, we don't have to worry about memory holes,
	 * don't need pfn_valid() checks, and can avoid using
	 * walk_system_ram_range() later.
	 */
	walk_system_ram_range(start_pfn, nr_pages, &system_ram_pages,
			      count_system_ram_pages_cb);
	if (system_ram_pages != nr_pages) {
		ret = -EINVAL;
		reason = "memory holes";
		goto failed_removal;
	}

	/*
	 * We only support offlining of memory blocks managed by a single zone,
	 * checked by calling code. This is just a sanity check that we might
	 * want to remove in the future.
	 */
	if (WARN_ON_ONCE(page_zone(pfn_to_page(start_pfn)) != zone ||
			 page_zone(pfn_to_page(end_pfn - 1)) != zone)) {
		ret = -EINVAL;
		reason = "multizone range";
		goto failed_removal;
	}

	/*
	 * Disable pcplists so that page isolation cannot race with freeing
	 * in a way that pages from isolated pageblock are left on pcplists.
	 */
	zone_pcp_disable(zone);
	lru_cache_disable();

	/* set above range as isolated */
	ret = start_isolate_page_range(start_pfn, end_pfn,
				       PB_ISOLATE_MODE_MEM_OFFLINE);
	if (ret) {
		reason = "failure to isolate range";
		goto failed_removal_pcplists_disabled;
	}

	/*
	 * Check whether the node will have no present pages after we offline
	 * 'nr_pages' more. If so, we know that the node will become empty, and
	 * so we will clear N_MEMORY for it.
	 */
	if (nr_pages >= pgdat->node_present_pages) {
		node_arg.nid = node;
		ret = node_notify(NODE_REMOVING_LAST_MEMORY, &node_arg);
		ret = notifier_to_errno(ret);
		if (ret) {
			reason = "node notifier failure";
			goto failed_removal_isolated;
		}
	}

	ret = memory_notify(MEM_GOING_OFFLINE, &mem_arg);
	ret = notifier_to_errno(ret);
	if (ret) {
		reason = "notifier failure";
		goto failed_removal_isolated;
	}

	/* Repeat scan+migrate until no movable pages remain in the range. */
	do {
		pfn = start_pfn;
		do {
			/*
			 * Historically we always checked for any signal and
			 * can't limit it to fatal signals without eventually
			 * breaking user space.
			 */
			if (signal_pending(current)) {
				ret = -EINTR;
				reason = "signal backoff";
				goto failed_removal_isolated;
			}

			cond_resched();

			ret = scan_movable_pages(pfn, end_pfn, &pfn);
			if (!ret) {
				/*
				 * TODO: fatal migration failures should bail
				 * out
				 */
				do_migrate_range(pfn, end_pfn);
			}
		} while (!ret);

		if (ret != -ENOENT) {
			reason = "unmovable page";
			goto failed_removal_isolated;
		}

		/*
		 * Dissolve free hugetlb folios in the memory block before doing
		 * offlining actually in order to make hugetlbfs's object
		 * counting consistent.
		 */
		ret = dissolve_free_hugetlb_folios(start_pfn, end_pfn);
		if (ret) {
			reason = "failure to dissolve huge pages";
			goto failed_removal_isolated;
		}

		ret = test_pages_isolated(start_pfn, end_pfn,
					  PB_ISOLATE_MODE_MEM_OFFLINE);
	} while (ret);

	/* Mark all sections offline and remove free pages from the buddy. */
	managed_pages = __offline_isolated_pages(start_pfn, end_pfn);
	pr_debug("Offlined Pages %ld\n", nr_pages);

	/*
	 * The memory sections are marked offline, and the pageblock flags
	 * effectively stale; nobody should be touching them. Fixup the number
	 * of isolated pageblocks, memory onlining will properly revert this.
	 */
	spin_lock_irqsave(&zone->lock, flags);
	zone->nr_isolate_pageblock -= nr_pages / pageblock_nr_pages;
	spin_unlock_irqrestore(&zone->lock, flags);

	lru_cache_enable();
	zone_pcp_enable(zone);

	/* removal success */
	adjust_managed_page_count(pfn_to_page(start_pfn), -managed_pages);
	adjust_present_page_count(pfn_to_page(start_pfn), group, -nr_pages);

	/* reinitialise watermarks and update pcp limits */
	init_per_zone_wmark_min();

	/*
	 * Make sure to mark the node as memory-less before rebuilding the zone
	 * list. Otherwise this node would still appear in the fallback lists.
	 */
	if (node_arg.nid >= 0)
		node_clear_state(node, N_MEMORY);
	if (!populated_zone(zone)) {
		zone_pcp_reset(zone);
		build_all_zonelists(NULL);
	}

	if (node_arg.nid >= 0) {
		kcompactd_stop(node);
		kswapd_stop(node);
		/* Node went memoryless. Notify consumers */
		node_notify(NODE_REMOVED_LAST_MEMORY, &node_arg);
	}

	writeback_set_ratelimit();

	memory_notify(MEM_OFFLINE, &mem_arg);
	remove_pfn_range_from_zone(zone, start_pfn, nr_pages);
	return 0;

failed_removal_isolated:
	/* pushback to free area */
	undo_isolate_page_range(start_pfn, end_pfn);
	memory_notify(MEM_CANCEL_OFFLINE, &mem_arg);
	if (node_arg.nid != NUMA_NO_NODE)
		node_notify(NODE_CANCEL_REMOVING_LAST_MEMORY, &node_arg);
failed_removal_pcplists_disabled:
	lru_cache_enable();
	zone_pcp_enable(zone);
failed_removal:
	pr_debug("memory offlining [mem %#010llx-%#010llx] failed due to %s\n",
		 (unsigned long long) start_pfn << PAGE_SHIFT,
		 ((unsigned long long) end_pfn << PAGE_SHIFT) - 1,
		 reason);
	return ret;
}
/*
 * walk_memory_blocks() callback: record the block's node id in *@arg and
 * verify the block is offline.  Returns -EBUSY (aborting the walk) when an
 * online block is encountered.
 */
static int check_memblock_offlined_cb(struct memory_block *mem, void *arg)
{
	int *nid_out = arg;
	phys_addr_t start, end;

	/* Remember the node of the last block visited. */
	*nid_out = mem->nid;

	if (likely(mem->state == MEM_OFFLINE))
		return 0;

	/* An online block blocks removal; tell the user which range. */
	start = PFN_PHYS(section_nr_to_pfn(mem->start_section_nr));
	end = start + memory_block_size_bytes() - 1;
	pr_warn("removing memory fails, because memory [%pa-%pa] is onlined\n",
		&start, &end);
	return -EBUSY;
}
/*
 * walk_memory_blocks() callback: count (in *@arg) how many memory blocks
 * in the walked range carry a vmemmap altmap.  Never aborts the walk.
 */
static int count_memory_range_altmaps_cb(struct memory_block *mem, void *arg)
{
	u64 *count = arg;

	if (mem->altmap)
		(*count)++;
	return 0;
}
/*
 * Returns -EBUSY if any present CPU still belongs to @nid (the node cannot
 * be offlined while CPUs live on it), 0 otherwise.
 */
static int check_cpu_on_node(int nid)
{
	int cpu;

	for_each_present_cpu(cpu) {
		if (cpu_to_node(cpu) != nid)
			continue;
		/*
		 * A CPU on this node was not removed, so we cannot
		 * offline the node.
		 */
		return -EBUSY;
	}
	return 0;
}
/*
 * for_each_memory_block() callback: abort with -EEXIST as soon as a memory
 * block belonging to the node id passed via @arg is found.
 */
static int check_no_memblock_for_node_cb(struct memory_block *mem, void *arg)
{
	const int nid = *(int *)arg;

	/*
	 * If a memory block belongs to multiple nodes, the stored nid is not
	 * reliable. However, such blocks are always online (e.g., cannot get
	 * offlined) and, therefore, are still spanned by the node.
	 */
	if (mem->nid == nid)
		return -EEXIST;
	return 0;
}
/**
 * try_offline_node
 * @nid: the node ID
 *
 * Offline a node if all memory sections and cpus of the node are removed.
 *
 * NOTE: The caller must call lock_device_hotplug() to serialize hotplug
 * and online/offline operations before this call.
 */
void try_offline_node(int nid)
{
	/*
	 * A node that still spans pages (especially ZONE_DEVICE) cannot be
	 * offlined. A node spans memory after move_pfn_range_to_zone(),
	 * e.g., after a memory block was onlined.
	 */
	if (node_spanned_pages(nid))
		return;

	/*
	 * Offline memory blocks might not be spanned by the node (they get
	 * spanned once onlined), yet they still link to the node in sysfs
	 * and could be onlined later -- bail out if any such block remains.
	 */
	if (for_each_memory_block(&nid, check_no_memblock_for_node_cb))
		return;

	/* Bail out while CPUs still sit on this node. */
	if (check_cpu_on_node(nid))
		return;

	/* All memory and CPUs of this node are gone: offline it now. */
	node_set_offline(nid);
	unregister_node(nid);
}
EXPORT_SYMBOL(try_offline_node);
/*
 * Returns 1 when every memory block in [start, start + size) has an altmap,
 * 0 when none do (or memmap_on_memory is disabled), and -EINVAL on an
 * unexpected mix of altmap and non-altmap blocks.
 */
static int memory_blocks_have_altmaps(u64 start, u64 size)
{
	u64 nr_blocks = size / memory_block_size_bytes();
	u64 nr_altmaps = 0;

	if (!mhp_memmap_on_memory())
		return 0;

	walk_memory_blocks(start, size, &nr_altmaps,
			   count_memory_range_altmaps_cb);

	if (!nr_altmaps)
		return 0;
	/* Mixed ranges are unsupported: either all blocks or none. */
	if (WARN_ON_ONCE(nr_blocks != nr_altmaps))
		return -EINVAL;
	return 1;
}
/*
 * Core of memory removal: verify every block in the range is offline, tear
 * down the memory block devices (and altmaps, if any), remove the direct
 * mapping and resources, and finally try to offline the owning node.
 * Returns 0 on success or a negative errno.  The caller must hold
 * device_hotplug_lock.
 */
static int try_remove_memory(u64 start, u64 size)
{
	int rc, nid = NUMA_NO_NODE;

	BUG_ON(check_hotplug_memory_range(start, size));

	/*
	 * All memory blocks must be offlined before removing memory. Check
	 * whether all memory blocks in question are offline and return error
	 * if this is not the case.
	 *
	 * While at it, determine the nid. Note that if we'd have mixed nodes,
	 * we'd only try to offline the last determined one -- which is good
	 * enough for the cases we care about.
	 */
	rc = walk_memory_blocks(start, size, &nid, check_memblock_offlined_cb);
	if (rc)
		return rc;

	/* remove memmap entry */
	firmware_map_remove(start, start + size, "System RAM");

	mem_hotplug_begin();

	rc = memory_blocks_have_altmaps(start, size);
	if (rc < 0) {
		/* Inconsistent altmap state (-EINVAL); abort the removal. */
		mem_hotplug_done();
		return rc;
	} else if (!rc) {
		/*
		 * Memory block device removal under the device_hotplug_lock is
		 * a barrier against racing online attempts.
		 * No altmaps present, do the removal directly
		 */
		remove_memory_block_devices(start, size);
		arch_remove_memory(start, size, NULL);
	} else {
		/* all memblocks in the range have altmaps */
		remove_memory_blocks_and_altmaps(start, size);
	}

	if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK))
		memblock_remove(start, size);

	release_mem_region_adjustable(start, size);

	/* The last block removed from a node may allow offlining the node. */
	if (nid != NUMA_NO_NODE)
		try_offline_node(nid);

	mem_hotplug_done();
	return 0;
}
/**
 * __remove_memory - Remove memory if every memory block is offline
 * @start: physical address of the region to remove
 * @size: size of the region to remove
 *
 * NOTE: The caller must call lock_device_hotplug() to serialize hotplug
 * and online/offline operations before this call, as required by
 * try_offline_node().
 */
void __remove_memory(u64 start, u64 size)
{
	/*
	 * Crash loudly if some memory in the range was not offlined prior
	 * to calling this function.
	 */
	BUG_ON(try_remove_memory(start, size));
}
/*
 * Remove memory if every memory block is offline; otherwise return -EBUSY
 * because some memory is still online.  Takes device_hotplug_lock itself.
 */
int remove_memory(u64 start, u64 size)
{
	int ret;

	lock_device_hotplug();
	ret = try_remove_memory(start, size);
	unlock_device_hotplug();

	return ret;
}
EXPORT_SYMBOL_GPL(remove_memory);
/*
 * walk_memory_blocks() callback: offline one memory block, recording its
 * previous online type via the cursor passed in @arg so a later rollback
 * (try_reonline_memory_block()) can restore it.
 */
static int try_offline_memory_block(struct memory_block *mem, void *arg)
{
	uint8_t **online_types = arg;
	uint8_t saved_type = MMOP_ONLINE_KERNEL;
	struct page *first_page;
	int ret;

	/*
	 * Sense the online_type via the zone of the memory block. Offlining
	 * with multiple zones within one memory block will be rejected
	 * by offlining code ... so we don't care about that.
	 */
	first_page = pfn_to_online_page(section_nr_to_pfn(mem->start_section_nr));
	if (first_page && page_zonenum(first_page) == ZONE_MOVABLE)
		saved_type = MMOP_ONLINE_MOVABLE;

	ret = device_offline(&mem->dev);
	/*
	 * Default is MMOP_OFFLINE - change it only if offlining succeeded,
	 * so try_reonline_memory_block() can do the right thing.
	 */
	if (!ret)
		**online_types = saved_type;

	(*online_types)++;
	/* device_offline() > 0 means already offline -- not an error. */
	return ret < 0 ? ret : 0;
}
/*
 * walk_memory_blocks() callback: re-online one memory block to the type
 * recorded by try_offline_memory_block(), best effort.  Never aborts the
 * walk so all remaining blocks are still visited.
 */
static int try_reonline_memory_block(struct memory_block *mem, void *arg)
{
	uint8_t **online_types = arg;

	/* Skip blocks that were already offline before we touched them. */
	if (**online_types != MMOP_OFFLINE) {
		int ret;

		mem->online_type = **online_types;
		ret = device_online(&mem->dev);
		if (ret < 0)
			pr_warn("%s: Failed to re-online memory: %d",
				__func__, ret);
	}

	/* Always advance the cursor; each block maps to one slot. */
	(*online_types)++;
	return 0;
}
/*
 * Try to offline and remove memory. Might take a long time to finish in case
 * memory is still in use. Primarily useful for memory devices that logically
 * unplugged all memory (so it's no longer in use) and want to offline + remove
 * that memory.
 *
 * @start and @size must be aligned to (and @size a non-zero multiple of) the
 * memory block size.  Returns 0 on success or a negative errno; on failure,
 * any blocks that were offlined are brought back online (best effort).
 */
int offline_and_remove_memory(u64 start, u64 size)
{
	const unsigned long mb_count = size / memory_block_size_bytes();
	uint8_t *online_types, *tmp;
	int rc;

	if (!IS_ALIGNED(start, memory_block_size_bytes()) ||
	    !IS_ALIGNED(size, memory_block_size_bytes()) || !size)
		return -EINVAL;

	/*
	 * We'll remember the old online type of each memory block, so we can
	 * try to revert whatever we did when offlining one memory block fails
	 * after offlining some others succeeded.
	 */
	online_types = kmalloc_array(mb_count, sizeof(*online_types),
				     GFP_KERNEL);
	if (!online_types)
		return -ENOMEM;
	/*
	 * Initialize all states to MMOP_OFFLINE, so when we abort processing in
	 * try_offline_memory_block(), we'll skip all unprocessed blocks in
	 * try_reonline_memory_block().
	 */
	memset(online_types, MMOP_OFFLINE, mb_count);

	lock_device_hotplug();

	/* tmp is a cursor advanced once per visited block by the callback. */
	tmp = online_types;
	rc = walk_memory_blocks(start, size, &tmp, try_offline_memory_block);

	/*
	 * In case we succeeded to offline all memory, remove it.
	 * This cannot fail as it cannot get onlined in the meantime.
	 */
	if (!rc) {
		rc = try_remove_memory(start, size);
		if (rc)
			pr_err("%s: Failed to remove memory: %d", __func__, rc);
	}

	/*
	 * Rollback what we did. While memory onlining might theoretically fail
	 * (nacked by a notifier), it barely ever happens.
	 */
	if (rc) {
		tmp = online_types;
		walk_memory_blocks(start, size, &tmp,
				   try_reonline_memory_block);
	}
	unlock_device_hotplug();

	kfree(online_types);
	return rc;
}
EXPORT_SYMBOL_GPL(offline_and_remove_memory);
#endif /* CONFIG_MEMORY_HOTREMOVE */
import sys
import math
import clr
import time
clr.AddReference("MissionPlanner")
import MissionPlanner
clr.AddReference("MissionPlanner.Utilities") # includes the Utilities class
from MissionPlanner.Utilities import Locationwp
def gps_distance(lat1, lon1, lat2, lon2):
    '''Great-circle distance in meters between two points whose
    coordinates are given in degrees (haversine formula).
    thanks to http://www.movable-type.co.uk/scripts/latlong.html'''
    from math import radians, cos, sin, sqrt, atan2
    earth_radius_m = 6378100.0
    # Convert everything to radians up front.
    phi1, phi2 = radians(lat1), radians(lat2)
    lam1, lam2 = radians(lon1), radians(lon2)
    d_phi = phi2 - phi1
    d_lam = lam2 - lam1
    # Haversine term, then the central angle via atan2.
    a = sin(0.5 * d_phi) ** 2 + sin(0.5 * d_lam) ** 2 * cos(phi1) * cos(phi2)
    c = 2.0 * atan2(sqrt(a), sqrt(1.0 - a))
    return earth_radius_m * c
print __name__
# main program
print "Start script"
######Mission variables######
dist_tolerance = 15 #(m)
ber_tolerance = 45 #heading tolerance
waypoint = 1 #desired Waypoint
######Time delays (ms)######
servo_delay = 50 #To be experimentally found
comm_delay = 50 #To be experimentally found
######Other constants######
payload_servo = 7 #5-8
gravity = 9.81
target = (-35, 117.98) # gps pos of target in degrees
time.sleep(5) # wait 10 seconds before starting
print 'Starting Mission'
Script.ChangeMode("Guided") # changes mode to "Guided"
item = MissionPlanner.Utilities.Locationwp() # creating waypoint
alt = 60.000000 # altitude value
Locationwp.lat.SetValue(item,target[0]) # sets latitude
Locationwp.lng.SetValue(item,target[1]) # sets longitude
Locationwp.alt.SetValue(item,alt) # sets altitude
print 'Drop zone set'
MAV.setGuidedModeWP(item) # tells UAV "go to" the set lat/long @ alt
print 'Going to DZ'
Good = True
while Good == True:
ground_speed = cs.groundspeed
alt = cs.alt
wp_dist = gps_distance(cs.lat ,cs.lng, math.radians(target[0]), math.radians(target[1]))
print wp_dist
ber_error = cs.ber_error
fall_time = ((2 * alt) / gravity) ** (0.5)
fall_dist = ground_speed * fall_time
release_time = fall_time + (servo_delay/1000) + (comm_delay/1000)
release_dist = release_time * ground_speed
if (math.fabs(release_dist - wp_dist) <= dist_tolerance):
if (math.fabs(ber_error) <= ber_tolerance):
######Payload Release######
Script.SendRC(payload_servo,1900,True)
print 'Bombs away!'
else:
print 'Heading outside of threshold, go around!'
Good = False
else:
print 'Outside of threshold!'
time.sleep (1.0) #sleep for a second
#Broken out of the loop as Bearing was not right
print 'Bearing was out of tolerance for the Drop - Start run again' | unknown | codeparrot/codeparrot-clean | ||
# Copyright (C) 2016 Zhixian MA <zxma_sjtu@qq.com>
# MIT license
"""
This module split simulated galaxy particles hdf5 file into two sub
hdf5 files.
"""
import os
import re
import h5py
import numpy as np
class PartSplit:
    """
    Split a simulated two-cluster galaxy particle HDF5 snapshot into two
    per-cluster sub files, selecting particles by their ParticleIDs.

    Parameters
    ----------
    input_dir: string
        The directory holding the parent hdf5 files.
    output_dir: string
        The directory to save output files (created if missing).
    numgas: int
        Number of gas particles of the main cluster.
    numhalo: int
        Number of halo particles of the main cluster.

    Methods
    -------
    load_hdf5:
        Load a parent hdf5 file read-only.
    get_sub_part:
        Write a subset of particles, selected by index arrays, to a new file.

    References
    ----------
    [1] Collette, A.,
        "Python and HDF5",
        O'reilly, 2013.
    [2] h5py doc
        https://docs.h5py.org
    """

    def __init__(self, input_dir, output_dir, numhalo=83499, numgas=81935):
        # Directories plus the per-cluster particle counts used to derive the
        # ParticleID thresholds separating the two clusters.
        self.input_dir = input_dir
        self.output_dir = output_dir
        self.numhalo = numhalo
        self.numgas = numgas

    def load_hdf5(self, fname):
        """
        Open the hdf5 file with h5py and return the (read-only) object.

        Parameter
        ---------
        fname: string
            File name, relative to ``input_dir``.

        Return
        ------
        filepart: hdf5 object
            The object holding particle datasets.
        """
        # open
        filepath = os.path.join(self.input_dir, fname)
        filepart = h5py.File(filepath, 'r')
        return filepart

    def get_sub_part(self, filepart, partidx, fname):
        """
        Write the selected gas and halo particles into a new sub HDF5 file,
        copying the Header attributes from the parent snapshot.

        Parameters
        ----------
        filepart: hdf5 object
            The object holding particle datasets.
        partidx: tuple of np.ndarray
            (gas_indices, halo_indices) of the particles to be extracted.
        fname: string
            File name of the sub set, created inside ``output_dir``.
        """
        # Init: overwrite any existing output file of the same name.
        filepath = os.path.join(self.output_dir, fname)
        if os.path.exists(filepath):
            os.remove(filepath)
        subpart = h5py.File(filepath, 'a')
        # Header: copy every attribute from the parent snapshot verbatim.
        header = subpart.create_group('Header')
        for key, value in filepart['Header'].attrs.items():
            header.attrs[key] = value
        # Change NumPart_Thisfile to the sizes of the extracted subsets.
        numgas = partidx[0].shape[0]
        numhalo = partidx[1].shape[0]
        # header.attrs['NumPaat_Total'] = [numpart,numpart,0,0,0,0]
        # NOTE(review): NumPart_Total is left as copied from the parent file.
        header.attrs['NumPart_ThisFile'] = [numgas, numhalo, 0, 0, 0, 0]
        # PartType0 (gas): copy every dataset, sliced by the gas indices.
        parttype0 = subpart.create_group('PartType0')
        group_type0 = filepart['PartType0']
        for key in group_type0.keys():
            dataset = group_type0[key]
            # Extract: 2-D datasets (e.g. coordinates) need row slicing;
            # 1-D datasets raise IndexError and take the fallback path.
            partset = dataset[:]
            try:
                partset = partset[partidx[0], :]
            except IndexError:
                partset = partset[partidx[0]]
            parttype0.create_dataset(key, data=partset)
        # PartType1 (halo): same treatment with the halo indices.
        parttype1 = subpart.create_group('PartType1')
        group_type1 = filepart['PartType1']
        for key in group_type1.keys():
            dataset = group_type1[key]
            # Extract (same 2-D / 1-D fallback as above)
            partset = dataset[:]
            try:
                partset = partset[partidx[1], :]
            except IndexError:
                partset = partset[partidx[1]]
            parttype1.create_dataset(key, data=partset)
        # close
        subpart.close()

    def get_single_file(self, fname):
        """
        Split one snapshot file into two per-cluster sub files
        (``snap_XXX_c1.hdf5`` and ``snap_XXX_c2.hdf5``).
        """
        # Init
        # output
        if not os.path.exists(self.output_dir):
            os.mkdir(self.output_dir)
        # Snapshot index: first run of three digits in the file name.
        snapidx = re.findall(r'[0-9][0-9][0-9]', fname)
        snapidx = int(snapidx[0])
        print('Snap %03d' % snapidx)
        # load file
        filepart = self.load_hdf5(fname)
        # cluster1
        # Indices
        # gas: IDs below numgas are assumed to belong to the main cluster.
        particle_gas = filepart['/PartType0/ParticleIDs']
        gas_idx = np.where(particle_gas[:] < self.numgas)[0]
        # halo: note ``numgas`` below is the TOTAL gas count in this file,
        # shadowing self.numgas -- halo IDs appear to be offset by it.
        particle_halo = filepart['/PartType1/ParticleIDs']
        numgas = particle_gas[:].shape[0]
        halo_idx = np.where(particle_halo[:] <
                            self.numhalo + numgas)[0]
        # idx
        partidx_c1 = (gas_idx, halo_idx)
        # split
        subname_c1 = ("snap_%03d_c1.hdf5" % snapidx)
        self.get_sub_part(filepart, partidx_c1, subname_c1)
        # cluster2
        # Indices
        # gas -- NOTE(review): strict '>' excludes a particle whose ID
        # equals self.numgas from both clusters; confirm the ID layout.
        gas_idx = np.where(particle_gas[:] > self.numgas)[0]
        # halo -- NOTE(review): cluster1 used self.numhalo + numgas as the
        # threshold but this uses self.numgas + numgas; verify this is not
        # a numgas/numhalo mix-up.
        halo_idx = np.where(particle_halo[:] >
                            self.numgas + numgas)[0]
        # idx
        partidx_c2 = (gas_idx, halo_idx)
        # split
        subname_c2 = ("snap_%03d_c2.hdf5" % snapidx)
        self.get_sub_part(filepart, partidx_c2, subname_c2)
        filepart.close()

    def get_multi_files(self):
        """
        Split every ``.hdf5`` file found in ``input_dir``, in sorted order.
        """
        # Init
        files = os.listdir(self.input_dir)
        files.sort()
        # output
        if not os.path.exists(self.output_dir):
            os.mkdir(self.output_dir)
        # Process each snapshot file in turn.
        for f in files:
            if os.path.splitext(f)[-1] == '.hdf5':
                self.get_single_file(f)
{% extends "template_used/base.html" %}
/*
* Copyright (c) 2007 Mockito contributors
* This program is made available under the terms of the MIT License.
*/
package org.mockito.internal;
import org.mockito.MockedSingleton;
import org.mockito.plugins.MockMaker;
public final class MockedSingletonImpl<T> extends ScopedMockImpl<MockMaker.SingletonMockControl<T>>
implements MockedSingleton<T> {
public MockedSingletonImpl(MockMaker.SingletonMockControl<T> control) {
super(control);
}
@Override
public T getInstance() {
return control.getInstance();
}
@Override
public String toString() {
return "singleton mock for " + control.getInstance();
}
} | java | github | https://github.com/mockito/mockito | mockito-core/src/main/java/org/mockito/internal/MockedSingletonImpl.java |
# -*- coding: utf-8 -*-
# Open Source Initiative OSI - The MIT License (MIT):Licensing
#
# The MIT License (MIT)
# Copyright (c) 2012 DotCloud Inc (opensource@dotcloud.com)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
class LostRemote(Exception):
pass
class TimeoutExpired(Exception):
def __init__(self, timeout_s, when=None):
msg = 'timeout after {0}s'.format(timeout_s)
if when:
msg = '{0}, when {1}'.format(msg, when)
super(TimeoutExpired, self).__init__(msg)
class RemoteError(Exception):
def __init__(self, name, human_msg, human_traceback):
self.name = name
self.msg = human_msg
self.traceback = human_traceback
def __str__(self):
if self.traceback is not None:
return self.traceback
return '{0}: {1}'.format(self.name, self.msg) | unknown | codeparrot/codeparrot-clean | ||
import os
import shutil
import tempfile
import dnf
class DnfBaseWrapper(object):
def __init__(self, arch=None, cache_dir=None):
# prepopulate dict, everything else is redirected to the base object
self.__dict__["tmp_dir"] = None
self.__dict__["base"] = None
self.__dict__["conf"] = None
self.tmp_dir = tempfile.mkdtemp(prefix="dnf_")
self.conf = dnf.conf.Conf()
if cache_dir:
self.conf.cachedir = cache_dir
else:
self.conf.cachedir = os.path.join(self.tmp_dir, "cache")
if arch:
# override runtime arch if requested
self.conf.substitutions["arch"] = arch
self.conf.substitutions["basearch"] = dnf.rpm.basearch(self.conf.substitutions["arch"])
self.conf.assumeyes = False
self.base = dnf.Base(conf=self.conf)
def __del__(self):
try:
if self.tmp_dir:
shutil.rmtree(self.tmp_dir)
except:
pass
def __getattr__(self, name):
return getattr(self.base, name)
def __setattr__(self, name, value):
if name in self.__dict__:
self.__dict__[name] = value
return
setattr(self.base, name, value)
def add_repo(self, repo_id, baseurl):
if baseurl.startswith("/"):
baseurl = "file://" + baseurl
return self.base.repos.add_new_repo(repo_id, self.base.conf, baseurl=[baseurl], skip_if_unavailable=False)
def fill_sack(self):
self.base.fill_sack(load_system_repo=False, load_available_repos=True) | unknown | codeparrot/codeparrot-clean | ||
# Configuration file for jupyter-notebook.
#------------------------------------------------------------------------------
# Configurable configuration
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# LoggingConfigurable configuration
#------------------------------------------------------------------------------
# A parent class for Configurables that log.
#
# Subclasses have a log trait, and the default behavior is to get the logger
# from the currently running Application.
#------------------------------------------------------------------------------
# SingletonConfigurable configuration
#------------------------------------------------------------------------------
# A configurable that only allows one instance.
#
# This class is for classes that should only have one instance of itself or
# *any* subclass. To create and retrieve such a class use the
# :meth:`SingletonConfigurable.instance` method.
#------------------------------------------------------------------------------
# Application configuration
#------------------------------------------------------------------------------
# This is an application.
# The date format used by logging formatters for %(asctime)s
# c.Application.log_datefmt = '%Y-%m-%d %H:%M:%S'
# The Logging format template
# c.Application.log_format = '[%(name)s]%(highlevel)s %(message)s'
# Set the log level by value or name.
# c.Application.log_level = 30
#------------------------------------------------------------------------------
# JupyterApp configuration
#------------------------------------------------------------------------------
# Base class for Jupyter applications
# Answer yes to any prompts.
# c.JupyterApp.answer_yes = False
# Full path of a config file.
# c.JupyterApp.config_file = ''
# Specify a config file to load.
# c.JupyterApp.config_file_name = ''
# Generate default config file.
# c.JupyterApp.generate_config = False
#------------------------------------------------------------------------------
# NotebookApp configuration
#------------------------------------------------------------------------------
# Set the Access-Control-Allow-Credentials: true header
# c.NotebookApp.allow_credentials = False
# Set the Access-Control-Allow-Origin header
#
# Use '*' to allow any origin to access your server.
#
# Takes precedence over allow_origin_pat.
# c.NotebookApp.allow_origin = ''
# Use a regular expression for the Access-Control-Allow-Origin header
#
# Requests from an origin matching the expression will get replies with:
#
# Access-Control-Allow-Origin: origin
#
# where `origin` is the origin of the request.
#
# Ignored if allow_origin is set.
# c.NotebookApp.allow_origin_pat = ''
# DEPRECATED use base_url
# c.NotebookApp.base_project_url = '/'
# The base URL for the notebook server.
#
# Leading and trailing slashes can be omitted, and will automatically be added.
c.NotebookApp.base_url = '/jupyter/'
# Specify what command to use to invoke a web browser when opening the notebook.
# If not specified, the default browser will be determined by the `webbrowser`
# standard library module, which allows setting of the BROWSER environment
# variable to override it.
# c.NotebookApp.browser = ''
# The full path to an SSL/TLS certificate file.
c.NotebookApp.certfile = '/etc/ssl/bach.copperdog.org.crt'
# The full path to a certificate authority certifificate for SSL/TLS client
# authentication.
# c.NotebookApp.client_ca = ''
# The config manager class to use
# c.NotebookApp.config_manager_class = 'notebook.services.config.manager.ConfigManager'
# The notebook manager class to use.
# c.NotebookApp.contents_manager_class = 'notebook.services.contents.filemanager.FileContentsManager'
# The random bytes used to secure cookies. By default this is a new random
# number every time you start the Notebook. Set it to a value in a config file
# to enable logins to persist across server sessions.
#
# Note: Cookie secrets should be kept private, do not share config files with
# cookie_secret stored in plaintext (you can read the value from a file).
# c.NotebookApp.cookie_secret = b''
# The file where the cookie secret is stored.
# c.NotebookApp.cookie_secret_file = ''
# The default URL to redirect to from `/`
# c.NotebookApp.default_url = '/tree'
# Whether to enable MathJax for typesetting math/TeX
#
# MathJax is the javascript library Jupyter uses to render math/LaTeX. It is
# very large, so you may want to disable it if you have a slow internet
# connection, or for offline use of the notebook.
#
# When disabled, equations etc. will appear as their untransformed TeX source.
# c.NotebookApp.enable_mathjax = True
# extra paths to look for Javascript notebook extensions
# c.NotebookApp.extra_nbextensions_path = []
# Extra paths to search for serving static files.
#
# This allows adding javascript/css to be available from the notebook server
# machine, or overriding individual files in the IPython
# c.NotebookApp.extra_static_paths = []
# Extra paths to search for serving jinja templates.
#
# Can be used to override templates from notebook.templates.
# c.NotebookApp.extra_template_paths = []
#
# c.NotebookApp.file_to_run = ''
# Use minified JS file or not, mainly use during dev to avoid JS recompilation
# c.NotebookApp.ignore_minified_js = False
# The IP address the notebook server will listen on.
c.NotebookApp.ip = '*'
# Supply extra arguments that will be passed to Jinja environment.
# c.NotebookApp.jinja_environment_options = {}
# Extra variables to supply to jinja templates when rendering.
# c.NotebookApp.jinja_template_vars = {}
# The kernel manager class to use.
# c.NotebookApp.kernel_manager_class = 'notebook.services.kernels.kernelmanager.MappingKernelManager'
# The kernel spec manager class to use. Should be a subclass of
# `jupyter_client.kernelspec.KernelSpecManager`.
#
# The Api of KernelSpecManager is provisional and might change without warning
# between this version of Jupyter and the next stable one.
# c.NotebookApp.kernel_spec_manager_class = 'jupyter_client.kernelspec.KernelSpecManager'
# The full path to a private key file for usage with SSL/TLS.
c.NotebookApp.keyfile = '/etc/ssl/bach.copperdog.org.key'
# The login handler class to use.
# c.NotebookApp.login_handler_class = 'notebook.auth.login.LoginHandler'
# The logout handler class to use.
# c.NotebookApp.logout_handler_class = 'notebook.auth.logout.LogoutHandler'
# The url for MathJax.js.
# c.NotebookApp.mathjax_url = ''
# The directory to use for notebooks and kernels.
c.NotebookApp.notebook_dir = '/data/jupyter/'
# Whether to open in a browser after starting. The specific browser used is
# platform dependent and determined by the python standard library `webbrowser`
# module, unless it is overridden using the --browser (NotebookApp.browser)
# configuration option.
c.NotebookApp.open_browser = False
# Hashed password to use for web authentication.
#
# To generate, type in a python/IPython shell:
#
# from notebook.auth import passwd; passwd()
#
# The string should be of the form type:salt:hashed-password.
#
# NOTE(review): with this password left commented out while ip is set to '*'
# above, access control relies entirely on the default token mechanism —
# confirm this is intended before exposing the server to the network.
#c.NotebookApp.password = 'sha1:b32feb3f51c7:aa200891d28074c6dbd0a3435edc8854f496c1e4'
# The port the notebook server will listen on.
# c.NotebookApp.port = 8888
# The number of additional ports to try if the specified port is not available.
c.NotebookApp.port_retries = 0
# DISABLED: use %pylab or %matplotlib in the notebook to enable matplotlib.
# c.NotebookApp.pylab = 'disabled'
# Reraise exceptions encountered loading server extensions?
# c.NotebookApp.reraise_server_extension_failures = False
# Python modules to load as notebook server extensions. This is an experimental
# API, and may change in future releases.
# c.NotebookApp.server_extensions = []
# The session manager class to use.
# c.NotebookApp.session_manager_class = 'notebook.services.sessions.sessionmanager.SessionManager'
# Supply SSL options for the tornado HTTPServer. See the tornado docs for
# details.
# c.NotebookApp.ssl_options = {}
# Supply overrides for the tornado.web.Application that the Jupyter notebook
# uses.
# c.NotebookApp.tornado_settings = {}
# Whether or not to trust the X-Scheme/X-Forwarded-Proto and X-Real-Ip/
# X-Forwarded-For headers sent by the upstream reverse proxy. Necessary if the
# proxy handles SSL.
# c.NotebookApp.trust_xheaders = False
# DEPRECATED, use tornado_settings
# c.NotebookApp.webapp_settings = {}
# The base URL for websockets, if it differs from the HTTP server (hint: it
# almost certainly doesn't).
#
# Should be in the form of an HTTP origin: ws[s]://hostname[:port]
# c.NotebookApp.websocket_url = ''
#------------------------------------------------------------------------------
# ConnectionFileMixin configuration
#------------------------------------------------------------------------------
# Mixin for configurable classes that work with connection files
# JSON file in which to store connection info [default: kernel-<pid>.json]
#
# This file will contain the IP, ports, and authentication key needed to connect
# clients to this kernel. By default, this file will be created in the security
# dir of the current profile, but can be specified by absolute path.
# c.ConnectionFileMixin.connection_file = ''
# set the control (ROUTER) port [default: random]
# c.ConnectionFileMixin.control_port = 0
# set the heartbeat port [default: random]
# c.ConnectionFileMixin.hb_port = 0
# set the iopub (PUB) port [default: random]
# c.ConnectionFileMixin.iopub_port = 0
# Set the kernel's IP address [default localhost]. If the IP address is
# something other than localhost, then Consoles on other machines will be able
# to connect to the Kernel, so be careful!
# c.ConnectionFileMixin.ip = ''
# set the shell (ROUTER) port [default: random]
# c.ConnectionFileMixin.shell_port = 0
# set the stdin (ROUTER) port [default: random]
# c.ConnectionFileMixin.stdin_port = 0
#
# c.ConnectionFileMixin.transport = 'tcp'
#------------------------------------------------------------------------------
# KernelManager configuration
#------------------------------------------------------------------------------
# Manages a single kernel in a subprocess on this host.
#
# This version starts kernels with Popen.
# Should we autorestart the kernel if it dies.
# c.KernelManager.autorestart = False
# DEPRECATED: Use kernel_name instead.
#
# The Popen Command to launch the kernel. Override this if you have a custom
# kernel. If kernel_cmd is specified in a configuration file, Jupyter does not
# pass any arguments to the kernel, because it cannot make any assumptions about
# the arguments that the kernel understands. In particular, this means that the
# kernel does not receive the option --debug if it given on the Jupyter command
# line.
# c.KernelManager.kernel_cmd = []
#------------------------------------------------------------------------------
# Session configuration
#------------------------------------------------------------------------------
# Object for handling serialization and sending of messages.
#
# The Session object handles building messages and sending them with ZMQ sockets
# or ZMQStream objects. Objects can communicate with each other over the
# network via Session objects, and only need to work with the dict-based IPython
# message spec. The Session will handle serialization/deserialization, security,
# and metadata.
#
# Sessions support configurable serialization via packer/unpacker traits, and
# signing with HMAC digests via the key/keyfile traits.
#
# Parameters ----------
#
# debug : bool
# whether to trigger extra debugging statements
# packer/unpacker : str : 'json', 'pickle' or import_string
# importstrings for methods to serialize message parts. If just
# 'json' or 'pickle', predefined JSON and pickle packers will be used.
# Otherwise, the entire importstring must be used.
#
# The functions must accept at least valid JSON input, and output *bytes*.
#
# For example, to use msgpack:
# packer = 'msgpack.packb', unpacker='msgpack.unpackb'
# pack/unpack : callables
# You can also set the pack/unpack callables for serialization directly.
# session : bytes
# the ID of this Session object. The default is to generate a new UUID.
# username : unicode
# username added to message headers. The default is to ask the OS.
# key : bytes
# The key used to initialize an HMAC signature. If unset, messages
# will not be signed or checked.
# keyfile : filepath
# The file containing a key. If this is set, `key` will be initialized
# to the contents of the file.
# Threshold (in bytes) beyond which an object's buffer should be extracted to
# avoid pickling.
# c.Session.buffer_threshold = 1024
# Threshold (in bytes) beyond which a buffer should be sent without copying.
# c.Session.copy_threshold = 65536
# Debug output in the Session
# c.Session.debug = False
# The maximum number of digests to remember.
#
# The digest history will be culled when it exceeds this value.
# c.Session.digest_history_size = 65536
# The maximum number of items for a container to be introspected for custom
# serialization. Containers larger than this are pickled outright.
# c.Session.item_threshold = 64
# execution key, for signing messages.
# c.Session.key = b''
# path to file containing execution key.
# c.Session.keyfile = ''
# Metadata dictionary, which serves as the default top-level metadata dict for
# each message.
# c.Session.metadata = {}
# The name of the packer for serializing messages. Should be one of 'json',
# 'pickle', or an import name for a custom callable serializer.
# c.Session.packer = 'json'
# The UUID identifying this session.
# c.Session.session = ''
# The digest scheme used to construct the message signatures. Must have the form
# 'hmac-HASH'.
# c.Session.signature_scheme = 'hmac-sha256'
# The name of the unpacker for unserializing messages. Only used with custom
# functions for `packer`.
# c.Session.unpacker = 'json'
# Username for the Session. Default is your system username.
# c.Session.username = 'jupyter'
#------------------------------------------------------------------------------
# MultiKernelManager configuration
#------------------------------------------------------------------------------
# A class for managing multiple kernels.
# The name of the default kernel to start
# c.MultiKernelManager.default_kernel_name = 'python3'
# The kernel manager class. This is configurable to allow subclassing of the
# KernelManager for customized behavior.
# c.MultiKernelManager.kernel_manager_class = 'jupyter_client.ioloop.IOLoopKernelManager'
#------------------------------------------------------------------------------
# MappingKernelManager configuration
#------------------------------------------------------------------------------
# A KernelManager that handles notebook mapping and HTTP error handling
#
# c.MappingKernelManager.root_dir = ''
#------------------------------------------------------------------------------
# ContentsManager configuration
#------------------------------------------------------------------------------
# Base class for serving files and directories.
#
# This serves any text or binary file, as well as directories, with special
# handling for JSON notebook documents.
#
# Most APIs take a path argument, which is always an API-style unicode path, and
# always refers to a directory.
#
# - unicode, not url-escaped
# - '/'-separated
# - leading and trailing '/' will be stripped
# - if unspecified, path defaults to '',
# indicating the root path.
#
# c.ContentsManager.checkpoints = None
#
# c.ContentsManager.checkpoints_class = 'notebook.services.contents.checkpoints.Checkpoints'
#
# c.ContentsManager.checkpoints_kwargs = {}
# Glob patterns to hide in file and directory listings.
# c.ContentsManager.hide_globs = ['__pycache__', '*.pyc', '*.pyo', '.DS_Store', '*.so', '*.dylib', '*~']
# Python callable or importstring thereof
#
# To be called on a contents model prior to save.
#
# This can be used to process the structure, such as removing notebook outputs
# or other side effects that should not be saved.
#
# It will be called as (all arguments passed by keyword)::
#
# hook(path=path, model=model, contents_manager=self)
#
# - model: the model to be saved. Includes file contents.
# Modifying this dict will affect the file that is stored.
# - path: the API path of the save destination
# - contents_manager: this ContentsManager instance
# c.ContentsManager.pre_save_hook = None
# The base name used when creating untitled directories.
# c.ContentsManager.untitled_directory = 'Untitled Folder'
# The base name used when creating untitled files.
# c.ContentsManager.untitled_file = 'untitled'
# The base name used when creating untitled notebooks.
# c.ContentsManager.untitled_notebook = 'Untitled'
#------------------------------------------------------------------------------
# FileManagerMixin configuration
#------------------------------------------------------------------------------
# Mixin for ContentsAPI classes that interact with the filesystem.
#
# Provides facilities for reading, writing, and copying both notebooks and
# generic files.
#
# Shared by FileContentsManager and FileCheckpoints.
#
# Note ---- Classes using this mixin must provide the following attributes:
#
# root_dir : unicode
# A directory against which API-style paths are to be resolved.
#
# log : logging.Logger
# By default notebooks are saved on disk to a temporary file which, if
# successfully written, then replaces the old one. This procedure, namely
# 'atomic_writing', causes some bugs on file systems without operation-order
# enforcement (like some networked filesystems). If set to False, the new
# notebook is written directly over the old one, which could fail (e.g. full
# filesystem or quota exceeded).
# c.FileManagerMixin.use_atomic_writing = True
#------------------------------------------------------------------------------
# FileContentsManager configuration
#------------------------------------------------------------------------------
# Python callable or importstring thereof
#
# to be called on the path of a file just saved.
#
# This can be used to process the file on disk, such as converting the notebook
# to a script or HTML via nbconvert.
#
# It will be called as (all arguments passed by keyword)::
#
# hook(os_path=os_path, model=model, contents_manager=instance)
#
# - path: the filesystem path to the file just written - model: the model
# representing the file - contents_manager: this ContentsManager instance
# c.FileContentsManager.post_save_hook = None
#
# c.FileContentsManager.root_dir = ''
# DEPRECATED, use post_save_hook
# c.FileContentsManager.save_script = False
#------------------------------------------------------------------------------
# NotebookNotary configuration
#------------------------------------------------------------------------------
# A class for computing and verifying notebook signatures.
# The hashing algorithm used to sign notebooks.
# c.NotebookNotary.algorithm = 'sha256'
# The number of notebook signatures to cache. When the number of signatures
# exceeds this value, the oldest 25% of signatures will be culled.
# c.NotebookNotary.cache_size = 65535
# The sqlite file in which to store notebook signatures. By default, this will
# be in your Jupyter runtime directory. You can set it to ':memory:' to disable
# sqlite writing to the filesystem.
# c.NotebookNotary.db_file = ''
# The secret key with which notebooks are signed.
# c.NotebookNotary.secret = b''
# The file where the secret key is stored.
# c.NotebookNotary.secret_file = ''
#------------------------------------------------------------------------------
# KernelSpecManager configuration
#------------------------------------------------------------------------------
# Whitelist of allowed kernel names.
#
# By default, all installed kernels are allowed.
# c.KernelSpecManager.whitelist = set() | unknown | codeparrot/codeparrot-clean | ||
# Used to notify core maintainers about new model PR being merged
name: New model PR merged notification

on:
  push:
    branches:
      - main
    # Fire only when a model implementation file under src/transformers/models
    # is added or changed.
    paths:
      - 'src/transformers/models/*/modeling_*'

jobs:
  notify_new_model:
    name: Notify new model
    runs-on: ubuntu-22.04
    steps:
      # fetch-depth: 0 checks out the full history so the new-model detection
      # step can diff against the previous commit.
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
      # Detect whether the last commit introduced a new model and export the
      # result (and the commit SHA) into the job environment for later steps.
      - name: Check new model
        shell: bash
        run: |
          python -m pip install gitpython
          python -c 'from utils.pr_slow_ci_models import get_new_model; new_model = get_new_model(diff_with_last_commit=True); print(new_model)' | tee output.txt
          echo "NEW_MODEL=$(tail -n 1 output.txt)" >> $GITHUB_ENV
          echo "COMMIT_SHA=$(git log -1 --format=%H)" >> $GITHUB_ENV
      # The remaining steps run only when a new model was detected
      # (NEW_MODEL is non-empty).
      - name: print commit sha
        if: ${{ env.NEW_MODEL != ''}}
        shell: bash
        run: |
          echo "$COMMIT_SHA"
      - name: print new model
        if: ${{ env.NEW_MODEL != ''}}
        shell: bash
        run: |
          echo "$NEW_MODEL"
      # Post a Block Kit message to Slack announcing the new model.
      - name: Notify
        if: ${{ env.NEW_MODEL != ''}}
        uses: slackapi/slack-github-action@6c661ce58804a1a20f6dc5fbee7f0381b469e001
        with:
          # Slack channel id, channel name, or user id to post message.
          # See also: https://api.slack.com/methods/chat.postMessage#channels
          channel-id: transformers-new-model-notification
          # For posting a rich message using Block Kit
          payload: |
            {
              "blocks": [
                {
                  "type": "header",
                  "text": {
                    "type": "plain_text",
                    "text": "New model!",
                    "emoji": true
                  }
                },
                {
                  "type": "section",
                  "text": {
                    "type": "mrkdwn",
                    "text": "<https://github.com/huggingface/transformers/commit/${{ env.COMMIT_SHA }}|New model: ${{ env.NEW_MODEL }}> GH_ArthurZucker, GH_lysandrejik, GH_ydshieh\ncommit SHA: ${{ env.COMMIT_SHA }}"
                  }
                }
              ]
            }
        env:
          SLACK_BOT_TOKEN: ${{ secrets.SLACK_CIFEEDBACK_BOT_TOKEN }}
"""Backends for traces
Available backends
------------------
1. NumPy array (pymc3.backends.NDArray)
2. Text files (pymc3.backends.Text)
3. SQLite (pymc3.backends.SQLite)
The NDArray backend holds the entire trace in memory, whereas the Text
and SQLite backends store the values while sampling.
Selecting a backend
-------------------
By default, a NumPy array is used as the backend. To specify a different
backend, pass a backend instance to `sample`.
For example, the following would save the sampling values to CSV files
in the directory 'test'.
>>> import pymc3 as pm
>>> db = pm.backends.Text('test')
>>> trace = pm.sample(..., trace=db)
Selecting values from a backend
-------------------------------
After a backend is finished sampling, it returns a MultiTrace object.
Values can be accessed in a few ways. The easiest way is to index the
backend object with a variable or variable name.
>>> trace['x'] # or trace.x or trace[x]
The call will return the sampling values of `x`, with the values for
all chains concatenated. (For a single call to `sample`, the number of
chains will correspond to the `njobs` argument.)
To discard the first N values of each chain, slicing syntax can be
used.
>>> trace['x', 1000:]
The `get_values` method offers more control over which values are
returned. The call below will discard the first 1000 iterations
from each chain and keep the values for each chain as separate arrays.
>>> trace.get_values('x', burn=1000, combine=False)
The `chains` parameter of `get_values` can be used to limit the chains
that are retrieved.
>>> trace.get_values('x', burn=1000, chains=[0, 2])
MultiTrace objects also support slicing. For example, the following
call would return a new trace object without the first 1000 sampling
iterations for all traces and variables.
>>> sliced_trace = trace[1000:]
The backend for the new trace is always NDArray, regardless of the
type of original trace. Only the NDArray backend supports a stop
value in the slice.
Loading a saved backend
-----------------------
Saved backends can be loaded using `load` function in the module for the
specific backend.
>>> trace = pm.backends.text.load('test')
Writing custom backends
-----------------------
Backends consist of a class that handles sampling storage and value
selection. Three sampling methods of backend will be called:
- setup: Before sampling is started, the `setup` method will be called
with two arguments: the number of draws and the chain number. This is
useful setting up any structure for storing the sampling values that
require the above information.
- record: Record the sampling results for the current draw. This method
will be called with a dictionary of values mapped to the variable
names. This is the only sampling function that *must* do something to
have a meaningful backend.
- close: This method is called following sampling and should perform any
actions necessary for finalizing and cleaning up the backend.
The base storage class `backends.base.BaseTrace` provides common model
setup that is used by all the PyMC backends.
Several selection methods must also be defined:
- get_values: This is the core method for selecting values from the
backend. It can be called directly and is used by __getitem__ when the
backend is indexed with a variable name or object.
- _slice: Defines how the backend returns a slice of itself. This
is called if the backend is indexed with a slice range.
- point: Returns values for each variable at a single iteration. This is
called if the backend is indexed with a single integer.
- __len__: This should return the number of draws.
When `pymc3.sample` finishes, it wraps all trace objects in a MultiTrace
object that provides a consistent selection interface for all backends.
If the traces are stored on disk, then a `load` function should also be
defined that returns a MultiTrace object.
For specific examples, see pymc3.backends.{ndarray,text,sqlite}.py.
"""
from ..backends.ndarray import NDArray
from ..backends.text import Text
from ..backends.sqlite import SQLite
_shortcuts = {'text': {'backend': Text,
'name': 'mcmc'},
'sqlite': {'backend': SQLite,
'name': 'mcmc.sqlite'}} | unknown | codeparrot/codeparrot-clean | ||
import {Component} from '@angular/core';
import {TestBed} from '@angular/core/testing';
import {GreetComponent} from './greet.component';
describe('greet component', () => {
  it('should allow binding to an input', () => {
    // Render the host component with its initial input value.
    const testFixture = TestBed.createComponent(TestCmp);
    const host = testFixture.componentInstance;
    const element: HTMLElement = testFixture.nativeElement;
    testFixture.detectChanges();
    expect(element.textContent).toBe('Initial - initial-unset');

    // Update the bound property, mark dirty, and re-render.
    host.firstName = 'John';
    testFixture.changeDetectorRef.markForCheck();
    testFixture.detectChanges();
    expect(element.textContent).toBe('John - initial-unset');
  });

  it('should emit an event for the click output', () => {
    const testFixture = TestBed.createComponent(TestCmp);
    testFixture.detectChanges();

    // Simulate a user click on the button rendered inside <greet>.
    const button = testFixture.nativeElement.querySelector('button');
    button.dispatchEvent(new MouseEvent('click'));
    testFixture.detectChanges();

    // Both outputs should have fired exactly once.
    expect(testFixture.componentInstance.clickCount).toBe(1);
    expect(testFixture.componentInstance.clickCount2).toBe(1);
  });
});
// Host component used by the specs in this file to exercise GreetComponent's
// `firstName` input and its two click outputs.
@Component({
  template: `
    <greet
      [firstName]="firstName"
      (clickFromInside)="clickCount = clickCount + 1"
      (clickFromInside2)="clickCount2 = clickCount2 + 1"
    />
  `,
  imports: [GreetComponent],
})
class TestCmp {
  // Number of times the `clickFromInside` output has fired.
  clickCount = 0;
  // Number of times the `clickFromInside2` output has fired.
  clickCount2 = 0;
  // Value bound to GreetComponent's `firstName` input.
  firstName = 'Initial';
}
# encoding: utf-8
from south.db import db
from south.v2 import SchemaMigration
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply the migration: create the agendas_agendabill table.

        Creates the 'AgendaBill' through-model linking Agenda and Bill
        (with score/importance/reasoning columns), then adds a unique
        constraint over the (agenda, bill) pair.
        """
        # Adding model 'AgendaBill'
        db.create_table('agendas_agendabill', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('agenda', self.gf('django.db.models.fields.related.ForeignKey')(related_name='agendabills', to=orm['agendas.Agenda'])),
            ('bill', self.gf('django.db.models.fields.related.ForeignKey')(related_name='agendabills', to=orm['laws.Bill'])),
            ('score', self.gf('django.db.models.fields.FloatField')(default=0.0)),
            ('importance', self.gf('django.db.models.fields.FloatField')(default=1.0)),
            ('reasoning', self.gf('django.db.models.fields.TextField')(null=True)),
        ))
        db.send_create_signal('agendas', ['AgendaBill'])

        # Adding unique constraint on 'AgendaBill', fields ['agenda', 'bill']
        db.create_unique('agendas_agendabill', ['agenda_id', 'bill_id'])
def backwards(self, orm):
# Removing unique constraint on 'AgendaBill', fields ['agenda', 'bill']
db.delete_unique('agendas_agendabill', ['agenda_id', 'bill_id'])
# Deleting model 'AgendaBill'
db.delete_table('agendas_agendabill')
models = {
'agendas.agenda': {
'Meta': {'unique_together': "(('name', 'public_owner_name'),)", 'object_name': 'Agenda'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'editors': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'agendas'", 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'num_followers': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'public_owner_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'votes': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['laws.Vote']", 'through': "orm['agendas.AgendaVote']", 'symmetrical': 'False'})
},
'agendas.agendabill': {
'Meta': {'unique_together': "(('agenda', 'bill'),)", 'object_name': 'AgendaBill'},
'agenda': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'agendabills'", 'to': "orm['agendas.Agenda']"}),
'bill': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'agendabills'", 'to': "orm['laws.Bill']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'importance': ('django.db.models.fields.FloatField', [], {'default': '1.0'}),
'reasoning': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'score': ('django.db.models.fields.FloatField', [], {'default': '0.0'})
},
'agendas.agendameeting': {
'Meta': {'unique_together': "(('agenda', 'meeting'),)", 'object_name': 'AgendaMeeting'},
'agenda': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'agendameetings'", 'to': "orm['agendas.Agenda']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'meeting': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'agendacommitteemeetings'", 'to': "orm['committees.CommitteeMeeting']"}),
'reasoning': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'score': ('django.db.models.fields.FloatField', [], {'default': '0.0'})
},
'agendas.agendavote': {
'Meta': {'unique_together': "(('agenda', 'vote'),)", 'object_name': 'AgendaVote'},
'agenda': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'agendavotes'", 'to': "orm['agendas.Agenda']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'importance': ('django.db.models.fields.FloatField', [], {'default': '1.0'}),
'reasoning': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'score': ('django.db.models.fields.FloatField', [], {'default': '0.0'}),
'vote': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'agendavotes'", 'to': "orm['laws.Vote']"})
},
'agendas.usersuggestedvote': {
'Meta': {'unique_together': "(('agenda', 'vote', 'user'),)", 'object_name': 'UserSuggestedVote'},
'agenda': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_suggested_votes'", 'to': "orm['agendas.Agenda']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reasoning': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'sent_to_editor': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'suggested_agenda_votes'", 'to': "orm['auth.User']"}),
'vote': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_suggested_agendas'", 'to': "orm['laws.Vote']"})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 10, 13, 10, 31, 36, 333166)'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 10, 13, 10, 31, 36, 333069)'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'committees.committee': {
'Meta': {'object_name': 'Committee'},
'aliases': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'chairpersons': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'chaired_committees'", 'blank': 'True', 'to': "orm['mks.Member']"}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'committees'", 'blank': 'True', 'to': "orm['mks.Member']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'portal_knesset_broadcasts_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'blank': 'True'}),
'replacements': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'replacing_in_committees'", 'blank': 'True', 'to': "orm['mks.Member']"})
},
'committees.committeemeeting': {
'Meta': {'ordering': "('-date',)", 'object_name': 'CommitteeMeeting'},
'committee': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'meetings'", 'to': "orm['committees.Committee']"}),
'date': ('django.db.models.fields.DateField', [], {}),
'date_string': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mks_attended': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'committee_meetings'", 'symmetrical': 'False', 'to': "orm['mks.Member']"}),
'protocol_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'src_url': ('django.db.models.fields.URLField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'topics': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'votes_mentioned': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'committee_meetings'", 'blank': 'True', 'to': "orm['laws.Vote']"})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'events.event': {
'Meta': {'object_name': 'Event'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'what': ('django.db.models.fields.TextField', [], {}),
'when': ('django.db.models.fields.DateTimeField', [], {}),
'when_over': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'when_over_guessed': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'where': ('django.db.models.fields.TextField', [], {}),
'which_pk': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'which_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'event_for_event'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'who': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['persons.Person']", 'symmetrical': 'False'})
},
'laws.bill': {
'Meta': {'ordering': "('-stage_date', '-id')", 'object_name': 'Bill'},
'approval_vote': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'bill_approved'", 'unique': 'True', 'null': 'True', 'to': "orm['laws.Vote']"}),
'first_committee_meetings': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'bills_first'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['committees.CommitteeMeeting']"}),
'first_vote': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'bills_first'", 'null': 'True', 'to': "orm['laws.Vote']"}),
'full_title': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'joiners': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'bills_joined'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['mks.Member']"}),
'law': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'bills'", 'null': 'True', 'to': "orm['laws.Law']"}),
'popular_name': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'blank': 'True'}),
'popular_name_slug': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'blank': 'True'}),
'pre_votes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'bills_pre_votes'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['laws.Vote']"}),
'proposers': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'bills'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['mks.Member']"}),
'second_committee_meetings': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'bills_second'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['committees.CommitteeMeeting']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '1000', 'db_index': 'True'}),
'stage': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'stage_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '1000'})
},
'laws.law': {
'Meta': {'object_name': 'Law'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'merged_into': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'duplicates'", 'null': 'True', 'to': "orm['laws.Law']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '1000'})
},
'laws.vote': {
'Meta': {'ordering': "('-time', '-id')", 'object_name': 'Vote'},
'against_coalition': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'against_opposition': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'against_own_bill': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'against_party': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'against_votes_count': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'controversy': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'for_votes_count': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'full_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'full_text_url': ('django.db.models.fields.URLField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'importance': ('django.db.models.fields.FloatField', [], {'default': '0.0'}),
'meeting_number': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'src_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'src_url': ('django.db.models.fields.URLField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'summary': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'time': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'time_string': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'vote_number': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'votes': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'votes'", 'blank': 'True', 'through': "orm['laws.VoteAction']", 'to': "orm['mks.Member']"}),
'votes_count': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'laws.voteaction': {
'Meta': {'object_name': 'VoteAction'},
'against_coalition': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'against_opposition': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'against_own_bill': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'against_party': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'member': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mks.Member']"}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'vote': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['laws.Vote']"})
},
'mks.member': {
'Meta': {'ordering': "['name']", 'object_name': 'Member'},
'area_of_residence': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'average_monthly_committee_presence': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'average_weekly_presence_hours': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'backlinks_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'bills_stats_approved': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'bills_stats_first': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'bills_stats_pre': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'bills_stats_proposed': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'blog': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['planet.Blog']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'current_party': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'members'", 'null': 'True', 'to': "orm['mks.Party']"}),
'current_role_descriptions': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'date_of_death': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'family_status': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'fax': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'img_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'is_current': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'number_of_children': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'parties': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'all_members'", 'symmetrical': 'False', 'through': "orm['mks.Membership']", 'to': "orm['mks.Party']"}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'place_of_birth': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'place_of_residence': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'place_of_residence_lat': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'place_of_residence_lon': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'residence_centrality': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'residence_economy': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'year_of_aliyah': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'mks.membership': {
'Meta': {'object_name': 'Membership'},
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'member': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mks.Member']"}),
'party': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mks.Party']"}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
},
'mks.party': {
'Meta': {'ordering': "('-number_of_seats',)", 'object_name': 'Party'},
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_coalition': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'number_of_members': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'number_of_seats': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
},
'persons.person': {
'Meta': {'ordering': "('name',)", 'object_name': 'Person'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mk': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'person'", 'null': 'True', 'to': "orm['mks.Member']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'titles': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'persons'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['persons.Title']"})
},
'persons.title': {
'Meta': {'object_name': 'Title'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'planet.blog': {
'Meta': {'ordering': "('title', 'url')", 'object_name': 'Blog'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '1024', 'db_index': 'True'})
},
'tagging.tag': {
'Meta': {'ordering': "('name',)", 'object_name': 'Tag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'})
},
'tagging.taggeditem': {
'Meta': {'unique_together': "(('tag', 'content_type', 'object_id'),)", 'object_name': 'TaggedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'items'", 'to': "orm['tagging.Tag']"})
}
}
complete_apps = ['agendas'] | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/env python
"""Python syntax checker with lint friendly output."""
import os
import parser
import re
import sys
def main():
paths, verbose, skip_patterns = parse_options()
paths = filter_paths(paths, skip_patterns)
check(paths, verbose)
def parse_options():
paths = []
skip_patterns = []
option = None
verbose = False
valid_options = [
'-x',
'-v',
]
for arg in sys.argv[1:]:
if option == '-x':
skip_patterns.append(re.compile(arg))
option = None
elif arg.startswith('-'):
if arg not in valid_options:
raise Exception('Unknown Option: %s' % arg)
if arg == '-v':
verbose = True
else:
option = arg
else:
paths.append(arg)
if option:
raise Exception('Incomplete Option: %s' % option)
return paths, verbose, skip_patterns
def filter_paths(paths, skip_patterns):
if not paths:
paths = ['.']
candidates = paths
paths = []
for candidate in candidates:
if os.path.isdir(candidate):
for root, directories, files in os.walk(candidate):
remove = []
for directory in directories:
if directory.startswith('.'):
remove.append(directory)
for path in remove:
directories.remove(path)
for f in files:
if f.endswith('.py'):
paths.append(os.path.join(root, f))
else:
paths.append(candidate)
final_paths = []
for path in sorted(paths):
skip = False
for skip_pattern in skip_patterns:
if skip_pattern.search(path):
skip = True
break
if skip:
continue
final_paths.append(path)
return final_paths
def check(paths, verbose):
status = 0
for path in paths:
if verbose:
sys.stderr.write('%s\n' % path)
sys.stderr.flush()
source_fd = open(path, 'r')
try:
source = source_fd.read()
finally:
source_fd.close()
try:
parser.suite(source)
except SyntaxError:
ex_type, ex, ex_traceback = sys.exc_info()
status = 1
message = ex.text.splitlines()[0].strip()
sys.stdout.write("%s:%d:%d: SyntaxError: %s\n" % (path, ex.lineno, ex.offset, message))
sys.stdout.flush()
sys.exit(status)
if __name__ == '__main__':
main() | unknown | codeparrot/codeparrot-clean | ||
# Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import wx
from robotide.context import IS_WINDOWS
class _ClipboardHandler(object):
def __init__(self, grid):
self._grid = grid
self._clipboard = _GridClipboard()
def clipboard_content(self):
return self._clipboard.get_contents()
def copy(self):
"""Copy the contents of the selected cell(s). This does a normal copy
action if the user is editing a cell, otherwise it places the selected
range of cells on the data.
"""
if not self._edit_control_shown():
self._add_selected_data_to_clipboard()
def cut(self):
"""Cuts the contents of the selected cell(s). This does a normal cut
action if the user is editing a cell, otherwise it places the selected
range of cells on the clipboard.
"""
self._add_selected_data_to_clipboard()
def _add_selected_data_to_clipboard(self):
self._clipboard.set_contents(self._grid.get_selected_content())
def paste(self):
"""Paste the contents of the clipboard. If a cell is being edited just
do a normal paste. If a cell is not being edited, paste whole rows.
"""
if not self._edit_control_shown():
self._paste_to_grid()
def _paste_to_cell_editor(self):
clipboard = self._clipboard.get_contents()
if isinstance(clipboard, list):
cells_as_text = ' '.join([' '.join(row) for row in clipboard])
self._get_edit_control().WriteText(cells_as_text)
def _paste_to_grid(self):
clipboard = self._clipboard.get_contents()
if not clipboard:
return
cell = self._get_starting_cell()
if not isinstance(clipboard, list):
self._write_cell(cell.row, cell.col, clipboard)
else:
row = cell.row
for datarow in clipboard:
col = cell.col
for value in datarow:
self._write_cell(row, col, value)
col += 1
row += 1
def _get_starting_cell(self):
return self._grid.selection.topleft
def _write_cell(self, row, col, value):
self._grid.write_cell(row, col, value, update_history=False)
def _get_edit_control(self):
return self._grid.get_cell_edit_control()
def _edit_control_shown(self):
return self._grid.IsCellEditControlShown()
class _WindowsClipboardHandler(_ClipboardHandler):
def copy(self):
if self._edit_control_shown():
self._get_edit_control().Copy()
else:
_ClipboardHandler.copy(self)
def cut(self):
if self._edit_control_shown():
self._get_edit_control().Cut()
else:
_ClipboardHandler.copy(self)
def _paste_to_cell_editor(self):
self._get_edit_control().Paste()
ClipboardHandler = IS_WINDOWS and _WindowsClipboardHandler\
or _ClipboardHandler
class _GridClipboard(object):
"""Implements a "smart" clipboard."""
def set_contents(self, data):
"""Insert `data` to the system clipboard
`data` may be either a string or list of lists representing rows of
grid data. Other data is ignored
"""
data = self._format_data(data)
if not (data and wx.TheClipboard.Open()):
return
try:
tdo = wx.TextDataObject()
tdo.SetText(data)
wx.TheClipboard.SetData(tdo)
finally:
wx.TheClipboard.Close()
def _format_data(self, data):
if isinstance(data, list):
return os.linesep.join('\t'.join(row) for row in data)
if isinstance(data, basestring):
return data
return None
def get_contents(self):
"""Gets contents of the clipboard.
Returns either a string or a list of rows to be pasted into clipboard.
"""
return self._split_string_from_tabs_and_newlines(self._get_contents())
def _get_contents(self):
if not wx.TheClipboard.Open():
return ''
try:
tdo = wx.TextDataObject()
wx.TheClipboard.GetData(tdo)
return tdo.GetText() or ''
finally:
wx.TheClipboard.Close()
def _split_string_from_tabs_and_newlines(self, string):
return [line.split('\t') for line in string.splitlines()] | unknown | codeparrot/codeparrot-clean | ||
// errorcheck -0 -m=2
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Test that we restrict inlining into very large functions.
// See issue #26546.
package foo
func small(a []int) int { // ERROR "can inline small with cost .* as:.*" "a does not escape"
// Cost 16 body (need cost < 20).
// See cmd/compile/internal/gc/inl.go:inlineBigFunction*
return a[0] + a[1] + a[2] + a[3]
}
func medium(a []int) int { // ERROR "can inline medium with cost .* as:.*" "a does not escape"
// Cost 32 body (need cost > 20 and cost < 80).
// See cmd/compile/internal/gc/inl.go:inlineBigFunction*
return a[0] + a[1] + a[2] + a[3] + a[4] + a[5] + a[6] + a[7]
}
func f(a []int) int { // ERROR "cannot inline f:.*" "a does not escape" "function f considered 'big'"
// Add lots of nodes to f's body. We need >5000.
// See cmd/compile/internal/gc/inl.go:inlineBigFunction*
a[0] = 0
a[1] = 0
a[2] = 0
a[3] = 0
a[4] = 0
a[5] = 0
a[6] = 0
a[7] = 0
a[8] = 0
a[9] = 0
a[10] = 0
a[11] = 0
a[12] = 0
a[13] = 0
a[14] = 0
a[15] = 0
a[16] = 0
a[17] = 0
a[18] = 0
a[19] = 0
a[20] = 0
a[21] = 0
a[22] = 0
a[23] = 0
a[24] = 0
a[25] = 0
a[26] = 0
a[27] = 0
a[28] = 0
a[29] = 0
a[30] = 0
a[31] = 0
a[32] = 0
a[33] = 0
a[34] = 0
a[35] = 0
a[36] = 0
a[37] = 0
a[38] = 0
a[39] = 0
a[40] = 0
a[41] = 0
a[42] = 0
a[43] = 0
a[44] = 0
a[45] = 0
a[46] = 0
a[47] = 0
a[48] = 0
a[49] = 0
a[50] = 0
a[51] = 0
a[52] = 0
a[53] = 0
a[54] = 0
a[55] = 0
a[56] = 0
a[57] = 0
a[58] = 0
a[59] = 0
a[60] = 0
a[61] = 0
a[62] = 0
a[63] = 0
a[64] = 0
a[65] = 0
a[66] = 0
a[67] = 0
a[68] = 0
a[69] = 0
a[70] = 0
a[71] = 0
a[72] = 0
a[73] = 0
a[74] = 0
a[75] = 0
a[76] = 0
a[77] = 0
a[78] = 0
a[79] = 0
a[80] = 0
a[81] = 0
a[82] = 0
a[83] = 0
a[84] = 0
a[85] = 0
a[86] = 0
a[87] = 0
a[88] = 0
a[89] = 0
a[90] = 0
a[91] = 0
a[92] = 0
a[93] = 0
a[94] = 0
a[95] = 0
a[96] = 0
a[97] = 0
a[98] = 0
a[99] = 0
a[100] = 0
a[101] = 0
a[102] = 0
a[103] = 0
a[104] = 0
a[105] = 0
a[106] = 0
a[107] = 0
a[108] = 0
a[109] = 0
a[110] = 0
a[111] = 0
a[112] = 0
a[113] = 0
a[114] = 0
a[115] = 0
a[116] = 0
a[117] = 0
a[118] = 0
a[119] = 0
a[120] = 0
a[121] = 0
a[122] = 0
a[123] = 0
a[124] = 0
a[125] = 0
a[126] = 0
a[127] = 0
a[128] = 0
a[129] = 0
a[130] = 0
a[131] = 0
a[132] = 0
a[133] = 0
a[134] = 0
a[135] = 0
a[136] = 0
a[137] = 0
a[138] = 0
a[139] = 0
a[140] = 0
a[141] = 0
a[142] = 0
a[143] = 0
a[144] = 0
a[145] = 0
a[146] = 0
a[147] = 0
a[148] = 0
a[149] = 0
a[150] = 0
a[151] = 0
a[152] = 0
a[153] = 0
a[154] = 0
a[155] = 0
a[156] = 0
a[157] = 0
a[158] = 0
a[159] = 0
a[160] = 0
a[161] = 0
a[162] = 0
a[163] = 0
a[164] = 0
a[165] = 0
a[166] = 0
a[167] = 0
a[168] = 0
a[169] = 0
a[170] = 0
a[171] = 0
a[172] = 0
a[173] = 0
a[174] = 0
a[175] = 0
a[176] = 0
a[177] = 0
a[178] = 0
a[179] = 0
a[180] = 0
a[181] = 0
a[182] = 0
a[183] = 0
a[184] = 0
a[185] = 0
a[186] = 0
a[187] = 0
a[188] = 0
a[189] = 0
a[190] = 0
a[191] = 0
a[192] = 0
a[193] = 0
a[194] = 0
a[195] = 0
a[196] = 0
a[197] = 0
a[198] = 0
a[199] = 0
a[200] = 0
a[201] = 0
a[202] = 0
a[203] = 0
a[204] = 0
a[205] = 0
a[206] = 0
a[207] = 0
a[208] = 0
a[209] = 0
a[210] = 0
a[211] = 0
a[212] = 0
a[213] = 0
a[214] = 0
a[215] = 0
a[216] = 0
a[217] = 0
a[218] = 0
a[219] = 0
a[220] = 0
a[221] = 0
a[222] = 0
a[223] = 0
a[224] = 0
a[225] = 0
a[226] = 0
a[227] = 0
a[228] = 0
a[229] = 0
a[230] = 0
a[231] = 0
a[232] = 0
a[233] = 0
a[234] = 0
a[235] = 0
a[236] = 0
a[237] = 0
a[238] = 0
a[239] = 0
a[240] = 0
a[241] = 0
a[242] = 0
a[243] = 0
a[244] = 0
a[245] = 0
a[246] = 0
a[247] = 0
a[248] = 0
a[249] = 0
a[250] = 0
a[251] = 0
a[252] = 0
a[253] = 0
a[254] = 0
a[255] = 0
a[256] = 0
a[257] = 0
a[258] = 0
a[259] = 0
a[260] = 0
a[261] = 0
a[262] = 0
a[263] = 0
a[264] = 0
a[265] = 0
a[266] = 0
a[267] = 0
a[268] = 0
a[269] = 0
a[270] = 0
a[271] = 0
a[272] = 0
a[273] = 0
a[274] = 0
a[275] = 0
a[276] = 0
a[277] = 0
a[278] = 0
a[279] = 0
a[280] = 0
a[281] = 0
a[282] = 0
a[283] = 0
a[284] = 0
a[285] = 0
a[286] = 0
a[287] = 0
a[288] = 0
a[289] = 0
a[290] = 0
a[291] = 0
a[292] = 0
a[293] = 0
a[294] = 0
a[295] = 0
a[296] = 0
a[297] = 0
a[298] = 0
a[299] = 0
a[300] = 0
a[301] = 0
a[302] = 0
a[303] = 0
a[304] = 0
a[305] = 0
a[306] = 0
a[307] = 0
a[308] = 0
a[309] = 0
a[310] = 0
a[311] = 0
a[312] = 0
a[313] = 0
a[314] = 0
a[315] = 0
a[316] = 0
a[317] = 0
a[318] = 0
a[319] = 0
a[320] = 0
a[321] = 0
a[322] = 0
a[323] = 0
a[324] = 0
a[325] = 0
a[326] = 0
a[327] = 0
a[328] = 0
a[329] = 0
a[330] = 0
a[331] = 0
a[332] = 0
a[333] = 0
a[334] = 0
a[335] = 0
a[336] = 0
a[337] = 0
a[338] = 0
a[339] = 0
a[340] = 0
a[341] = 0
a[342] = 0
a[343] = 0
a[344] = 0
a[345] = 0
a[346] = 0
a[347] = 0
a[348] = 0
a[349] = 0
a[350] = 0
a[351] = 0
a[352] = 0
a[353] = 0
a[354] = 0
a[355] = 0
a[356] = 0
a[357] = 0
a[358] = 0
a[359] = 0
a[360] = 0
a[361] = 0
a[362] = 0
a[363] = 0
a[364] = 0
a[365] = 0
a[366] = 0
a[367] = 0
a[368] = 0
a[369] = 0
a[370] = 0
a[371] = 0
a[372] = 0
a[373] = 0
a[374] = 0
a[375] = 0
a[376] = 0
a[377] = 0
a[378] = 0
a[379] = 0
a[380] = 0
a[381] = 0
a[382] = 0
a[383] = 0
a[384] = 0
a[385] = 0
a[386] = 0
a[387] = 0
a[388] = 0
a[389] = 0
a[390] = 0
a[391] = 0
a[392] = 0
a[393] = 0
a[394] = 0
a[395] = 0
a[396] = 0
a[397] = 0
a[398] = 0
a[399] = 0
a[400] = 0
a[401] = 0
a[402] = 0
a[403] = 0
a[404] = 0
a[405] = 0
a[406] = 0
a[407] = 0
a[408] = 0
a[409] = 0
a[410] = 0
a[411] = 0
a[412] = 0
a[413] = 0
a[414] = 0
a[415] = 0
a[416] = 0
a[417] = 0
a[418] = 0
a[419] = 0
a[420] = 0
a[421] = 0
a[422] = 0
a[423] = 0
a[424] = 0
a[425] = 0
a[426] = 0
a[427] = 0
a[428] = 0
a[429] = 0
a[430] = 0
a[431] = 0
a[432] = 0
a[433] = 0
a[434] = 0
a[435] = 0
a[436] = 0
a[437] = 0
a[438] = 0
a[439] = 0
a[440] = 0
a[441] = 0
a[442] = 0
a[443] = 0
a[444] = 0
a[445] = 0
a[446] = 0
a[447] = 0
a[448] = 0
a[449] = 0
a[450] = 0
a[451] = 0
a[452] = 0
a[453] = 0
a[454] = 0
a[455] = 0
a[456] = 0
a[457] = 0
a[458] = 0
a[459] = 0
a[460] = 0
a[461] = 0
a[462] = 0
a[463] = 0
a[464] = 0
a[465] = 0
a[466] = 0
a[467] = 0
a[468] = 0
a[469] = 0
a[470] = 0
a[471] = 0
a[472] = 0
a[473] = 0
a[474] = 0
a[475] = 0
a[476] = 0
a[477] = 0
a[478] = 0
a[479] = 0
a[480] = 0
a[481] = 0
a[482] = 0
a[483] = 0
a[484] = 0
a[485] = 0
a[486] = 0
a[487] = 0
a[488] = 0
a[489] = 0
a[490] = 0
a[491] = 0
a[492] = 0
a[493] = 0
a[494] = 0
a[495] = 0
a[496] = 0
a[497] = 0
a[498] = 0
a[499] = 0
a[500] = 0
a[501] = 0
a[502] = 0
a[503] = 0
a[504] = 0
a[505] = 0
a[506] = 0
a[507] = 0
a[508] = 0
a[509] = 0
a[510] = 0
a[511] = 0
a[512] = 0
a[513] = 0
a[514] = 0
a[515] = 0
a[516] = 0
a[517] = 0
a[518] = 0
a[519] = 0
a[520] = 0
a[521] = 0
a[522] = 0
a[523] = 0
a[524] = 0
a[525] = 0
a[526] = 0
a[527] = 0
a[528] = 0
a[529] = 0
a[530] = 0
a[531] = 0
a[532] = 0
a[533] = 0
a[534] = 0
a[535] = 0
a[536] = 0
a[537] = 0
a[538] = 0
a[539] = 0
a[540] = 0
a[541] = 0
a[542] = 0
a[543] = 0
a[544] = 0
a[545] = 0
a[546] = 0
a[547] = 0
a[548] = 0
a[549] = 0
a[550] = 0
a[551] = 0
a[552] = 0
a[553] = 0
a[554] = 0
a[555] = 0
a[556] = 0
a[557] = 0
a[558] = 0
a[559] = 0
a[560] = 0
a[561] = 0
a[562] = 0
a[563] = 0
a[564] = 0
a[565] = 0
a[566] = 0
a[567] = 0
a[568] = 0
a[569] = 0
a[570] = 0
a[571] = 0
a[572] = 0
a[573] = 0
a[574] = 0
a[575] = 0
a[576] = 0
a[577] = 0
a[578] = 0
a[579] = 0
a[580] = 0
a[581] = 0
a[582] = 0
a[583] = 0
a[584] = 0
a[585] = 0
a[586] = 0
a[587] = 0
a[588] = 0
a[589] = 0
a[590] = 0
a[591] = 0
a[592] = 0
a[593] = 0
a[594] = 0
a[595] = 0
a[596] = 0
a[597] = 0
a[598] = 0
a[599] = 0
a[600] = 0
a[601] = 0
a[602] = 0
a[603] = 0
a[604] = 0
a[605] = 0
a[606] = 0
a[607] = 0
a[608] = 0
a[609] = 0
a[610] = 0
a[611] = 0
a[612] = 0
a[613] = 0
a[614] = 0
a[615] = 0
a[616] = 0
a[617] = 0
a[618] = 0
a[619] = 0
a[620] = 0
a[621] = 0
a[622] = 0
a[623] = 0
a[624] = 0
a[625] = 0
a[626] = 0
a[627] = 0
a[628] = 0
a[629] = 0
a[630] = 0
a[631] = 0
a[632] = 0
a[633] = 0
a[634] = 0
a[635] = 0
a[636] = 0
a[637] = 0
a[638] = 0
a[639] = 0
a[640] = 0
a[641] = 0
a[642] = 0
a[643] = 0
a[644] = 0
a[645] = 0
a[646] = 0
a[647] = 0
a[648] = 0
a[649] = 0
a[650] = 0
a[651] = 0
a[652] = 0
a[653] = 0
a[654] = 0
a[655] = 0
a[656] = 0
a[657] = 0
a[658] = 0
a[659] = 0
a[660] = 0
a[661] = 0
a[662] = 0
a[663] = 0
a[664] = 0
a[665] = 0
a[666] = 0
a[667] = 0
a[668] = 0
a[669] = 0
a[670] = 0
a[671] = 0
a[672] = 0
a[673] = 0
a[674] = 0
a[675] = 0
a[676] = 0
a[677] = 0
a[678] = 0
a[679] = 0
a[680] = 0
a[681] = 0
a[682] = 0
a[683] = 0
a[684] = 0
a[685] = 0
a[686] = 0
a[687] = 0
a[688] = 0
a[689] = 0
a[690] = 0
a[691] = 0
a[692] = 0
a[693] = 0
a[694] = 0
a[695] = 0
a[696] = 0
a[697] = 0
a[698] = 0
a[699] = 0
a[700] = 0
a[701] = 0
a[702] = 0
a[703] = 0
a[704] = 0
a[705] = 0
a[706] = 0
a[707] = 0
a[708] = 0
a[709] = 0
a[710] = 0
a[711] = 0
a[712] = 0
a[713] = 0
a[714] = 0
a[715] = 0
a[716] = 0
a[717] = 0
a[718] = 0
a[719] = 0
a[720] = 0
a[721] = 0
a[722] = 0
a[723] = 0
a[724] = 0
a[725] = 0
a[726] = 0
a[727] = 0
a[728] = 0
a[729] = 0
a[730] = 0
a[731] = 0
a[732] = 0
a[733] = 0
a[734] = 0
a[735] = 0
a[736] = 0
a[737] = 0
a[738] = 0
a[739] = 0
a[740] = 0
a[741] = 0
a[742] = 0
a[743] = 0
a[744] = 0
a[745] = 0
a[746] = 0
a[747] = 0
a[748] = 0
a[749] = 0
a[750] = 0
a[751] = 0
a[752] = 0
a[753] = 0
a[754] = 0
a[755] = 0
a[756] = 0
a[757] = 0
a[758] = 0
a[759] = 0
a[760] = 0
a[761] = 0
a[762] = 0
a[763] = 0
a[764] = 0
a[765] = 0
a[766] = 0
a[767] = 0
a[768] = 0
a[769] = 0
a[770] = 0
a[771] = 0
a[772] = 0
a[773] = 0
a[774] = 0
a[775] = 0
a[776] = 0
a[777] = 0
a[778] = 0
a[779] = 0
a[780] = 0
a[781] = 0
a[782] = 0
a[783] = 0
a[784] = 0
a[785] = 0
a[786] = 0
a[787] = 0
a[788] = 0
a[789] = 0
a[790] = 0
a[791] = 0
a[792] = 0
a[793] = 0
a[794] = 0
a[795] = 0
a[796] = 0
a[797] = 0
a[798] = 0
a[799] = 0
a[800] = 0
a[801] = 0
a[802] = 0
a[803] = 0
a[804] = 0
a[805] = 0
a[806] = 0
a[807] = 0
a[808] = 0
a[809] = 0
a[810] = 0
a[811] = 0
a[812] = 0
a[813] = 0
a[814] = 0
a[815] = 0
a[816] = 0
a[817] = 0
a[818] = 0
a[819] = 0
a[820] = 0
a[821] = 0
a[822] = 0
a[823] = 0
a[824] = 0
a[825] = 0
a[826] = 0
a[827] = 0
a[828] = 0
a[829] = 0
a[830] = 0
a[831] = 0
a[832] = 0
a[833] = 0
a[834] = 0
a[835] = 0
a[836] = 0
a[837] = 0
a[838] = 0
a[839] = 0
a[840] = 0
a[841] = 0
a[842] = 0
a[843] = 0
a[844] = 0
a[845] = 0
a[846] = 0
a[847] = 0
a[848] = 0
a[849] = 0
a[850] = 0
a[851] = 0
a[852] = 0
a[853] = 0
a[854] = 0
a[855] = 0
a[856] = 0
a[857] = 0
a[858] = 0
a[859] = 0
a[860] = 0
a[861] = 0
a[862] = 0
a[863] = 0
a[864] = 0
a[865] = 0
a[866] = 0
a[867] = 0
a[868] = 0
a[869] = 0
a[870] = 0
a[871] = 0
a[872] = 0
a[873] = 0
a[874] = 0
a[875] = 0
a[876] = 0
a[877] = 0
a[878] = 0
a[879] = 0
a[880] = 0
a[881] = 0
a[882] = 0
a[883] = 0
a[884] = 0
a[885] = 0
a[886] = 0
a[887] = 0
a[888] = 0
a[889] = 0
a[890] = 0
a[891] = 0
a[892] = 0
a[893] = 0
a[894] = 0
a[895] = 0
a[896] = 0
a[897] = 0
a[898] = 0
a[899] = 0
a[900] = 0
a[901] = 0
a[902] = 0
a[903] = 0
a[904] = 0
a[905] = 0
a[906] = 0
a[907] = 0
a[908] = 0
a[909] = 0
a[910] = 0
a[911] = 0
a[912] = 0
a[913] = 0
a[914] = 0
a[915] = 0
a[916] = 0
a[917] = 0
a[918] = 0
a[919] = 0
a[920] = 0
a[921] = 0
a[922] = 0
a[923] = 0
a[924] = 0
a[925] = 0
a[926] = 0
a[927] = 0
a[928] = 0
a[929] = 0
a[930] = 0
a[931] = 0
a[932] = 0
a[933] = 0
a[934] = 0
a[935] = 0
a[936] = 0
a[937] = 0
a[938] = 0
a[939] = 0
a[940] = 0
a[941] = 0
a[942] = 0
a[943] = 0
a[944] = 0
a[945] = 0
a[946] = 0
a[947] = 0
a[948] = 0
a[949] = 0
a[950] = 0
a[951] = 0
a[952] = 0
a[953] = 0
a[954] = 0
a[955] = 0
a[956] = 0
a[957] = 0
a[958] = 0
a[959] = 0
a[960] = 0
a[961] = 0
a[962] = 0
a[963] = 0
a[964] = 0
a[965] = 0
a[966] = 0
a[967] = 0
a[968] = 0
a[969] = 0
a[970] = 0
a[971] = 0
a[972] = 0
a[973] = 0
a[974] = 0
a[975] = 0
a[976] = 0
a[977] = 0
a[978] = 0
a[979] = 0
a[980] = 0
a[981] = 0
a[982] = 0
a[983] = 0
a[984] = 0
a[985] = 0
a[986] = 0
a[987] = 0
a[988] = 0
a[989] = 0
a[990] = 0
a[991] = 0
a[992] = 0
a[993] = 0
a[994] = 0
a[995] = 0
a[996] = 0
a[997] = 0
a[998] = 0
a[999] = 0
x := small(a) // ERROR "inlining call to small"
y := medium(a) // The crux of this test: medium is not inlined.
return x + y
} | go | github | https://github.com/golang/go | test/inline_big.go |
# Copyright (c) 2009-2012 testtools developers. See LICENSE for details.
__all__ = [
'KeysEqual',
]
from ..helpers import (
dict_subtract,
filter_values,
map_values,
)
from ._higherorder import (
AnnotatedMismatch,
PrefixedMismatch,
MismatchesAll,
)
from ._impl import Matcher, Mismatch
def LabelledMismatches(mismatches, details=None):
"""A collection of mismatches, each labelled."""
return MismatchesAll(
(PrefixedMismatch(k, v) for (k, v) in sorted(mismatches.items())),
wrap=False)
class MatchesAllDict(Matcher):
"""Matches if all of the matchers it is created with match.
A lot like ``MatchesAll``, but takes a dict of Matchers and labels any
mismatches with the key of the dictionary.
"""
def __init__(self, matchers):
super(MatchesAllDict, self).__init__()
self.matchers = matchers
def __str__(self):
return 'MatchesAllDict(%s)' % (_format_matcher_dict(self.matchers),)
def match(self, observed):
mismatches = {}
for label in self.matchers:
mismatches[label] = self.matchers[label].match(observed)
return _dict_to_mismatch(
mismatches, result_mismatch=LabelledMismatches)
class DictMismatches(Mismatch):
"""A mismatch with a dict of child mismatches."""
def __init__(self, mismatches, details=None):
super(DictMismatches, self).__init__(None, details=details)
self.mismatches = mismatches
def describe(self):
lines = ['{']
lines.extend(
[' %r: %s,' % (key, mismatch.describe())
for (key, mismatch) in sorted(self.mismatches.items())])
lines.append('}')
return '\n'.join(lines)
def _dict_to_mismatch(data, to_mismatch=None,
result_mismatch=DictMismatches):
if to_mismatch:
data = map_values(to_mismatch, data)
mismatches = filter_values(bool, data)
if mismatches:
return result_mismatch(mismatches)
class _MatchCommonKeys(Matcher):
"""Match on keys in a dictionary.
Given a dictionary where the values are matchers, this will look for
common keys in the matched dictionary and match if and only if all common
keys match the given matchers.
Thus::
>>> structure = {'a': Equals('x'), 'b': Equals('y')}
>>> _MatchCommonKeys(structure).match({'a': 'x', 'c': 'z'})
None
"""
def __init__(self, dict_of_matchers):
super(_MatchCommonKeys, self).__init__()
self._matchers = dict_of_matchers
def _compare_dicts(self, expected, observed):
common_keys = set(expected.keys()) & set(observed.keys())
mismatches = {}
for key in common_keys:
mismatch = expected[key].match(observed[key])
if mismatch:
mismatches[key] = mismatch
return mismatches
def match(self, observed):
mismatches = self._compare_dicts(self._matchers, observed)
if mismatches:
return DictMismatches(mismatches)
class _SubDictOf(Matcher):
"""Matches if the matched dict only has keys that are in given dict."""
def __init__(self, super_dict, format_value=repr):
super(_SubDictOf, self).__init__()
self.super_dict = super_dict
self.format_value = format_value
def match(self, observed):
excess = dict_subtract(observed, self.super_dict)
return _dict_to_mismatch(
excess, lambda v: Mismatch(self.format_value(v)))
class _SuperDictOf(Matcher):
"""Matches if all of the keys in the given dict are in the matched dict.
"""
def __init__(self, sub_dict, format_value=repr):
super(_SuperDictOf, self).__init__()
self.sub_dict = sub_dict
self.format_value = format_value
def match(self, super_dict):
return _SubDictOf(super_dict, self.format_value).match(self.sub_dict)
def _format_matcher_dict(matchers):
return '{%s}' % (
', '.join(sorted('%r: %s' % (k, v) for k, v in matchers.items())))
class _CombinedMatcher(Matcher):
"""Many matchers labelled and combined into one uber-matcher.
Subclass this and then specify a dict of matcher factories that take a
single 'expected' value and return a matcher. The subclass will match
only if all of the matchers made from factories match.
Not **entirely** dissimilar from ``MatchesAll``.
"""
matcher_factories = {}
def __init__(self, expected):
super(_CombinedMatcher, self).__init__()
self._expected = expected
def format_expected(self, expected):
return repr(expected)
def __str__(self):
return '%s(%s)' % (
self.__class__.__name__, self.format_expected(self._expected))
def match(self, observed):
matchers = dict(
(k, v(self._expected)) for k, v in self.matcher_factories.items())
return MatchesAllDict(matchers).match(observed)
class MatchesDict(_CombinedMatcher):
"""Match a dictionary exactly, by its keys.
Specify a dictionary mapping keys (often strings) to matchers. This is
the 'expected' dict. Any dictionary that matches this must have exactly
the same keys, and the values must match the corresponding matchers in the
expected dict.
"""
matcher_factories = {
'Extra': _SubDictOf,
'Missing': lambda m: _SuperDictOf(m, format_value=str),
'Differences': _MatchCommonKeys,
}
format_expected = lambda self, expected: _format_matcher_dict(expected)
class ContainsDict(_CombinedMatcher):
"""Match a dictionary for that contains a specified sub-dictionary.
Specify a dictionary mapping keys (often strings) to matchers. This is
the 'expected' dict. Any dictionary that matches this must have **at
least** these keys, and the values must match the corresponding matchers
in the expected dict. Dictionaries that have more keys will also match.
In other words, any matching dictionary must contain the dictionary given
to the constructor.
Does not check for strict sub-dictionary. That is, equal dictionaries
match.
"""
matcher_factories = {
'Missing': lambda m: _SuperDictOf(m, format_value=str),
'Differences': _MatchCommonKeys,
}
format_expected = lambda self, expected: _format_matcher_dict(expected)
class ContainedByDict(_CombinedMatcher):
"""Match a dictionary for which this is a super-dictionary.
Specify a dictionary mapping keys (often strings) to matchers. This is
the 'expected' dict. Any dictionary that matches this must have **only**
these keys, and the values must match the corresponding matchers in the
expected dict. Dictionaries that have fewer keys can also match.
In other words, any matching dictionary must be contained by the
dictionary given to the constructor.
Does not check for strict super-dictionary. That is, equal dictionaries
match.
"""
matcher_factories = {
'Extra': _SubDictOf,
'Differences': _MatchCommonKeys,
}
format_expected = lambda self, expected: _format_matcher_dict(expected)
class KeysEqual(Matcher):
"""Checks whether a dict has particular keys."""
def __init__(self, *expected):
"""Create a `KeysEqual` Matcher.
:param expected: The keys the dict is expected to have. If a dict,
then we use the keys of that dict, if a collection, we assume it
is a collection of expected keys.
"""
super(KeysEqual, self).__init__()
try:
self.expected = expected[0].keys()
except AttributeError:
self.expected = list(expected)
def __str__(self):
return "KeysEqual(%s)" % ', '.join(map(repr, self.expected))
def match(self, matchee):
from ._basic import _BinaryMismatch, Equals
expected = sorted(self.expected)
matched = Equals(expected).match(sorted(matchee.keys()))
if matched:
return AnnotatedMismatch(
'Keys not equal',
_BinaryMismatch(expected, 'does not match', matchee))
return None | unknown | codeparrot/codeparrot-clean | ||
---
#- name: Host in playbook is an integer
# hosts: 42
# tags: numeric_host
# tasks:
# - command: echo 'Running on {{ inventory_hostname }}'
#- name: Host in playbook is a string of digits
# hosts: "42"
# tags: string_digit_host
# tasks:
# - command: echo 'Running on {{ inventory_hostname }}'
#- name: Host in playbook is a list of integer
# hosts:
# - 42
# tags: numeric_host_in_list
# tasks:
# - command: echo 'Running on {{ inventory_hostname }}'
- name: Host in playbook is a list of strings of digits
hosts:
- "42"
gather_facts: False
tags: string_digit_host_in_list
tasks:
- command: echo 'Running on {{ inventory_hostname }}'
- name: Hosts taken from kv extra_var on the CLI
hosts: "{{ target_kv }}"
gather_facts: False
tags: hosts_from_kv_string
tasks:
- command: echo 'Running on {{ inventory_hostname }}'
- name: Hosts taken from a json string on the CLI
hosts: "{{ target_json_cli }}"
gather_facts: False
tags: hosts_from_cli_json_string
tasks:
- command: echo 'Running on {{ inventory_hostname }}'
- name: Hosts taken from a json list on the CLI
hosts: "{{ target_json_cli_list }}"
gather_facts: False
tags: hosts_from_cli_json_list
tasks:
- command: echo 'Running on {{ inventory_hostname }}'
- name: Hosts is taken from a json string in an extra_vars file
hosts: "{{ target_json_file }}"
gather_facts: False
tags: hosts_from_json_file_string
tasks:
- command: echo 'Running on {{ inventory_hostname }}'
- name: Hosts is taken from a json list in an extra_vars file
hosts: "{{ target_json_file_list }}"
gather_facts: False
tags: hosts_from_json_file_list
tasks:
- command: echo 'Running on {{ inventory_hostname }}' | unknown | github | https://github.com/ansible/ansible | test/integration/targets/hosts_field/test_hosts_field.yml |
// @panicThreshold:"none"
import {useNoAlias} from 'shared-runtime';
const cond = true;
function useFoo(props) {
props.x = 10;
if (cond) bar();
return useNoAlias({});
function bar() {
console.log('bar called');
return 5;
}
}
export const FIXTURE_ENTRYPOINT = {
fn: useFoo,
params: [{}],
}; | javascript | github | https://github.com/facebook/react | compiler/packages/babel-plugin-react-compiler/src/__tests__/fixtures/compiler/repro-retain-source-when-bailout.js |
//===--- FlattenList.swift ------------------------------------------------===//
//
// This source file is part of the Swift.org open source project
//
// Copyright (c) 2014 - 2018 Apple Inc. and the Swift project authors
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See https://swift.org/LICENSE.txt for license information
// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
//
//===----------------------------------------------------------------------===//
import TestsUtils
public let benchmarks = [
BenchmarkInfo(
name: "FlattenListLoop",
runFunction: run_FlattenListLoop,
tags: [.api, .validation],
setUpFunction: { blackHole(inputArray) }),
BenchmarkInfo(
name: "FlattenListFlatMap",
runFunction: run_FlattenListFlatMap,
tags: [.api, .validation],
setUpFunction: { blackHole(inputArray) }),
]
let inputArray: [(Int, Int, Int, Int)] = (0..<(1<<16)).map { _ in
(5, 6, 7, 8)
}
func flattenFlatMap(_ input: [(Int, Int, Int, Int)]) -> [Int] {
return input.flatMap { [$0.0, $0.1, $0.2, $0.3] }
}
func flattenLoop(_ input: [(Int, Int, Int, Int)]) -> [Int] {
var flattened: [Int] = []
flattened.reserveCapacity(input.count * 4)
for (x, y, z, w) in input {
flattened.append(x)
flattened.append(y)
flattened.append(z)
flattened.append(w)
}
return flattened
}
@inline(never)
public func run_FlattenListLoop(_ n: Int) {
for _ in 0..<5*n {
blackHole(flattenLoop(inputArray))
}
}
@inline(never)
public func run_FlattenListFlatMap(_ n: Int) {
for _ in 1...5*n {
blackHole(flattenFlatMap(inputArray))
}
} | swift | github | https://github.com/apple/swift | benchmark/single-source/FlattenList.swift |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Ansible module to manage PaloAltoNetworks Firewall
# (c) 2016, techbizdev <techbizdev@paloaltonetworks.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: panos_restart
short_description: restart a device
description:
- Restart a device
author: "Luigi Mori (@jtschichold), Ivan Bojer (@ivanbojer)"
version_added: "2.3"
requirements:
- pan-python
options:
ip_address:
description:
- IP address (or hostname) of PAN-OS device
required: true
password:
description:
- password for authentication
required: true
username:
description:
- username for authentication
required: false
default: "admin"
'''
EXAMPLES = '''
- panos_restart:
ip_address: "192.168.1.1"
username: "admin"
password: "admin"
'''
RETURN = '''
status:
description: success status
returned: success
type: string
sample: "okey dokey"
'''
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
from ansible.module_utils.basic import AnsibleModule
import sys
try:
import pan.xapi
HAS_LIB = True
except ImportError:
HAS_LIB = False
def main():
argument_spec = dict(
ip_address=dict(),
password=dict(no_log=True),
username=dict(default='admin')
)
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
if not HAS_LIB:
module.fail_json(msg='pan-python required for this module')
ip_address = module.params["ip_address"]
if not ip_address:
module.fail_json(msg="ip_address should be specified")
password = module.params["password"]
if not password:
module.fail_json(msg="password is required")
username = module.params['username']
xapi = pan.xapi.PanXapi(
hostname=ip_address,
api_username=username,
api_password=password
)
try:
xapi.op(cmd="<request><restart><system></system></restart></request>")
except Exception:
x = sys.exc_info()[1]
if 'succeeded' in str(x):
module.exit_json(changed=True, msg=str(msg))
else:
module.fail_json(msg=x)
raise
module.exit_json(changed=True, msg="okey dokey")
if __name__ == '__main__':
main() | unknown | codeparrot/codeparrot-clean | ||
# -*- coding: utf-8 -*-
# ###
# Copyright (c) 2013, Rice University
# This software is subject to the provisions of the GNU Affero General
# Public License version 3 (AGPLv3).
# See LICENCE.txt for details.
# ###
"""Tests for the command-line interface cli.py
"""
import os
import sys
import tempfile
import unittest
from . import DB_CONNECTION_STRING
class CommandLineInterfaceTestCase(unittest.TestCase):
def call_target(self, argv):
from ..cli import main
argv = ['--db-conn-str={}'.format(DB_CONNECTION_STRING)] + argv
return main(argv=argv)
def mock(self, module_name, function=None):
# module_name is the module name in upgrades, of
# which the cli_loader will be mocked
# function is what will get called instead
self.call_count = 0
def default_mock_function(**kwargs):
self.call_count += 1
self.kwargs = kwargs
return 'run {}'.format(module_name)
if not function:
function = default_mock_function
# import module_name
module_name = 'cnxupgrade.upgrades.{}'.format(module_name)
__import__(module_name)
module = sys.modules[module_name]
# Unmock the cli_loader function in clean up
cli_loader = getattr(module, 'cli_loader')
self.addCleanup(setattr, module, 'cli_loader', cli_loader)
# Return the mock cli_command from cli_loader instead
def mock_cli_loader(*args, **kwargs):
cli_loader(*args, **kwargs)
return function
# Mock the cli_loader function of module_name
setattr(module, 'cli_loader', mock_cli_loader)
return function
def test_to_html(self):
# Mock to_html.cli_command
to_html = self.mock('to_html')
# Invoke cnx-upgrade to_html
result = self.call_target(['to_html'])
from ..upgrades.to_html import (
DEFAULT_ID_SELECT_QUERY, DEFAULT_FILENAME)
# Assert to_html.cli_command was called
self.assertEqual(self.call_count, 1)
self.assertEqual(self.kwargs, {
'cmmd': to_html,
'db_conn_str': DB_CONNECTION_STRING,
'id_select_query': DEFAULT_ID_SELECT_QUERY,
'filename': DEFAULT_FILENAME,
'no_modules': True,
'no_abstracts': True,
'overwrite_html': False})
self.assertEqual(result, 'run cnxupgrade.upgrades.to_html')
def test_to_html_with_id_select_query(self):
# Mock to_html.cli_command
to_html = self.mock('to_html')
# Invoke cnx-upgrade to_html
result = self.call_target(['to_html', '--id-select-query=SELECT 2'])
from ..upgrades.to_html import DEFAULT_FILENAME
# Assert to_html.cli_command was called
self.assertEqual(self.call_count, 1)
self.assertEqual(self.kwargs, {
'cmmd': to_html,
'db_conn_str': DB_CONNECTION_STRING,
'id_select_query': 'SELECT 2',
'filename': DEFAULT_FILENAME,
'no_modules': True,
'no_abstracts': True,
'overwrite_html': False})
self.assertEqual(result, 'run cnxupgrade.upgrades.to_html')
def test_to_html_force_overwrite_html(self):
# Mock to_html.cli_command
to_html = self.mock('to_html')
from ..upgrades.to_html import (
DEFAULT_ID_SELECT_QUERY, DEFAULT_FILENAME)
# Invoke cnx-upgrade to_html
result = self.call_target(['to_html', '--force'])
# Assert to_html.cli_command was called
self.assertEqual(self.call_count, 1)
self.assertEqual(self.kwargs, {
'cmmd': to_html,
'db_conn_str': DB_CONNECTION_STRING,
'id_select_query': DEFAULT_ID_SELECT_QUERY,
'filename': DEFAULT_FILENAME,
'no_modules': True,
'no_abstracts': True,
'overwrite_html': True})
self.assertEqual(result, 'run cnxupgrade.upgrades.to_html')
def test_to_html_specify_filename(self):
# Mock to_html.cli_command
to_html = self.mock('to_html')
from ..upgrades.to_html import DEFAULT_ID_SELECT_QUERY
# Invoke cnx-upgrade to_html
result = self.call_target(['to_html', '--filename', 'a.cnxml'])
# Assert to_html.cli_command was called
self.assertEqual(self.call_count, 1)
self.assertEqual(self.kwargs, {
'cmmd': to_html,
'db_conn_str': DB_CONNECTION_STRING,
'id_select_query': DEFAULT_ID_SELECT_QUERY,
'filename': 'a.cnxml',
'no_modules': True,
'no_abstracts': True,
'overwrite_html': False})
self.assertEqual(result, 'run cnxupgrade.upgrades.to_html')
def test_v1(self):
# Mock v1.cli_command
v1 = self.mock('v1')
# Invoke cnx-upgrade v1
result = self.call_target(['v1'])
# Assert v1.cli_command was called
self.assertEqual(self.call_count, 1)
self.assertEqual(self.kwargs, {
'cmmd': v1,
'db_conn_str': DB_CONNECTION_STRING})
self.assertEqual(result, 'run cnxupgrade.upgrades.v1')
def test_migrate_hit_counts(self):
# Mock migrate_hit_counts.cli_command
migrate_hit_counts = self.mock('migrate_hit_counts')
# Create a temporary file for migrate_hit_counts as input
file_handle, filename = tempfile.mkstemp()
self.addCleanup(os.remove, filename)
# Invoke cnx-upgrade migrate_hit_counts
result = self.call_target(['migrate_hit_counts',
'--input={}'.format(filename)])
# Assert migrate_hit_counts.cli_command was called
self.assertEqual(self.call_count, 1)
self.assertTrue(str(self.kwargs.pop('input')).startswith(
"<open file '{}'".format(filename)))
self.assertEqual(self.kwargs, {
'cmmd': migrate_hit_counts,
'db_conn_str': DB_CONNECTION_STRING})
self.assertEqual(result, 'run cnxupgrade.upgrades.migrate_hit_counts')
def test_create_collection_minor_versions(self):
# Mock create_collection_minor_versions.cli_command
create_collection = self.mock('create_collection_minor_versions')
# Invoke cnx-upgrade create_collection_minor_versions
result = self.call_target(['create_collection_minor_versions',
'--id-select-query', 'select 2'])
# Assert create_collection_minor_versions was called
self.assertEqual(self.call_count, 1)
self.assertEqual(self.kwargs, {
'cmmd': create_collection,
'db_conn_str': DB_CONNECTION_STRING,
'id_select_query': 'select 2',
})
self.assertEqual(result, 'run cnxupgrade.upgrades.create_collection_minor_versions')
def test_remove_testdraft(self):
# Mock remove_testdraft.cli_command
remove_testdraft = self.mock('remove_testdraft')
# Invoke cnx-upgrade remove_testdraft
result = self.call_target(['remove_testdraft'])
# Assert remove_testdraft was called
self.assertEqual(self.call_count, 1)
self.assertEqual(self.kwargs, {
'cmmd': remove_testdraft,
'db_conn_str': DB_CONNECTION_STRING,
})
self.assertEqual(result, 'run cnxupgrade.upgrades.remove_testdraft')
def test_migrate_ga(self):
# Mock migrate_ga.cli_command
migrate_ga = self.mock('migrate_ga')
# Invoke cnx-upgrade migrate_ga
result = self.call_target(['migrate_ga'])
# Assert migrate_ga was called
self.assertEqual(self.call_count, 1)
self.assertEqual(self.kwargs, {
'cmmd': migrate_ga,
'db_conn_str': DB_CONNECTION_STRING,
})
self.assertEqual(result, 'run cnxupgrade.upgrades.migrate_ga') | unknown | codeparrot/codeparrot-clean | ||
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
with open('README.md') as f:
rdm = f.read()
with open('LICENSE') as f:
lcs = f.read()
version = '1.0.1'
setup(
name='grayscale',
version=version,
packages=find_packages(exclude=['grayscale.test']),
url='https://github.com/softmixt/grayscale',
license=lcs,
author='Baghina Radu Adrian',
author_email='softmixt@gmail.com',
description='Simple Python application that let you easily convert '
'RGB image into grayscale.',
long_description=rdm,
keywords=['grayscale', 'rgb', 'image', 'convert'],
entry_points=
{'console_scripts': ['grayscale=grayscale.grayscale:main']},
include_package_data=True,
classifiers=[],
download_url='https://github.com/softmixt/grayscale/archive/{}.tar.gz'.format(
version),
# I'll explain this in a second
# install_requires=['pil'],
# package_data={'grayscale.core':['res/*.csv']},
# data_files=[('/etc/sample-app/conf/', ['conf/marvel.ini', 'conf/dc.ini'])]
), | unknown | codeparrot/codeparrot-clean | ||
# Copyright (c) 2008, Humanized, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of Enso nor the names of its contributors may
# be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY Humanized, Inc. ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL Humanized, Inc. BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
A collection of utility functions used in the various modules of
the context system. In general, if you find a utility function
which is defined in more than one context module, consider moving
it here.
"""
# ----------------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------------
import win32api
import win32con
import win32gui
import win32clipboard
import ctypes
import pywintypes
import time
import logging
from enso.utils.decorators import finalizeWrapper
# ----------------------------------------------------------------------------
# Public constants
# ----------------------------------------------------------------------------
# Max time to wait for clipboard to change after issuing a
# shortcut-key command to an application.
STANDARD_WAIT_TIME = 250
# Amount of time to wait between attempts to open the clipboard, in
# ms.
CLIPBOARD_OPEN_WAIT_INTERVAL = 10
# Total amount of time we can wait for the clipboard to become
# available for opening, in ms.
# The larger we make this, the rarer the clipboard will
# be unopenable, but with increased wait times for users. However,
# it should also be remembered that if some application has
# ownership of the clipboard, they should release it very soon,
# and if the system is very busy, this may understandably take
# a lot of time.
CLIPBOARD_OPEN_WAIT_AMOUNT = 1000
# The following three constants are obtained programatically.
# Since this code is executed the first time that ContextUtils is
# imported, and it is imported in __init__.py, these constants
# should always be available for other modules to import.
# Clipboard format code for ThornSoft's CF_CLIPBOARD_VIEWER_IGNORE
# format. For more information, see
# http://www.thornsoft.com/developer_ignore.htm.
CF_CLIPBOARD_VIEWER_IGNORE = win32clipboard.RegisterClipboardFormat(
"Clipboard Viewer Ignore"
)
# Clipboard formats for HTML and RTF are, annoyingly, not constant
# numbers; Windows reserves the right to vary them between runs;
# but we can fetch their current values as follows:
CF_HTML = win32clipboard.RegisterClipboardFormat("HTML Format")
CF_RTF = win32clipboard.RegisterClipboardFormat("Rich Text Format")
# Keyboard event types. According to MSDN documentation:
# "KEYEVENTF_KEYUP
# If specified, the key is being released. If not specified, the
# key is being depressed."
KEYEVENTF_KEYDOWN = 0
KEYEVENTF_KEYUP = win32con.KEYEVENTF_KEYUP
# ----------------------------------------------------------------------------
# Private Module Variables
# ----------------------------------------------------------------------------
_contextUtilsHasTheClipboardOpen = False
# ----------------------------------------------------------------------------
# Private Functions
# ----------------------------------------------------------------------------
def _hasTheClipboardOpen():
    """
    Returns true if clipboard is currently held open by this module.
    This should be used only for debugging, and is only accurate if the
    clipboard was only opened and closed using the safeOpenClipboard
    and safeCloseClipboard functions defined below.
    """
    # This reflects module-level bookkeeping only; it does not query
    # Windows, so it cannot see clipboard opens made by other code.
    return _contextUtilsHasTheClipboardOpen
def _keyboardEvent( vkCode, eventType ):
    """
    Causes Windows to generate an event for key vkCode, of type eventType
    (KEYEVENTF_KEYDOWN or KEYEVENTF_KEYUP).
    """
    # win32all does not provide access to MapVirtualKey, so we have to use
    # ctypes to access the DLL directly, and have to append "A" to the name
    # since the function is implemented in Unicode and ANSI versions.
    # This gives a hardware scancode for the virtual key.
    scanCode = ctypes.windll.user32.MapVirtualKeyA( vkCode, 0 )
    # This creates the keyboard event (this function is the one called
    # by keyboard driver interrupt handlers, so it's as low-level as it gets)
    win32api.keybd_event( vkCode, scanCode, eventType, 0 )
# ----------------------------------------------------------------------------
# Public Functions
# ----------------------------------------------------------------------------
def safeOpenClipboard():
    """
    Replacement for win32clipboard.OpenClipboard() that repeatedly
    tries to open the clipboard over a short period of time, in case
    another application already has the clipboard open.
    Also maintains the module-level clipboard state variable.
    Raises a ClipboardUnopenableError if it fails.
    """
    # Preconditions:
    assert( not _hasTheClipboardOpen() )
    totalTime = 0
    success = False
    while not success:
        try:
            # 0 means "associate the clipboard with the current task"
            # rather than a specific window handle.
            win32clipboard.OpenClipboard( 0 )
            success = True
        except pywintypes.error:
            # Another application holds the clipboard; poll until our
            # total time budget (CLIPBOARD_OPEN_WAIT_AMOUNT ms) runs out.
            if totalTime < CLIPBOARD_OPEN_WAIT_AMOUNT:
                sleepForMs( CLIPBOARD_OPEN_WAIT_INTERVAL )
                totalTime += CLIPBOARD_OPEN_WAIT_INTERVAL
            else:
                # We failed to open the clipboard in the specified
                # time.
                success = False
                break
    if success:
        global _contextUtilsHasTheClipboardOpen
        _contextUtilsHasTheClipboardOpen = True
    else:
        raise ClipboardUnopenableError()
    # Postconditions:
    assert( _hasTheClipboardOpen() )
def safeCloseClipboard():
    """
    Replacement for win32clipboard.CloseClipboard() that turns the
    fatal error into a warning if the clipboard was already closed.
    Also maintains the module-level clipboard state variable.
    """
    # Preconditions (the original comment mislabeled this block
    # "Postconditions"):
    assert( _hasTheClipboardOpen() )
    try:
        win32clipboard.CloseClipboard()
    except pywintypes.error:
        # logging.warn() is a deprecated alias for logging.warning();
        # use the canonical name.
        logging.warning( "Attempted to close clipboard when not open." )
    # Regardless of whether the close succeeded, we no longer consider
    # ourselves the holder of the clipboard.
    global _contextUtilsHasTheClipboardOpen
    _contextUtilsHasTheClipboardOpen = False
    # Postconditions:
    assert( not _hasTheClipboardOpen() )
def clipboardDependent( function ):
    """
    A decorator which opens the clipboard before executing the wrapped
    function, then closes it when the wrapped function is done,
    whether or not the wrapped function throws an exception.
    """
    def wrapperFunc( *args, **kwargs ):
        # If safeOpenClipboard() raises an exception, this function will do
        # nothing but allow it to be raised. (We shouldn't attempt to close
        # the clipboard if we couldn't open it in the first place.)
        safeOpenClipboard()
        try:
            result = function( *args, **kwargs )
        finally:
            # If function raises an exception, the finally clause
            # will be executed and then the exception will be re-raised.
            safeCloseClipboard()
        # Only reached when function() returned normally; the wrapped
        # function's return value is propagated unchanged.
        return result
    # finalizeWrapper presumably copies metadata (name/docstring) from
    # the original function onto the wrapper -- TODO confirm against
    # enso.utils.decorators.
    return finalizeWrapper( function,
                            wrapperFunc,
                            "clipboardDependent" )
@clipboardDependent
def clearClipboard():
    """
    Opens the clipboard, empties it, and then closes it. Also sets
    the CF_CLIPBOARD_VIEWER_IGNORE format so that clipboard viewers
    will ignore this alteration of the clipboard.
    """
    # The decorator guarantees the clipboard is open here and will be
    # closed afterwards, even if EmptyClipboard() raises.
    win32clipboard.EmptyClipboard()
    setClipboardDataViewerIgnore()
def setClipboardDataViewerIgnore():
    """
    Adds ThornSoft's CF_CLIPBOARD_VIEWER_IGNORE format to the
    clipboard. Assumes that the clipboard is open and in a state
    where data can be added to it.
    """
    # Note the string we pass in is not altered before going to C
    # and then the Windows Clipboard, so we must explicitly null-
    # terminate it lest Bad Things happen.
    win32clipboard.SetClipboardData(
        CF_CLIPBOARD_VIEWER_IGNORE,
        "HumanizedEnso\0"
        )
def sleepForMs( ms ):
    """
    Blocks the calling thread for approximately the given number of
    milliseconds.
    """
    seconds = ms / 1000.0
    time.sleep( seconds )
def interpretFormatCode( format ):
    """
    LONGTERM TODO: This is kept around for debugging but can be deleted from
    production code.
    Given a format code (of the kind returned from the windows clipboard
    functions), returns a string describing the meaning of that
    format code.
    """
    # Human-readable names for the predefined (constant) clipboard formats.
    formatCodeDictionary = {
        win32con.CF_BITMAP:
            "Bitmap Handle",
        win32con.CF_DIB:
            "Bitmap info structure and bits",
        win32con.CF_DIF:
            "Software Arts' Data Interchange Format",
        win32con.CF_DSPBITMAP:
            "Private bitmap display format",
        win32con.CF_DSPENHMETAFILE:
            "Private enhanced metafile display format",
        win32con.CF_DSPMETAFILEPICT:
            "Private metafile-picture display format",
        win32con.CF_DSPTEXT:
            "Private text display format",
        win32con.CF_ENHMETAFILE:
            "Handle to enhanced metafile",
        win32con.CF_HDROP:
            "HDROP dropped files",
        win32con.CF_LOCALE:
            "Handle to locale information associated with text",
        win32con.CF_METAFILEPICT:
            "Handle to metafile picture format",
        win32con.CF_OEMTEXT:
            "OEM Text",
        win32con.CF_OWNERDISPLAY:
            "Clipboard owner display message",
        win32con.CF_PALETTE:
            "Handle to a color palette",
        win32con.CF_PENDATA:
            "Microsoft Pen Computing data",
        win32con.CF_RIFF:
            "RIFF (Audio data)",
        win32con.CF_SYLK:
            "Microsoft Symbolic Link Format",
        win32con.CF_TEXT:
            "Plain Text",
        win32con.CF_TIFF:
            "TIFF (Tagged Image File Format)",
        win32con.CF_UNICODETEXT:
            "Unicode Text",
        win32con.CF_WAVE:
            "Audio data in wav format"
        }
    # Formats above 0xC000 are dynamically registered by other
    # programs; formats below that correspond to named constants.
    if format >= 0xC000:
        return win32clipboard.GetClipboardFormatName( format )
    # dict.has_key() was removed in Python 3; dict.get() with a default
    # is equivalent here and works on both Python 2 and 3.
    return formatCodeDictionary.get( format, "Unknown data format." )
def typeCommandKey( key ):
    """
    Given a character literal, simulates holding the Control key down
    and typing that character. Useful for simulating menu shortcut keys.
    """
    # Press Ctrl, tap the (upper-cased) character's virtual key, release Ctrl.
    _keyboardEvent( win32con.VK_CONTROL, KEYEVENTF_KEYDOWN )
    _keyboardEvent( ord(key.upper()), KEYEVENTF_KEYDOWN )
    _keyboardEvent( ord(key.upper()), KEYEVENTF_KEYUP )
    _keyboardEvent( win32con.VK_CONTROL, KEYEVENTF_KEYUP )
    # NOTE(review): looks like leftover debug logging; consider removing
    # or demoting to logging.debug().
    logging.info( "I am in typeCommandKey and I just typed " + key )
def typeAltKey( key ):
    """
    Given a character literal, simulates holding the Alt key down
    and typing that character.
    """
    # VK_MENU is the virtual-key code Windows uses for the Alt key.
    _keyboardEvent( win32con.VK_MENU, KEYEVENTF_KEYDOWN )
    _keyboardEvent( ord(key.upper()), KEYEVENTF_KEYDOWN )
    _keyboardEvent( ord(key.upper()), KEYEVENTF_KEYUP )
    _keyboardEvent( win32con.VK_MENU, KEYEVENTF_KEYUP )
def tapKey( keyCode ):
    """
    Given a virtual key code, simulates tapping that key
    (a key-down event immediately followed by a key-up event).
    """
    _keyboardEvent( keyCode, KEYEVENTF_KEYDOWN )
    _keyboardEvent( keyCode, KEYEVENTF_KEYUP )
def typeSequence( keys ):
    """
    Enables scripting of keystrokes. Useful for any case that a series
    of keystrokes is required to accomplish the given task.
    The argument is a space-separated string of keys, which can include
    literal alphanumeric keys as well as codes for special keys and
    codes for pauses. Codes for pauses are the character W followed
    by a numeric literal describing the number of seconds to wait
    (which can be fractional). Codes for special keys include "F1"
    through "F12" for the function keys, "SD" for shift down, "SU"
    for shift up, "LA" and "RA" for left and right arrow keys, and
    "ESC" for escape. "AD" and "AU" correspond to alt down and alt
    up, respectively. "ID" and "IU" correspond to windows down and
    windows up, respectively. "CD" and "CU" correspond to control down
    and control up, respectively.
    LONGTERM TODO add a doctest here
    """
    # LONGTERM TODO: Develop a decent scripting language that includes
    # keydown, keyup, keypress, and pauses.
    keys = keys.split( " " )
    # Maps each multi-character code to its Windows virtual-key constant.
    mapping = { "F1" : win32con.VK_F1,
                "F2" : win32con.VK_F2,
                "F3" : win32con.VK_F3,
                "F4" : win32con.VK_F4,
                "F5" : win32con.VK_F5,
                "F6" : win32con.VK_F6,
                "F7" : win32con.VK_F7,
                "F8" : win32con.VK_F8,
                "F9" : win32con.VK_F9,
                "F10": win32con.VK_F10,
                "F11": win32con.VK_F11,
                "F12": win32con.VK_F12,
                "CD" : win32con.VK_LCONTROL,
                "CU" : win32con.VK_LCONTROL,
                "SD" : win32con.VK_LSHIFT,
                "SU" : win32con.VK_LSHIFT,
                "AD" : win32con.VK_MENU,
                "AU" : win32con.VK_MENU,
                "ID" : win32con.VK_LWIN,
                "IU" : win32con.VK_LWIN,
                "LA": win32con.VK_LEFT,
                "RA": win32con.VK_RIGHT,
                "ESC": win32con.VK_ESCAPE,
                "INS": win32con.VK_INSERT,
                "DEL": win32con.VK_DELETE
                }
    for key in keys:
        key = key.upper()
        # Any one-character code means tap and release that literal key.
        if len(key) == 1:
            key_code = ord( key )
            _keyboardEvent( key_code, KEYEVENTF_KEYDOWN )
            _keyboardEvent( key_code, KEYEVENTF_KEYUP )
            continue
        # "W##" means wait ## seconds (may be fractional).
        if key[0] == "W":
            time.sleep( float(key[1:]) )
            continue
        # These keys require particular calls to the underlying
        # keybd_event function, and therefore don't use our keyboard
        # event wrapper.
        if key in ["SD", "AD", "ID", "CD"]:
            win32api.keybd_event( mapping[key], 0, KEYEVENTF_KEYDOWN, 0 )
            continue
        if key in ["SU", "AU", "IU", "CU"]:
            win32api.keybd_event( mapping[key], 0, KEYEVENTF_KEYUP, 0 )
            continue
        # Any other multi-character code means look up the code
        # in the table above, and tap the key.
        # NOTE(review): an unrecognized code raises KeyError here.
        key_code = mapping[key]
        _keyboardEvent( key_code, KEYEVENTF_KEYDOWN )
        _keyboardEvent( key_code, KEYEVENTF_KEYUP )
def getForegroundClassNameUnicode():
    """
    Returns a unicode string containing the class name of the frontmost
    application window.

    Raises a WinError if the underlying GetClassNameW call fails.
    """
    hwnd = win32gui.GetForegroundWindow()
    # Maximum number of chars we'll accept for the class name; the
    # rest will be truncated if it's longer than this.
    MAX_LENGTH = 1024
    classNameBuf = ctypes.create_unicode_buffer( MAX_LENGTH )
    # Call the wide-character API directly via ctypes; win32gui's
    # GetClassName is the ANSI variant.
    retval = ctypes.windll.User32.GetClassNameW(
        hwnd,
        classNameBuf,
        len( classNameBuf )
        )
    if retval == 0:
        # GetClassNameW returns 0 on failure; surface it as an exception.
        raise ctypes.WinError()
    return classNameBuf.value
# ----------------------------------------------------------------------------
# Exception
# ----------------------------------------------------------------------------
class ClipboardUnopenableError( Exception ):
    """
    Raised when repeated attempts to open the Windows clipboard all
    failed, presumably because another application kept it open for
    longer than our total wait budget.
    """
# stdlib
import time
from types import ListType
# 3p
from nose.plugins.attrib import attr
try:
import psycopg2 as pg
except ImportError:
pg = None
# project
from tests.checks.common import AgentCheckTest
@attr(requires='pgbouncer')
class TestPgbouncer(AgentCheckTest):
    # Integration test for the Datadog pgbouncer check; requires a
    # pgbouncer instance listening on localhost:15433 (see config below).
    CHECK_NAME = 'pgbouncer'

    def test_checks(self):
        # Single-instance config pointing at the local test pgbouncer.
        config = {
            'init_config': {},
            'instances': [
                {
                    'host': 'localhost',
                    'port': 15433,
                    'username': 'datadog',
                    'password': 'datadog'
                }
            ]
        }
        self.run_check(config)
        # Gauge metrics should be present after a single collection.
        self.assertMetric('pgbouncer.pools.cl_active')
        self.assertMetric('pgbouncer.pools.cl_waiting')
        self.assertMetric('pgbouncer.pools.sv_active')
        self.assertMetric('pgbouncer.pools.sv_idle')
        self.assertMetric('pgbouncer.pools.sv_used')
        self.assertMetric('pgbouncer.pools.sv_tested')
        self.assertMetric('pgbouncer.pools.sv_login')
        self.assertMetric('pgbouncer.pools.maxwait')
        self.assertMetric('pgbouncer.stats.total_query_time')
        self.assertMetric('pgbouncer.stats.avg_req')
        self.assertMetric('pgbouncer.stats.avg_recv')
        self.assertMetric('pgbouncer.stats.avg_sent')
        self.assertMetric('pgbouncer.stats.avg_query')
        # Rate metrics, need 2 collection rounds
        try:
            # Generate some traffic through pgbouncer so the rates are
            # non-trivial; failures here are deliberately ignored
            # (best-effort -- psycopg2 may be unavailable, see import above).
            connection = pg.connect(
                host='localhost',
                port='15433',
                user='datadog',
                password='datadog',
                database='datadog_test')
            connection.set_isolation_level(pg.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
            cur = connection.cursor()
            cur.execute('SELECT * FROM persons;')
        except Exception:
            pass
        time.sleep(1)
        self.run_check(config)
        self.assertMetric('pgbouncer.stats.requests_per_second')
        self.assertMetric('pgbouncer.stats.bytes_received_per_second')
        self.assertMetric('pgbouncer.stats.bytes_sent_per_second')
        # Service checks
        # NOTE(review): types.ListType is Python 2 only; isinstance(x, list)
        # is the portable spelling.
        service_checks_count = len(self.service_checks)
        self.assertTrue(isinstance(self.service_checks, ListType))
        self.assertTrue(service_checks_count > 0)
        self.assertServiceCheckOK(
            'pgbouncer.can_connect',
            tags=['host:localhost', 'port:15433', 'db:pgbouncer'],
            count=service_checks_count)
#!/usr/bin/env python3
#
# Copyright 2020 MongoDB Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
import argparse
import glob
import os
import pathlib
import shutil
import subprocess
import sys
from typing import List
ZSTD_EXTRACTION = "tar --zstd -xf"
def get_cmd(tarball: str, extraction_command: str) -> List[str]:
    """Build the argv used to run ``extraction_command`` on ``tarball``
    through the user's shell."""
    shell = os.environ.get("SHELL", "/bin/bash")
    command_line = f"{extraction_command} {tarball}"
    if sys.platform != "win32":
        return [shell, "-c", command_line]
    # On Windows, translate the cygwin-style shell path to a native
    # path via cygpath before invoking it.
    proc = subprocess.run(
        ["C:/cygwin/bin/cygpath.exe", "-w", shell], text=True, capture_output=True
    )
    bash = pathlib.Path(proc.stdout.strip())
    return [bash.as_posix(), "-c", command_line]
# Command-line interface: where to extract, how, and what to do afterwards.
parser = argparse.ArgumentParser()
parser.add_argument(
    "--change-dir",
    type=str,
    action="store",
    help="The directory to change into to perform the extraction.",
)
parser.add_argument(
    "--extraction-command", type=str, action="store", help="The command to use for the extraction."
)
parser.add_argument(
    "--tarball", type=str, action="store", help="The tarball to perform the extraction on."
)
parser.add_argument(
    "--move-output",
    type=str,
    action="append",
    help="Move an extracted entry to a new location after extraction. Format is colon separated, e.g. '--move-output=file/to/move:path/to/destination'. Can accept glob like wildcards.",
)
parser.add_argument(
    "--optional",
    action="store_true",
    help="Should this fail if extraction fails. Useful for optional success.",
)
parser.add_argument(
    "--try-zstd",
    type=str,
    action="store",
    help="Try extracting zstd archive first given archive name.",
)
args = parser.parse_args()

if args.change_dir:
    # When extracting inside another directory, the tarball path must be
    # resolved to an absolute path first (the cwd changes for the subprocess).
    working_dir = pathlib.Path(args.change_dir).as_posix()
    tarball = pathlib.Path(args.tarball).resolve().as_posix()
    print(f"Switching to {working_dir} to perform the extraction in.")
    os.makedirs(working_dir, exist_ok=True)
else:
    working_dir = None
    tarball = pathlib.Path(args.tarball).as_posix()

# Attempt zstd extraction first, if enabled.
zstd_succeeded = False
if args.try_zstd:
    print("Attempting zstd extraction...")
    zstd_archive = args.try_zstd
    cmd = get_cmd(zstd_archive, ZSTD_EXTRACTION)
    print(f"Extracting: {' '.join(cmd)}")
    proc = subprocess.run(
        cmd, text=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=working_dir
    )
    zstd_succeeded = proc.returncode == 0

# Fall back to the caller-supplied extraction command if zstd was not
# attempted or did not succeed.
if not zstd_succeeded:
    cmd = get_cmd(tarball, args.extraction_command)
    print(f"Extracting: {' '.join(cmd)}")
    proc = subprocess.run(
        cmd, text=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=working_dir
    )
print(proc.stdout)

if args.move_output:
    # Each entry is "src:dst"; src may be a (recursive) glob pattern.
    for arg in args.move_output:
        try:
            src, dst = arg.split(":")
            print(f"Moving {src} to {dst}...")
            files_to_move = glob.glob(src, recursive=True)
            for file in files_to_move:
                result_dst = shutil.move(file, dst)
                print(f"Moved {file} to {result_dst}")
        except ValueError as exc:
            print(f"Bad format, needs to be glob like paths in the from 'src:dst', got: {arg}")
            raise exc

# --optional makes extraction failures non-fatal; otherwise propagate
# the extraction subprocess's exit code.
if args.optional:
    sys.exit(0)
sys.exit(proc.returncode)
######################################################################
# Cloud Routes Web Application
# -------------------------------------------------------------------
# SaltStack Services Reaction - Forms Class
######################################################################
from wtforms import TextField, SelectField
from wtforms.validators import DataRequired
from ..base import BaseReactForm
class ReactForm(BaseReactForm):
    ''' Class that creates a Saltstack Reaction form for the dashboard.

    Every field is required; each carries a field-specific validation
    message. halt_on_stderr and call_on are boolean-ish dropdowns whose
    values are the strings 'true'/'false'. '''
    # Saltstack recipe to run when the reaction fires.
    recipe_id = TextField(
        "Recipe ID",
        validators=[DataRequired(message='Recipe ID is a required field')])
    # Saltstack group the recipe belongs to.
    group_id = TextField(
        "Group ID",
        validators=[DataRequired(message='Group ID is a required field')])
    # Whether to stop execution when the recipe writes to stderr.
    halt_on_stderr = SelectField(
        "Halt on Error",
        choices=[('false', 'False'), ('true', 'True')],
        validators=[DataRequired(message='Halt on Error is a required field')])
    # Credentials for the Saltstack API.
    user = TextField(
        "User ID",
        validators=[DataRequired(message='User ID is a required field')])
    apikey = TextField(
        "API Key",
        validators=[DataRequired(message='API Key is a required field')])
    # Whether the reaction triggers on failing ('false') or passing
    # ('true') monitors.
    call_on = SelectField(
        "Call On",
        choices=[('false', 'False Monitors'), ('true', 'True Monitors')],
        validators=[DataRequired(message='Call On is a required field')])
if __name__ == '__main__':
pass | unknown | codeparrot/codeparrot-clean | ||
import sys
from PyQt4 import QtGui, QtCore
class MultipleChoicesDialog(QtGui.QDialog):
    """Dialog with the possibility of selecting one or more
    items from a list.

    The chosen items (as strings) are left in ``self.selection`` after
    the dialog closes; Cancel leaves it empty."""
    def __init__(self, choices=None, title="Title"):
        # Window has a system menu and title but no context-help button.
        super().__init__(None, QtCore.Qt.WindowSystemMenuHint |
                         QtCore.Qt.WindowTitleHint)
        if choices is None:
            # Demo/default choices when the caller provides none.
            choices = ["Item %d"%i for i in range(10)]
        self.setWindowTitle(title)
        self.selection = []

        main_widget = QtGui.QWidget()
        main_layout = QtGui.QVBoxLayout()
        main_widget.setLayout(main_layout)
        # List widget allowing ctrl/shift multi-selection.
        self.choices_widget = QtGui.QListWidget()
        self.choices_widget.setSelectionMode(
            QtGui.QAbstractItemView.ExtendedSelection)
        for choice in choices:
            item = QtGui.QListWidgetItem()
            item.setText(choice)
            self.choices_widget.addItem(item)
        main_layout.addWidget(self.choices_widget)

        # 2x2 button grid: Select all / Clear all on the left,
        # Cancel / Ok on the right.
        button_box_layout = QtGui.QGridLayout()
        selection_completed_btn = QtGui.QPushButton("Ok")
        selection_completed_btn.clicked.connect(self.selection_completed)
        select_all_btn = QtGui.QPushButton("Select all")
        select_all_btn.clicked.connect(self.select_all)
        clear_all_btn = QtGui.QPushButton("Clear all")
        clear_all_btn.clicked.connect(self.clear_all)
        cancel_btn = QtGui.QPushButton("Cancel")
        cancel_btn.clicked.connect(self.cancel)
        button_box = QtGui.QWidget()
        button_box_layout.addWidget(select_all_btn, 0, 0)
        button_box_layout.addWidget(clear_all_btn, 1, 0)
        button_box_layout.addWidget(cancel_btn, 0, 1)
        button_box_layout.addWidget(selection_completed_btn, 1, 1)
        button_box.setLayout(button_box_layout)
        main_layout.addWidget(button_box)
        self.setLayout(main_layout)
        self.show()

    def selection_completed(self):
        """Selection completed, set the value and close"""
        self.selection = [item.text() for item in
                          self.choices_widget.selectedItems()]
        self.close()

    def select_all(self):
        """Set all possible values as selected"""
        self.choices_widget.selectAll()
        self.selection = [item.text() for item in
                          self.choices_widget.selectedItems()]

    def clear_all(self):
        """Reset to have no selected values"""
        self.choices_widget.clearSelection()
        self.selection = []

    def cancel(self):
        """cancel and set the selection to an empty list"""
        self.selection = []
        self.close()
if __name__ == '__main__':
    # Manual smoke test: show the dialog and print what was selected.
    def get_choices(title="Title", choices=None):
        """Show a list of possible choices to be selected;
        returns the values selected in a list"""
        app = QtGui.QApplication([])
        dialog = MultipleChoicesDialog(title=title, choices=choices)
        # Blocks until the dialog closes.
        app.exec_()
        return dialog.selection
    b = get_choices()
    print(b)
# Copyright (C) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import StringIO
from webkitpy.common.checkout.scm import CheckoutNeedsUpdate
from webkitpy.common.checkout.scm.scm_mock import MockSCM
from webkitpy.common.net.layouttestresults import LayoutTestResults
from webkitpy.common.net.bugzilla import Attachment
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.layout_tests.models import test_results
from webkitpy.layout_tests.models import test_failures
from webkitpy.thirdparty.mock import Mock
from webkitpy.tool.commands.commandtest import CommandsTest
from webkitpy.tool.commands.queues import *
from webkitpy.tool.commands.queuestest import QueuesTest
from webkitpy.tool.commands.stepsequence import StepSequence
from webkitpy.common.net.statusserver_mock import MockStatusServer
from webkitpy.tool.mocktool import MockTool, MockOptions
class TestCommitQueue(CommitQueue):
    # CommitQueue variant for unit tests: binds to a (mock) tool on
    # construction and silences begin_work_queue's output.
    def __init__(self, tool=None):
        CommitQueue.__init__(self)
        if tool:
            self.bind_to_tool(tool)
        self._options = MockOptions(confirm=False, parent_command="commit-queue", port=None)

    def begin_work_queue(self):
        # Swallow the banner output the real implementation prints.
        output_capture = OutputCapture()
        output_capture.capture_output()
        CommitQueue.begin_work_queue(self)
        output_capture.restore_output()
class TestQueue(AbstractPatchQueue):
    # Minimal concrete AbstractPatchQueue used to exercise base-class behavior.
    name = "test-queue"
class TestReviewQueue(AbstractReviewQueue):
    # Minimal concrete AbstractReviewQueue for the tests below.
    name = "test-review-queue"
class TestFeederQueue(FeederQueue):
    # Zero sleep keeps the feeder-queue unit test fast.
    _sleep_duration = 0
class AbstractQueueTest(CommandsTest):
    # Unit tests for AbstractQueue: log paths, webkit-patch invocation,
    # iteration limits, and error-log formatting.
    def test_log_directory(self):
        self.assertEqual(TestQueue()._log_directory(), os.path.join("..", "test-queue-logs"))

    def _assert_run_webkit_patch(self, run_args, port=None):
        # Verify run_webkit_patch forwards status-host/bot-id/port flags
        # plus the caller's args to the executive.
        queue = TestQueue()
        tool = MockTool()
        tool.status_server.bot_id = "gort"
        tool.executive = Mock()
        queue.bind_to_tool(tool)
        queue._options = Mock()
        queue._options.port = port

        queue.run_webkit_patch(run_args)
        expected_run_args = ["echo", "--status-host=example.com", "--bot-id=gort"]
        if port:
            expected_run_args.append("--port=%s" % port)
        expected_run_args.extend(run_args)
        tool.executive.run_command.assert_called_with(expected_run_args, cwd='/mock-checkout')

    def test_run_webkit_patch(self):
        self._assert_run_webkit_patch([1])
        self._assert_run_webkit_patch(["one", 2])
        self._assert_run_webkit_patch([1], port="mockport")

    def test_iteration_count(self):
        # With iterations=3 the queue runs exactly three times.
        queue = TestQueue()
        queue._options = Mock()
        queue._options.iterations = 3
        self.assertTrue(queue.should_continue_work_queue())
        self.assertTrue(queue.should_continue_work_queue())
        self.assertTrue(queue.should_continue_work_queue())
        self.assertFalse(queue.should_continue_work_queue())

    def test_no_iteration_count(self):
        # Without an iteration limit the queue never stops.
        queue = TestQueue()
        queue._options = Mock()
        self.assertTrue(queue.should_continue_work_queue())
        self.assertTrue(queue.should_continue_work_queue())
        self.assertTrue(queue.should_continue_work_queue())
        self.assertTrue(queue.should_continue_work_queue())

    def _assert_log_message(self, script_error, log_message):
        failure_log = AbstractQueue._log_from_script_error_for_upload(script_error, output_limit=10)
        # NOTE(review): assertTrue with two args treats the second as the
        # failure *message*, so this only checks read() is truthy --
        # presumably assertEqual was intended; confirm before changing.
        self.assertTrue(failure_log.read(), log_message)

    def test_log_from_script_error_for_upload(self):
        self._assert_log_message(ScriptError("test"), "test")
        # In python 2, ScriptErrors are encoded as utf-8 when logged.
        unicode_tor = u"WebKit \u2661 Tor Arne Vestb\u00F8!"
        utf8_tor = unicode_tor.encode("utf-8")
        self._assert_log_message(ScriptError(unicode_tor), utf8_tor)
        script_error = ScriptError(unicode_tor, output=unicode_tor)
        expected_output = "%s\nLast %s characters of output:\n%s" % (utf8_tor, 10, utf8_tor[-10:])
        self._assert_log_message(script_error, expected_output)
class FeederQueueTest(QueuesTest):
    # Golden-log test: runs the feeder queue against mocks and compares
    # every stage's log output verbatim.
    def test_feeder_queue(self):
        self.maxDiff = None
        queue = TestFeederQueue()
        tool = MockTool(log_executive=True)
        expected_logs = {
            "begin_work_queue": self._default_begin_work_queue_logs("feeder-queue"),
            "process_work_item": """Warning, attachment 10001 on bug 50000 has invalid committer (non-committer@example.com)
Warning, attachment 10001 on bug 50000 has invalid committer (non-committer@example.com)
MOCK setting flag 'commit-queue' to '-' on attachment '10001' with comment 'Rejecting attachment 10001 from commit-queue.\n\nnon-committer@example.com does not have committer permissions according to http://trac.webkit.org/browser/trunk/Tools/Scripts/webkitpy/common/config/contributors.json.

- If you do not have committer rights please read http://webkit.org/coding/contributing.html for instructions on how to use bugzilla flags.

- If you have committer rights please correct the error in Tools/Scripts/webkitpy/common/config/contributors.json by adding yourself to the file (no review needed).  The commit-queue restarts itself every 2 hours.  After restart the commit-queue will correctly respect your committer rights.'
Feeding commit-queue high priority items [10005], regular items [10000]
MOCK: update_work_items: commit-queue [10005, 10000]
Feeding EWS (1 r? patch, 1 new)
MOCK: submit_to_ews: 10002
""",
            "handle_unexpected_error": "Mock error message\n",
        }
        self.assert_queue_outputs(queue, tool=tool, expected_logs=expected_logs)
class AbstractPatchQueueTest(CommandsTest):
    # Tests _next_patch(): unknown attachment ids are skipped/released,
    # known ids are returned in order, and an empty queue yields None.
    def test_next_patch(self):
        queue = AbstractPatchQueue()
        tool = MockTool()
        queue.bind_to_tool(tool)
        queue._options = Mock()
        queue._options.port = None
        self.assertIsNone(queue._next_patch())
        tool.status_server = MockStatusServer(work_items=[2, 10000, 10001])
        expected_stdout = "MOCK: fetch_attachment: 2 is not a known attachment id\n"  # A mock-only message to prevent us from making mistakes.
        expected_logs = "MOCK: release_work_item: None 2\n"
        patch = OutputCapture().assert_outputs(self, queue._next_patch, expected_stdout=expected_stdout, expected_logs=expected_logs)
        # The patch.id() == 2 is ignored because it doesn't exist.
        self.assertEqual(patch.id(), 10000)
        self.assertEqual(queue._next_patch().id(), 10001)
        self.assertEqual(queue._next_patch(), None)  # When the queue is empty
class PatchProcessingQueueTest(CommandsTest):
    # Verifies the bug comment/attachment produced when uploading a
    # layout-test-results archive for a patch.
    def test_upload_results_archive_for_patch(self):
        queue = PatchProcessingQueue()
        queue.name = "mock-queue"
        tool = MockTool()
        queue.bind_to_tool(tool)
        queue._options = Mock()
        queue._options.port = None
        patch = queue._tool.bugs.fetch_attachment(10001)
        expected_logs = """MOCK add_attachment_to_bug: bug_id=50000, description=Archive of layout-test-results from bot for mac-snowleopard filename=layout-test-results.zip mimetype=None
-- Begin comment --
The attached test failures were seen while running run-webkit-tests on the mock-queue.
Port: mac-snowleopard  Platform: MockPlatform 1.0
-- End comment --
"""
        OutputCapture().assert_outputs(self, queue._upload_results_archive_for_patch, [patch, Mock()], expected_logs=expected_logs)
class NeedsUpdateSequence(StepSequence):
    # Step sequence that always fails with CheckoutNeedsUpdate, for
    # exercising the queues' retry-on-stale-checkout path.
    def _run(self, tool, options, state):
        raise CheckoutNeedsUpdate([], 1, "", None)
class AlwaysCommitQueueTool(object):
    # Stub tool whose command lookup always resolves to CommitQueue.
    def __init__(self):
        self.status_server = MockStatusServer()

    def command_by_name(self, name):
        return CommitQueue
class SecondThoughtsCommitQueue(TestCommitQueue):
    # Commit queue that "changes its mind": the patch validates initially,
    # then appears obsoleted/rejected on any refetch after the first
    # command runs.
    def __init__(self, tool=None):
        self._reject_patch = False
        TestCommitQueue.__init__(self, tool)

    def run_command(self, command):
        # We want to reject the patch after the first validation,
        # so wait to reject it until after some other command has run.
        self._reject_patch = True
        return CommitQueue.run_command(self, command)

    def refetch_patch(self, patch):
        if not self._reject_patch:
            return self._tool.bugs.fetch_attachment(patch.id())
        # Fabricate an obsoleted, r-/cq- version of the same attachment.
        attachment_dictionary = {
            "id": patch.id(),
            "bug_id": patch.bug_id(),
            "name": "Rejected",
            "is_obsolete": True,
            "is_patch": False,
            "review": "-",
            "reviewer_email": "foo@bar.com",
            "commit-queue": "-",
            "committer_email": "foo@bar.com",
            "attacher_email": "Contributer1",
        }
        return Attachment(attachment_dictionary, None)
class CommitQueueTest(QueuesTest):
    # Build a TestResult representing a single text-mismatch failure.
    def _mock_test_result(self, testname):
        return test_results.TestResult(testname, [test_failures.FailureTextMismatch()])
def test_commit_queue(self):
tool = MockTool()
tool.filesystem.write_text_file('/tmp/layout-test-results/full_results.json', '') # Otherwise the commit-queue will hit a KeyError trying to read the results from the MockFileSystem.
tool.filesystem.write_text_file('/tmp/layout-test-results/webkit_unit_tests_output.xml', '')
expected_logs = {
"begin_work_queue": self._default_begin_work_queue_logs("commit-queue"),
"process_work_item": """Running: webkit-patch --status-host=example.com clean --port=mac
MOCK: update_status: commit-queue Cleaned working directory
Running: webkit-patch --status-host=example.com update --port=mac
MOCK: update_status: commit-queue Updated working directory
Running: webkit-patch --status-host=example.com apply-attachment --no-update --non-interactive 10000 --port=mac
MOCK: update_status: commit-queue Applied patch
Running: webkit-patch --status-host=example.com validate-changelog --check-oops --non-interactive 10000 --port=mac
MOCK: update_status: commit-queue ChangeLog validated
Running: webkit-patch --status-host=example.com build --no-clean --no-update --build-style=release --port=mac
MOCK: update_status: commit-queue Built patch
Running: webkit-patch --status-host=example.com build-and-test --no-clean --no-update --test --non-interactive --build-style=release --port=mac
MOCK: update_status: commit-queue Passed tests
Running: webkit-patch --status-host=example.com land-attachment --force-clean --non-interactive --parent-command=commit-queue 10000 --port=mac
MOCK: update_status: commit-queue Landed patch
MOCK: update_status: commit-queue Pass
MOCK: release_work_item: commit-queue 10000
""",
"handle_script_error": "ScriptError error message\n\nMOCK output\n",
"handle_unexpected_error": "MOCK setting flag 'commit-queue' to '-' on attachment '10000' with comment 'Rejecting attachment 10000 from commit-queue.\n\nMock error message'\n",
}
self.assert_queue_outputs(CommitQueue(), tool=tool, expected_logs=expected_logs)
def test_commit_queue_failure(self):
expected_logs = {
"begin_work_queue": self._default_begin_work_queue_logs("commit-queue"),
"process_work_item": """MOCK: update_status: commit-queue Cleaned working directory
MOCK: update_status: commit-queue Updated working directory
MOCK: update_status: commit-queue Patch does not apply
MOCK setting flag 'commit-queue' to '-' on attachment '10000' with comment 'Rejecting attachment 10000 from commit-queue.\n\nMOCK script error
Full output: http://dummy_url'
MOCK: update_status: commit-queue Fail
MOCK: release_work_item: commit-queue 10000
""",
"handle_script_error": "ScriptError error message\n\nMOCK output\n",
"handle_unexpected_error": "MOCK setting flag 'commit-queue' to '-' on attachment '10000' with comment 'Rejecting attachment 10000 from commit-queue.\n\nMock error message'\n",
}
queue = CommitQueue()
def mock_run_webkit_patch(command):
if command[0] == 'clean' or command[0] == 'update':
# We want cleaning to succeed so we can error out on a step
# that causes the commit-queue to reject the patch.
return
raise ScriptError('MOCK script error')
queue.run_webkit_patch = mock_run_webkit_patch
self.assert_queue_outputs(queue, expected_logs=expected_logs)
def test_commit_queue_failure_with_failing_tests(self):
expected_logs = {
"begin_work_queue": self._default_begin_work_queue_logs("commit-queue"),
"process_work_item": """MOCK: update_status: commit-queue Cleaned working directory
MOCK: update_status: commit-queue Updated working directory
MOCK: update_status: commit-queue Patch does not apply
MOCK setting flag 'commit-queue' to '-' on attachment '10000' with comment 'Rejecting attachment 10000 from commit-queue.\n\nNew failing tests:
mock_test_name.html
Full output: http://dummy_url'
MOCK: update_status: commit-queue Fail
MOCK: release_work_item: commit-queue 10000
""",
"handle_script_error": "ScriptError error message\n\nMOCK output\n",
"handle_unexpected_error": "MOCK setting flag 'commit-queue' to '-' on attachment '10000' with comment 'Rejecting attachment 10000 from commit-queue.\n\nMock error message'\n",
}
class MockCommitQueueTask(CommitQueueTask):
def results_from_patch_test_run(self, patch):
return LayoutTestResults([test_results.TestResult("mock_test_name.html", failures=[test_failures.FailureTextMismatch()])], did_exceed_test_failure_limit=False)
queue = CommitQueue(MockCommitQueueTask)
def mock_run_webkit_patch(command):
if command[0] == 'clean' or command[0] == 'update':
# We want cleaning to succeed so we can error out on a step
# that causes the commit-queue to reject the patch.
return
raise ScriptError('MOCK script error')
queue.run_webkit_patch = mock_run_webkit_patch
self.assert_queue_outputs(queue, expected_logs=expected_logs)
def test_rollout(self):
tool = MockTool()
tool.filesystem.write_text_file('/tmp/layout-test-results/full_results.json', '') # Otherwise the commit-queue will hit a KeyError trying to read the results from the MockFileSystem.
tool.filesystem.write_text_file('/tmp/layout-test-results/webkit_unit_tests_output.xml', '')
tool.buildbot.light_tree_on_fire()
expected_logs = {
"begin_work_queue": self._default_begin_work_queue_logs("commit-queue"),
"process_work_item": """Running: webkit-patch --status-host=example.com clean --port=%(port)s
MOCK: update_status: commit-queue Cleaned working directory
Running: webkit-patch --status-host=example.com update --port=%(port)s
MOCK: update_status: commit-queue Updated working directory
Running: webkit-patch --status-host=example.com apply-attachment --no-update --non-interactive 10000 --port=%(port)s
MOCK: update_status: commit-queue Applied patch
Running: webkit-patch --status-host=example.com validate-changelog --check-oops --non-interactive 10000 --port=%(port)s
MOCK: update_status: commit-queue ChangeLog validated
Running: webkit-patch --status-host=example.com build --no-clean --no-update --build-style=release --port=%(port)s
MOCK: update_status: commit-queue Built patch
Running: webkit-patch --status-host=example.com build-and-test --no-clean --no-update --test --non-interactive --build-style=release --port=%(port)s
MOCK: update_status: commit-queue Passed tests
Running: webkit-patch --status-host=example.com land-attachment --force-clean --non-interactive --parent-command=commit-queue 10000 --port=%(port)s
MOCK: update_status: commit-queue Landed patch
MOCK: update_status: commit-queue Pass
MOCK: release_work_item: commit-queue 10000
""" % {"port": "mac"},
"handle_script_error": "ScriptError error message\n\nMOCK output\n",
"handle_unexpected_error": "MOCK setting flag 'commit-queue' to '-' on attachment '10000' with comment 'Rejecting attachment 10000 from commit-queue.\n\nMock error message'\n",
}
self.assert_queue_outputs(CommitQueue(), tool=tool, expected_logs=expected_logs)
def test_rollout_lands(self):
tool = MockTool()
tool.buildbot.light_tree_on_fire()
rollout_patch = tool.bugs.fetch_attachment(10005) # _patch6, a rollout patch.
assert(rollout_patch.is_rollout())
expected_logs = {
"begin_work_queue": self._default_begin_work_queue_logs("commit-queue"),
"process_work_item": """Running: webkit-patch --status-host=example.com clean --port=%(port)s
MOCK: update_status: commit-queue Cleaned working directory
Running: webkit-patch --status-host=example.com update --port=%(port)s
MOCK: update_status: commit-queue Updated working directory
Running: webkit-patch --status-host=example.com apply-attachment --no-update --non-interactive 10005 --port=%(port)s
MOCK: update_status: commit-queue Applied patch
Running: webkit-patch --status-host=example.com validate-changelog --check-oops --non-interactive 10005 --port=%(port)s
MOCK: update_status: commit-queue ChangeLog validated
Running: webkit-patch --status-host=example.com land-attachment --force-clean --non-interactive --parent-command=commit-queue 10005 --port=%(port)s
MOCK: update_status: commit-queue Landed patch
MOCK: update_status: commit-queue Pass
MOCK: release_work_item: commit-queue 10005
""" % {"port": "mac"},
"handle_script_error": "ScriptError error message\n\nMOCK output\n",
"handle_unexpected_error": "MOCK setting flag 'commit-queue' to '-' on attachment '10005' with comment 'Rejecting attachment 10005 from commit-queue.\n\nMock error message'\n",
}
self.assert_queue_outputs(CommitQueue(), tool=tool, work_item=rollout_patch, expected_logs=expected_logs)
def test_non_valid_patch(self):
tool = MockTool()
patch = tool.bugs.fetch_attachment(10007) # _patch8, resolved bug, without review flag, not marked obsolete (maybe already landed)
expected_logs = {
"begin_work_queue": self._default_begin_work_queue_logs("commit-queue"),
"process_work_item": """MOCK: update_status: commit-queue Error: commit-queue did not process patch. Reason: Bug is already closed.
MOCK: release_work_item: commit-queue 10007
""",
}
self.assert_queue_outputs(CommitQueue(), tool=tool, work_item=patch, expected_logs=expected_logs)
def test_auto_retry(self):
queue = CommitQueue()
options = Mock()
options.parent_command = "commit-queue"
tool = AlwaysCommitQueueTool()
sequence = NeedsUpdateSequence(None)
expected_logs = """Commit failed because the checkout is out of date. Please update and try again.
MOCK: update_status: commit-queue Tests passed, but commit failed (checkout out of date). Updating, then landing without building or re-running tests.
"""
state = {'patch': None}
OutputCapture().assert_outputs(self, sequence.run_and_handle_errors, [tool, options, state], expected_exception=TryAgain, expected_logs=expected_logs)
self.assertTrue(options.update)
self.assertFalse(options.build)
self.assertFalse(options.test)
def test_manual_reject_during_processing(self):
queue = SecondThoughtsCommitQueue(MockTool())
queue.begin_work_queue()
queue._tool.filesystem.write_text_file('/tmp/layout-test-results/full_results.json', '') # Otherwise the commit-queue will hit a KeyError trying to read the results from the MockFileSystem.
queue._tool.filesystem.write_text_file('/tmp/layout-test-results/webkit_unit_tests_output.xml', '')
queue._options = Mock()
queue._options.port = None
expected_logs = """Running: webkit-patch --status-host=example.com clean --port=mac
MOCK: update_status: commit-queue Cleaned working directory
MOCK: update_status: commit-queue Error: commit-queue did not process patch. Reason: Patch is obsolete.
MOCK: release_work_item: commit-queue 10000
"""
self.maxDiff = None
OutputCapture().assert_outputs(self, queue.process_work_item, [QueuesTest.mock_work_item], expected_logs=expected_logs)
def test_report_flaky_tests(self):
queue = TestCommitQueue(MockTool())
expected_logs = """MOCK bug comment: bug_id=50002, cc=None
--- Begin comment ---
The commit-queue just saw foo/bar.html flake (text diff) while processing attachment 10000 on bug 50000.
Port: MockPort Platform: MockPlatform 1.0
--- End comment ---
MOCK add_attachment_to_bug: bug_id=50002, description=Failure diff from bot filename=failure.diff mimetype=None
MOCK bug comment: bug_id=50002, cc=None
--- Begin comment ---
The commit-queue just saw bar/baz.html flake (text diff) while processing attachment 10000 on bug 50000.
Port: MockPort Platform: MockPlatform 1.0
--- End comment ---
bar/baz-diffs.txt does not exist in results archive, uploading entire archive.
MOCK add_attachment_to_bug: bug_id=50002, description=Archive of layout-test-results from bot filename=layout-test-results.zip mimetype=None
MOCK bug comment: bug_id=50000, cc=None
--- Begin comment ---
The commit-queue encountered the following flaky tests while processing attachment 10000:
foo/bar.html bug 50002 (author: abarth@webkit.org)
bar/baz.html bug 50002 (author: abarth@webkit.org)
The commit-queue is continuing to process your patch.
--- End comment ---
"""
test_names = ["foo/bar.html", "bar/baz.html"]
test_results = [self._mock_test_result(name) for name in test_names]
class MockZipFile(object):
def __init__(self):
self.fp = StringIO()
def read(self, path):
return ""
def namelist(self):
# This is intentionally missing one diffs.txt to exercise the "upload the whole zip" codepath.
return ['foo/bar-diffs.txt']
OutputCapture().assert_outputs(self, queue.report_flaky_tests, [QueuesTest.mock_work_item, test_results, MockZipFile()], expected_logs=expected_logs)
def test_did_pass_testing_ews(self):
tool = MockTool()
patch = tool.bugs.fetch_attachment(10000)
queue = TestCommitQueue(tool)
self.assertFalse(queue.did_pass_testing_ews(patch))
class StyleQueueTest(QueuesTest):
def test_style_queue_with_style_exception(self):
expected_logs = {
"begin_work_queue": self._default_begin_work_queue_logs("style-queue"),
"process_work_item": """MOCK: update_status: style-queue Started processing patch
Running: webkit-patch --status-host=example.com clean
MOCK: update_status: style-queue Cleaned working directory
Running: webkit-patch --status-host=example.com update
MOCK: update_status: style-queue Updated working directory
Running: webkit-patch --status-host=example.com apply-attachment --no-update --non-interactive 10000
MOCK: update_status: style-queue Applied patch
Running: webkit-patch --status-host=example.com apply-watchlist-local 50000
MOCK: update_status: style-queue Watchlist applied
Running: webkit-patch --status-host=example.com check-style-local --non-interactive --quiet
MOCK: update_status: style-queue Style checked
MOCK: update_status: style-queue Pass
MOCK: release_work_item: style-queue 10000
""",
"handle_unexpected_error": "Mock error message\n",
"handle_script_error": "MOCK output\n",
}
tool = MockTool(executive_throws_when_run=set(['check-style']))
self.assert_queue_outputs(StyleQueue(), expected_logs=expected_logs, tool=tool)
def test_style_queue_with_watch_list_exception(self):
expected_logs = {
"begin_work_queue": self._default_begin_work_queue_logs("style-queue"),
"process_work_item": """MOCK: update_status: style-queue Started processing patch
Running: webkit-patch --status-host=example.com clean
MOCK: update_status: style-queue Cleaned working directory
Running: webkit-patch --status-host=example.com update
MOCK: update_status: style-queue Updated working directory
Running: webkit-patch --status-host=example.com apply-attachment --no-update --non-interactive 10000
MOCK: update_status: style-queue Applied patch
Running: webkit-patch --status-host=example.com apply-watchlist-local 50000
Exception for ['echo', '--status-host=example.com', 'apply-watchlist-local', 50000]
MOCK command output
MOCK: update_status: style-queue Unabled to apply watchlist
Running: webkit-patch --status-host=example.com check-style-local --non-interactive --quiet
MOCK: update_status: style-queue Style checked
MOCK: update_status: style-queue Pass
MOCK: release_work_item: style-queue 10000
""",
"handle_unexpected_error": "Mock error message\n",
"handle_script_error": "MOCK output\n",
}
tool = MockTool(executive_throws_when_run=set(['apply-watchlist-local']))
self.assert_queue_outputs(StyleQueue(), expected_logs=expected_logs, tool=tool)
def test_non_valid_patch(self):
tool = MockTool()
patch = tool.bugs.fetch_attachment(10007) # _patch8, resolved bug, without review flag, not marked obsolete (maybe already landed)
expected_logs = {
"begin_work_queue": self._default_begin_work_queue_logs("style-queue"),
"process_work_item": """MOCK: update_status: style-queue Started processing patch
MOCK: update_status: style-queue Error: style-queue did not process patch. Reason: Bug is already closed.
MOCK: release_work_item: style-queue 10007
""",
}
self.assert_queue_outputs(StyleQueue(), tool=tool, work_item=patch, expected_logs=expected_logs) | unknown | codeparrot/codeparrot-clean | ||
"""A collection of modules for building different kinds of tree from
HTML documents.
To create a treebuilder for a new type of tree, you need to do
implement several things:
1) A set of classes for various types of elements: Document, Doctype,
Comment, Element. These must implement the interface of
_base.treebuilders.Node (although comment nodes have a different
signature for their constructor, see treebuilders.etree.Comment)
Textual content may also be implemented as another node type, or not, as
your tree implementation requires.
2) A treebuilder object (called TreeBuilder by convention) that
inherits from treebuilders._base.TreeBuilder. This has 4 required attributes:
documentClass - the class to use for the bottommost node of a document
elementClass - the class to use for HTML Elements
commentClass - the class to use for comments
doctypeClass - the class to use for doctypes
It also has one required method:
getDocument - Returns the root node of the complete document tree
3) If you wish to run the unit tests, you must also create a
testSerializer method on your treebuilder which accepts a node and
returns a string containing Node and its children serialized according
to the format used in the unittests
"""
from __future__ import absolute_import, division, unicode_literals
from ..utils import default_etree
treeBuilderCache = {}
def getTreeBuilder(treeType, implementation=None, **kwargs):
"""Get a TreeBuilder class for various types of tree with built-in support
treeType - the name of the tree type required (case-insensitive). Supported
values are:
"dom" - A generic builder for DOM implementations, defaulting to
a xml.dom.minidom based implementation.
"etree" - A generic builder for tree implementations exposing an
ElementTree-like interface, defaulting to
xml.etree.cElementTree if available and
xml.etree.ElementTree if not.
"lxml" - A etree-based builder for lxml.etree, handling
limitations of lxml's implementation.
implementation - (Currently applies to the "etree" and "dom" tree types). A
module implementing the tree type e.g.
xml.etree.ElementTree or xml.etree.cElementTree."""
treeType = treeType.lower()
if treeType not in treeBuilderCache:
if treeType == "dom":
from . import dom
# Come up with a sane default (pref. from the stdlib)
if implementation is None:
from xml.dom import minidom
implementation = minidom
# NEVER cache here, caching is done in the dom submodule
return dom.getDomModule(implementation, **kwargs).TreeBuilder
elif treeType == "lxml":
from . import etree_lxml
treeBuilderCache[treeType] = etree_lxml.TreeBuilder
elif treeType == "etree":
from . import etree
if implementation is None:
implementation = default_etree
# NEVER cache here, caching is done in the etree submodule
return etree.getETreeModule(implementation, **kwargs).TreeBuilder
else:
raise ValueError("""Unrecognised treebuilder "%s" """ % treeType)
return treeBuilderCache.get(treeType) | unknown | codeparrot/codeparrot-clean |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.