code
stringlengths
1
25.8M
language
stringclasses
18 values
source
stringclasses
4 values
repo
stringclasses
78 values
path
stringlengths
0
268
#!/usr/bin/env python
"""Parse "figure:" sections of a markdown file with meta-data headers.

Each figure section carries key/value meta-data (title, geometry,
options, ...) plus ``h``/``fit`` meta lines describing histograms as
``<fname> <hname> [opt=value ...]`` specs.

Ported to Python 3: ``print`` statements, ``xrange`` and the removed
``string.atof`` helper were replaced, and the missing ``sys`` import
(needed when reading from stdin) was added.
"""
import os
import sys


class MDHist(object):
    """One histogram spec line: ``<fname> <hname> [opt=value ...]``."""

    def __init__(self, text):
        # Tabs count as separators, so normalize them to spaces first.
        self.text = text.replace('\t', ' ')
        tokens = self.text.split(' ')
        self.fname = tokens[0]
        self.hname = tokens[1]

    def __getitem__(self, opt):
        """Return the last value given for option ``opt``, or None.

        A value runs from ``opt=`` up to (but excluding) the name of the
        following option.  A trailing single-token value is kept as-is,
        consistent with ``MDFigure.options`` (the original dropped the
        token and returned '' for a trailing option).
        """
        values = []
        for tail in self.text.split(opt + '=')[1:]:
            chunk = tail.split('=')[0]
            if len(chunk) > 0:
                words = chunk.split(' ')
                if len(words) > 1:
                    # The last word is the next option's name: drop it.
                    values.append(' '.join(words[:-1]))
                else:
                    values.append(words[0])
        if len(values) < 1:
            return None
        return values[-1]

    def __repr__(self):
        return 'fname:[{}] hname:[{}] title:[{}] dopt:[{}]'.format(
            self.fname, self.hname, str(self['title']), str(self['dopt']))


class MDFigure(object):
    """One markdown figure section with meta-data access helpers."""

    def __init__(self, text):
        # Lazy import: only MDFigure needs the third-party package, so
        # MDHist stays usable without ``markdown`` installed.
        import markdown
        self.data = text
        self.md = markdown.Markdown(extensions=['markdown.extensions.meta'])
        self.html = self.md.convert(text)

    def string(self, key):
        """Meta value for ``key`` joined into one string ('' if absent)."""
        try:
            smeta = self.md.Meta[key]
        except KeyError:
            return ''
        return ' '.join(smeta)

    def strings(self, key, n=None):
        """First ``n`` whitespace-separated meta tokens, padded with ''.

        With ``n`` unset, return exactly the tokens present.
        """
        try:
            smeta = self.md.Meta[key]
        except KeyError:
            return ['']
        s = ' '.join(smeta).replace('\t', ' ')
        tokens = s.split(' ')
        if n is None:
            n = len(tokens)
        return [tokens[i] if i < len(tokens) else '' for i in range(n)]

    def floats(self, key, n=None):
        """Like :meth:`strings` but converted to float; bad tokens dropped."""
        retvals = []
        for s in self.strings(key, n):
            try:
                retvals.append(float(s))
            except ValueError:
                pass
        return retvals

    def options(self, key, opt):
        """All values of ``opt=`` found inside meta entry ``key``."""
        try:
            smeta = self.md.Meta[key]
        except KeyError:
            return ['{} key not found'.format(key)]
        retvals = []
        for sm in smeta:
            for s in sm.split(opt + '=')[1:]:
                words = s.split('=')[0].split(' ')
                if len(words) > 1:
                    # The last word is the next option's name: drop it.
                    retvals.append(' '.join(words[:-1]))
                else:
                    retvals.append(words[0])
        return retvals

    def option(self, key, opt):
        """Last value of ``opt`` under ``key``, or None when absent."""
        retvals = self.options(key, opt)
        if len(retvals) < 1:
            return None
        return retvals[-1]

    def objects(self, key='h'):
        """MDHist objects built from every line of meta entry ``key``."""
        try:
            smeta = self.md.Meta[key]
        except KeyError:
            return ['{} key not found'.format(key)]
        return [MDHist(''.join(sm)) for sm in smeta]

    def demo(self):
        """Print a summary of the parsed figure (debug helper)."""
        print()
        print('---> new figure <---')
        print(self.md.Meta)
        print('geom 2 floats:', self.floats('geometry', 2))
        print('geom 1 float: ', self.floats('geometry', 1))
        print('title:', self.strings('title'))
        print('title:', self.string('title'))
        print('logz:', self.option('options', 'logz'))
        hists = self.objects('h')
        print('n objects:', len(hists))
        for h in hists:
            print(h)
        print('now fit objects...')
        for h in self.objects('fit'):
            print(h)


class MDFigureFile(object):
    """Split a text file on ``figure:`` markers into MDFigure objects."""

    def __init__(self, fname):
        self.data = self.load_file_to_strings(fname)
        self.text = '\n'.join(self.data)
        self.fig_texts = self.text.split('figure:')
        # fig_texts[0] is the preamble before the first marker: skip it.
        self.figures = [MDFigure('figure:{}'.format(ftx))
                        for ftx in self.fig_texts[1:]]

    def load_file_to_strings(self, fname):
        """Return the non-empty lines of ``fname`` (stdin when fname is None).

        Fixes two defects of the original: ``sys`` was used without being
        imported, and empty lines were removed from the list while
        iterating over it, which skips elements.
        """
        lines = []
        if fname is not None:
            # A named but non-existent file yields an empty list, as before.
            if os.path.isfile(fname):
                with open(fname) as f:
                    lines = [line.rstrip('\n') for line in f]
        else:
            lines = [line.rstrip('\n') for line in sys.stdin]
        return [line for line in lines if len(line) > 0]

    def demo(self):
        """Run the demo printout of every parsed figure."""
        for figure in self.figures:
            figure.demo()


def main():
    """Parse the default demo file and dump its figures."""
    MDFigureFile('test.txt').demo()


if __name__ == '__main__':
    main()
unknown
codeparrot/codeparrot-clean
"""Tests for grudger strategies.""" import random import axelrod from .test_player import TestPlayer C, D = axelrod.Actions.C, axelrod.Actions.D class TestGrudger(TestPlayer): name = "Grudger" player = axelrod.Grudger expected_classifier = { 'memory_depth': float('inf'), # Long memory 'stochastic': False, 'inspects_source': False, 'manipulates_source': False, 'manipulates_state': False } def test_initial_strategy(self): """ Starts by cooperating """ self.first_play_test(C) def test_strategy(self): """ If opponent defects at any point then the player will defect forever """ self.responses_test([C, D, D, D], [C, C, C, C], [C]) self.responses_test([C, C, D, D, D], [C, D, C, C, C], [D]) class TestForgetfulGrudger(TestPlayer): name = "Forgetful Grudger" player = axelrod.ForgetfulGrudger expected_classifier = { 'memory_depth': 10, 'stochastic': False, 'inspects_source': False, 'manipulates_source': False, 'manipulates_state': False } def test_strategy(self): P1 = axelrod.ForgetfulGrudger() P2 = axelrod.Player() self.assertEqual(P1.grudged, False) # Starts by playing C self.assertEqual(P1.strategy(P2), C) self.assertEqual(P1.grudged, False) P2.history.append(C) self.assertEqual(P1.strategy(P2), C) self.assertEqual(P1.grudged, False) P2.history.append(C) self.assertEqual(P1.strategy(P2), C) self.assertEqual(P1.grudged, False) P2.history.append(C) self.assertEqual(P1.strategy(P2), C) self.assertEqual(P1.grudged, False) P2.history.append(D) self.assertEqual(P2.history, [C, C, C, D]) self.assertEqual(P1.strategy(P2), D) self.assertEqual(P1.grudged, True) for turn in range(P1.mem_length-1): self.assertEqual(P1.strategy(P2), D) # Doesn't matter what opponent plays now P2.history.append(random.choice([C, D])) self.assertEqual(P1.grudged, True) self.assertEqual(P1.strategy(P2), D) self.assertEqual(P1.grudge_memory, 10) self.assertEqual(P1.grudged, True) P2.history.append(C) # Back to being not grudged self.assertEqual(P1.strategy(P2), C) self.assertEqual(P1.grudged, False) 
P2.history.append(C) self.assertEqual(P1.strategy(P2), C) self.assertEqual(P1.grudged, False) P2.history.append(C) def test_reset_method(self): """ tests the reset method """ P1 = axelrod.ForgetfulGrudger() P1.history = [C, D, D, D] P1.grudged = True P1.grudge_memory = 4 P1.reset() self.assertEqual(P1.history, []) self.assertEqual(P1.grudged, False) self.assertEqual(P1.grudge_memory, 0) class TestOppositeGrudger(TestPlayer): name = 'Opposite Grudger' player = axelrod.OppositeGrudger expected_classifier = { 'memory_depth': float('inf'), # Long memory 'stochastic': False, 'inspects_source': False, 'manipulates_source': False, 'manipulates_state': False } def test_initial_strategy(self): """ Starts by defecting """ self.first_play_test(D) def test_strategy(self): """ If opponent cooperates at any point then the player will cooperate forever. """ self.responses_test([C, D, D, D], [D, D, D, D], [D]) self.responses_test([C, C, D, D, D], [C, D, C, C, C], [C]) class TestAggravater(TestPlayer): name = "Aggravater" player = axelrod.Aggravater expected_classifier = { 'memory_depth': float('inf'), # Long memory 'stochastic': False, 'inspects_source': False, 'manipulates_source': False, 'manipulates_state': False } def test_initial_strategy(self): """ Starts by defecting """ self.first_play_test(D) def test_strategy(self): """ If opponent defects at any point then the player will defect forever """ self.responses_test([C, D, D, D], [C, C, C, C], [C]) self.responses_test([C, C, D, D, D], [C, D, C, C, C], [D])
unknown
codeparrot/codeparrot-clean
#!/usr/bin/python # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # ANSIBLE_METADATA = { 'metadata_version': '1.0', 'status': ['preview'], 'supported_by': 'community' } DOCUMENTATION = ''' --- module: nxos_vrf extends_documentation_fragment: nxos version_added: "2.1" short_description: Manages global VRF configuration. description: - Manages global VRF configuration. author: - Jason Edelman (@jedelman8) - Gabriele Gerbino (@GGabriele) notes: - Cisco NX-OS creates the default VRF by itself. Therefore, you're not allowed to use default as I(vrf) name in this module. - C(vrf) name must be shorter than 32 chars. - VRF names are not case sensible in NX-OS. Anyway, the name is stored just like it's inserted by the user and it'll not be changed again unless the VRF is removed and re-created. i.e. C(vrf=NTC) will create a VRF named NTC, but running it again with C(vrf=ntc) will not cause a configuration change. options: vrf: description: - Name of VRF to be managed. required: true admin_state: description: - Administrative state of the VRF. required: false default: up choices: ['up','down'] vni: description: - Specify virtual network identifier. Valid values are Integer or keyword 'default'. required: false default: null version_added: "2.2" route_distinguisher: description: - VPN Route Distinguisher (RD). 
Valid values are a string in one of the route-distinguisher formats (ASN2:NN, ASN4:NN, or IPV4:NN); the keyword 'auto', or the keyword 'default'. required: false default: null version_added: "2.2" state: description: - Manages desired state of the resource. required: false default: present choices: ['present','absent'] description: description: - Description of the VRF. required: false default: null ''' EXAMPLES = ''' - name: Ensure ntc VRF exists on switch nxos_vrf: vrf: ntc state: present ''' RETURN = ''' commands: description: commands sent to the device returned: always type: list sample: ["vrf context ntc", "shutdown"] ''' import re from ansible.module_utils.nxos import load_config, run_commands from ansible.module_utils.nxos import nxos_argument_spec, check_args from ansible.module_utils.basic import AnsibleModule def execute_show_command(command, module): transport = module.params['transport'] if transport == 'cli': if 'show run' not in command: command += ' | json' cmds = [command] body = run_commands(module, cmds) return body def apply_key_map(key_map, table): new_dict = {} for key in table: new_key = key_map.get(key) if new_key: new_dict[new_key] = str(table.get(key)) return new_dict def get_commands_to_config_vrf(delta, vrf): commands = [] for param, value in delta.items(): command = '' if param == 'description': command = 'description {0}'.format(value) elif param == 'admin_state': if value.lower() == 'up': command = 'no shutdown' elif value.lower() == 'down': command = 'shutdown' elif param == 'rd': command = 'rd {0}'.format(value) elif param == 'vni': command = 'vni {0}'.format(value) if command: commands.append(command) if commands: commands.insert(0, 'vrf context {0}'.format(vrf)) return commands def get_vrf_description(vrf, module): command = (r'show run section vrf | begin ^vrf\scontext\s{0} | end ^vrf.*'.format(vrf)) description = '' descr_regex = r".*description\s(?P<descr>[\S+\s]+).*" try: body = execute_show_command(command, module)[0] except 
IndexError: return description if body: splitted_body = body.split('\n') for element in splitted_body: if 'description' in element: match_description = re.match(descr_regex, element, re.DOTALL) group_description = match_description.groupdict() description = group_description["descr"] return description def get_value(arg, config, module): extra_arg_regex = re.compile(r'(?:{0}\s)(?P<value>.*)$'.format(arg), re.M) value = '' if arg in config: value = extra_arg_regex.search(config).group('value') return value def get_vrf(vrf, module): command = 'show vrf {0}'.format(vrf) vrf_key = { 'vrf_name': 'vrf', 'vrf_state': 'admin_state' } try: body = execute_show_command(command, module)[0] vrf_table = body['TABLE_vrf']['ROW_vrf'] except (TypeError, IndexError): return {} parsed_vrf = apply_key_map(vrf_key, vrf_table) parsed_vrf['admin_state'] = parsed_vrf['admin_state'].lower() command = 'show run all | section vrf.context.{0}'.format(vrf) body = execute_show_command(command, module)[0] extra_params = ['vni', 'rd', 'description'] for param in extra_params: parsed_vrf[param] = get_value(param, body, module) return parsed_vrf def main(): argument_spec = dict( vrf=dict(required=True), description=dict(default=None, required=False), vni=dict(required=False, type='str'), rd=dict(required=False, type='str'), admin_state=dict(default='up', choices=['up', 'down'], required=False), state=dict(default='present', choices=['present', 'absent'], required=False), include_defaults=dict(default=False), config=dict(), save=dict(type='bool', default=False) ) argument_spec.update(nxos_argument_spec) module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) warnings = list() check_args(module, warnings) results = dict(changed=False, warnings=warnings) vrf = module.params['vrf'] admin_state = module.params['admin_state'].lower() description = module.params['description'] rd = module.params['rd'] vni = module.params['vni'] state = module.params['state'] if vrf == 'default': 
module.fail_json(msg='cannot use default as name of a VRF') elif len(vrf) > 32: module.fail_json(msg='VRF name exceeded max length of 32', vrf=vrf) existing = get_vrf(vrf, module) args = dict(vrf=vrf, description=description, vni=vni, admin_state=admin_state, rd=rd) proposed = dict((k, v) for k, v in args.items() if v is not None) delta = dict(set(proposed.items()).difference(existing.items())) commands = [] if state == 'absent': if existing: command = ['no vrf context {0}'.format(vrf)] commands.extend(command) elif state == 'present': if not existing: command = get_commands_to_config_vrf(delta, vrf) commands.extend(command) elif delta: command = get_commands_to_config_vrf(delta, vrf) commands.extend(command) if commands: if proposed.get('vni'): if existing.get('vni') and existing.get('vni') != '': commands.insert(1, 'no vni {0}'.format(existing['vni'])) if not module.check_mode: load_config(module, commands) results['changed'] = True if 'configure' in commands: commands.pop(0) results['commands'] = commands module.exit_json(**results) if __name__ == '__main__': main()
unknown
codeparrot/codeparrot-clean
package v0alpha1 import ( "fmt" "strings" "time" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "github.com/grafana/grafana/pkg/apimachinery/utils" "github.com/grafana/grafana/pkg/registry/fieldselectors" ) const ( GROUP = "iam.grafana.app" VERSION = "v0alpha1" APIVERSION = GROUP + "/" + VERSION ) var CoreRoleInfo = utils.NewResourceInfo(GROUP, VERSION, "coreroles", "corerole", "CoreRole", func() runtime.Object { return &CoreRole{} }, func() runtime.Object { return &CoreRoleList{} }, utils.TableColumns{ Definition: []metav1.TableColumnDefinition{ {Name: "Name", Type: "string", Format: "name"}, {Name: "Group", Type: "string", Format: "group", Description: "Core role group"}, {Name: "Title", Type: "string", Format: "string", Description: "Core role name"}, {Name: "Created At", Type: "date"}, }, Reader: func(obj any) ([]interface{}, error) { core, ok := obj.(*CoreRole) if ok { if core != nil { return []interface{}{ core.Name, core.Spec.Group, core.Spec.Title, core.CreationTimestamp.UTC().Format(time.RFC3339), }, nil } } return nil, fmt.Errorf("expected core role") }, }, ) var RoleInfo = utils.NewResourceInfo(GROUP, VERSION, "roles", "role", "Role", func() runtime.Object { return &Role{} }, func() runtime.Object { return &RoleList{} }, utils.TableColumns{ Definition: []metav1.TableColumnDefinition{ {Name: "Name", Type: "string", Format: "name"}, {Name: "Group", Type: "string", Format: "group", Description: "Role group"}, {Name: "Title", Type: "string", Format: "string", Description: "Role name"}, {Name: "Created At", Type: "date"}, }, Reader: func(obj any) ([]interface{}, error) { role, ok := obj.(*Role) if ok { if role != nil { return []interface{}{ role.Name, role.Spec.Group, role.Spec.Title, role.CreationTimestamp.UTC().Format(time.RFC3339), }, nil } } return nil, fmt.Errorf("expected role") }, }, ) var GlobalRoleInfo = globalRoleInfo.WithClusterScope() var globalRoleInfo = 
utils.NewResourceInfo(GROUP, VERSION, "globalroles", "globalrole", "GlobalRole", func() runtime.Object { return &GlobalRole{} }, func() runtime.Object { return &GlobalRoleList{} }, utils.TableColumns{ Definition: []metav1.TableColumnDefinition{ {Name: "Name", Type: "string", Format: "name"}, {Name: "Group", Type: "string", Format: "group", Description: "Role group"}, {Name: "Title", Type: "string", Format: "string", Description: "Role name"}, {Name: "Created At", Type: "date"}, }, Reader: func(obj any) ([]interface{}, error) { globalRole, ok := obj.(*GlobalRole) if ok { return []interface{}{ globalRole.Name, globalRole.Spec.Group, globalRole.Spec.Title, globalRole.CreationTimestamp.UTC().Format(time.RFC3339), }, nil } return nil, fmt.Errorf("expected global role") }, }, ) var ResourcePermissionInfo = utils.NewResourceInfo(GROUP, VERSION, "resourcepermissions", "resourcepermission", "ResourcePermission", func() runtime.Object { return &ResourcePermission{} }, func() runtime.Object { return &ResourcePermissionList{} }, utils.TableColumns{ Definition: []metav1.TableColumnDefinition{ {Name: "Name", Type: "string", Format: "name"}, {Name: "Created At", Type: "date"}, }, Reader: func(obj any) ([]interface{}, error) { perm, ok := obj.(*ResourcePermission) if ok { if perm != nil { return []interface{}{ perm.Name, perm.CreationTimestamp.UTC().Format(time.RFC3339), }, nil } } return nil, fmt.Errorf("expected resource permission") }, }, ) var userKind = UserKind() var UserResourceInfo = utils.NewResourceInfo(userKind.Group(), userKind.Version(), userKind.GroupVersionResource().Resource, strings.ToLower(userKind.Kind()), userKind.Kind(), func() runtime.Object { return userKind.ZeroValue() }, func() runtime.Object { return userKind.ZeroListValue() }, utils.TableColumns{ Definition: []metav1.TableColumnDefinition{ {Name: "Name", Type: "string", Format: "name"}, {Name: "Login", Type: "string", Format: "string", Description: "The user login"}, {Name: "Email", Type: "string", 
Format: "string", Description: "The user email"}, {Name: "Created At", Type: "date"}, }, Reader: func(obj any) ([]interface{}, error) { u, ok := obj.(*User) if ok { return []interface{}{ u.Name, u.Spec.Login, u.Spec.Email, u.CreationTimestamp.UTC().Format(time.RFC3339), }, nil } return nil, fmt.Errorf("expected user") }, }, ) var teamKind = TeamKind() var TeamResourceInfo = utils.NewResourceInfo(teamKind.Group(), teamKind.Version(), teamKind.GroupVersionResource().Resource, strings.ToLower(teamKind.Kind()), teamKind.Kind(), func() runtime.Object { return teamKind.ZeroValue() }, func() runtime.Object { return teamKind.ZeroListValue() }, utils.TableColumns{ Definition: []metav1.TableColumnDefinition{ {Name: "Name", Type: "string", Format: "name"}, {Name: "Title", Type: "string", Format: "string", Description: "The team name"}, {Name: "Email", Type: "string", Format: "string", Description: "team email"}, {Name: "Created At", Type: "date"}, }, Reader: func(obj any) ([]interface{}, error) { m, ok := obj.(*Team) if !ok { return nil, fmt.Errorf("expected team") } return []interface{}{ m.Name, m.Spec.Title, m.Spec.Email, m.CreationTimestamp.UTC().Format(time.RFC3339), }, nil }, }, ) var serviceAccountKind = ServiceAccountKind() var ServiceAccountResourceInfo = utils.NewResourceInfo(serviceAccountKind.Group(), serviceAccountKind.Version(), serviceAccountKind.GroupVersionResource().Resource, strings.ToLower(serviceAccountKind.Kind()), serviceAccountKind.Kind(), func() runtime.Object { return serviceAccountKind.ZeroValue() }, func() runtime.Object { return serviceAccountKind.ZeroListValue() }, utils.TableColumns{ Definition: []metav1.TableColumnDefinition{ {Name: "Name", Type: "string", Format: "name"}, {Name: "Title", Type: "string", Format: "string"}, {Name: "Disabled", Type: "boolean"}, {Name: "Created At", Type: "date"}, }, Reader: func(obj any) ([]interface{}, error) { sa, ok := obj.(*ServiceAccount) if ok { return []interface{}{ sa.Name, sa.Spec.Title, sa.Spec.Disabled, 
sa.CreationTimestamp.UTC().Format(time.RFC3339), }, nil } return nil, fmt.Errorf("expected service account") }, }, ) var teamBindingKind = TeamBindingKind() var TeamBindingResourceInfo = utils.NewResourceInfo( teamBindingKind.Group(), teamBindingKind.Version(), teamBindingKind.GroupVersionResource().Resource, strings.ToLower(teamBindingKind.Kind()), teamBindingKind.Kind(), func() runtime.Object { return teamBindingKind.ZeroValue() }, func() runtime.Object { return teamBindingKind.ZeroListValue() }, utils.TableColumns{ Definition: []metav1.TableColumnDefinition{ {Name: "Name", Type: "string", Format: "name"}, {Name: "Team", Type: "string"}, {Name: "Created At", Type: "string", Format: "date"}, }, Reader: func(obj any) ([]interface{}, error) { m, ok := obj.(*TeamBinding) if !ok { return nil, fmt.Errorf("expected team binding") } return []interface{}{ m.Name, m.Spec.TeamRef.Name, m.CreationTimestamp.UTC().Format(time.RFC3339), }, nil }, }, ) var teamLBACRuleKind = TeamLBACRuleKind() var TeamLBACRuleInfo = utils.NewResourceInfo( teamLBACRuleKind.Group(), teamLBACRuleKind.Version(), teamLBACRuleKind.GroupVersionResource().Resource, strings.ToLower(teamLBACRuleKind.Kind()), teamLBACRuleKind.Kind(), func() runtime.Object { return teamLBACRuleKind.ZeroValue() }, func() runtime.Object { return teamLBACRuleKind.ZeroListValue() }, utils.TableColumns{ Definition: []metav1.TableColumnDefinition{ {Name: "Name", Type: "string", Format: "name"}, {Name: "Datasource Type", Type: "string", Format: "string", Description: "Data source type"}, {Name: "Datasource UID", Type: "string", Format: "string", Description: "Data source UID"}, {Name: "Created At", Type: "date"}, }, Reader: func(obj any) ([]interface{}, error) { t, ok := obj.(*TeamLBACRule) if !ok { return nil, fmt.Errorf("expected teamlbacrule") } return []interface{}{ t.Name, t.Spec.DatasourceType, t.Spec.DatasourceUid, t.CreationTimestamp.UTC().Format(time.RFC3339), }, nil }, }, ) var ExternalGroupMappingResourceInfo = 
utils.NewResourceInfo(GROUP, VERSION, "externalgroupmappings", "externalgroupmapping", "ExternalGroupMapping", func() runtime.Object { return &ExternalGroupMapping{} }, func() runtime.Object { return &ExternalGroupMappingList{} }, utils.TableColumns{ Definition: []metav1.TableColumnDefinition{ {Name: "Name", Type: "string", Format: "name"}, {Name: "Created At", Type: "date"}, }, Reader: func(obj any) ([]interface{}, error) { mapping, ok := obj.(*ExternalGroupMapping) if ok { if mapping != nil { return []interface{}{ mapping.Name, mapping.CreationTimestamp.UTC().Format(time.RFC3339), }, nil } } return nil, fmt.Errorf("expected external group mapping") }, }, ) var RoleBindingInfo = utils.NewResourceInfo(GROUP, VERSION, "rolebindings", "rolebinding", "RoleBinding", func() runtime.Object { return &RoleBinding{} }, func() runtime.Object { return &RoleBindingList{} }, utils.TableColumns{ Definition: []metav1.TableColumnDefinition{ {Name: "Name", Type: "string", Format: "name"}, {Name: "Created At", Type: "date"}, }, Reader: func(obj any) ([]interface{}, error) { roleBinding, ok := obj.(*RoleBinding) if ok { if roleBinding != nil { return []interface{}{ roleBinding.Name, roleBinding.CreationTimestamp.UTC().Format(time.RFC3339), }, nil } } return nil, fmt.Errorf("expected role binding") }, }, ) var ( SchemeBuilder runtime.SchemeBuilder localSchemeBuilder = &SchemeBuilder AddToScheme = localSchemeBuilder.AddToScheme SchemeGroupVersion = schema.GroupVersion{Group: GROUP, Version: VERSION} InternalGroupVersion = schema.GroupVersion{Group: GROUP, Version: runtime.APIVersionInternal} ) func init() { localSchemeBuilder.Register(func(s *runtime.Scheme) error { err := AddAuthZKnownTypes(s) if err != nil { return err } err = AddAuthNKnownTypes(s) if err != nil { return err } metav1.AddToGroupVersion(s, SchemeGroupVersion) return nil }, addDefaultingFuncs) } func AddAuthZKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &CoreRole{}, &CoreRoleList{}, 
&Role{}, &RoleList{}, &RoleBinding{}, &RoleBindingList{}, // What is this about? &metav1.PartialObjectMetadata{}, &metav1.PartialObjectMetadataList{}, ) return nil } func AddTeamLBACRuleTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &TeamLBACRule{}, &TeamLBACRuleList{}, // What is this about? &metav1.PartialObjectMetadata{}, &metav1.PartialObjectMetadataList{}, ) return nil } func AddResourcePermissionKnownTypes(scheme *runtime.Scheme, version schema.GroupVersion) error { scheme.AddKnownTypes(version, &ResourcePermission{}, &ResourcePermissionList{}, // What is this about? &metav1.PartialObjectMetadata{}, &metav1.PartialObjectMetadataList{}, ) return nil } func AddGlobalRoleKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &GlobalRole{}, &GlobalRoleList{}, ) return nil } func AddAuthNKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, // Identity &User{}, &UserList{}, &ServiceAccount{}, &ServiceAccountList{}, &Team{}, &TeamList{}, &GetSearchTeamsResponse{}, &TeamBinding{}, &TeamBindingList{}, &ExternalGroupMapping{}, &ExternalGroupMappingList{}, &GetGroupsResponse{}, // For now these are registered in pkg/apis/iam/v0alpha1/register.go // &UserTeamList{}, // &ServiceAccountTokenList{}, // &DisplayList{}, // &SSOSetting{}, // &SSOSettingList{}, // &TeamMemberList{}, &metav1.PartialObjectMetadata{}, &metav1.PartialObjectMetadataList{}, ) // Enable field selectors for TeamBinding err := fieldselectors.AddSelectableFieldLabelConversions(scheme, SchemeGroupVersion, TeamBindingKind()) if err != nil { return err } return nil } func addDefaultingFuncs(scheme *runtime.Scheme) error { // return RegisterDefaults(scheme) return nil }
go
github
https://github.com/grafana/grafana
apps/iam/pkg/apis/iam/v0alpha1/register.go
"""The tests for the Group components.""" # pylint: disable=protected-access from collections import OrderedDict import unittest from unittest.mock import patch from homeassistant.bootstrap import setup_component from homeassistant.const import ( STATE_ON, STATE_OFF, STATE_HOME, STATE_UNKNOWN, ATTR_ICON, ATTR_HIDDEN, ATTR_ASSUMED_STATE, STATE_NOT_HOME, ) import homeassistant.components.group as group from tests.common import get_test_home_assistant class TestComponentsGroup(unittest.TestCase): """Test Group component.""" # pylint: disable=invalid-name def setUp(self): """Setup things to be run when tests are started.""" self.hass = get_test_home_assistant() # pylint: disable=invalid-name def tearDown(self): """Stop everything that was started.""" self.hass.stop() def test_setup_group_with_mixed_groupable_states(self): """Try to setup a group with mixed groupable states.""" self.hass.states.set('light.Bowl', STATE_ON) self.hass.states.set('device_tracker.Paulus', STATE_HOME) group.Group.create_group( self.hass, 'person_and_light', ['light.Bowl', 'device_tracker.Paulus']) self.assertEqual( STATE_ON, self.hass.states.get( group.ENTITY_ID_FORMAT.format('person_and_light')).state) def test_setup_group_with_a_non_existing_state(self): """Try to setup a group with a non existing state.""" self.hass.states.set('light.Bowl', STATE_ON) grp = group.Group.create_group( self.hass, 'light_and_nothing', ['light.Bowl', 'non.existing']) self.assertEqual(STATE_ON, grp.state) def test_setup_group_with_non_groupable_states(self): """Test setup with groups which are not groupable.""" self.hass.states.set('cast.living_room', "Plex") self.hass.states.set('cast.bedroom', "Netflix") grp = group.Group.create_group( self.hass, 'chromecasts', ['cast.living_room', 'cast.bedroom']) self.assertEqual(STATE_UNKNOWN, grp.state) def test_setup_empty_group(self): """Try to setup an empty group.""" grp = group.Group.create_group(self.hass, 'nothing', []) self.assertEqual(STATE_UNKNOWN, grp.state) def 
test_monitor_group(self): """Test if the group keeps track of states.""" self.hass.states.set('light.Bowl', STATE_ON) self.hass.states.set('light.Ceiling', STATE_OFF) test_group = group.Group.create_group( self.hass, 'init_group', ['light.Bowl', 'light.Ceiling'], False) # Test if group setup in our init mode is ok self.assertIn(test_group.entity_id, self.hass.states.entity_ids()) group_state = self.hass.states.get(test_group.entity_id) self.assertEqual(STATE_ON, group_state.state) self.assertTrue(group_state.attributes.get(group.ATTR_AUTO)) def test_group_turns_off_if_all_off(self): """Test if turn off if the last device that was on turns off.""" self.hass.states.set('light.Bowl', STATE_OFF) self.hass.states.set('light.Ceiling', STATE_OFF) test_group = group.Group.create_group( self.hass, 'init_group', ['light.Bowl', 'light.Ceiling'], False) self.hass.block_till_done() group_state = self.hass.states.get(test_group.entity_id) self.assertEqual(STATE_OFF, group_state.state) def test_group_turns_on_if_all_are_off_and_one_turns_on(self): """Test if turn on if all devices were turned off and one turns on.""" self.hass.states.set('light.Bowl', STATE_OFF) self.hass.states.set('light.Ceiling', STATE_OFF) test_group = group.Group.create_group( self.hass, 'init_group', ['light.Bowl', 'light.Ceiling'], False) # Turn one on self.hass.states.set('light.Ceiling', STATE_ON) self.hass.block_till_done() group_state = self.hass.states.get(test_group.entity_id) self.assertEqual(STATE_ON, group_state.state) def test_is_on(self): """Test is_on method.""" self.hass.states.set('light.Bowl', STATE_ON) self.hass.states.set('light.Ceiling', STATE_OFF) test_group = group.Group.create_group( self.hass, 'init_group', ['light.Bowl', 'light.Ceiling'], False) self.assertTrue(group.is_on(self.hass, test_group.entity_id)) self.hass.states.set('light.Bowl', STATE_OFF) self.hass.block_till_done() self.assertFalse(group.is_on(self.hass, test_group.entity_id)) # Try on non existing state 
self.assertFalse(group.is_on(self.hass, 'non.existing')) def test_expand_entity_ids(self): """Test expand_entity_ids method.""" self.hass.states.set('light.Bowl', STATE_ON) self.hass.states.set('light.Ceiling', STATE_OFF) test_group = group.Group.create_group( self.hass, 'init_group', ['light.Bowl', 'light.Ceiling'], False) self.assertEqual(sorted(['light.ceiling', 'light.bowl']), sorted(group.expand_entity_ids( self.hass, [test_group.entity_id]))) def test_expand_entity_ids_does_not_return_duplicates(self): """Test that expand_entity_ids does not return duplicates.""" self.hass.states.set('light.Bowl', STATE_ON) self.hass.states.set('light.Ceiling', STATE_OFF) test_group = group.Group.create_group( self.hass, 'init_group', ['light.Bowl', 'light.Ceiling'], False) self.assertEqual( ['light.bowl', 'light.ceiling'], sorted(group.expand_entity_ids( self.hass, [test_group.entity_id, 'light.Ceiling']))) self.assertEqual( ['light.bowl', 'light.ceiling'], sorted(group.expand_entity_ids( self.hass, ['light.bowl', test_group.entity_id]))) def test_expand_entity_ids_ignores_non_strings(self): """Test that non string elements in lists are ignored.""" self.assertEqual([], group.expand_entity_ids(self.hass, [5, True])) def test_get_entity_ids(self): """Test get_entity_ids method.""" self.hass.states.set('light.Bowl', STATE_ON) self.hass.states.set('light.Ceiling', STATE_OFF) test_group = group.Group.create_group( self.hass, 'init_group', ['light.Bowl', 'light.Ceiling'], False) self.assertEqual( ['light.bowl', 'light.ceiling'], sorted(group.get_entity_ids(self.hass, test_group.entity_id))) def test_get_entity_ids_with_domain_filter(self): """Test if get_entity_ids works with a domain_filter.""" self.hass.states.set('switch.AC', STATE_OFF) mixed_group = group.Group.create_group( self.hass, 'mixed_group', ['light.Bowl', 'switch.AC'], False) self.assertEqual( ['switch.ac'], group.get_entity_ids( self.hass, mixed_group.entity_id, domain_filter="switch")) def 
test_get_entity_ids_with_non_existing_group_name(self): """Test get_entity_ids with a non existing group.""" self.assertEqual([], group.get_entity_ids(self.hass, 'non_existing')) def test_get_entity_ids_with_non_group_state(self): """Test get_entity_ids with a non group state.""" self.assertEqual([], group.get_entity_ids(self.hass, 'switch.AC')) def test_group_being_init_before_first_tracked_state_is_set_to_on(self): """Test if the groups turn on. If no states existed and now a state it is tracking is being added as ON. """ test_group = group.Group.create_group( self.hass, 'test group', ['light.not_there_1']) self.hass.states.set('light.not_there_1', STATE_ON) self.hass.block_till_done() group_state = self.hass.states.get(test_group.entity_id) self.assertEqual(STATE_ON, group_state.state) def test_group_being_init_before_first_tracked_state_is_set_to_off(self): """Test if the group turns off. If no states existed and now a state it is tracking is being added as OFF. """ test_group = group.Group.create_group( self.hass, 'test group', ['light.not_there_1']) self.hass.states.set('light.not_there_1', STATE_OFF) self.hass.block_till_done() group_state = self.hass.states.get(test_group.entity_id) self.assertEqual(STATE_OFF, group_state.state) def test_setup(self): """Test setup method.""" self.hass.states.set('light.Bowl', STATE_ON) self.hass.states.set('light.Ceiling', STATE_OFF) test_group = group.Group.create_group( self.hass, 'init_group', ['light.Bowl', 'light.Ceiling'], False) group_conf = OrderedDict() group_conf['second_group'] = { 'entities': 'light.Bowl, ' + test_group.entity_id, 'icon': 'mdi:work', 'view': True, 'control': 'hidden', } group_conf['test_group'] = 'hello.world,sensor.happy' group_conf['empty_group'] = {'name': 'Empty Group', 'entities': None} setup_component(self.hass, 'group', {'group': group_conf}) group_state = self.hass.states.get( group.ENTITY_ID_FORMAT.format('second_group')) self.assertEqual(STATE_ON, group_state.state) 
self.assertEqual(set((test_group.entity_id, 'light.bowl')), set(group_state.attributes['entity_id'])) self.assertIsNone(group_state.attributes.get(group.ATTR_AUTO)) self.assertEqual('mdi:work', group_state.attributes.get(ATTR_ICON)) self.assertTrue(group_state.attributes.get(group.ATTR_VIEW)) self.assertEqual('hidden', group_state.attributes.get(group.ATTR_CONTROL)) self.assertTrue(group_state.attributes.get(ATTR_HIDDEN)) self.assertEqual(1, group_state.attributes.get(group.ATTR_ORDER)) group_state = self.hass.states.get( group.ENTITY_ID_FORMAT.format('test_group')) self.assertEqual(STATE_UNKNOWN, group_state.state) self.assertEqual(set(('sensor.happy', 'hello.world')), set(group_state.attributes['entity_id'])) self.assertIsNone(group_state.attributes.get(group.ATTR_AUTO)) self.assertIsNone(group_state.attributes.get(ATTR_ICON)) self.assertIsNone(group_state.attributes.get(group.ATTR_VIEW)) self.assertIsNone(group_state.attributes.get(group.ATTR_CONTROL)) self.assertIsNone(group_state.attributes.get(ATTR_HIDDEN)) self.assertEqual(2, group_state.attributes.get(group.ATTR_ORDER)) def test_groups_get_unique_names(self): """Two groups with same name should both have a unique entity id.""" grp1 = group.Group.create_group(self.hass, 'Je suis Charlie') grp2 = group.Group.create_group(self.hass, 'Je suis Charlie') self.assertNotEqual(grp1.entity_id, grp2.entity_id) def test_expand_entity_ids_expands_nested_groups(self): """Test if entity ids epands to nested groups.""" group.Group.create_group( self.hass, 'light', ['light.test_1', 'light.test_2']) group.Group.create_group( self.hass, 'switch', ['switch.test_1', 'switch.test_2']) group.Group.create_group(self.hass, 'group_of_groups', ['group.light', 'group.switch']) self.assertEqual( ['light.test_1', 'light.test_2', 'switch.test_1', 'switch.test_2'], sorted(group.expand_entity_ids(self.hass, ['group.group_of_groups']))) def test_set_assumed_state_based_on_tracked(self): """Test assumed state.""" 
self.hass.states.set('light.Bowl', STATE_ON) self.hass.states.set('light.Ceiling', STATE_OFF) test_group = group.Group.create_group( self.hass, 'init_group', ['light.Bowl', 'light.Ceiling', 'sensor.no_exist']) state = self.hass.states.get(test_group.entity_id) self.assertIsNone(state.attributes.get(ATTR_ASSUMED_STATE)) self.hass.states.set('light.Bowl', STATE_ON, { ATTR_ASSUMED_STATE: True }) self.hass.block_till_done() state = self.hass.states.get(test_group.entity_id) self.assertTrue(state.attributes.get(ATTR_ASSUMED_STATE)) self.hass.states.set('light.Bowl', STATE_ON) self.hass.block_till_done() state = self.hass.states.get(test_group.entity_id) self.assertIsNone(state.attributes.get(ATTR_ASSUMED_STATE)) def test_group_updated_after_device_tracker_zone_change(self): """Test group state when device tracker in group changes zone.""" self.hass.states.set('device_tracker.Adam', STATE_HOME) self.hass.states.set('device_tracker.Eve', STATE_NOT_HOME) self.hass.block_till_done() group.Group.create_group( self.hass, 'peeps', ['device_tracker.Adam', 'device_tracker.Eve']) self.hass.states.set('device_tracker.Adam', 'cool_state_not_home') self.hass.block_till_done() self.assertEqual(STATE_NOT_HOME, self.hass.states.get( group.ENTITY_ID_FORMAT.format('peeps')).state) def test_reloading_groups(self): """Test reloading the group config.""" assert setup_component(self.hass, 'group', {'group': { 'second_group': { 'entities': 'light.Bowl', 'icon': 'mdi:work', 'view': True, }, 'test_group': 'hello.world,sensor.happy', 'empty_group': {'name': 'Empty Group', 'entities': None}, } }) assert sorted(self.hass.states.entity_ids()) == \ ['group.empty_group', 'group.second_group', 'group.test_group'] assert self.hass.bus.listeners['state_changed'] == 3 with patch('homeassistant.config.load_yaml_config_file', return_value={ 'group': { 'hello': { 'entities': 'light.Bowl', 'icon': 'mdi:work', 'view': True, }}}): group.reload(self.hass) self.hass.block_till_done() assert 
self.hass.states.entity_ids() == ['group.hello'] assert self.hass.bus.listeners['state_changed'] == 1 def test_stopping_a_group(self): """Test that a group correctly removes itself.""" grp = group.Group.create_group( self.hass, 'light', ['light.test_1', 'light.test_2']) assert self.hass.states.entity_ids() == ['group.light'] grp.stop() assert self.hass.states.entity_ids() == [] def test_changing_group_visibility(self): """Test that a group can be hidden and shown.""" assert setup_component(self.hass, 'group', { 'group': { 'test_group': 'hello.world,sensor.happy' } }) group_entity_id = group.ENTITY_ID_FORMAT.format('test_group') # Hide the group group.set_visibility(self.hass, group_entity_id, False) self.hass.block_till_done() group_state = self.hass.states.get(group_entity_id) self.assertTrue(group_state.attributes.get(ATTR_HIDDEN)) # Show it again group.set_visibility(self.hass, group_entity_id, True) self.hass.block_till_done() group_state = self.hass.states.get(group_entity_id) self.assertIsNone(group_state.attributes.get(ATTR_HIDDEN))
unknown
codeparrot/codeparrot-clean
function Component(props) { const count = new MaybeMutable(); return ( <View> <View> {<span>Text</span>} {<span>{maybeMutate(count)}</span>} </View> </View> ); }
javascript
github
https://github.com/facebook/react
compiler/packages/babel-plugin-react-compiler/src/__tests__/fixtures/compiler/inner-memo-value-not-promoted-to-outer-scope-static.js
# Authors: Peter Prettenhofer <peter.prettenhofer@gmail.com> (main author) # Mathieu Blondel (partial_fit support) # # License: BSD 3 clause """Classification and regression using Stochastic Gradient Descent (SGD).""" import numpy as np import warnings from abc import ABCMeta, abstractmethod from ..utils import Parallel, delayed from .base import LinearClassifierMixin, SparseCoefMixin from .base import make_dataset from ..base import BaseEstimator, RegressorMixin from ..utils import check_array, check_random_state, check_X_y from ..utils.extmath import safe_sparse_dot from ..utils.multiclass import _check_partial_fit_first_call from ..utils.validation import check_is_fitted from ..exceptions import ConvergenceWarning from ..externals import six from ..model_selection import train_test_split from .sgd_fast import plain_sgd, average_sgd from ..utils import compute_class_weight from ..utils import deprecated from .sgd_fast import Hinge from .sgd_fast import SquaredHinge from .sgd_fast import Log from .sgd_fast import ModifiedHuber from .sgd_fast import SquaredLoss from .sgd_fast import Huber from .sgd_fast import EpsilonInsensitive from .sgd_fast import SquaredEpsilonInsensitive LEARNING_RATE_TYPES = {"constant": 1, "optimal": 2, "invscaling": 3, "adaptive": 4, "pa1": 5, "pa2": 6} PENALTY_TYPES = {"none": 0, "l2": 2, "l1": 1, "elasticnet": 3} DEFAULT_EPSILON = 0.1 # Default value of ``epsilon`` parameter. 
class BaseSGD(six.with_metaclass(ABCMeta, BaseEstimator, SparseCoefMixin)): """Base class for SGD classification and regression.""" def __init__(self, loss, penalty='l2', alpha=0.0001, C=1.0, l1_ratio=0.15, fit_intercept=True, max_iter=None, tol=None, shuffle=True, verbose=0, epsilon=0.1, random_state=None, learning_rate="optimal", eta0=0.0, power_t=0.5, early_stopping=False, validation_fraction=0.1, n_iter_no_change=5, warm_start=False, average=False, n_iter=None): self.loss = loss self.penalty = penalty self.learning_rate = learning_rate self.epsilon = epsilon self.alpha = alpha self.C = C self.l1_ratio = l1_ratio self.fit_intercept = fit_intercept self.shuffle = shuffle self.random_state = random_state self.verbose = verbose self.eta0 = eta0 self.power_t = power_t self.early_stopping = early_stopping self.validation_fraction = validation_fraction self.n_iter_no_change = n_iter_no_change self.warm_start = warm_start self.average = average self.n_iter = n_iter self.max_iter = max_iter self.tol = tol # current tests expect init to do parameter validation # but we are not allowed to set attributes self._validate_params(set_max_iter=False) def set_params(self, *args, **kwargs): super(BaseSGD, self).set_params(*args, **kwargs) self._validate_params(set_max_iter=False) return self @abstractmethod def fit(self, X, y): """Fit model.""" def _validate_params(self, set_max_iter=True, for_partial_fit=False): """Validate input params. """ if not isinstance(self.shuffle, bool): raise ValueError("shuffle must be either True or False") if not isinstance(self.early_stopping, bool): raise ValueError("early_stopping must be either True or False") if self.early_stopping and for_partial_fit: raise ValueError("early_stopping should be False with partial_fit") if self.max_iter is not None and self.max_iter <= 0: raise ValueError("max_iter must be > zero. 
Got %f" % self.max_iter) if not (0.0 <= self.l1_ratio <= 1.0): raise ValueError("l1_ratio must be in [0, 1]") if self.alpha < 0.0: raise ValueError("alpha must be >= 0") if self.n_iter_no_change < 1: raise ValueError("n_iter_no_change must be >= 1") if not (0.0 < self.validation_fraction < 1.0): raise ValueError("validation_fraction must be in ]0, 1[") if self.learning_rate in ("constant", "invscaling", "adaptive"): if self.eta0 <= 0.0: raise ValueError("eta0 must be > 0") if self.learning_rate == "optimal" and self.alpha == 0: raise ValueError("alpha must be > 0 since " "learning_rate is 'optimal'. alpha is used " "to compute the optimal learning rate.") # raises ValueError if not registered self._get_penalty_type(self.penalty) self._get_learning_rate_type(self.learning_rate) if self.loss not in self.loss_functions: raise ValueError("The loss %s is not supported. " % self.loss) if not set_max_iter: return # n_iter deprecation, set self._max_iter, self._tol self._tol = self.tol if self.n_iter is not None: warnings.warn("n_iter parameter is deprecated in 0.19 and will be" " removed in 0.21. Use max_iter and tol instead.", DeprecationWarning) # Same behavior as before 0.19 max_iter = self.n_iter self._tol = None elif self.tol is None and self.max_iter is None: if not for_partial_fit: warnings.warn( "max_iter and tol parameters have been " "added in %s in 0.19. If both are left unset, " "they default to max_iter=5 and tol=None. " "If tol is not None, max_iter defaults to max_iter=1000. " "From 0.21, default max_iter will be 1000, and" " default tol will be 1e-3." % type(self).__name__, FutureWarning) # Before 0.19, default was n_iter=5 max_iter = 5 else: max_iter = self.max_iter if self.max_iter is not None else 1000 self._max_iter = max_iter def _get_loss_function(self, loss): """Get concrete ``LossFunction`` object for str ``loss``. 
""" try: loss_ = self.loss_functions[loss] loss_class, args = loss_[0], loss_[1:] if loss in ('huber', 'epsilon_insensitive', 'squared_epsilon_insensitive'): args = (self.epsilon, ) return loss_class(*args) except KeyError: raise ValueError("The loss %s is not supported. " % loss) def _get_learning_rate_type(self, learning_rate): try: return LEARNING_RATE_TYPES[learning_rate] except KeyError: raise ValueError("learning rate %s " "is not supported. " % learning_rate) def _get_penalty_type(self, penalty): penalty = str(penalty).lower() try: return PENALTY_TYPES[penalty] except KeyError: raise ValueError("Penalty %s is not supported. " % penalty) def _validate_sample_weight(self, sample_weight, n_samples): """Set the sample weight array.""" if sample_weight is None: # uniform sample weights sample_weight = np.ones(n_samples, dtype=np.float64, order='C') else: # user-provided array sample_weight = np.asarray(sample_weight, dtype=np.float64, order="C") if sample_weight.shape[0] != n_samples: raise ValueError("Shapes of X and sample_weight do not match.") return sample_weight def _allocate_parameter_mem(self, n_classes, n_features, coef_init=None, intercept_init=None): """Allocate mem for parameters; initialize if provided.""" if n_classes > 2: # allocate coef_ for multi-class if coef_init is not None: coef_init = np.asarray(coef_init, order="C") if coef_init.shape != (n_classes, n_features): raise ValueError("Provided ``coef_`` does not match " "dataset. 
") self.coef_ = coef_init else: self.coef_ = np.zeros((n_classes, n_features), dtype=np.float64, order="C") # allocate intercept_ for multi-class if intercept_init is not None: intercept_init = np.asarray(intercept_init, order="C") if intercept_init.shape != (n_classes, ): raise ValueError("Provided intercept_init " "does not match dataset.") self.intercept_ = intercept_init else: self.intercept_ = np.zeros(n_classes, dtype=np.float64, order="C") else: # allocate coef_ for binary problem if coef_init is not None: coef_init = np.asarray(coef_init, dtype=np.float64, order="C") coef_init = coef_init.ravel() if coef_init.shape != (n_features,): raise ValueError("Provided coef_init does not " "match dataset.") self.coef_ = coef_init else: self.coef_ = np.zeros(n_features, dtype=np.float64, order="C") # allocate intercept_ for binary problem if intercept_init is not None: intercept_init = np.asarray(intercept_init, dtype=np.float64) if intercept_init.shape != (1,) and intercept_init.shape != (): raise ValueError("Provided intercept_init " "does not match dataset.") self.intercept_ = intercept_init.reshape(1,) else: self.intercept_ = np.zeros(1, dtype=np.float64, order="C") # initialize average parameters if self.average > 0: self.standard_coef_ = self.coef_ self.standard_intercept_ = self.intercept_ self.average_coef_ = np.zeros(self.coef_.shape, dtype=np.float64, order="C") self.average_intercept_ = np.zeros(self.standard_intercept_.shape, dtype=np.float64, order="C") def _make_validation_split(self, X, y, sample_weight): """Split the dataset between training set and validation set. Parameters ---------- X : {array, sparse matrix}, shape (n_samples, n_features) Training data. y : array, shape (n_samples, ) Target values. sample_weight : array, shape (n_samples, ) Weights applied to individual samples. Returns ------- validation_mask : array, shape (n_samples, ) Equal to 1 on the validation set, 0 on the training set. 
""" n_samples = X.shape[0] validation_mask = np.zeros(n_samples, dtype=np.uint8) if not self.early_stopping: # use the full set for training, with an empty validation set return validation_mask tmp = train_test_split(X, y, np.arange(n_samples), sample_weight, test_size=self.validation_fraction, random_state=self.random_state) X_train, X_val, y_train, y_val = tmp[:4] idx_train, idx_val, sample_weight_train, sample_weight_val = tmp[4:8] if X_train.shape[0] == 0 or X_val.shape[0] == 0: raise ValueError( "Splitting %d samples into a train set and a validation set " "with validation_fraction=%r led to an empty set (%d and %d " "samples). Please either change validation_fraction, increase " "number of samples, or disable early_stopping." % (n_samples, self.validation_fraction, X_train.shape[0], X_val.shape[0])) self._X_val = X_val self._y_val = y_val self._sample_weight_val = sample_weight_val validation_mask[idx_val] = 1 return validation_mask def _delete_validation_split(self): if self.early_stopping: del self._X_val del self._y_val del self._sample_weight_val def _validation_score(self, coef, intercept): """Compute the score on the validation set. Used for early stopping.""" # store attributes old_coefs, old_intercept = self.coef_, self.intercept_ # replace them with current coefficients for scoring self.coef_ = coef.reshape(1, -1) self.intercept_ = np.atleast_1d(intercept) score = self.score(self._X_val, self._y_val, self._sample_weight_val) # restore old attributes self.coef_, self.intercept_ = old_coefs, old_intercept return score def _prepare_fit_binary(est, y, i): """Initialization for fit_binary. Returns y, coef, intercept, average_coef, average_intercept. 
""" y_i = np.ones(y.shape, dtype=np.float64, order="C") y_i[y != est.classes_[i]] = -1.0 average_intercept = 0 average_coef = None if len(est.classes_) == 2: if not est.average: coef = est.coef_.ravel() intercept = est.intercept_[0] else: coef = est.standard_coef_.ravel() intercept = est.standard_intercept_[0] average_coef = est.average_coef_.ravel() average_intercept = est.average_intercept_[0] else: if not est.average: coef = est.coef_[i] intercept = est.intercept_[i] else: coef = est.standard_coef_[i] intercept = est.standard_intercept_[i] average_coef = est.average_coef_[i] average_intercept = est.average_intercept_[i] return y_i, coef, intercept, average_coef, average_intercept def fit_binary(est, i, X, y, alpha, C, learning_rate, max_iter, pos_weight, neg_weight, sample_weight): """Fit a single binary classifier. The i'th class is considered the "positive" class. Parameters ---------- est : Estimator object The estimator to fit i : int Index of the positive class X : numpy array or sparse matrix of shape [n_samples,n_features] Training data y : numpy array of shape [n_samples, ] Target values alpha : float The regularization parameter C : float Maximum step size for passive aggressive learning_rate : string The learning rate. Accepted values are 'constant', 'optimal', 'invscaling', 'pa1' and 'pa2'. 
max_iter : int The maximum number of iterations (epochs) pos_weight : float The weight of the positive class neg_weight : float The weight of the negative class sample_weight : numpy array of shape [n_samples, ] The weight of each sample """ # if average is not true, average_coef, and average_intercept will be # unused y_i, coef, intercept, average_coef, average_intercept = \ _prepare_fit_binary(est, y, i) assert y_i.shape[0] == y.shape[0] == sample_weight.shape[0] dataset, intercept_decay = make_dataset(X, y_i, sample_weight) penalty_type = est._get_penalty_type(est.penalty) learning_rate_type = est._get_learning_rate_type(learning_rate) validation_mask = est._make_validation_split(X, y, sample_weight) # XXX should have random_state_! random_state = check_random_state(est.random_state) # numpy mtrand expects a C long which is a signed 32 bit integer under # Windows seed = random_state.randint(0, np.iinfo(np.int32).max) tol = est.tol if est.tol is not None else -np.inf if not est.average: result = plain_sgd(coef, intercept, est.loss_function_, penalty_type, alpha, C, est.l1_ratio, dataset, validation_mask, est.early_stopping, est, int(est.n_iter_no_change), max_iter, tol, int(est.fit_intercept), int(est.verbose), int(est.shuffle), seed, pos_weight, neg_weight, learning_rate_type, est.eta0, est.power_t, est.t_, intercept_decay) else: standard_coef, standard_intercept, average_coef, average_intercept, \ n_iter_ = average_sgd(coef, intercept, average_coef, average_intercept, est.loss_function_, penalty_type, alpha, C, est.l1_ratio, dataset, validation_mask, est.early_stopping, est, int(est.n_iter_no_change), max_iter, tol, int(est.fit_intercept), int(est.verbose), int(est.shuffle), seed, pos_weight, neg_weight, learning_rate_type, est.eta0, est.power_t, est.t_, intercept_decay, est.average) if len(est.classes_) == 2: est.average_intercept_[0] = average_intercept else: est.average_intercept_[i] = average_intercept result = standard_coef, standard_intercept, n_iter_ 
est._delete_validation_split() return result class BaseSGDClassifier(six.with_metaclass(ABCMeta, BaseSGD, LinearClassifierMixin)): loss_functions = { "hinge": (Hinge, 1.0), "squared_hinge": (SquaredHinge, 1.0), "perceptron": (Hinge, 0.0), "log": (Log, ), "modified_huber": (ModifiedHuber, ), "squared_loss": (SquaredLoss, ), "huber": (Huber, DEFAULT_EPSILON), "epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON), "squared_epsilon_insensitive": (SquaredEpsilonInsensitive, DEFAULT_EPSILON), } @abstractmethod def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15, fit_intercept=True, max_iter=None, tol=None, shuffle=True, verbose=0, epsilon=DEFAULT_EPSILON, n_jobs=None, random_state=None, learning_rate="optimal", eta0=0.0, power_t=0.5, early_stopping=False, validation_fraction=0.1, n_iter_no_change=5, class_weight=None, warm_start=False, average=False, n_iter=None): super(BaseSGDClassifier, self).__init__( loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio, fit_intercept=fit_intercept, max_iter=max_iter, tol=tol, shuffle=shuffle, verbose=verbose, epsilon=epsilon, random_state=random_state, learning_rate=learning_rate, eta0=eta0, power_t=power_t, early_stopping=early_stopping, validation_fraction=validation_fraction, n_iter_no_change=n_iter_no_change, warm_start=warm_start, average=average, n_iter=n_iter) self.class_weight = class_weight self.n_jobs = n_jobs @property @deprecated("Attribute loss_function was deprecated in version 0.19 and " "will be removed in 0.21. 
Use ``loss_function_`` instead") def loss_function(self): return self.loss_function_ def _partial_fit(self, X, y, alpha, C, loss, learning_rate, max_iter, classes, sample_weight, coef_init, intercept_init): X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C", accept_large_sparse=False) n_samples, n_features = X.shape _check_partial_fit_first_call(self, classes) n_classes = self.classes_.shape[0] # Allocate datastructures from input arguments self._expanded_class_weight = compute_class_weight(self.class_weight, self.classes_, y) sample_weight = self._validate_sample_weight(sample_weight, n_samples) if getattr(self, "coef_", None) is None or coef_init is not None: self._allocate_parameter_mem(n_classes, n_features, coef_init, intercept_init) elif n_features != self.coef_.shape[-1]: raise ValueError("Number of features %d does not match previous " "data %d." % (n_features, self.coef_.shape[-1])) self.loss_function_ = self._get_loss_function(loss) if not hasattr(self, "t_"): self.t_ = 1.0 # delegate to concrete training procedure if n_classes > 2: self._fit_multiclass(X, y, alpha=alpha, C=C, learning_rate=learning_rate, sample_weight=sample_weight, max_iter=max_iter) elif n_classes == 2: self._fit_binary(X, y, alpha=alpha, C=C, learning_rate=learning_rate, sample_weight=sample_weight, max_iter=max_iter) else: raise ValueError( "The number of classes has to be greater than one;" " got %d class" % n_classes) return self def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None, intercept_init=None, sample_weight=None): self._validate_params() if hasattr(self, "classes_"): self.classes_ = None X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C", accept_large_sparse=False) n_samples, n_features = X.shape # labels can be encoded as float, int, or string literals # np.unique sorts in asc order; largest class id is positive class classes = np.unique(y) if self.warm_start and hasattr(self, "coef_"): if coef_init is None: coef_init = self.coef_ if 
intercept_init is None: intercept_init = self.intercept_ else: self.coef_ = None self.intercept_ = None if self.average > 0: self.standard_coef_ = self.coef_ self.standard_intercept_ = self.intercept_ self.average_coef_ = None self.average_intercept_ = None # Clear iteration count for multiple call to fit. self.t_ = 1.0 self._partial_fit(X, y, alpha, C, loss, learning_rate, self._max_iter, classes, sample_weight, coef_init, intercept_init) if (self._tol is not None and self._tol > -np.inf and self.n_iter_ == self._max_iter): warnings.warn("Maximum number of iteration reached before " "convergence. Consider increasing max_iter to " "improve the fit.", ConvergenceWarning) return self def _fit_binary(self, X, y, alpha, C, sample_weight, learning_rate, max_iter): """Fit a binary classifier on X and y. """ coef, intercept, n_iter_ = fit_binary(self, 1, X, y, alpha, C, learning_rate, max_iter, self._expanded_class_weight[1], self._expanded_class_weight[0], sample_weight) self.t_ += n_iter_ * X.shape[0] self.n_iter_ = n_iter_ # need to be 2d if self.average > 0: if self.average <= self.t_ - 1: self.coef_ = self.average_coef_.reshape(1, -1) self.intercept_ = self.average_intercept_ else: self.coef_ = self.standard_coef_.reshape(1, -1) self.standard_intercept_ = np.atleast_1d(intercept) self.intercept_ = self.standard_intercept_ else: self.coef_ = coef.reshape(1, -1) # intercept is a float, need to convert it to an array of length 1 self.intercept_ = np.atleast_1d(intercept) def _fit_multiclass(self, X, y, alpha, C, learning_rate, sample_weight, max_iter): """Fit a multi-class classifier by combining binary classifiers Each binary classifier predicts one class versus all others. This strategy is called OVA: One Versus All. """ # Use joblib to fit OvA in parallel. 
result = Parallel(n_jobs=self.n_jobs, prefer="threads", verbose=self.verbose)( delayed(fit_binary)(self, i, X, y, alpha, C, learning_rate, max_iter, self._expanded_class_weight[i], 1., sample_weight) for i in range(len(self.classes_))) # take the maximum of n_iter_ over every binary fit n_iter_ = 0. for i, (_, intercept, n_iter_i) in enumerate(result): self.intercept_[i] = intercept n_iter_ = max(n_iter_, n_iter_i) self.t_ += n_iter_ * X.shape[0] self.n_iter_ = n_iter_ if self.average > 0: if self.average <= self.t_ - 1.0: self.coef_ = self.average_coef_ self.intercept_ = self.average_intercept_ else: self.coef_ = self.standard_coef_ self.standard_intercept_ = np.atleast_1d(self.intercept_) self.intercept_ = self.standard_intercept_ def partial_fit(self, X, y, classes=None, sample_weight=None): """Fit linear model with Stochastic Gradient Descent. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Subset of the training data y : numpy array, shape (n_samples,) Subset of the target values classes : array, shape (n_classes,) Classes across all calls to partial_fit. Can be obtained by via `np.unique(y_all)`, where y_all is the target vector of the entire dataset. This argument is required for the first call to partial_fit and can be omitted in the subsequent calls. Note that y doesn't need to contain all labels in `classes`. sample_weight : array-like, shape (n_samples,), optional Weights applied to individual samples. If not provided, uniform weights are assumed. Returns ------- self : returns an instance of self. """ self._validate_params(for_partial_fit=True) if self.class_weight in ['balanced']: raise ValueError("class_weight '{0}' is not supported for " "partial_fit. In order to use 'balanced' weights," " use compute_class_weight('{0}', classes, y). " "In place of y you can us a large enough sample " "of the full training set target to properly " "estimate the class frequency distributions. 
" "Pass the resulting weights as the class_weight " "parameter.".format(self.class_weight)) return self._partial_fit(X, y, alpha=self.alpha, C=1.0, loss=self.loss, learning_rate=self.learning_rate, max_iter=1, classes=classes, sample_weight=sample_weight, coef_init=None, intercept_init=None) def fit(self, X, y, coef_init=None, intercept_init=None, sample_weight=None): """Fit linear model with Stochastic Gradient Descent. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Training data y : numpy array, shape (n_samples,) Target values coef_init : array, shape (n_classes, n_features) The initial coefficients to warm-start the optimization. intercept_init : array, shape (n_classes,) The initial intercept to warm-start the optimization. sample_weight : array-like, shape (n_samples,), optional Weights applied to individual samples. If not provided, uniform weights are assumed. These weights will be multiplied with class_weight (passed through the constructor) if class_weight is specified Returns ------- self : returns an instance of self. """ return self._fit(X, y, alpha=self.alpha, C=1.0, loss=self.loss, learning_rate=self.learning_rate, coef_init=coef_init, intercept_init=intercept_init, sample_weight=sample_weight) class SGDClassifier(BaseSGDClassifier): """Linear classifiers (SVM, logistic regression, a.o.) with SGD training. This estimator implements regularized linear models with stochastic gradient descent (SGD) learning: the gradient of the loss is estimated each sample at a time and the model is updated along the way with a decreasing strength schedule (aka learning rate). SGD allows minibatch (online/out-of-core) learning, see the partial_fit method. For best results using the default learning rate schedule, the data should have zero mean and unit variance. This implementation works with data represented as dense or sparse arrays of floating point values for the features. 
The model it fits can be controlled with the loss parameter; by default, it fits a linear support vector machine (SVM). The regularizer is a penalty added to the loss function that shrinks model parameters towards the zero vector using either the squared euclidean norm L2 or the absolute norm L1 or a combination of both (Elastic Net). If the parameter update crosses the 0.0 value because of the regularizer, the update is truncated to 0.0 to allow for learning sparse models and achieve online feature selection. Read more in the :ref:`User Guide <sgd>`. Parameters ---------- loss : str, default: 'hinge' The loss function to be used. Defaults to 'hinge', which gives a linear SVM. The possible options are 'hinge', 'log', 'modified_huber', 'squared_hinge', 'perceptron', or a regression loss: 'squared_loss', 'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'. The 'log' loss gives logistic regression, a probabilistic classifier. 'modified_huber' is another smooth loss that brings tolerance to outliers as well as probability estimates. 'squared_hinge' is like hinge but is quadratically penalized. 'perceptron' is the linear loss used by the perceptron algorithm. The other losses are designed for regression but can be useful in classification as well; see SGDRegressor for a description. penalty : str, 'none', 'l2', 'l1', or 'elasticnet' The penalty (aka regularization term) to be used. Defaults to 'l2' which is the standard regularizer for linear SVM models. 'l1' and 'elasticnet' might bring sparsity to the model (feature selection) not achievable with 'l2'. alpha : float Constant that multiplies the regularization term. Defaults to 0.0001 Also used to compute learning_rate when set to 'optimal'. l1_ratio : float The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1. l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1. Defaults to 0.15. fit_intercept : bool Whether the intercept should be estimated or not. 
If False, the data is assumed to be already centered. Defaults to True. max_iter : int, optional The maximum number of passes over the training data (aka epochs). It only impacts the behavior in the ``fit`` method, and not the `partial_fit`. Defaults to 5. Defaults to 1000 from 0.21, or if tol is not None. .. versionadded:: 0.19 tol : float or None, optional The stopping criterion. If it is not None, the iterations will stop when (loss > previous_loss - tol). Defaults to None. Defaults to 1e-3 from 0.21. .. versionadded:: 0.19 shuffle : bool, optional Whether or not the training data should be shuffled after each epoch. Defaults to True. verbose : integer, optional The verbosity level epsilon : float Epsilon in the epsilon-insensitive loss functions; only if `loss` is 'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'. For 'huber', determines the threshold at which it becomes less important to get the prediction exactly right. For epsilon-insensitive, any differences between the current prediction and the correct label are ignored if they are less than this threshold. n_jobs : int or None, optional (default=None) The number of CPUs to use to do the OVA (One Versus All, for multi-class problems) computation. ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. ``-1`` means using all processors. See :term:`Glossary <n_jobs>` for more details. random_state : int, RandomState instance or None, optional (default=None) The seed of the pseudo random number generator to use when shuffling the data. If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. learning_rate : string, optional The learning rate schedule: 'constant': eta = eta0 'optimal': [default] eta = 1.0 / (alpha * (t + t0)) where t0 is chosen by a heuristic proposed by Leon Bottou. 
'invscaling': eta = eta0 / pow(t, power_t) 'adaptive': eta = eta0, as long as the training keeps decreasing. Each time n_iter_no_change consecutive epochs fail to decrease the training loss by tol or fail to increase validation score by tol if early_stopping is True, the current learning rate is divided by 5. eta0 : double The initial learning rate for the 'constant', 'invscaling' or 'adaptive' schedules. The default value is 0.0 as eta0 is not used by the default schedule 'optimal'. power_t : double The exponent for inverse scaling learning rate [default 0.5]. early_stopping : bool, default=False Whether to use early stopping to terminate training when validation score is not improving. If set to True, it will automatically set aside a fraction of training data as validation and terminate training when validation score is not improving by at least tol for n_iter_no_change consecutive epochs. .. versionadded:: 0.20 validation_fraction : float, default=0.1 The proportion of training data to set aside as validation set for early stopping. Must be between 0 and 1. Only used if early_stopping is True. .. versionadded:: 0.20 n_iter_no_change : int, default=5 Number of iterations with no improvement to wait before early stopping. .. versionadded:: 0.20 class_weight : dict, {class_label: weight} or "balanced" or None, optional Preset for the class_weight fit parameter. Weights associated with classes. If not given, all classes are supposed to have weight one. The "balanced" mode uses the values of y to automatically adjust weights inversely proportional to class frequencies in the input data as ``n_samples / (n_classes * np.bincount(y))`` warm_start : bool, optional When set to True, reuse the solution of the previous call to fit as initialization, otherwise, just erase the previous solution. See :term:`the Glossary <warm_start>`. 
Repeatedly calling fit or partial_fit when warm_start is True can result in a different solution than when calling fit a single time because of the way the data is shuffled. If a dynamic learning rate is used, the learning rate is adapted depending on the number of samples already seen. Calling ``fit`` resets this counter, while ``partial_fit`` will result in increasing the existing counter. average : bool or int, optional When set to True, computes the averaged SGD weights and stores the result in the ``coef_`` attribute. If set to an int greater than 1, averaging will begin once the total number of samples seen reaches average. So ``average=10`` will begin averaging after seeing 10 samples. n_iter : int, optional The number of passes over the training data (aka epochs). Defaults to None. Deprecated, will be removed in 0.21. .. versionchanged:: 0.19 Deprecated Attributes ---------- coef_ : array, shape (1, n_features) if n_classes == 2 else (n_classes,\ n_features) Weights assigned to the features. intercept_ : array, shape (1,) if n_classes == 2 else (n_classes,) Constants in decision function. n_iter_ : int The actual number of iterations to reach the stopping criterion. For multiclass fits, it is the maximum over every binary fit. loss_function_ : concrete ``LossFunction`` Examples -------- >>> import numpy as np >>> from sklearn import linear_model >>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]]) >>> Y = np.array([1, 1, 2, 2]) >>> clf = linear_model.SGDClassifier(max_iter=1000) >>> clf.fit(X, Y) ... 
#doctest: +NORMALIZE_WHITESPACE SGDClassifier(alpha=0.0001, average=False, class_weight=None, early_stopping=False, epsilon=0.1, eta0=0.0, fit_intercept=True, l1_ratio=0.15, learning_rate='optimal', loss='hinge', max_iter=1000, n_iter=None, n_iter_no_change=5, n_jobs=None, penalty='l2', power_t=0.5, random_state=None, shuffle=True, tol=None, validation_fraction=0.1, verbose=0, warm_start=False) >>> print(clf.predict([[-0.8, -1]])) [1] See also -------- sklearn.svm.LinearSVC, LogisticRegression, Perceptron """ def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15, fit_intercept=True, max_iter=None, tol=None, shuffle=True, verbose=0, epsilon=DEFAULT_EPSILON, n_jobs=None, random_state=None, learning_rate="optimal", eta0=0.0, power_t=0.5, early_stopping=False, validation_fraction=0.1, n_iter_no_change=5, class_weight=None, warm_start=False, average=False, n_iter=None): super(SGDClassifier, self).__init__( loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio, fit_intercept=fit_intercept, max_iter=max_iter, tol=tol, shuffle=shuffle, verbose=verbose, epsilon=epsilon, n_jobs=n_jobs, random_state=random_state, learning_rate=learning_rate, eta0=eta0, power_t=power_t, early_stopping=early_stopping, validation_fraction=validation_fraction, n_iter_no_change=n_iter_no_change, class_weight=class_weight, warm_start=warm_start, average=average, n_iter=n_iter) def _check_proba(self): if self.loss not in ("log", "modified_huber"): raise AttributeError("probability estimates are not available for" " loss=%r" % self.loss) @property def predict_proba(self): """Probability estimates. This method is only available for log loss and modified Huber loss. Multiclass probability estimates are derived from binary (one-vs.-rest) estimates by simple normalization, as recommended by Zadrozny and Elkan. Binary probability estimates for loss="modified_huber" are given by (clip(decision_function(X), -1, 1) + 1) / 2. 
For other loss functions it is necessary to perform proper probability calibration by wrapping the classifier with :class:`sklearn.calibration.CalibratedClassifierCV` instead. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Returns ------- array, shape (n_samples, n_classes) Returns the probability of the sample for each class in the model, where classes are ordered as they are in `self.classes_`. References ---------- Zadrozny and Elkan, "Transforming classifier scores into multiclass probability estimates", SIGKDD'02, http://www.research.ibm.com/people/z/zadrozny/kdd2002-Transf.pdf The justification for the formula in the loss="modified_huber" case is in the appendix B in: http://jmlr.csail.mit.edu/papers/volume2/zhang02c/zhang02c.pdf """ self._check_proba() return self._predict_proba def _predict_proba(self, X): check_is_fitted(self, "t_") if self.loss == "log": return self._predict_proba_lr(X) elif self.loss == "modified_huber": binary = (len(self.classes_) == 2) scores = self.decision_function(X) if binary: prob2 = np.ones((scores.shape[0], 2)) prob = prob2[:, 1] else: prob = scores np.clip(scores, -1, 1, prob) prob += 1. prob /= 2. if binary: prob2[:, 0] -= prob prob = prob2 else: # the above might assign zero to all classes, which doesn't # normalize neatly; work around this to produce uniform # probabilities prob_sum = prob.sum(axis=1) all_zero = (prob_sum == 0) if np.any(all_zero): prob[all_zero, :] = 1 prob_sum[all_zero] = len(self.classes_) # normalize prob /= prob_sum.reshape((prob.shape[0], -1)) return prob else: raise NotImplementedError("predict_(log_)proba only supported when" " loss='log' or loss='modified_huber' " "(%r given)" % self.loss) @property def predict_log_proba(self): """Log of probability estimates. This method is only available for log loss and modified Huber loss. When loss="modified_huber", probability estimates may be hard zeros and ones, so taking the logarithm is not possible. 
See ``predict_proba`` for details. Parameters ---------- X : array-like, shape (n_samples, n_features) Returns ------- T : array-like, shape (n_samples, n_classes) Returns the log-probability of the sample for each class in the model, where classes are ordered as they are in `self.classes_`. """ self._check_proba() return self._predict_log_proba def _predict_log_proba(self, X): return np.log(self.predict_proba(X)) class BaseSGDRegressor(BaseSGD, RegressorMixin): loss_functions = { "squared_loss": (SquaredLoss, ), "huber": (Huber, DEFAULT_EPSILON), "epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON), "squared_epsilon_insensitive": (SquaredEpsilonInsensitive, DEFAULT_EPSILON), } @abstractmethod def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001, l1_ratio=0.15, fit_intercept=True, max_iter=None, tol=None, shuffle=True, verbose=0, epsilon=DEFAULT_EPSILON, random_state=None, learning_rate="invscaling", eta0=0.01, power_t=0.25, early_stopping=False, validation_fraction=0.1, n_iter_no_change=5, warm_start=False, average=False, n_iter=None): super(BaseSGDRegressor, self).__init__( loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio, fit_intercept=fit_intercept, max_iter=max_iter, tol=tol, shuffle=shuffle, verbose=verbose, epsilon=epsilon, random_state=random_state, learning_rate=learning_rate, eta0=eta0, power_t=power_t, early_stopping=early_stopping, validation_fraction=validation_fraction, n_iter_no_change=n_iter_no_change, warm_start=warm_start, average=average, n_iter=n_iter) def _partial_fit(self, X, y, alpha, C, loss, learning_rate, max_iter, sample_weight, coef_init, intercept_init): X, y = check_X_y(X, y, "csr", copy=False, order='C', dtype=np.float64, accept_large_sparse=False) y = y.astype(np.float64, copy=False) n_samples, n_features = X.shape # Allocate datastructures from input arguments sample_weight = self._validate_sample_weight(sample_weight, n_samples) if getattr(self, "coef_", None) is None: self._allocate_parameter_mem(1, 
n_features, coef_init, intercept_init) elif n_features != self.coef_.shape[-1]: raise ValueError("Number of features %d does not match previous " "data %d." % (n_features, self.coef_.shape[-1])) if self.average > 0 and getattr(self, "average_coef_", None) is None: self.average_coef_ = np.zeros(n_features, dtype=np.float64, order="C") self.average_intercept_ = np.zeros(1, dtype=np.float64, order="C") self._fit_regressor(X, y, alpha, C, loss, learning_rate, sample_weight, max_iter) return self def partial_fit(self, X, y, sample_weight=None): """Fit linear model with Stochastic Gradient Descent. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Subset of training data y : numpy array of shape (n_samples,) Subset of target values sample_weight : array-like, shape (n_samples,), optional Weights applied to individual samples. If not provided, uniform weights are assumed. Returns ------- self : returns an instance of self. """ self._validate_params(for_partial_fit=True) return self._partial_fit(X, y, self.alpha, C=1.0, loss=self.loss, learning_rate=self.learning_rate, max_iter=1, sample_weight=sample_weight, coef_init=None, intercept_init=None) def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None, intercept_init=None, sample_weight=None): self._validate_params() if self.warm_start and getattr(self, "coef_", None) is not None: if coef_init is None: coef_init = self.coef_ if intercept_init is None: intercept_init = self.intercept_ else: self.coef_ = None self.intercept_ = None if self.average > 0: self.standard_intercept_ = self.intercept_ self.standard_coef_ = self.coef_ self.average_coef_ = None self.average_intercept_ = None # Clear iteration count for multiple call to fit. 
self.t_ = 1.0 self._partial_fit(X, y, alpha, C, loss, learning_rate, self._max_iter, sample_weight, coef_init, intercept_init) if (self._tol is not None and self._tol > -np.inf and self.n_iter_ == self._max_iter): warnings.warn("Maximum number of iteration reached before " "convergence. Consider increasing max_iter to " "improve the fit.", ConvergenceWarning) return self def fit(self, X, y, coef_init=None, intercept_init=None, sample_weight=None): """Fit linear model with Stochastic Gradient Descent. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Training data y : numpy array, shape (n_samples,) Target values coef_init : array, shape (n_features,) The initial coefficients to warm-start the optimization. intercept_init : array, shape (1,) The initial intercept to warm-start the optimization. sample_weight : array-like, shape (n_samples,), optional Weights applied to individual samples (1. for unweighted). Returns ------- self : returns an instance of self. """ return self._fit(X, y, alpha=self.alpha, C=1.0, loss=self.loss, learning_rate=self.learning_rate, coef_init=coef_init, intercept_init=intercept_init, sample_weight=sample_weight) def _decision_function(self, X): """Predict using the linear model Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Returns ------- array, shape (n_samples,) Predicted target values per element in X. """ check_is_fitted(self, ["t_", "coef_", "intercept_"], all_or_any=all) X = check_array(X, accept_sparse='csr') scores = safe_sparse_dot(X, self.coef_.T, dense_output=True) + self.intercept_ return scores.ravel() def predict(self, X): """Predict using the linear model Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Returns ------- array, shape (n_samples,) Predicted target values per element in X. 
""" return self._decision_function(X) def _fit_regressor(self, X, y, alpha, C, loss, learning_rate, sample_weight, max_iter): dataset, intercept_decay = make_dataset(X, y, sample_weight) loss_function = self._get_loss_function(loss) penalty_type = self._get_penalty_type(self.penalty) learning_rate_type = self._get_learning_rate_type(learning_rate) if not hasattr(self, "t_"): self.t_ = 1.0 validation_mask = self._make_validation_split(X, y, sample_weight) random_state = check_random_state(self.random_state) # numpy mtrand expects a C long which is a signed 32 bit integer under # Windows seed = random_state.randint(0, np.iinfo(np.int32).max) tol = self._tol if self._tol is not None else -np.inf if self.average > 0: self.standard_coef_, self.standard_intercept_, \ self.average_coef_, self.average_intercept_, self.n_iter_ =\ average_sgd(self.standard_coef_, self.standard_intercept_[0], self.average_coef_, self.average_intercept_[0], loss_function, penalty_type, alpha, C, self.l1_ratio, dataset, validation_mask, self.early_stopping, self, int(self.n_iter_no_change), max_iter, tol, int(self.fit_intercept), int(self.verbose), int(self.shuffle), seed, 1.0, 1.0, learning_rate_type, self.eta0, self.power_t, self.t_, intercept_decay, self.average) self.average_intercept_ = np.atleast_1d(self.average_intercept_) self.standard_intercept_ = np.atleast_1d(self.standard_intercept_) self.t_ += self.n_iter_ * X.shape[0] if self.average <= self.t_ - 1.0: self.coef_ = self.average_coef_ self.intercept_ = self.average_intercept_ else: self.coef_ = self.standard_coef_ self.intercept_ = self.standard_intercept_ else: self.coef_, self.intercept_, self.n_iter_ = \ plain_sgd(self.coef_, self.intercept_[0], loss_function, penalty_type, alpha, C, self.l1_ratio, dataset, validation_mask, self.early_stopping, self, int(self.n_iter_no_change), max_iter, tol, int(self.fit_intercept), int(self.verbose), int(self.shuffle), seed, 1.0, 1.0, learning_rate_type, self.eta0, self.power_t, self.t_, 
intercept_decay) self.t_ += self.n_iter_ * X.shape[0] self.intercept_ = np.atleast_1d(self.intercept_) self._delete_validation_split() class SGDRegressor(BaseSGDRegressor): """Linear model fitted by minimizing a regularized empirical loss with SGD SGD stands for Stochastic Gradient Descent: the gradient of the loss is estimated each sample at a time and the model is updated along the way with a decreasing strength schedule (aka learning rate). The regularizer is a penalty added to the loss function that shrinks model parameters towards the zero vector using either the squared euclidean norm L2 or the absolute norm L1 or a combination of both (Elastic Net). If the parameter update crosses the 0.0 value because of the regularizer, the update is truncated to 0.0 to allow for learning sparse models and achieve online feature selection. This implementation works with data represented as dense numpy arrays of floating point values for the features. Read more in the :ref:`User Guide <sgd>`. Parameters ---------- loss : str, default: 'squared_loss' The loss function to be used. The possible values are 'squared_loss', 'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive' The 'squared_loss' refers to the ordinary least squares fit. 'huber' modifies 'squared_loss' to focus less on getting outliers correct by switching from squared to linear loss past a distance of epsilon. 'epsilon_insensitive' ignores errors less than epsilon and is linear past that; this is the loss function used in SVR. 'squared_epsilon_insensitive' is the same but becomes squared loss past a tolerance of epsilon. penalty : str, 'none', 'l2', 'l1', or 'elasticnet' The penalty (aka regularization term) to be used. Defaults to 'l2' which is the standard regularizer for linear SVM models. 'l1' and 'elasticnet' might bring sparsity to the model (feature selection) not achievable with 'l2'. alpha : float Constant that multiplies the regularization term. 
Defaults to 0.0001 Also used to compute learning_rate when set to 'optimal'. l1_ratio : float The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1. l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1. Defaults to 0.15. fit_intercept : bool Whether the intercept should be estimated or not. If False, the data is assumed to be already centered. Defaults to True. max_iter : int, optional The maximum number of passes over the training data (aka epochs). It only impacts the behavior in the ``fit`` method, and not the `partial_fit`. Defaults to 5. Defaults to 1000 from 0.21, or if tol is not None. .. versionadded:: 0.19 tol : float or None, optional The stopping criterion. If it is not None, the iterations will stop when (loss > previous_loss - tol). Defaults to None. Defaults to 1e-3 from 0.21. .. versionadded:: 0.19 shuffle : bool, optional Whether or not the training data should be shuffled after each epoch. Defaults to True. verbose : integer, optional The verbosity level. epsilon : float Epsilon in the epsilon-insensitive loss functions; only if `loss` is 'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'. For 'huber', determines the threshold at which it becomes less important to get the prediction exactly right. For epsilon-insensitive, any differences between the current prediction and the correct label are ignored if they are less than this threshold. random_state : int, RandomState instance or None, optional (default=None) The seed of the pseudo random number generator to use when shuffling the data. If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. learning_rate : string, optional The learning rate schedule: 'constant': eta = eta0 'optimal': eta = 1.0 / (alpha * (t + t0)) where t0 is chosen by a heuristic proposed by Leon Bottou. 
'invscaling': [default] eta = eta0 / pow(t, power_t) 'adaptive': eta = eta0, as long as the training keeps decreasing. Each time n_iter_no_change consecutive epochs fail to decrease the training loss by tol or fail to increase validation score by tol if early_stopping is True, the current learning rate is divided by 5. eta0 : double The initial learning rate for the 'constant', 'invscaling' or 'adaptive' schedules. The default value is 0.0 as eta0 is not used by the default schedule 'optimal'. power_t : double The exponent for inverse scaling learning rate [default 0.5]. early_stopping : bool, default=False Whether to use early stopping to terminate training when validation score is not improving. If set to True, it will automatically set aside a fraction of training data as validation and terminate training when validation score is not improving by at least tol for n_iter_no_change consecutive epochs. .. versionadded:: 0.20 validation_fraction : float, default=0.1 The proportion of training data to set aside as validation set for early stopping. Must be between 0 and 1. Only used if early_stopping is True. .. versionadded:: 0.20 n_iter_no_change : int, default=5 Number of iterations with no improvement to wait before early stopping. .. versionadded:: 0.20 warm_start : bool, optional When set to True, reuse the solution of the previous call to fit as initialization, otherwise, just erase the previous solution. See :term:`the Glossary <warm_start>`. Repeatedly calling fit or partial_fit when warm_start is True can result in a different solution than when calling fit a single time because of the way the data is shuffled. If a dynamic learning rate is used, the learning rate is adapted depending on the number of samples already seen. Calling ``fit`` resets this counter, while ``partial_fit`` will result in increasing the existing counter. 
average : bool or int, optional When set to True, computes the averaged SGD weights and stores the result in the ``coef_`` attribute. If set to an int greater than 1, averaging will begin once the total number of samples seen reaches average. So ``average=10`` will begin averaging after seeing 10 samples. n_iter : int, optional The number of passes over the training data (aka epochs). Defaults to None. Deprecated, will be removed in 0.21. .. versionchanged:: 0.19 Deprecated Attributes ---------- coef_ : array, shape (n_features,) Weights assigned to the features. intercept_ : array, shape (1,) The intercept term. average_coef_ : array, shape (n_features,) Averaged weights assigned to the features. average_intercept_ : array, shape (1,) The averaged intercept term. n_iter_ : int The actual number of iterations to reach the stopping criterion. Examples -------- >>> import numpy as np >>> from sklearn import linear_model >>> n_samples, n_features = 10, 5 >>> np.random.seed(0) >>> y = np.random.randn(n_samples) >>> X = np.random.randn(n_samples, n_features) >>> clf = linear_model.SGDRegressor(max_iter=1000) >>> clf.fit(X, y) ... 
#doctest: +NORMALIZE_WHITESPACE SGDRegressor(alpha=0.0001, average=False, early_stopping=False, epsilon=0.1, eta0=0.01, fit_intercept=True, l1_ratio=0.15, learning_rate='invscaling', loss='squared_loss', max_iter=1000, n_iter=None, n_iter_no_change=5, penalty='l2', power_t=0.25, random_state=None, shuffle=True, tol=None, validation_fraction=0.1, verbose=0, warm_start=False) See also -------- Ridge, ElasticNet, Lasso, sklearn.svm.SVR """ def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001, l1_ratio=0.15, fit_intercept=True, max_iter=None, tol=None, shuffle=True, verbose=0, epsilon=DEFAULT_EPSILON, random_state=None, learning_rate="invscaling", eta0=0.01, power_t=0.25, early_stopping=False, validation_fraction=0.1, n_iter_no_change=5, warm_start=False, average=False, n_iter=None): super(SGDRegressor, self).__init__( loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio, fit_intercept=fit_intercept, max_iter=max_iter, tol=tol, shuffle=shuffle, verbose=verbose, epsilon=epsilon, random_state=random_state, learning_rate=learning_rate, eta0=eta0, power_t=power_t, early_stopping=early_stopping, validation_fraction=validation_fraction, n_iter_no_change=n_iter_no_change, warm_start=warm_start, average=average, n_iter=n_iter)
unknown
codeparrot/codeparrot-clean
// This file is part of OpenCV project. // It is subject to the license terms in the LICENSE file found in the top-level directory // of this distribution and at http://opencv.org/license.html. #ifndef OPENCV_CORE_BINDINGS_UTILS_HPP #define OPENCV_CORE_BINDINGS_UTILS_HPP #include <opencv2/core/async.hpp> #include <opencv2/core/detail/async_promise.hpp> #include <opencv2/core/utils/logger.hpp> #include <stdexcept> namespace cv { namespace utils { //! @addtogroup core_utils //! @{ CV_EXPORTS_W String dumpInputArray(InputArray argument); CV_EXPORTS_W String dumpInputArrayOfArrays(InputArrayOfArrays argument); CV_EXPORTS_W String dumpInputOutputArray(InputOutputArray argument); CV_EXPORTS_W String dumpInputOutputArrayOfArrays(InputOutputArrayOfArrays argument); CV_WRAP static inline String dumpBool(bool argument) { return (argument) ? String("Bool: True") : String("Bool: False"); } CV_WRAP static inline String dumpInt(int argument) { return cv::format("Int: %d", argument); } CV_WRAP static inline String dumpInt64(int64 argument) { std::ostringstream oss("Int64: ", std::ios::ate); oss << argument; return oss.str(); } CV_WRAP static inline String dumpSizeT(size_t argument) { std::ostringstream oss("size_t: ", std::ios::ate); oss << argument; return oss.str(); } CV_WRAP static inline String dumpFloat(float argument) { return cv::format("Float: %.2f", argument); } CV_WRAP static inline String dumpDouble(double argument) { return cv::format("Double: %.2f", argument); } CV_WRAP static inline String dumpCString(const char* argument) { return cv::format("String: %s", argument); } CV_WRAP static inline String dumpString(const String& argument) { return cv::format("String: %s", argument.c_str()); } CV_WRAP static inline String dumpRect(const Rect& argument) { return format("rect: (x=%d, y=%d, w=%d, h=%d)", argument.x, argument.y, argument.width, argument.height); } CV_WRAP static inline String dumpTermCriteria(const TermCriteria& argument) { return format("term_criteria: 
(type=%d, max_count=%d, epsilon=%lf", argument.type, argument.maxCount, argument.epsilon); } CV_WRAP static inline String dumpRotatedRect(const RotatedRect& argument) { return format("rotated_rect: (c_x=%f, c_y=%f, w=%f, h=%f, a=%f)", argument.center.x, argument.center.y, argument.size.width, argument.size.height, argument.angle); } CV_WRAP static inline String dumpRange(const Range& argument) { if (argument == Range::all()) { return "range: all"; } else { return format("range: (s=%d, e=%d)", argument.start, argument.end); } } CV_EXPORTS_W String dumpVectorOfInt(const std::vector<int>& vec); CV_EXPORTS_W String dumpVectorOfDouble(const std::vector<double>& vec); CV_EXPORTS_W String dumpVectorOfRect(const std::vector<Rect>& vec); //! @cond IGNORED CV_WRAP static inline String testOverloadResolution(int value, const Point& point = Point(42, 24)) { return format("overload (int=%d, point=(x=%d, y=%d))", value, point.x, point.y); } CV_WRAP static inline String testOverloadResolution(const Rect& rect) { return format("overload (rect=(x=%d, y=%d, w=%d, h=%d))", rect.x, rect.y, rect.width, rect.height); } CV_WRAP static inline RotatedRect testRotatedRect(float x, float y, float w, float h, float angle) { return RotatedRect(Point2f(x, y), Size2f(w, h), angle); } CV_WRAP static inline std::vector<RotatedRect> testRotatedRectVector(float x, float y, float w, float h, float angle) { std::vector<RotatedRect> result; for (int i = 0; i < 10; i++) result.push_back(RotatedRect(Point2f(x + i, y + 2 * i), Size2f(w, h), angle + 10 * i)); return result; } CV_WRAP static inline int testOverwriteNativeMethod(int argument) { return argument; } CV_WRAP static inline String testReservedKeywordConversion(int positional_argument, int lambda = 2, int from = 3) { return format("arg=%d, lambda=%d, from=%d", positional_argument, lambda, from); } CV_WRAP static inline void generateVectorOfRect(size_t len, CV_OUT std::vector<Rect>& vec) { vec.resize(len); if (len > 0) { RNG rng(12345); Mat 
tmp(static_cast<int>(len), 1, CV_32SC4); rng.fill(tmp, RNG::UNIFORM, 10, 20); tmp.copyTo(vec); } } CV_WRAP static inline void generateVectorOfInt(size_t len, CV_OUT std::vector<int>& vec) { vec.resize(len); if (len > 0) { RNG rng(554433); Mat tmp(static_cast<int>(len), 1, CV_32SC1); rng.fill(tmp, RNG::UNIFORM, -10, 10); tmp.copyTo(vec); } } CV_WRAP static inline void generateVectorOfMat(size_t len, int rows, int cols, int dtype, CV_OUT std::vector<Mat>& vec) { vec.resize(len); if (len > 0) { RNG rng(65431); for (size_t i = 0; i < len; ++i) { vec[i].create(rows, cols, dtype); rng.fill(vec[i], RNG::UNIFORM, 0, 10); } } } CV_WRAP static inline void testRaiseGeneralException() { throw std::runtime_error("exception text"); } CV_WRAP static inline AsyncArray testAsyncArray(InputArray argument) { AsyncPromise p; p.setValue(argument); return p.getArrayResult(); } CV_WRAP static inline AsyncArray testAsyncException() { AsyncPromise p; try { CV_Error(Error::StsOk, "Test: Generated async error"); } catch (const cv::Exception& e) { p.setException(e); } return p.getArrayResult(); } CV_WRAP static inline String dumpVec2i(const cv::Vec2i value = cv::Vec2i(42, 24)) { return format("Vec2i(%d, %d)", value[0], value[1]); } struct CV_EXPORTS_W_SIMPLE ClassWithKeywordProperties { CV_PROP_RW int lambda; CV_PROP int except; CV_WRAP explicit ClassWithKeywordProperties(int lambda_arg = 24, int except_arg = 42) { lambda = lambda_arg; except = except_arg; } }; struct CV_EXPORTS_W_PARAMS FunctionParams { CV_PROP_RW int lambda = -1; CV_PROP_RW float sigma = 0.0f; FunctionParams& setLambda(int value) CV_NOEXCEPT { lambda = value; return *this; } FunctionParams& setSigma(float value) CV_NOEXCEPT { sigma = value; return *this; } }; CV_WRAP static inline String copyMatAndDumpNamedArguments(InputArray src, OutputArray dst, const FunctionParams& params = FunctionParams()) { src.copyTo(dst); return format("lambda=%d, sigma=%.1f", params.lambda, params.sigma); } namespace nested { CV_WRAP static 
inline bool testEchoBooleanFunction(bool flag) { return flag; } class CV_EXPORTS_W CV_WRAP_AS(ExportClassName) OriginalClassName { public: struct CV_EXPORTS_W_SIMPLE Params { CV_PROP_RW int int_value; CV_PROP_RW float float_value; CV_WRAP explicit Params(int int_param = 123, float float_param = 3.5f) { int_value = int_param; float_value = float_param; } }; explicit OriginalClassName(const OriginalClassName::Params& params = OriginalClassName::Params()) { params_ = params; } CV_WRAP int getIntParam() const { return params_.int_value; } CV_WRAP float getFloatParam() const { return params_.float_value; } CV_WRAP static std::string originalName() { return "OriginalClassName"; } CV_WRAP static Ptr<OriginalClassName> create(const OriginalClassName::Params& params = OriginalClassName::Params()) { return makePtr<OriginalClassName>(params); } private: OriginalClassName::Params params_; }; typedef OriginalClassName::Params OriginalClassName_Params; } // namespace nested //! @endcond IGNORED namespace fs { CV_EXPORTS_W cv::String getCacheDirectoryForDownloads(); } // namespace fs //! @} // core_utils } // namespace cv::utils } // namespaces cv / utils #endif // OPENCV_CORE_BINDINGS_UTILS_HPP
unknown
github
https://github.com/opencv/opencv
modules/core/include/opencv2/core/bindings_utils.hpp
#ifndef JEMALLOC_INTERNAL_PROF_EXTERNS_H #define JEMALLOC_INTERNAL_PROF_EXTERNS_H #include "jemalloc/internal/mutex.h" #include "jemalloc/internal/prof_hook.h" extern bool opt_prof; extern bool opt_prof_active; extern bool opt_prof_thread_active_init; extern size_t opt_lg_prof_sample; /* Mean bytes between samples. */ extern ssize_t opt_lg_prof_interval; /* lg(prof_interval). */ extern bool opt_prof_gdump; /* High-water memory dumping. */ extern bool opt_prof_final; /* Final profile dumping. */ extern bool opt_prof_leak; /* Dump leak summary at exit. */ extern bool opt_prof_leak_error; /* Exit with error code if memory leaked */ extern bool opt_prof_accum; /* Report cumulative bytes. */ extern bool opt_prof_log; /* Turn logging on at boot. */ extern char opt_prof_prefix[ /* Minimize memory bloat for non-prof builds. */ #ifdef JEMALLOC_PROF PATH_MAX + #endif 1]; extern bool opt_prof_unbias; /* For recording recent allocations */ extern ssize_t opt_prof_recent_alloc_max; /* Whether to use thread name provided by the system or by mallctl. */ extern bool opt_prof_sys_thread_name; /* Whether to record per size class counts and request size totals. */ extern bool opt_prof_stats; /* Accessed via prof_active_[gs]et{_unlocked,}(). */ extern bool prof_active_state; /* Accessed via prof_gdump_[gs]et{_unlocked,}(). */ extern bool prof_gdump_val; /* Profile dump interval, measured in bytes allocated. */ extern uint64_t prof_interval; /* * Initialized as opt_lg_prof_sample, and potentially modified during profiling * resets. 
*/ extern size_t lg_prof_sample; extern bool prof_booted; void prof_backtrace_hook_set(prof_backtrace_hook_t hook); prof_backtrace_hook_t prof_backtrace_hook_get(); void prof_dump_hook_set(prof_dump_hook_t hook); prof_dump_hook_t prof_dump_hook_get(); /* Functions only accessed in prof_inlines.h */ prof_tdata_t *prof_tdata_init(tsd_t *tsd); prof_tdata_t *prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata); void prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx); void prof_malloc_sample_object(tsd_t *tsd, const void *ptr, size_t size, size_t usize, prof_tctx_t *tctx); void prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_info_t *prof_info); prof_tctx_t *prof_tctx_create(tsd_t *tsd); void prof_idump(tsdn_t *tsdn); bool prof_mdump(tsd_t *tsd, const char *filename); void prof_gdump(tsdn_t *tsdn); void prof_tdata_cleanup(tsd_t *tsd); bool prof_active_get(tsdn_t *tsdn); bool prof_active_set(tsdn_t *tsdn, bool active); const char *prof_thread_name_get(tsd_t *tsd); int prof_thread_name_set(tsd_t *tsd, const char *thread_name); bool prof_thread_active_get(tsd_t *tsd); bool prof_thread_active_set(tsd_t *tsd, bool active); bool prof_thread_active_init_get(tsdn_t *tsdn); bool prof_thread_active_init_set(tsdn_t *tsdn, bool active_init); bool prof_gdump_get(tsdn_t *tsdn); bool prof_gdump_set(tsdn_t *tsdn, bool active); void prof_boot0(void); void prof_boot1(void); bool prof_boot2(tsd_t *tsd, base_t *base); void prof_prefork0(tsdn_t *tsdn); void prof_prefork1(tsdn_t *tsdn); void prof_postfork_parent(tsdn_t *tsdn); void prof_postfork_child(tsdn_t *tsdn); /* Only accessed by thread event. */ uint64_t prof_sample_new_event_wait(tsd_t *tsd); uint64_t prof_sample_postponed_event_wait(tsd_t *tsd); void prof_sample_event_handler(tsd_t *tsd, uint64_t elapsed); #endif /* JEMALLOC_INTERNAL_PROF_EXTERNS_H */
c
github
https://github.com/redis/redis
deps/jemalloc/include/jemalloc/internal/prof_externs.h
#!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This module is used for version 2 of the Google Data APIs.

"""Provides classes and constants for the XML in the Google Spreadsheets API.

Documentation for the raw XML which these classes represent can be found here:
http://code.google.com/apis/spreadsheets/docs/3.0/reference.html#Elements
"""

__author__ = 'j.s@google.com (Jeff Scudder)'

import atom.core
import gdata.data

# Template for element qualified names in the gs: (spreadsheets) namespace.
GS_TEMPLATE = '{http://schemas.google.com/spreadsheets/2006}%s'
# Namespace for gsx: elements, used for column values in the list feed.
GSX_NAMESPACE = 'http://schemas.google.com/spreadsheets/2006/extended'

# Insertion modes for table data.
INSERT_MODE = 'insert'
OVERWRITE_MODE = 'overwrite'

# Link rel pointing from a spreadsheet entry to its worksheets feed.
WORKSHEETS_REL = 'http://schemas.google.com/spreadsheets/2006#worksheetsfeed'


class Error(Exception):
  """Base exception for errors raised by this module."""


class FieldMissing(Exception):
  """Raised when a record has no field for a requested index or name."""


class HeaderNotSet(Error):
  """The desired column header had no value for the row in the list feed."""


class Cell(atom.core.XmlElement):
  """The gs:cell element.

  A cell in the worksheet. The <gs:cell> element can appear only as a child
  of <atom:entry>.
  """
  _qname = GS_TEMPLATE % 'cell'
  col = 'col'
  input_value = 'inputValue'
  numeric_value = 'numericValue'
  row = 'row'


class ColCount(atom.core.XmlElement):
  """The gs:colCount element.

  Indicates the number of columns in the worksheet, including columns that
  contain only empty cells. The <gs:colCount> element can appear as a child
  of <atom:entry> or <atom:feed>.
  """
  _qname = GS_TEMPLATE % 'colCount'


class Field(atom.core.XmlElement):
  """The gs:field element.

  A field single cell within a record. Contained in an <atom:entry>.
  """
  _qname = GS_TEMPLATE % 'field'
  index = 'index'
  name = 'name'


class Column(Field):
  """The gs:column element."""
  _qname = GS_TEMPLATE % 'column'


class Data(atom.core.XmlElement):
  """The gs:data element.

  A data region of a table. Contained in an <atom:entry> element.
  """
  _qname = GS_TEMPLATE % 'data'
  column = [Column]
  insertion_mode = 'insertionMode'
  num_rows = 'numRows'
  start_row = 'startRow'


class Header(atom.core.XmlElement):
  """The gs:header element.

  Indicates which row is the header row. Contained in an <atom:entry>.
  """
  _qname = GS_TEMPLATE % 'header'
  row = 'row'


class RowCount(atom.core.XmlElement):
  """The gs:rowCount element.

  Indicates the number of total rows in the worksheet, including rows that
  contain only empty cells. The <gs:rowCount> element can appear as a
  child of <atom:entry> or <atom:feed>.
  """
  _qname = GS_TEMPLATE % 'rowCount'


class Worksheet(atom.core.XmlElement):
  """The gs:worksheet element.

  The worksheet where the table lives. Contained in an <atom:entry>.
  """
  _qname = GS_TEMPLATE % 'worksheet'
  name = 'name'


class Spreadsheet(gdata.data.GDEntry):
  """An Atom entry which represents a Google Spreadsheet."""

  def find_worksheets_feed(self):
    """Returns the URL of this spreadsheet's worksheets feed, or None."""
    return self.find_url(WORKSHEETS_REL)

  FindWorksheetsFeed = find_worksheets_feed


class SpreadsheetsFeed(gdata.data.GDFeed):
  """An Atom feed listing a user's Google Spreadsheets."""
  entry = [Spreadsheet]


class WorksheetEntry(gdata.data.GDEntry):
  """An Atom entry representing a single worksheet in a spreadsheet."""
  row_count = RowCount
  col_count = ColCount


class WorksheetsFeed(gdata.data.GDFeed):
  """A feed containing the worksheets in a single spreadsheet."""
  entry = [WorksheetEntry]


class Table(gdata.data.GDEntry):
  """An Atom entry that represents a subsection of a worksheet.

  A table allows you to treat part or all of a worksheet somewhat like a
  table in a database that is, as a set of structured data items. Tables
  don't exist until you explicitly create them before you can use a table
  feed, you have to explicitly define where the table data comes from.
  """
  data = Data
  header = Header
  worksheet = Worksheet

  def get_table_id(self):
    """Returns the last path segment of the entry's Atom id, or None."""
    if self.id.text:
      return self.id.text.split('/')[-1]
    return None

  GetTableId = get_table_id


class TablesFeed(gdata.data.GDFeed):
  """An Atom feed containing the tables defined within a worksheet."""
  entry = [Table]


class Record(gdata.data.GDEntry):
  """An Atom entry representing a single record in a table.

  Note that the order of items in each record is the same as the order of
  columns in the table definition, which may not match the order of
  columns in the GUI.
  """
  field = [Field]

  def value_for_index(self, column_index):
    """Returns the text of the field whose index matches column_index.

    Raises:
      FieldMissing: If no field in this record has the requested index.
    """
    for field in self.field:
      if field.index == column_index:
        return field.text
    raise FieldMissing('There is no field for %s' % column_index)

  ValueForIndex = value_for_index

  def value_for_name(self, name):
    """Returns the text of the field whose name matches name.

    Raises:
      FieldMissing: If no field in this record has the requested name.
    """
    for field in self.field:
      if field.name == name:
        return field.text
    raise FieldMissing('There is no field for %s' % name)

  ValueForName = value_for_name

  def get_record_id(self):
    """Returns the last path segment of the entry's Atom id, or None."""
    if self.id.text:
      return self.id.text.split('/')[-1]
    return None

  # Consistency fix: every other accessor in this module exposes a
  # CamelCase alias (FindWorksheetsFeed, GetTableId, ValueForIndex,
  # ValueForName); get_record_id was the only one missing its alias.
  GetRecordId = get_record_id


class RecordsFeed(gdata.data.GDFeed):
  """An Atom feed containing the individuals records in a table."""
  entry = [Record]


class ListRow(atom.core.XmlElement):
  """A gsx column value within a row.

  The local tag in the _qname is blank and must be set to the column
  name. For example, when adding to a ListEntry, do:
  col_value = ListRow(text='something')
  col_value._qname = col_value._qname % 'mycolumnname'
  """
  _qname = '{http://schemas.google.com/spreadsheets/2006/extended}%s'


class ListEntry(gdata.data.GDEntry):
  """An Atom entry representing a worksheet row in the list feed.

  The values for a particular column can be get and set using
  x.get_value('columnheader') and x.set_value('columnheader', 'value').
  See also the explanation of column names in the ListFeed class.
  """

  def get_value(self, column_name):
    """Returns the displayed text for the desired column in this row.

    The formula or input which generated the displayed value is not accessible
    through the list feed, to see the user's input, use the cells feed.

    If a column is not present in this spreadsheet, or there is no value
    for a column in this row, this method will return None.
    """
    values = self.get_elements(column_name, GSX_NAMESPACE)
    if len(values) == 0:
      return None
    return values[0].text

  def set_value(self, column_name, value):
    """Changes the value of cell in this row under the desired column name.

    Warning: if the cell contained a formula, it will be wiped out by setting
    the value using the list feed since the list feed only works with
    displayed values.

    No client side checking is performed on the column_name, you need to
    ensure that the column_name is the local tag name in the gsx tag for the
    column. For example, the column_name will not contain special characters,
    spaces, uppercase letters, etc.
    """
    # Try to find the column in this row to change an existing value.
    values = self.get_elements(column_name, GSX_NAMESPACE)
    if len(values) > 0:
      values[0].text = value
    else:
      # There is no value in this row for the desired column, so add a new
      # gsx:column_name element.
      new_value = ListRow(text=value)
      new_value._qname = new_value._qname % (column_name,)
      self._other_elements.append(new_value)


class ListsFeed(gdata.data.GDFeed):
  """An Atom feed in which each entry represents a row in a worksheet.

  The first row in the worksheet is used as the column names for the values
  in each row. If a header cell is empty, then a unique column ID is used
  for the gsx element name.

  Caution: The columnNames are case-insensitive. For example, if you see
  a <gsx:e-mail> element in a feed, you can't know whether the column
  heading in the original worksheet was "e-mail" or "E-Mail".

  Note: If two or more columns have the same name, then subsequent columns
  of the same name have _n appended to the columnName. For example, if the
  first column name is "e-mail", followed by columns named "E-Mail" and
  "E-mail", then the columnNames will be gsx:e-mail, gsx:e-mail_2, and
  gsx:e-mail_3 respectively.
  """
  entry = [ListEntry]


class CellEntry(gdata.data.BatchEntry):
  """An Atom entry representing a single cell in a worksheet."""
  cell = Cell


class CellsFeed(gdata.data.BatchFeed):
  """An Atom feed contains one entry per cell in a worksheet.

  The cell feed supports batch operations, you can send multiple cell
  operations in one HTTP request.
  """
  entry = [CellEntry]


# NOTE(review): this is an unimplemented stub. In the upstream gdata client,
# batch cell updates are implemented as a CellsFeed method — confirm the
# intended scope before filling it in. The parameter named `input` shadows
# the builtin; kept as-is for interface compatibility.
def batch_set_cell(row, col, input):
  pass
unknown
codeparrot/codeparrot-clean
#!/usr/bin/env python
#
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Add all generated lint_result.xml files to suppressions.xml"""

import collections
import optparse
import os
import sys

from xml.dom import minidom

# Make the sibling pylib package importable before importing from it.
_BUILD_ANDROID_DIR = os.path.join(os.path.dirname(__file__), '..')
sys.path.append(_BUILD_ANDROID_DIR)

from pylib import constants

_THIS_FILE = os.path.abspath(__file__)
# The suppressions config lives next to this script and is rewritten in place.
_CONFIG_PATH = os.path.join(os.path.dirname(_THIS_FILE), 'suppressions.xml')
# Help text embedded as an XML comment at the top of the regenerated config
# (see _WriteConfigFile). NOTE(review): contains the typos "identifing" and
# "found so for" (presumably "so far"); left untouched here because this is
# runtime output text, not a comment.
_DOC = (
    '\nSTOP! It looks like you want to suppress some lint errors:\n'
    '- Have you tried identifing the offending patch?\n'
    ' Ask the author for a fix and/or revert the patch.\n'
    '- It is preferred to add suppressions in the code instead of\n'
    ' sweeping it under the rug here. See:\n\n'
    ' http://developer.android.com/tools/debugging/improving-w-lint.html\n'
    '\n'
    'Still reading?\n'
    '- You can edit this file manually to suppress an issue\n'
    ' globally if it is not applicable to the project.\n'
    '- You can also automatically add issues found so for in the\n'
    ' build process by running:\n\n'
    ' ' + os.path.relpath(_THIS_FILE, constants.DIR_SOURCE_ROOT) + '\n\n'
    ' which will generate this file (Comments are not preserved).\n'
    ' Note: PRODUCT_DIR will be substituted at run-time with actual\n'
    ' directory path (e.g. out/Debug)\n'
)

# One lint issue: its severity string and the set of file paths on which it
# is suppressed.
_Issue = collections.namedtuple('Issue', ['severity', 'paths'])


def _ParseConfigFile(config_path):
  """Reads suppressions.xml into a dict mapping issue id -> _Issue."""
  print 'Parsing %s' % config_path
  issues_dict = {}
  dom = minidom.parse(config_path)
  for issue in dom.getElementsByTagName('issue'):
    issue_id = issue.attributes['id'].value
    # getAttribute returns '' when the severity attribute is absent.
    severity = issue.getAttribute('severity')
    paths = set(
        [p.attributes['path'].value for p in
         issue.getElementsByTagName('ignore')])
    issues_dict[issue_id] = _Issue(severity, paths)
  return issues_dict


def _ParseAndMergeResultFile(result_path, issues_dict):
  """Merges the issues found in a lint result XML file into issues_dict.

  Mutates issues_dict in place: new ids are added, existing ids get the
  offending file path added to their suppression set (the severity already
  recorded in the config wins over the one in the result file).
  """
  print 'Parsing and merging %s' % result_path
  dom = minidom.parse(result_path)
  for issue in dom.getElementsByTagName('issue'):
    issue_id = issue.attributes['id'].value
    severity = issue.attributes['severity'].value
    path = issue.getElementsByTagName('location')[0].attributes['file'].value
    if issue_id not in issues_dict:
      issues_dict[issue_id] = _Issue(severity, set())
    issues_dict[issue_id].paths.add(path)


def _WriteConfigFile(config_path, issues_dict):
  """Regenerates suppressions.xml from issues_dict (comments are lost)."""
  new_dom = minidom.getDOMImplementation().createDocument(None, 'lint', None)
  top_element = new_dom.documentElement
  # The help text is re-emitted as an XML comment on every rewrite.
  top_element.appendChild(new_dom.createComment(_DOC))
  for issue_id in sorted(issues_dict.keys()):
    severity = issues_dict[issue_id].severity
    paths = issues_dict[issue_id].paths
    issue = new_dom.createElement('issue')
    issue.attributes['id'] = issue_id
    if severity:
      issue.attributes['severity'] = severity
    if severity == 'ignore':
      # A globally-ignored issue needs no per-path <ignore> children.
      print 'Warning: [%s] is suppressed globally.' % issue_id
    else:
      for path in sorted(paths):
        ignore = new_dom.createElement('ignore')
        ignore.attributes['path'] = path
        issue.appendChild(ignore)
    top_element.appendChild(issue)

  with open(config_path, 'w') as f:
    f.write(new_dom.toprettyxml(indent='  ', encoding='utf-8'))
  print 'Updated %s' % config_path


def _Suppress(config_path, result_path):
  """Reads config + one result file, merges them, rewrites the config."""
  issues_dict = _ParseConfigFile(config_path)
  _ParseAndMergeResultFile(result_path, issues_dict)
  _WriteConfigFile(config_path, issues_dict)


def main():
  # Exactly one argument: the lint result XML file to fold into the config.
  parser = optparse.OptionParser(usage='%prog RESULT-FILE')
  _, args = parser.parse_args()

  if len(args) != 1 or not os.path.exists(args[0]):
    parser.error('Must provide RESULT-FILE')

  _Suppress(_CONFIG_PATH, args[0])


if __name__ == '__main__':
  main()
unknown
codeparrot/codeparrot-clean
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Utils for make_zip tests.""" import functools import io import itertools import operator import os import re import string import sys import tempfile import traceback import zipfile import ml_dtypes import numpy as np import tensorflow as tf from google.protobuf import text_format from tensorflow.lite.python import lite from tensorflow.lite.testing import _pywrap_string_util from tensorflow.lite.testing import generate_examples_report as report_lib from tensorflow.lite.tools import flatbuffer_utils from tensorflow.python.framework import convert_to_constants from tensorflow.python.saved_model import signature_constants # pylint: disable=g-import-not-at-top # A map from names to functions which make test cases. _MAKE_TEST_FUNCTIONS_MAP = {} # A decorator to register the make test functions. # Usage: # All the make_*_test should be registered. Example: # @register_make_test_function() # def make_conv_tests(options): # # ... # If a function is decorated by other decorators, it's required to specify the # name explicitly. Example: # @register_make_test_function(name="make_unidirectional_sequence_lstm_tests") # @test_util.enable_control_flow_v2 # def make_unidirectional_sequence_lstm_tests(options): # # ... 
def register_make_test_function(name=None): def decorate(function, name=name): if name is None: name = function.__name__ _MAKE_TEST_FUNCTIONS_MAP[name] = function return decorate def get_test_function(test_function_name): """Get the test function according to the test function name.""" if test_function_name not in _MAKE_TEST_FUNCTIONS_MAP: return None return _MAKE_TEST_FUNCTIONS_MAP[test_function_name] RANDOM_SEED = 342 MAP_TF_TO_NUMPY_TYPE = { tf.float32: np.float32, tf.float16: np.float16, tf.float64: np.float64, tf.complex64: np.complex64, tf.complex128: np.complex128, tf.int32: np.int32, tf.uint32: np.uint32, tf.uint8: np.uint8, tf.int8: np.int8, tf.uint16: np.uint16, tf.int16: np.int16, tf.int64: np.int64, tf.bool: np.bool_, tf.string: np.bytes_, } class ExtraConvertOptions: """Additional options for conversion, besides input, output, shape.""" def __init__(self): # Whether to ignore control dependency nodes. self.drop_control_dependency = False # Allow custom ops in the conversion. self.allow_custom_ops = False # Rnn states that are used to support rnn / lstm cells. self.rnn_states = None # Split the LSTM inputs from 5 inputs to 18 inputs for TFLite. self.split_tflite_lstm_inputs = None # The inference input type passed to TFLiteConvert. self.inference_input_type = None # The inference output type passed to TFLiteConvert. 
self.inference_output_type = None def create_tensor_data(dtype, shape, min_value=-100, max_value=100): """Build tensor data spreading the range [min_value, max_value).""" if dtype in MAP_TF_TO_NUMPY_TYPE: dtype = MAP_TF_TO_NUMPY_TYPE[dtype] if dtype in (tf.float32, tf.float16, tf.float64): value = (max_value - min_value) * np.random.random_sample(shape) + min_value elif dtype in (tf.complex64, tf.complex128): real = (max_value - min_value) * np.random.random_sample(shape) + min_value imag = (max_value - min_value) * np.random.random_sample(shape) + min_value value = real + imag * 1j elif dtype in (tf.uint32, tf.int32, tf.uint8, tf.int8, tf.int64, tf.uint16, tf.int16): value = np.random.randint(min_value, max_value + 1, shape) elif dtype == tf.bool: value = np.random.choice([True, False], size=shape) elif dtype == np.bytes_: # Not the best strings, but they will do for some basic testing. letters = list(string.ascii_uppercase) return np.random.choice(letters, size=shape).astype(dtype) elif dtype == tf.bfloat16: value = (max_value - min_value) * np.random.random_sample(shape) + min_value # There is no bfloat16 type in numpy. Uses ml_dtypes.bfloat16 for Eigen. 
dtype = ml_dtypes.bfloat16 else: raise ValueError("Unsupported dtype: %s" % dtype) return np.dtype(dtype).type(value) if np.isscalar(value) else value.astype( dtype) def create_scalar_data(dtype, min_value=-100, max_value=100): """Build scalar tensor data range from min_value to max_value exclusively.""" if dtype in MAP_TF_TO_NUMPY_TYPE: dtype = MAP_TF_TO_NUMPY_TYPE[dtype] if dtype in (tf.float32, tf.float16, tf.float64): value = (max_value - min_value) * np.random.random() + min_value elif dtype in (tf.int32, tf.uint8, tf.int64, tf.int16): value = np.random.randint(min_value, max_value + 1) elif dtype == tf.bool: value = np.random.choice([True, False]) elif dtype == np.bytes_: l = np.random.randint(1, 6) value = "".join(np.random.choice(list(string.ascii_uppercase), size=l)) elif dtype == tf.bfloat16: value = (max_value - min_value) * np.random.random() + min_value # There is no bfloat16 type in numpy. Uses ml_dtypes.bfloat16 for Eigen. dtype = ml_dtypes.bfloat16 else: raise ValueError("Unsupported dtype: %s" % dtype) return np.array(value, dtype=dtype) def freeze_graph(session, outputs): """Freeze the current graph. Args: session: Tensorflow sessions containing the graph outputs: List of output tensors Returns: The frozen graph_def. """ return convert_to_constants.convert_variables_to_constants( session, session.graph.as_graph_def(), [x.op.name for x in outputs]) def format_result(t): """Convert a tensor to a format that can be used in test specs.""" if t.dtype.kind not in [np.dtype(np.bytes_).kind, np.dtype(np.object_).kind]: # Output 9 digits after the point to ensure the precision is good enough. # bfloat16 promotes the value to string, not float. so we need to # convert it to float explicitly. 
if t.dtype == ml_dtypes.bfloat16: values = ["{:.9f}".format(float(value)) for value in list(t.flatten())] else: values = ["{:.9f}".format(value) for value in list(t.flatten())] return ",".join(values) else: # SerializeAsHexString returns bytes in PY3, so decode if appropriate. return _pywrap_string_util.SerializeAsHexString(t.flatten()).decode("utf-8") def write_examples(fp, examples): """Given a list `examples`, write a text format representation. The file format is csv like with a simple repeated pattern. We would ike to use proto here, but we can't yet due to interfacing with the Android team using this format. Args: fp: File-like object to write to. examples: Example dictionary consisting of keys "inputs" and "outputs" """ def write_tensor(fp, name, x): """Write tensor in file format supported by TFLITE example.""" fp.write("name,%s\n" % name) fp.write("dtype,%s\n" % x.dtype) fp.write("shape," + ",".join(map(str, x.shape)) + "\n") fp.write("values," + format_result(x) + "\n") fp.write("test_cases,%d\n" % len(examples)) for example in examples: fp.write("inputs,%d\n" % len(example["inputs"])) for name, value in example["inputs"].items(): if value is not None: write_tensor(fp, name, value) fp.write("outputs,%d\n" % len(example["outputs"])) for name, value in example["outputs"].items(): write_tensor(fp, name, value) class TextFormatWriter: """Utility class for writing ProtoBuf like messages.""" def __init__(self, fp, name=None, parent=None): self.fp = fp self.indent = parent.indent if parent else 0 self.name = name def __enter__(self): if self.name: self.write(self.name + " {") self.indent += 2 return self def __exit__(self, *exc_info): if self.name: self.indent -= 2 self.write("}") return True def write(self, data): self.fp.write(" " * self.indent + data + "\n") def write_field(self, key, val): self.write(key + ": \"" + val + "\"") def sub_message(self, name): return TextFormatWriter(self.fp, name, self) def write_test_cases(fp, model_name, examples): """Given a 
dictionary of `examples`, write a text format representation. The file format is protocol-buffer-like, even though we don't use proto due to the needs of the Android team. Args: fp: File-like object to write to. model_name: Filename where the model was written to, relative to filename. examples: Example dictionary consisting of keys "inputs" and "outputs" Raises: RuntimeError: Example dictionary does not have input / output names. """ writer = TextFormatWriter(fp) writer.write_field("load_model", os.path.basename(model_name)) for example in examples: inputs = [] for name in example["inputs"].keys(): if name: inputs.append(name) outputs = [] for name in example["outputs"].keys(): if name: outputs.append(name) if not (inputs and outputs): raise RuntimeError("Empty input / output names.") # Reshape message with writer.sub_message("reshape") as reshape: for name, value in example["inputs"].items(): with reshape.sub_message("input") as input_msg: input_msg.write_field("key", name) input_msg.write_field("value", ",".join(map(str, value.shape))) # Invoke message with writer.sub_message("invoke") as invoke: for name, value in example["inputs"].items(): with invoke.sub_message("input") as input_msg: input_msg.write_field("key", name) input_msg.write_field("value", format_result(value)) # Expectations for name, value in example["outputs"].items(): with invoke.sub_message("output") as output_msg: output_msg.write_field("key", name) output_msg.write_field("value", format_result(value)) with invoke.sub_message("output_shape") as output_shape: output_shape.write_field("key", name) output_shape.write_field("value", ",".join([str(dim) for dim in value.shape])) def get_input_shapes_map(input_tensors): """Gets a map of input names to shapes. Args: input_tensors: List of input tensor tuples `(name, shape, type)`. Returns: {string : list of integers}. 
""" input_arrays = [tensor[0] for tensor in input_tensors] input_shapes_list = [] for _, shape, _ in input_tensors: dims = None if shape: dims = [dim.value for dim in shape.dims] input_shapes_list.append(dims) input_shapes = { name: shape for name, shape in zip(input_arrays, input_shapes_list) if shape } return input_shapes def _normalize_input_name(input_name): """Remove :i suffix from input tensor names.""" return input_name.split(":")[0] def _normalize_output_name(output_name): """Remove :0 suffix from output tensor names.""" return output_name.split(":")[0] if output_name.endswith( ":0") else output_name def _get_tensor_info(tensors, default_name_prefix, normalize_func): """Get the list of tensor name and info.""" tensor_names = [] tensor_info_map = {} for idx, tensor in enumerate(tensors): if not tensor.name: tensor.name = default_name_prefix + str(idx) tensor_info = tf.compat.v1.saved_model.utils.build_tensor_info(tensor) tensor_name = normalize_func(tensor.name) tensor_info_map[tensor_name] = tensor_info tensor_names.append(tensor_name) return tensor_names, tensor_info_map # How many test cases we may have in a zip file. Too many test cases will # slow down the test data generation process. _MAX_TESTS_PER_ZIP = 500 def make_zip_of_tests(options, test_parameters, make_graph, make_test_inputs, extra_convert_options=ExtraConvertOptions(), use_frozen_graph=False, expected_tf_failures=0): """Helper to make a zip file of a bunch of TensorFlow models. This does a cartesian product of the dictionary of test_parameters and calls make_graph() for each item in the cartesian product set. If the graph is built successfully, then make_test_inputs() is called to build expected input/output value pairs. The model is then converted to tflite, and the examples are serialized with the tflite model into a zip file (2 files per item in the cartesian product set). Args: options: An Options instance. test_parameters: Dictionary mapping to lists for each parameter. e.g. 
`{"strides": [[1,3,3,1], [1,2,2,1]], "foo": [1.2, 1.3]}` make_graph: function that takes current parameters and returns tuple `[input1, input2, ...], [output1, output2, ...]` make_test_inputs: function taking `curr_params`, `session`, `input_tensors`, `output_tensors` and returns tuple `(input_values, output_values)`. extra_convert_options: Additional convert options. use_frozen_graph: Whether or not freeze graph before convertion. expected_tf_failures: Number of times tensorflow is expected to fail in executing the input graphs. In some cases it is OK for TensorFlow to fail because the one or more combination of parameters is invalid. Raises: RuntimeError: if there are converter errors that can't be ignored. """ zip_path = os.path.join(options.output_path, options.zip_to_output) parameter_count = 0 for parameters in test_parameters: parameter_count += functools.reduce( operator.mul, [len(values) for values in parameters.values()]) all_parameter_count = parameter_count if options.multi_gen_state: all_parameter_count += options.multi_gen_state.parameter_count if not options.no_tests_limit and all_parameter_count > _MAX_TESTS_PER_ZIP: raise RuntimeError( "Too many parameter combinations for generating '%s'.\n" "There are at least %d combinations while the upper limit is %d.\n" "Having too many combinations will slow down the tests.\n" "Please consider splitting the test into multiple functions.\n" % (zip_path, all_parameter_count, _MAX_TESTS_PER_ZIP)) if options.multi_gen_state: options.multi_gen_state.parameter_count = all_parameter_count # TODO(aselle): Make this allow multiple inputs outputs. if options.multi_gen_state: archive = options.multi_gen_state.archive else: archive = zipfile.PyZipFile(zip_path, "w") zip_manifest = [] convert_report = [] converter_errors = 0 processed_labels = set() if options.make_tf_ptq_tests: # For cases with fully_quantize is True, also generates a case with # fully_quantize is False. Marks these cases as suitable for PTQ tests. 
parameter_count = 0 for parameters in test_parameters: if True in parameters.get("fully_quantize", []): parameters.update({"fully_quantize": [True, False], "tf_ptq": [True]}) # TODO(b/199054047): Support 16x8 quantization in TF Quantization. parameters.update({"quant_16x8": [False]}) parameter_count += functools.reduce( operator.mul, [len(values) for values in parameters.values()]) if options.make_edgetpu_tests: extra_convert_options.inference_input_type = tf.uint8 extra_convert_options.inference_output_type = tf.uint8 # Only count parameters when fully_quantize is True. parameter_count = 0 for parameters in test_parameters: if True in parameters.get("fully_quantize", []) and False in parameters.get( "quant_16x8", [False]): parameter_count += functools.reduce(operator.mul, [ len(values) for key, values in parameters.items() if key != "fully_quantize" and key != "quant_16x8" ]) label_base_path = zip_path if options.multi_gen_state: label_base_path = options.multi_gen_state.label_base_path i = 1 for parameters in test_parameters: keys = parameters.keys() for curr in itertools.product(*parameters.values()): label = label_base_path.replace(".zip", "_") + (",".join( "%s=%r" % z for z in sorted(zip(keys, curr))).replace(" ", "")) if label[0] == "/": label = label[1:] zip_path_label = label if len(os.path.basename(zip_path_label)) > 245: zip_path_label = label_base_path.replace(".zip", "_") + str(i) i += 1 if label in processed_labels: # Do not populate data for the same label more than once. It will cause # errors when unzipping. continue processed_labels.add(label) param_dict = dict(zip(keys, curr)) if options.make_tf_ptq_tests and not param_dict.get("tf_ptq", False): continue if options.make_edgetpu_tests and (not param_dict.get( "fully_quantize", False) or param_dict.get("quant_16x8", False)): continue def generate_inputs_outputs(tflite_model_binary, min_value=0, max_value=255): """Generate input values and output values of the given tflite model. 
Args: tflite_model_binary: A serialized flatbuffer as a string. min_value: min value for the input tensor. max_value: max value for the input tensor. Returns: (input_values, output_values): Maps of input values and output values built. """ interpreter = lite.Interpreter(model_content=tflite_model_binary) interpreter.allocate_tensors() input_details = interpreter.get_input_details() input_values = {} for input_detail in input_details: input_value = create_tensor_data( input_detail["dtype"], input_detail["shape"], min_value=min_value, max_value=max_value) interpreter.set_tensor(input_detail["index"], input_value) input_values.update( {_normalize_input_name(input_detail["name"]): input_value}) interpreter.invoke() output_details = interpreter.get_output_details() output_values = {} for output_detail in output_details: output_values.update({ _normalize_output_name(output_detail["name"]): interpreter.get_tensor(output_detail["index"]) }) return input_values, output_values def build_example(label, param_dict_real, zip_path_label): """Build the model with parameter values set in param_dict_real. Args: label: Label of the model param_dict_real: Parameter dictionary (arguments to the factories make_graph and make_test_inputs) zip_path_label: Filename in the zip Returns: (tflite_model_binary, report) where tflite_model_binary is the serialized flatbuffer as a string and report is a dictionary with keys `tflite_converter_log` (log of conversion), `tf_log` (log of tf conversion), `converter` (a string of success status of the conversion), `tf` (a string success status of the conversion). 
""" np.random.seed(RANDOM_SEED) report = { "tflite_converter": report_lib.NOTRUN, "tf": report_lib.FAILED } # Build graph report["tf_log"] = "" report["tflite_converter_log"] = "" tf.compat.v1.reset_default_graph() with tf.Graph().as_default(): with tf.device("/cpu:0"): try: inputs, outputs = make_graph(param_dict_real) inputs = [x for x in inputs if x is not None] except (tf.errors.UnimplementedError, tf.errors.InvalidArgumentError, ValueError): report["tf_log"] += traceback.format_exc() return None, report sess = tf.compat.v1.Session() try: baseline_inputs, baseline_outputs = ( make_test_inputs(param_dict_real, sess, inputs, outputs)) baseline_inputs = [x for x in baseline_inputs if x is not None] # Converts baseline inputs/outputs to maps. The signature input and # output names are set to be the same as the tensor names. input_names = [_normalize_input_name(x.name) for x in inputs] output_names = [_normalize_output_name(x.name) for x in outputs] baseline_input_map = dict(zip(input_names, baseline_inputs)) baseline_output_map = dict(zip(output_names, baseline_outputs)) except (tf.errors.UnimplementedError, tf.errors.InvalidArgumentError, ValueError): report["tf_log"] += traceback.format_exc() return None, report report["tflite_converter"] = report_lib.FAILED report["tf"] = report_lib.SUCCESS # Builds a saved model with the default signature key. 
input_names, tensor_info_inputs = _get_tensor_info( inputs, "input_", _normalize_input_name) output_tensors, tensor_info_outputs = _get_tensor_info( outputs, "output_", _normalize_output_name) input_tensors = [ (name, t.shape, t.dtype) for name, t in zip(input_names, inputs) ] inference_signature = ( tf.compat.v1.saved_model.signature_def_utils.build_signature_def( inputs=tensor_info_inputs, outputs=tensor_info_outputs, method_name="op_test")) saved_model_dir = tempfile.mkdtemp("op_test") saved_model_tags = [tf.saved_model.SERVING] signature_key = signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY builder = tf.compat.v1.saved_model.builder.SavedModelBuilder( saved_model_dir) builder.add_meta_graph_and_variables( sess, saved_model_tags, signature_def_map={ signature_key: inference_signature, }, strip_default_attrs=True) builder.save(as_text=False) # pylint: disable=g-long-ternary graph_def = freeze_graph( sess, tf.compat.v1.global_variables() + inputs + outputs) if use_frozen_graph else sess.graph_def if "split_tflite_lstm_inputs" in param_dict_real: extra_convert_options.split_tflite_lstm_inputs = param_dict_real[ "split_tflite_lstm_inputs"] tflite_model_binary, converter_log = options.tflite_convert_function( options, saved_model_dir, input_tensors, output_tensors, extra_convert_options=extra_convert_options, test_params=param_dict_real) report["tflite_converter"] = ( report_lib.SUCCESS if tflite_model_binary is not None else report_lib.FAILED) report["tflite_converter_log"] = converter_log if options.save_graphdefs: zipinfo = zipfile.ZipInfo(zip_path_label + ".pbtxt") archive.writestr(zipinfo, text_format.MessageToString(graph_def), zipfile.ZIP_DEFLATED) if tflite_model_binary: if options.make_edgetpu_tests: # Set proper min max values according to input dtype. 
baseline_input_map, baseline_output_map = generate_inputs_outputs( tflite_model_binary, min_value=0, max_value=255) zipinfo = zipfile.ZipInfo(zip_path_label + ".bin") if sys.byteorder == "big": tflite_model_binary = flatbuffer_utils.byte_swap_tflite_buffer( tflite_model_binary, "big", "little" ) archive.writestr(zipinfo, tflite_model_binary, zipfile.ZIP_DEFLATED) example = { "inputs": baseline_input_map, "outputs": baseline_output_map } example_fp = io.StringIO() write_examples(example_fp, [example]) zipinfo = zipfile.ZipInfo(zip_path_label + ".inputs") archive.writestr(zipinfo, example_fp.getvalue(), zipfile.ZIP_DEFLATED) example_fp2 = io.StringIO() write_test_cases(example_fp2, zip_path_label + ".bin", [example]) zipinfo = zipfile.ZipInfo(zip_path_label + "_tests.txt") archive.writestr(zipinfo, example_fp2.getvalue(), zipfile.ZIP_DEFLATED) zip_manifest_label = zip_path_label + " " + label if zip_path_label == label: zip_manifest_label = zip_path_label zip_manifest.append(zip_manifest_label + "\n") return tflite_model_binary, report _, report = build_example(label, param_dict, zip_path_label) if report["tflite_converter"] == report_lib.FAILED: ignore_error = False if not options.known_bugs_are_errors: for pattern, bug_number in options.known_bugs.items(): if re.search(pattern, label): print("Ignored converter error due to bug %s" % bug_number) ignore_error = True if not ignore_error: converter_errors += 1 print("-----------------\nconverter error!\n%s\n-----------------\n" % report["tflite_converter_log"]) convert_report.append((param_dict, report)) if not options.no_conversion_report: report_io = io.StringIO() report_lib.make_report_table(report_io, zip_path, convert_report) if options.multi_gen_state: zipinfo = zipfile.ZipInfo("report_" + options.multi_gen_state.test_name + ".html") archive.writestr(zipinfo, report_io.getvalue()) else: zipinfo = zipfile.ZipInfo("report.html") archive.writestr(zipinfo, report_io.getvalue()) if options.multi_gen_state: 
options.multi_gen_state.zip_manifest.extend(zip_manifest) else: zipinfo = zipfile.ZipInfo("manifest.txt") archive.writestr(zipinfo, "".join(zip_manifest), zipfile.ZIP_DEFLATED) # Log statistics of what succeeded total_conversions = len(convert_report) tf_success = sum( 1 for x in convert_report if x[1]["tf"] == report_lib.SUCCESS) converter_success = sum(1 for x in convert_report if x[1]["tflite_converter"] == report_lib.SUCCESS) percent = 0 if tf_success > 0: percent = float(converter_success) / float(tf_success) * 100. tf.compat.v1.logging.info( ("Archive %s Considered %d graphs, %d TF evaluated graphs " " and %d converted graphs (%.1f%%"), zip_path, total_conversions, tf_success, converter_success, percent) tf_failures = parameter_count - tf_success if tf_failures / parameter_count > 0.8: raise RuntimeError(("Test for '%s' is not very useful. " "TensorFlow fails in %d percent of the cases.") % (zip_path, int(100 * tf_failures / parameter_count))) if tf_failures != expected_tf_failures and not (options.make_edgetpu_tests or options.make_tf_ptq_tests): raise RuntimeError(("Expected TF to fail %d times while generating '%s', " "but that happened %d times") % (expected_tf_failures, zip_path, tf_failures)) if not options.ignore_converter_errors and converter_errors > 0: raise RuntimeError("Found %d errors while generating models" % converter_errors)
python
github
https://github.com/tensorflow/tensorflow
tensorflow/lite/testing/zip_test_utils.py
# encoding: UTF-8

'''
vn.femas gateway adapter.

Femas currently only covers futures (CFFEX at the moment), so vtSymbol
uses the plain contract symbol directly.
'''

import os
import json

from vnpy.api.femas import MdApi, TdApi, defineDict
from vnpy.trader.vtFunction import getTempPath, getJsonPath
from vnpy.trader.vtGateway import *

# Mappings between VT constants and Femas (CTP-style) constants.

# Price type mapping
priceTypeMap = {}
priceTypeMap[PRICETYPE_LIMITPRICE] = defineDict["USTP_FTDC_OPT_LimitPrice"]
priceTypeMap[PRICETYPE_MARKETPRICE] = defineDict["USTP_FTDC_OPT_AnyPrice"]
priceTypeMapReverse = {v: k for k, v in priceTypeMap.items()}

# Direction mapping
directionMap = {}
directionMap[DIRECTION_LONG] = defineDict['USTP_FTDC_D_Buy']
directionMap[DIRECTION_SHORT] = defineDict['USTP_FTDC_D_Sell']
directionMapReverse = {v: k for k, v in directionMap.items()}

# Offset (open/close) mapping
offsetMap = {}
offsetMap[OFFSET_OPEN] = defineDict['USTP_FTDC_OF_Open']
offsetMap[OFFSET_CLOSE] = defineDict['USTP_FTDC_OF_Close']
offsetMap[OFFSET_CLOSETODAY] = defineDict['USTP_FTDC_OF_CloseToday']
offsetMap[OFFSET_CLOSEYESTERDAY] = defineDict['USTP_FTDC_OF_CloseYesterday']
offsetMapReverse = {v: k for k, v in offsetMap.items()}

# Exchange mapping (Femas takes plain exchange ID strings)
exchangeMap = {}
exchangeMap[EXCHANGE_CFFEX] = 'CFFEX'
exchangeMap[EXCHANGE_SHFE] = 'SHFE'
exchangeMap[EXCHANGE_CZCE] = 'CZCE'
exchangeMap[EXCHANGE_DCE] = 'DCE'
exchangeMap[EXCHANGE_UNKNOWN] = ''
exchangeMapReverse = {v: k for k, v in exchangeMap.items()}

# Position direction mapping
posiDirectionMap = {}
posiDirectionMap[DIRECTION_LONG] = defineDict["USTP_FTDC_D_Buy"]
posiDirectionMap[DIRECTION_SHORT] = defineDict["USTP_FTDC_D_Sell"]
posiDirectionMapReverse = {v: k for k, v in posiDirectionMap.items()}


########################################################################
class FemasGateway(VtGateway):
    """Femas gateway."""

    #----------------------------------------------------------------------
    def __init__(self, eventEngine, gatewayName='FEMAS'):
        """Constructor"""
        super(FemasGateway, self).__init__(eventEngine, gatewayName)

        self.mdApi = FemasMdApi(self)     # market data API
        self.tdApi = FemasTdApi(self)     # trading API

        self.mdConnected = False          # md API connected, True after login
        self.tdConnected = False          # td API connected, True after login

        self.qryEnabled = False           # whether the periodic query loop runs

        self.fileName = self.gatewayName + '_connect.json'
        self.filePath = getJsonPath(self.fileName, __file__)

    #----------------------------------------------------------------------
    def connect(self):
        """Connect to both servers using the JSON settings file."""
        # Load the JSON settings file.
        try:
            # Fix: use the builtin open() (py2 ``file()`` is gone in py3)
            # and make sure the handle is closed.
            f = open(self.filePath)
        except IOError:
            log = VtLogData()
            log.gatewayName = self.gatewayName
            log.logContent = u'读取连接配置出错,请检查'
            self.onLog(log)
            return

        try:
            setting = json.load(f)
        finally:
            f.close()

        try:
            userID = str(setting['userID'])
            password = str(setting['password'])
            brokerID = str(setting['brokerID'])
            tdAddress = str(setting['tdAddress'])
            mdAddress = str(setting['mdAddress'])
        except KeyError:
            log = VtLogData()
            log.gatewayName = self.gatewayName
            log.logContent = u'连接配置缺少字段,请检查'
            self.onLog(log)
            return

        # Create and connect the market data / trading interfaces.
        self.mdApi.connect(userID, password, brokerID, mdAddress)
        self.tdApi.connect(userID, password, brokerID, tdAddress)

        # Initialize and start the periodic queries.
        self.initQuery()

    #----------------------------------------------------------------------
    def subscribe(self, subscribeReq):
        """Subscribe to market data."""
        self.mdApi.subscribe(subscribeReq)

    #----------------------------------------------------------------------
    def sendOrder(self, orderReq):
        """Send an order."""
        return self.tdApi.sendOrder(orderReq)

    #----------------------------------------------------------------------
    def cancelOrder(self, cancelOrderReq):
        """Cancel an order."""
        self.tdApi.cancelOrder(cancelOrderReq)

    #----------------------------------------------------------------------
    def qryAccount(self):
        """Query account funds."""
        self.tdApi.qryAccount()

    #----------------------------------------------------------------------
    def qryPosition(self):
        """Query positions."""
        self.tdApi.qryPosition()

    #----------------------------------------------------------------------
    def close(self):
        """Shut down both API connections."""
        if self.mdConnected:
            self.mdApi.close()
        if self.tdConnected:
            self.tdApi.close()

    #----------------------------------------------------------------------
    def initQuery(self):
        """Initialize the periodic query loop."""
        if self.qryEnabled:
            # Query functions cycled through by the timer.
            self.qryFunctionList = [self.qryAccount, self.qryPosition]

            self.qryCount = 0           # countdown toward the trigger
            self.qryTrigger = 2         # fire a query every N timer events
            self.qryNextFunction = 0    # index of the next query function

            self.startQuery()

    #----------------------------------------------------------------------
    def query(self, event):
        """Timer-driven query dispatcher registered on the event engine."""
        self.qryCount += 1

        if self.qryCount > self.qryTrigger:
            # Reset the countdown.
            self.qryCount = 0

            # Run the next query function.
            function = self.qryFunctionList[self.qryNextFunction]
            function()

            # Advance the index, wrapping at the end of the list.
            self.qryNextFunction += 1
            if self.qryNextFunction == len(self.qryFunctionList):
                self.qryNextFunction = 0

    #----------------------------------------------------------------------
    def startQuery(self):
        """Register the query dispatcher on the timer event."""
        self.eventEngine.register(EVENT_TIMER, self.query)

    #----------------------------------------------------------------------
    def setQryEnabled(self, qryEnabled):
        """Enable/disable the periodic query loop."""
        self.qryEnabled = qryEnabled


########################################################################
class FemasMdApi(MdApi):
    """Femas market data API implementation."""

    #----------------------------------------------------------------------
    def __init__(self, gateway):
        """Constructor"""
        super(FemasMdApi, self).__init__()

        self.gateway = gateway                  # gateway object
        self.gatewayName = gateway.gatewayName  # gateway name

        self.reqID = EMPTY_INT                  # request sequence number

        self.connectionStatus = False           # connected to the front server
        self.loginStatus = False                # logged in

        self.subscribedSymbols = set()          # subscribed contract requests

        self.userID = EMPTY_STRING              # account
        self.password = EMPTY_STRING            # password
        self.brokerID = EMPTY_STRING            # broker ID
        self.address = EMPTY_STRING             # server address

    #----------------------------------------------------------------------
    def onFrontConnected(self):
        """Front server connected."""
        self.connectionStatus = True

        log = VtLogData()
        log.gatewayName = self.gatewayName
        log.logContent = u'行情服务器连接成功'
        self.gateway.onLog(log)
        self.login()

    #----------------------------------------------------------------------
    def onFrontDisconnected(self, n):
        """Front server disconnected."""
        self.connectionStatus = False
        self.loginStatus = False
        self.gateway.mdConnected = False

        log = VtLogData()
        log.gatewayName = self.gatewayName
        log.logContent = u'行情服务器连接断开'
        self.gateway.onLog(log)

    #----------------------------------------------------------------------
    def onHeartBeatWarning(self, n):
        """Heartbeat warning."""
        # Fires frequently and carries no actionable information; ignored.
        pass

    #----------------------------------------------------------------------
    def onRspError(self, error, n, last):
        """Error callback."""
        err = VtErrorData()
        err.gatewayName = self.gatewayName
        err.errorID = error['ErrorID']
        err.errorMsg = error['ErrorMsg'].decode('gbk')
        self.gateway.onError(err)

    #----------------------------------------------------------------------
    def onRspUserLogin(self, data, error, n, last):
        """Login callback."""
        # On success, push a log message and re-subscribe.
        if error['ErrorID'] == 0:
            self.loginStatus = True
            self.gateway.mdConnected = True

            log = VtLogData()
            log.gatewayName = self.gatewayName
            log.logContent = u'行情服务器登录完成'
            self.gateway.onLog(log)

            # Re-subscribe the contracts subscribed before the reconnect.
            for subscribeReq in self.subscribedSymbols:
                self.subscribe(subscribeReq)
        # Otherwise push the error.
        else:
            err = VtErrorData()
            err.gatewayName = self.gatewayName
            err.errorID = error['ErrorID']
            err.errorMsg = error['ErrorMsg'].decode('gbk')
            self.gateway.onError(err)

    #----------------------------------------------------------------------
    def onRspUserLogout(self, data, error, n, last):
        """Logout callback."""
        if error['ErrorID'] == 0:
            self.loginStatus = False
            # Fix: this is the market data API, so clear mdConnected
            # (the original erroneously cleared tdConnected).
            self.gateway.mdConnected = False

            log = VtLogData()
            log.gatewayName = self.gatewayName
            log.logContent = u'行情服务器登出完成'
            self.gateway.onLog(log)
        else:
            err = VtErrorData()
            err.gatewayName = self.gatewayName
            err.errorID = error['ErrorID']
            err.errorMsg = error['ErrorMsg'].decode('gbk')
            self.gateway.onError(err)

    #----------------------------------------------------------------------
    def onRspSubMarketData(self, data, error, n, last):
        """Subscribe callback; subscription errors are deliberately ignored."""
        pass

    #----------------------------------------------------------------------
    def onRspUnSubMarketData(self, data, error, n, last):
        """Unsubscribe callback; ignored as above."""
        pass

    #----------------------------------------------------------------------
    def onRspSubscribeTopic(self, data, error, n, last):
        """Topic subscribe callback; ignored as above."""
        pass

    #----------------------------------------------------------------------
    def onRspQryTopic(self, data, error, n, last):
        """Topic query callback; ignored as above."""
        pass

    #----------------------------------------------------------------------
    def onRtnDepthMarketData(self, data):
        """Market data tick push."""
        tick = VtTickData()
        tick.gatewayName = self.gatewayName

        tick.symbol = data['InstrumentID']
        tick.vtSymbol = tick.symbol

        tick.lastPrice = data['LastPrice']
        tick.volume = data['Volume']
        tick.openInterest = data['OpenInterest']
        # Millisecond field reduced to tenths of a second; use explicit
        # floor division so the result stays an int on py2 and py3 alike.
        tick.time = '.'.join([data['UpdateTime'],
                              str(data['UpdateMillisec'] // 100)])
        tick.date = data['TradingDay']

        tick.openPrice = data['OpenPrice']
        tick.highPrice = data['HighestPrice']
        tick.lowPrice = data['LowestPrice']
        tick.preClosePrice = data['PreClosePrice']

        tick.upperLimit = data['UpperLimitPrice']
        tick.lowerLimit = data['LowerLimitPrice']

        # CTP-style feed provides one level of depth only.
        tick.bidPrice1 = data['BidPrice1']
        tick.bidVolume1 = data['BidVolume1']
        tick.askPrice1 = data['AskPrice1']
        tick.askVolume1 = data['AskVolume1']

        self.gateway.onTick(tick)

    #----------------------------------------------------------------------
    def connect(self, userID, password, brokerID, address):
        """Initialize the connection."""
        self.userID = userID        # account
        self.password = password    # password
        self.brokerID = brokerID    # broker ID
        self.address = address      # server address

        # If no front connection exists yet, establish one.
        if not self.connectionStatus:
            # The path passed in is where the API stores its .con files.
            path = getTempPath(self.gatewayName + '_')
            self.createFtdcMdApi(path)

            # Subscribe topics.
            self.subscribeMarketDataTopic(100, 2)

            # Register the front address; onFrontConnected fires on success.
            self.registerFront(self.address)

            self.init()
        # Already connected but not logged in: just log in.
        else:
            if not self.loginStatus:
                self.login()

    #----------------------------------------------------------------------
    def subscribe(self, subscribeReq):
        """Subscribe to a contract.

        If called before login, the request is remembered and replayed
        automatically once the login completes.
        """
        if self.loginStatus:
            self.subMarketData(subscribeReq.symbol)
        self.subscribedSymbols.add(subscribeReq)

    #----------------------------------------------------------------------
    def login(self):
        """Log in (only when the credentials have been filled in)."""
        if self.userID and self.password and self.brokerID:
            req = {}
            req['UserID'] = self.userID
            req['Password'] = self.password
            req['BrokerID'] = self.brokerID

            self.reqID += 1
            self.reqUserLogin(req, self.reqID)

    #----------------------------------------------------------------------
    def close(self):
        """Shut down the API."""
        self.exit()


########################################################################
class FemasTdApi(TdApi):
    """Femas trading API implementation."""

    #----------------------------------------------------------------------
    def __init__(self, gateway):
        """Constructor"""
        super(FemasTdApi, self).__init__()

        self.gateway = gateway                  # gateway object
        self.gatewayName = gateway.gatewayName  # gateway name

        self.reqID = EMPTY_INT                  # request sequence number
        self.localID = EMPTY_INT                # local order sequence number

        self.connectionStatus = False           # connected to the front server
        self.loginStatus = False                # logged in

        self.userID = EMPTY_STRING              # account
        self.password = EMPTY_STRING            # password
        self.brokerID = EMPTY_STRING            # broker ID
        self.address = EMPTY_STRING             # server address

        self.frontID = EMPTY_INT                # front server ID
        self.sessionID = EMPTY_INT              # session ID

    #----------------------------------------------------------------------
    def connect(self, userID, password, brokerID, address):
        """Initialize the connection."""
        self.userID = userID        # account
        self.password = password    # password
        self.brokerID = brokerID    # broker ID
        self.address = address      # server address

        # If no front connection exists yet, establish one.
        if not self.connectionStatus:
            # The path passed in is where the API stores its .con files.
            path = getTempPath(self.gatewayName + '_')
            self.createFtdcTraderApi(path)

            # Subscribe topics.
            self.subscribePrivateTopic(0)
            self.subscribePublicTopic(0)

            # Register the front address; onFrontConnected fires on success.
            self.registerFront(self.address)

            self.init()
        # Already connected but not logged in: just log in.
        else:
            if not self.loginStatus:
                self.login()

    #----------------------------------------------------------------------
    def login(self):
        """Log in (only when the credentials have been filled in)."""
        if self.userID and self.password and self.brokerID:
            req = {}
            req['UserID'] = self.userID
            req['Password'] = self.password
            req['BrokerID'] = self.brokerID

            self.reqID += 1
            self.reqUserLogin(req, self.reqID)

    #----------------------------------------------------------------------
    def qryAccount(self):
        """Query account funds."""
        self.reqID += 1
        req = {}
        req['BrokerID'] = self.brokerID
        req['InvestorID'] = self.userID
        self.reqQryInvestorAccount(req, self.reqID)

    #----------------------------------------------------------------------
    def qryPosition(self):
        """Query positions."""
        self.reqID += 1
        req = {}
        req['BrokerID'] = self.brokerID
        req['InvestorID'] = self.userID
        self.reqQryInvestorPosition(req, self.reqID)

    #----------------------------------------------------------------------
    def sendOrder(self, orderReq):
        """Send an order; returns the vtOrderID string, or '' if unsupported."""
        self.reqID += 1
        self.localID += 1
        # NOTE(review): generateStrLocalID is not defined in this chunk of
        # the module — presumably declared further down; verify it exists.
        strLocalID = generateStrLocalID(self.localID)

        req = {}

        req['InstrumentID'] = orderReq.symbol
        req['ExchangeID'] = orderReq.exchange
        req['LimitPrice'] = orderReq.price
        req['Volume'] = orderReq.volume

        # If the request uses a type this interface does not support,
        # return an empty string instead of sending anything.
        try:
            req['OrderPriceType'] = priceTypeMap[orderReq.priceType]
            req['Direction'] = directionMap[orderReq.direction]
            req['OffsetFlag'] = offsetMap[orderReq.offset]
        except KeyError:
            return ''

        req['UserOrderLocalID'] = strLocalID
        req['InvestorID'] = self.userID
        req['UserID'] = self.userID
        req['BrokerID'] = self.brokerID

        req['HedgeFlag'] = defineDict['USTP_FTDC_CHF_Speculation']        # speculation
        req['ForceCloseReason'] = defineDict['USTP_FTDC_FCR_NotForceClose']  # not force-close
        req['IsAutoSuspend'] = 0                                          # no auto suspend
        req['TimeCondition'] = defineDict['USTP_FTDC_TC_GFD']             # good for day
        req['VolumeCondition'] = defineDict['USTP_FTDC_VC_AV']            # any volume
        req['MinVolume'] = 1                                              # minimum fill of 1

        self.reqOrderInsert(req, self.reqID)

        # Return the order ID string so strategies can manage it dynamically.
        vtOrderID = '.'.join([self.gatewayName, strLocalID])
        return vtOrderID

    #----------------------------------------------------------------------
    def cancelOrder(self, cancelOrderReq):
        """Cancel an order."""
        self.reqID += 1
        self.localID += 1
        strLocalID = generateStrLocalID(self.localID)

        req = {}

        req['ExchangeID'] = cancelOrderReq.exchange
        req['UserOrderLocalID'] = cancelOrderReq.orderID
        # Femas requires a unique ID for the cancel action itself.
        req['UserOrderActionLocalID'] = strLocalID
        req['ActionFlag'] = defineDict['USTP_FTDC_AF_Delete']

        req['BrokerID'] = self.brokerID
        req['InvestorID'] = self.userID
        # Femas requires the UserID field (CTP does not).
        req['UserID'] = self.userID

        self.reqOrderAction(req, self.reqID)

    #----------------------------------------------------------------------
    def close(self):
        """Shut down the API."""
        self.exit()

    #----------------------------------------------------------------------
    def onFrontConnected(self):
        """Front server connected."""
        self.connectionStatus = True

        log = VtLogData()
        log.gatewayName = self.gatewayName
        log.logContent = u'交易服务器连接成功'
        self.gateway.onLog(log)

        self.login()

    #----------------------------------------------------------------------
    def onFrontDisconnected(self, n):
        """Front server disconnected."""
        self.connectionStatus = False
        self.loginStatus = False
        self.gateway.tdConnected = False

        log = VtLogData()
        log.gatewayName = self.gatewayName
        log.logContent = u'交易服务器连接断开'
        self.gateway.onLog(log)

    #----------------------------------------------------------------------
    def onHeartBeatWarning(self, n):
        """Heartbeat warning; ignored."""
        pass

    #----------------------------------------------------------------------
    def onRspError(self, error, n, last):
        """Error callback."""
        err = VtErrorData()
        err.gatewayName = self.gatewayName
        err.errorID = error['ErrorID']
        err.errorMsg = error['ErrorMsg'].decode('gbk')
        self.gateway.onError(err)

    #----------------------------------------------------------------------
    def onRspUserLogin(self, data, error, n, last):
        """Login callback."""
        if error['ErrorID'] == 0:
            # Resume the local order counter from the server's maximum.
            if data['MaxOrderLocalID']:
                self.localID = int(data['MaxOrderLocalID'])

            self.loginStatus = True
            # Fix: this is the trading API, so set tdConnected
            # (the original erroneously set mdConnected).
            self.gateway.tdConnected = True

            log = VtLogData()
            log.gatewayName = self.gatewayName
            log.logContent = u'交易服务器登录完成'
            self.gateway.onLog(log)

            # Query the contract list.
            self.reqID += 1
            self.reqQryInstrument({}, self.reqID)
        else:
            err = VtErrorData()
            # Fix: assign the gateway *name*, not the gateway object.
            err.gatewayName = self.gatewayName
            err.errorID = error['ErrorID']
            err.errorMsg = error['ErrorMsg'].decode('gbk')
            self.gateway.onError(err)

    #----------------------------------------------------------------------
    def onRspUserLogout(self, data, error, n, last):
        """Logout callback."""
        if error['ErrorID'] == 0:
            self.loginStatus = False
            self.gateway.tdConnected = False

            log = VtLogData()
            log.gatewayName = self.gatewayName
            log.logContent = u'交易服务器登出完成'
            self.gateway.onLog(log)
        else:
            err = VtErrorData()
            err.gatewayName = self.gatewayName
            err.errorID = error['ErrorID']
            err.errorMsg = error['ErrorMsg'].decode('gbk')
            self.gateway.onError(err)

    #----------------------------------------------------------------------
    def onRspUserPasswordUpdate(self, data, error, n, last):
        """Password update callback; unused."""
        pass

    #----------------------------------------------------------------------
    def onRspOrderInsert(self, data, error, n, last):
        """Order-insert error from the counter.

        Femas may push this even on success (with no error number),
        so only report when ErrorID is set.
        """
        if error['ErrorID']:
            err = VtErrorData()
            err.gatewayName = self.gatewayName
            err.errorID = error['ErrorID']
            err.errorMsg = error['ErrorMsg'].decode('gbk')
            self.gateway.onError(err)

    #----------------------------------------------------------------------
    def onRspOrderAction(self, data, error, n, last):
        """Order-cancel error from the counter."""
        if error['ErrorID']:
            err = VtErrorData()
            err.gatewayName = self.gatewayName
            err.errorID = error['ErrorID']
            err.errorMsg = error['ErrorMsg'].decode('gbk')
            self.gateway.onError(err)

    #----------------------------------------------------------------------
    def onRtnFlowMessageCancel(self, data):
        """Unused callback."""
        pass

    #----------------------------------------------------------------------
    def onRtnTrade(self, data):
        """Trade (fill) push."""
        trade = VtTradeData()
        trade.gatewayName = self.gatewayName

        # Contract symbol and order identifiers.
        trade.symbol = data['InstrumentID']
        trade.exchange = exchangeMapReverse[data['ExchangeID']]
        trade.vtSymbol = trade.symbol

        trade.tradeID = data['TradeID']
        trade.vtTradeID = '.'.join([self.gatewayName, trade.tradeID])

        trade.orderID = data['UserOrderLocalID']
        trade.vtOrderID = '.'.join([self.gatewayName, trade.orderID])

        # Direction and offset.
        trade.direction = directionMapReverse.get(data['Direction'], '')
        trade.offset = offsetMapReverse.get(data['OffsetFlag'], '')

        # Price / volume / time.
        trade.price = data['TradePrice']
        trade.volume = data['TradeVolume']
        trade.tradeTime = data['TradeTime']

        self.gateway.onTrade(trade)

    #----------------------------------------------------------------------
    def onRtnOrder(self, data):
        """Order status push."""
        # Keep the local order counter ahead of everything seen so far.
        self.localID = max(self.localID, int(data['UserOrderLocalID']))

        order = VtOrderData()
        order.gatewayName = self.gatewayName

        # Contract symbol and order identifier (Femas keys orders on this
        # single string field).
        order.symbol = data['InstrumentID']
        order.exchange = exchangeMapReverse[data['ExchangeID']]
        order.vtSymbol = order.symbol

        order.orderID = data['UserOrderLocalID']

        # Direction
        if data['Direction'] == '0':
            order.direction = DIRECTION_LONG
        elif data['Direction'] == '1':
            order.direction = DIRECTION_SHORT
        else:
            order.direction = DIRECTION_UNKNOWN

        # Offset
        if data['OffsetFlag'] == '0':
            order.offset = OFFSET_OPEN
        elif data['OffsetFlag'] == '1':
            order.offset = OFFSET_CLOSE
        elif data['OffsetFlag'] == '3':
            order.offset = OFFSET_CLOSETODAY
        elif data['OffsetFlag'] == '4':
            order.offset = OFFSET_CLOSEYESTERDAY
        else:
            order.offset = OFFSET_UNKNOWN

        # Status
        if data['OrderStatus'] == '0':
            order.status = STATUS_ALLTRADED
        elif data['OrderStatus'] == '1':
            order.status = STATUS_PARTTRADED
        elif data['OrderStatus'] == '3':
            order.status = STATUS_NOTTRADED
        elif data['OrderStatus'] == '5':
            order.status = STATUS_CANCELLED
        else:
            order.status = STATUS_UNKNOWN

        # Price / volume / time fields.
        order.price = data['LimitPrice']
        order.totalVolume = data['Volume']
        order.tradedVolume = data['VolumeTraded']
        order.orderTime = data['InsertTime']
        order.cancelTime = data['CancelTime']

        # Unlike CTP (frontID/sessionID/orderRef), Femas's local order ID is
        # designed to be monotonically increasing, so the gateway name plus
        # the local ID is sufficient to identify an order for VtTrader's
        # usage patterns.
        order.vtOrderID = '.'.join([self.gatewayName, order.orderID])

        self.gateway.onOrder(order)

    #----------------------------------------------------------------------
    def onErrRtnOrderInsert(self, data, error):
        """Order-insert error from the exchange."""
        if error['ErrorID']:
            err = VtErrorData()
            err.gatewayName = self.gatewayName
            err.errorID = error['ErrorID']
            err.errorMsg = error['ErrorMsg'].decode('gbk')
            self.gateway.onError(err)

    #----------------------------------------------------------------------
    def onErrRtnOrderAction(self, data, error):
        """Order-cancel error from the exchange."""
        if error['ErrorID']:
            err = VtErrorData()
            err.gatewayName = self.gatewayName
            err.errorID = error['ErrorID']
            err.errorMsg = error['ErrorMsg'].decode('gbk')
            self.gateway.onError(err)

    #----------------------------------------------------------------------
    def onRtnInstrumentStatus(self, data):
        """Unused callback."""
        pass

    #----------------------------------------------------------------------
    def onRtnInvestorAccountDeposit(self, data):
        """Unused callback."""
        pass

    #----------------------------------------------------------------------
    def onRspQryOrder(self, data, error, n, last):
        """Unused callback."""
        pass

    #----------------------------------------------------------------------
    def onRspQryTrade(self, data, error, n, last):
        """Unused callback."""
        pass

    #----------------------------------------------------------------------
    def onRspQryUserInvestor(self, data, error, n, last):
        """Unused callback."""
        pass

    #----------------------------------------------------------------------
    def onRspQryTradingCode(self, data, error, n, last):
        """Unused callback."""
        pass

    #----------------------------------------------------------------------
    def onRspQryInvestorAccount(self, data, error, n, last):
        """Account query callback."""
        account = VtAccountData()
        account.gatewayName = self.gatewayName

        # Account identifier.
        account.accountID = data['AccountID']
        account.vtAccountID = '.'.join([self.gatewayName, account.accountID])

        # Numeric fields.
        account.preBalance = data['PreBalance']
        account.available = data['Available']
        account.commission = data['Fee']
        account.margin = data['Margin']
        account.closeProfit = data['CloseProfit']
        account.positionProfit = data['PositionProfit']
        # Femas provides the dynamic equity field directly.
        account.balance = data['DynamicRights']

        self.gateway.onAccount(account)

    #----------------------------------------------------------------------
    def onRspQryInstrument(self, data, error, n, last):
        """Contract query callback."""
        contract = VtContractData()
        contract.gatewayName = self.gatewayName

        contract.symbol = data['InstrumentID']
        contract.exchange = exchangeMapReverse[data['ExchangeID']]
        contract.vtSymbol = contract.symbol
        contract.name = data['InstrumentName'].decode('GBK')

        # Contract numeric fields.
        contract.size = data['VolumeMultiple']
        contract.priceTick = data['PriceTick']
        contract.strikePrice = data['StrikePrice']
        contract.underlyingSymbol = data['UnderlyingInstrID']

        # Product/option type.
        if data['OptionsType'] == '1':
            contract.productClass = PRODUCT_OPTION
            contract.optionType = OPTION_CALL
        elif data['OptionsType'] == '2':
            contract.productClass = PRODUCT_OPTION
            contract.optionType = OPTION_PUT
        elif data['OptionsType'] == '3':
            contract.productClass = PRODUCT_FUTURES
            contract.optionType = ''

        self.gateway.onContract(contract)

        if last:
            log = VtLogData()
            log.gatewayName = self.gatewayName
            log.logContent = u'交易合约信息获取完成'
            self.gateway.onLog(log)

    #----------------------------------------------------------------------
    def onRspQryExchange(self, data, error, n, last):
        """Unused callback."""
        pass

    #----------------------------------------------------------------------
    def onRspQryInvestorPosition(self, data, error, n, last):
        """Position query callback."""
        pos = VtPositionData()
        pos.gatewayName = self.gatewayName

        # Contract symbol (the data carries no ExchangeID field).
        pos.symbol = data['InstrumentID']
        pos.vtSymbol = pos.symbol

        # Direction and frozen volume.
        pos.direction = posiDirectionMapReverse.get(data['Direction'], '')
        pos.frozen = data['FrozenPosition']

        # Position volumes.
        pos.position = data['Position']
        pos.ydPosition = data['YdPosition']

        # Average position price (guard against division by zero).
        if pos.position:
            pos.price = data['PositionCost'] / pos.position

        # VT position name.
        pos.vtPositionName = '.'.join([pos.vtSymbol, pos.direction])

        self.gateway.onPosition(pos)

    #----------------------------------------------------------------------
    def onRspSubscribeTopic(self, data, error, n, last):
        """Unused callback."""
        pass

    #----------------------------------------------------------------------
    def onRspQryComplianceParam(self, data, error, n, last):
        """Unused callback."""
        pass

    #----------------------------------------------------------------------
    def onRspQryTopic(self, data, error, n, last):
        """Unused callback."""
        pass

    #----------------------------------------------------------------------
    def onRspQryInvestorFee(self, data, error, n, last):
        """Unused callback."""
        pass

    #----------------------------------------------------------------------
    def onRspQryInvestorMargin(self, data, error, n, last):
        """Unused callback."""
        pass
unknown
codeparrot/codeparrot-clean
"""Handle Konnected messages."""
import logging

from homeassistant.const import (
    ATTR_ENTITY_ID,
    ATTR_STATE,
    DEVICE_CLASS_HUMIDITY,
    DEVICE_CLASS_TEMPERATURE,
)
from homeassistant.helpers.dispatcher import async_dispatcher_send

from homeassistant.util import decorator

from .const import CONF_INVERSE, SIGNAL_DS18B20_NEW

_LOGGER = logging.getLogger(__name__)

HANDLERS = decorator.Registry()


def _send_update(hass, entity_id, value):
    """Dispatch an update signal for a known konnected entity."""
    async_dispatcher_send(hass, f"konnected.{entity_id}.update", value)


@HANDLERS.register("state")
async def async_handle_state_update(hass, context, msg):
    """Handle a binary sensor state update."""
    _LOGGER.debug("[state handler] context: %s msg: %s", context, msg)
    entity_id = context.get(ATTR_ENTITY_ID)
    state = bool(int(msg.get(ATTR_STATE)))
    _send_update(
        hass,
        entity_id,
        not state if context.get(CONF_INVERSE) else state,
    )


@HANDLERS.register("temp")
async def async_handle_temp_update(hass, context, msg):
    """Handle a temperature sensor state update."""
    _LOGGER.debug("[temp handler] context: %s msg: %s", context, msg)
    entity_id = context.get(DEVICE_CLASS_TEMPERATURE)
    if entity_id:
        _send_update(hass, entity_id, msg.get("temp"))


@HANDLERS.register("humi")
async def async_handle_humi_update(hass, context, msg):
    """Handle a humidity sensor state update."""
    _LOGGER.debug("[humi handler] context: %s msg: %s", context, msg)
    entity_id = context.get(DEVICE_CLASS_HUMIDITY)
    if entity_id:
        _send_update(hass, entity_id, msg.get("humi"))


@HANDLERS.register("addr")
async def async_handle_addr_update(hass, context, msg):
    """Handle an addressable sensor update."""
    _LOGGER.debug("[addr handler] context: %s msg: %s", context, msg)
    addr, temp = msg.get("addr"), msg.get("temp")
    entity_id = context.get(addr)
    if entity_id:
        _send_update(hass, entity_id, temp)
    else:
        # Unknown sensor address: announce it so a new entity can be created.
        msg["device_id"] = context.get("device_id")
        msg["temperature"] = temp
        msg["addr"] = addr
        async_dispatcher_send(hass, SIGNAL_DS18B20_NEW, msg)
unknown
codeparrot/codeparrot-clean
# -*- test-case-name: twisted.test.test_reflect -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.

"""
Standardized versions of various cool and/or strange things that you can do
with Python's reflection capabilities.
"""

import sys

from .compat import PY3


class _NoModuleFound(Exception):
    """
    No module was found because none exists.
    """


class InvalidName(ValueError):
    """
    The given name is not a dot-separated list of Python objects.
    """


class ModuleNotFound(InvalidName):
    """
    The module associated with the given name doesn't exist and it can't be
    imported.
    """


class ObjectNotFound(InvalidName):
    """
    The object associated with the given name doesn't exist and it can't be
    imported.
    """


# Define reraise differently per major version: the Python 2 three-argument
# raise statement is a syntax error on Python 3, so it must be hidden inside
# exec() to keep this module importable on both.
if PY3:
    def reraise(exception, traceback):
        raise exception.with_traceback(traceback)
else:
    exec("""def reraise(exception, traceback):
        raise exception.__class__, exception, traceback""")

reraise.__doc__ = """
Re-raise an exception, with an optional traceback, in a way that is compatible
with both Python 2 and Python 3.

Note that on Python 3, re-raised exceptions will be mutated, with their
C{__traceback__} attribute being set.

@param exception: The exception instance.
@param traceback: The traceback to use, or C{None} indicating a new traceback.
"""


def _importAndCheckStack(importName):
    """
    Import the given name as a module, then walk the stack to determine whether
    the failure was the module not existing, or some code in the module (for
    example a dependent import) failing.  This can be helpful to determine
    whether any actual application code was run.  For example, to distinguish
    administrative error (entering the wrong module name), from programmer
    error (writing buggy code in a module that fails to import).

    @param importName: The name of the module to import.
    @type importName: C{str}

    @raise Exception: if something bad happens.  This can be any type of
        exception, since nobody knows what loading some arbitrary code might
        do.

    @raise _NoModuleFound: if no module was found.
    """
    try:
        return __import__(importName)
    except ImportError:
        excType, excValue, excTraceback = sys.exc_info()
        # Walk the traceback: if any frame belongs to the module we tried to
        # import, the module exists but raised while loading, so propagate
        # the original error instead of reporting "module not found".
        while excTraceback:
            execName = excTraceback.tb_frame.f_globals["__name__"]
            # in Python 2 execName is None when an ImportError is encountered,
            # where in Python 3 execName is equal to the importName.
            if execName is None or execName == importName:
                reraise(excValue, excTraceback)
            excTraceback = excTraceback.tb_next
        raise _NoModuleFound()


def namedAny(name):
    """
    Retrieve a Python object by its fully qualified name from the global Python
    module namespace.  The first part of the name, that describes a module,
    will be discovered and imported.  Each subsequent part of the name is
    treated as the name of an attribute of the object specified by all of the
    name which came before it.  For example, the fully-qualified name of this
    object is 'twisted.python.reflect.namedAny'.

    @type name: L{str}
    @param name: The name of the object to return.

    @raise InvalidName: If the name is an empty string, starts or ends with
        a '.', or is otherwise syntactically incorrect.

    @raise ModuleNotFound: If the name is syntactically correct but the
        module it specifies cannot be imported because it does not appear to
        exist.

    @raise ObjectNotFound: If the name is syntactically correct, includes at
        least one '.', but the module it specifies cannot be imported because
        it does not appear to exist.

    @raise AttributeError: If an attribute of an object along the way cannot be
        accessed, or a module along the way is not found.

    @return: the Python object identified by 'name'.
    """
    if not name:
        raise InvalidName('Empty module name')

    names = name.split('.')

    # if the name starts or ends with a '.' or contains '..', the __import__
    # will raise an 'Empty module name' error. This will provide a better error
    # message.
    if '' in names:
        raise InvalidName(
            "name must be a string giving a '.'-separated list of Python "
            "identifiers, not %r" % (name,))

    # Try importing progressively shorter dotted prefixes until one succeeds;
    # the remainder of the name is then resolved via getattr below.
    topLevelPackage = None
    moduleNames = names[:]
    while not topLevelPackage:
        if moduleNames:
            trialname = '.'.join(moduleNames)
            try:
                topLevelPackage = _importAndCheckStack(trialname)
            except _NoModuleFound:
                moduleNames.pop()
        else:
            # No prefix of the name is importable at all.
            if len(names) == 1:
                raise ModuleNotFound("No module named %r" % (name,))
            else:
                raise ObjectNotFound('%r does not name an object' % (name,))

    obj = topLevelPackage
    for n in names[1:]:
        obj = getattr(obj, n)

    return obj
unknown
codeparrot/codeparrot-clean
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

'''This utility cleans up the html files as emitted by doxygen so
that they are suitable for publication on a Google documentation site.
'''

# NOTE: this is Python 2 code (print statements, BeautifulSoup 3 API).

import optparse
import os
import re
import shutil
import string
import sys

try:
  from BeautifulSoup import BeautifulSoup, Tag
except (ImportError, NotImplementedError):
  print ("This tool requires the BeautifulSoup package "
         "(see http://www.crummy.com/software/BeautifulSoup/).\n"
         "Make sure that the file BeautifulSoup.py is either in this directory "
         "or is available in your PYTHON_PATH")
  raise


class HTMLFixer(object):
  '''This class cleans up the html strings as produced by Doxygen
  '''

  def __init__(self, html):
    # Parse once; all Fix* methods mutate this tree in place.
    self.soup = BeautifulSoup(html)

  def FixTableHeadings(self):
    '''Fixes the doxygen table headings.

    This includes:
      - Using bare <h2> title row instead of row embedded in <tr><td> in table
      - Putting the "name" attribute into the "id" attribute of the <tr> tag.
      - Splitting up tables into multiple separate tables if a table heading
        appears in the middle of a table.

    For example, this html:
     <table>
      <tr><td colspan="2"><h2><a name="pub-attribs"></a>
      Data Fields List</h2></td></tr>
      ...
     </table>

    would be converted to this:
     <h2>Data Fields List</h2>
     <table>
      ...
     </table>
    '''
    table_headers = []
    for tag in self.soup.findAll('tr'):
      if tag.td and tag.td.h2 and tag.td.h2.a and tag.td.h2.a['name']:
        #tag['id'] = tag.td.h2.a['name']
        # Collapse the <tr><td><h2><a>title</a></h2></td></tr> row into a
        # bare <h2> element holding just the title text.
        tag.string = tag.td.h2.a.next
        tag.name = 'h2'
        table_headers.append(tag)

    # reverse the list so that earlier tags don't delete later tags
    table_headers.reverse()
    # Split up tables that have multiple table header (th) rows
    for tag in table_headers:
      print "Header tag: %s is %s" % (tag.name, tag.string.strip())
      # Is this a heading in the middle of a table?
      if tag.findPreviousSibling('tr') and tag.parent.name == 'table':
        print "Splitting Table named %s" % tag.string.strip()
        table = tag.parent
        table_parent = table.parent
        table_index = table_parent.contents.index(table)
        # Move everything from the heading onward into a fresh sibling table.
        new_table = Tag(self.soup, name='table', attrs=table.attrs)
        table_parent.insert(table_index + 1, new_table)
        tag_index = table.contents.index(tag)
        for index, row in enumerate(table.contents[tag_index:]):
          new_table.insert(index, row)
      # Now move the <h2> tag to be in front of the <table> tag
      assert tag.parent.name == 'table'
      table = tag.parent
      table_parent = table.parent
      table_index = table_parent.contents.index(table)
      table_parent.insert(table_index, tag)

  def RemoveTopHeadings(self):
    '''Removes <div> sections with a header, tabs, or navpath class attribute'''
    header_tags = self.soup.findAll(
        name='div',
        attrs={'class' : re.compile('^(header|tabs[0-9]*|navpath)$')})
    [tag.extract() for tag in header_tags]

  def FixAll(self):
    # Run every cleanup pass on the parsed tree.
    self.FixTableHeadings()
    self.RemoveTopHeadings()

  def __str__(self):
    # Serialize the (possibly modified) tree back to html.
    return str(self.soup)


def main():
  '''Main entry for the doxy_cleanup utility

  doxy_cleanup takes a list of html files and modifies them in place.'''

  parser = optparse.OptionParser(usage='Usage: %prog [options] files...')

  parser.add_option('-m', '--move', dest='move', action='store_true',
                    default=False, help='move html files to "original_html"')

  options, files = parser.parse_args()

  if not files:
    parser.print_usage()
    return 1

  for filename in files:
    try:
      with open(filename, 'r') as file:
        html = file.read()

      print "Processing %s" % filename
      fixer = HTMLFixer(html)
      fixer.FixAll()
      # Rewrite the file in place with the cleaned-up html.
      with open(filename, 'w') as file:
        file.write(str(fixer))
      if options.move:
        # Optionally archive the processed file under ../original_html.
        new_directory = os.path.join(
            os.path.dirname(os.path.dirname(filename)), 'original_html')
        if not os.path.exists(new_directory):
          os.mkdir(new_directory)
        shutil.move(filename, new_directory)
    except:
      # Report which file failed, then re-raise the original error.
      print "Error while processing %s" % filename
      raise

  return 0

if __name__ == '__main__':
  sys.exit(main())
unknown
codeparrot/codeparrot-clean
#!/usr/bin/env python
##################################################
# Gnuradio Python Flow Graph
# Title: Howto Square
# Generated: Thu Nov 12 11:26:07 2009
##################################################
# NOTE: generated by GNU Radio Companion; edits are normally made in the
# .grc flow graph, not here.

import howto
from gnuradio import eng_notation
from gnuradio import gr
from gnuradio.eng_option import eng_option
from gnuradio.gr import firdes
from gnuradio.wxgui import scopesink2
from grc_gnuradio import wxgui as grc_wxgui
from optparse import OptionParser
import wx


class howto_square(grc_wxgui.top_block_gui):
    """Flow graph: ramp source -> throttle -> howto.square_ff, with a scope
    on the input and another on the squared output."""

    def __init__(self):
        grc_wxgui.top_block_gui.__init__(self, title="Howto Square")

        ##################################################
        # Variables
        ##################################################
        self.samp_rate = samp_rate = 10e3

        ##################################################
        # Blocks
        ##################################################
        self.sink = scopesink2.scope_sink_f(
            self.GetWin(),
            title="Input",
            sample_rate=samp_rate,
            v_scale=20,
            v_offset=0,
            t_scale=0.002,
            ac_couple=False,
            xy_mode=False,
            num_inputs=1,
        )
        self.Add(self.sink.win)
        self.sink2 = scopesink2.scope_sink_f(
            self.GetWin(),
            title="Output",
            sample_rate=samp_rate,
            v_scale=0,
            v_offset=0,
            t_scale=0.002,
            ac_couple=False,
            xy_mode=False,
            num_inputs=1,
        )
        self.Add(self.sink2.win)
        self.sqr = howto.square_ff()
        # Repeating ramp of floats -50..49; throttle paces it at samp_rate.
        self.src = gr.vector_source_f(([float(n)-50 for n in range(100)]), True, 1)
        self.thr = gr.throttle(gr.sizeof_float*1, samp_rate)

        ##################################################
        # Connections
        ##################################################
        self.connect((self.thr, 0), (self.sqr, 0))
        self.connect((self.src, 0), (self.thr, 0))
        self.connect((self.thr, 0), (self.sink, 0))
        self.connect((self.sqr, 0), (self.sink2, 0))

    def set_samp_rate(self, samp_rate):
        # Propagate the new rate to both scope sinks.
        self.samp_rate = samp_rate
        self.sink.set_sample_rate(self.samp_rate)
        self.sink2.set_sample_rate(self.samp_rate)


if __name__ == '__main__':
    parser = OptionParser(option_class=eng_option, usage="%prog: [options]")
    (options, args) = parser.parse_args()
    tb = howto_square()
    tb.Run(True)
unknown
codeparrot/codeparrot-clean
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1

// This program is the generator for the gRPC service wrapper types in the
// parent directory. It's not suitable for any other use.
//
// This makes various assumptions about how the protobuf compiler and
// gRPC stub generators produce code. If those significantly change in future
// then this will probably break.
package main

import (
	"bytes"
	"fmt"
	"go/format"
	"go/types"
	"log"
	"os"
	"path/filepath"
	"regexp"
	"strings"

	"golang.org/x/tools/go/packages"
)

// protobufPkgs maps a short alias (used as the import alias in generated
// code) to the import path of each generated protobuf/gRPC stub package.
var protobufPkgs = map[string]string{
	"dependencies": "github.com/hashicorp/terraform/internal/rpcapi/terraform1/dependencies",
	"stacks":       "github.com/hashicorp/terraform/internal/rpcapi/terraform1/stacks",
	"packages":     "github.com/hashicorp/terraform/internal/rpcapi/terraform1/packages",
}

// additionalImportsByName lists extra imports some generated files need
// (streaming methods reference grpc.* types directly).
var additionalImportsByName = map[string]string{
	"dependencies": `"google.golang.org/grpc"`,
	"stacks":       `"google.golang.org/grpc"`,
}

// main loads each stub package, finds its gRPC server interfaces, and writes
// one dynamic-activation wrapper file per service into ../../dynrpcserver.
func main() {
	for shortName, pkgName := range protobufPkgs {
		cfg := &packages.Config{
			Mode: packages.NeedTypes | packages.NeedTypesInfo | packages.NeedFiles,
		}
		pkgs, err := packages.Load(cfg, pkgName)
		if err != nil {
			log.Fatalf("can't load the protobuf/gRPC proxy package: %s", err)
		}
		if len(pkgs) != 1 {
			log.Fatalf("wrong number of packages found")
		}
		pkg := pkgs[0]
		if pkg.TypesInfo == nil {
			log.Fatalf("types info not available")
		}
		if len(pkg.GoFiles) < 1 {
			log.Fatalf("no files included in package")
		}

		// We assume that our output directory is sibling to the directory
		// containing the protobuf specification.
		outDir := filepath.Join(filepath.Dir(pkg.GoFiles[0]), "../../dynrpcserver")

	Types:
		for _, obj := range pkg.TypesInfo.Defs {
			typ, ok := obj.(*types.TypeName)
			if !ok {
				continue
			}
			underTyp := typ.Type().Underlying()
			iface, ok := underTyp.(*types.Interface)
			if !ok {
				continue
			}
			if !strings.HasSuffix(typ.Name(), "Server") || typ.Name() == "SetupServer" {
				// Doesn't look like a generated gRPC server interface
				continue
			}
			// The interfaces used for streaming requests/responses unfortunately
			// also have a "Server" suffix in the generated Go code, and so
			// we need to detect those more surgically by noticing that they
			// have grpc.ServerStream embedded inside.
			for i := 0; i < iface.NumEmbeddeds(); i++ {
				emb, ok := iface.EmbeddedType(i).(*types.Named)
				if !ok {
					continue
				}
				pkg := emb.Obj().Pkg().Path()
				name := emb.Obj().Name()
				if pkg == "google.golang.org/grpc" && name == "ServerStream" {
					continue Types
				}
			}

			// If we get here then what we're holding _seems_ to be a gRPC
			// server interface, and so we'll generate a dynamic initialization
			// wrapper for it.
			ifaceName := typ.Name()
			baseName := strings.TrimSuffix(ifaceName, "Server")
			filename := toFilenameCase(baseName) + ".go"
			absFilename := filepath.Join(outDir, filename)

			if regexp.MustCompile("^Unsafe").MatchString(ifaceName) {
				// This isn't a gRPC server interface, so skip it.
				//
				// This is an interface that's intended to be embedded to help users to meet requirements for Unimplemented servers.
				// See:
				// > Docs: https://pkg.go.dev/google.golang.org/grpc/cmd/protoc-gen-go-grpc#readme-future-proofing-services
				// > PR for Unsafe interfaces: https://github.com/grpc/grpc-go/pull/3911
				continue Types
			}

			var buf bytes.Buffer
			fmt.Fprintf(&buf, `// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1

// Code generated by ./generator. DO NOT EDIT.
`)
			fmt.Fprintf(&buf, `package dynrpcserver

import (
	"context"
	"sync"
	%s
	%s %q
)

`, additionalImportsByName[shortName], shortName, pkg)
			fmt.Fprintf(&buf, "type %s struct {\n", baseName)
			fmt.Fprintf(&buf, "impl %s.%s\n", shortName, ifaceName)
			fmt.Fprintln(&buf, "mu sync.RWMutex")
			unimplementedServerInterface := fmt.Sprintf("%s.Unimplemented%s", shortName, ifaceName) // UnimplementedFoobarServer struct name that's generated from the proto file.
			unimplementedServerMethod := fmt.Sprintf("mustEmbedUnimplemented%s", ifaceName)         // Name of the method implemented on UnimplementedFoobarServer.
			fmt.Fprintln(&buf, unimplementedServerInterface)                                        // Embed UnimplementedFoobarServer struct into the struct we're generating.
			buf.WriteString("}\n\n")
			fmt.Fprintf(&buf, "var _ %s.%s = (*%s)(nil)\n\n", shortName, ifaceName, baseName)
			fmt.Fprintf(&buf, "func New%sStub() *%s {\n", baseName, baseName)
			fmt.Fprintf(&buf, "return &%s{}\n", baseName)
			fmt.Fprintf(&buf, "}\n\n")

			// Generate one forwarding method per interface method, each of
			// which delegates to the dynamically-activated implementation.
			for i := 0; i < iface.NumMethods(); i++ {
				method := iface.Method(i)
				if method.Name() == unimplementedServerMethod {
					// Code for this method doesn't need to be generated.
					// The method is present via embedding, see use of `unimplementedServerInterface` above.
					continue
				}
				sig := method.Type().(*types.Signature)
				fmt.Fprintf(&buf, "func (s *%s) %s(", baseName, method.Name())
				for i := 0; i < sig.Params().Len(); i++ {
					param := sig.Params().At(i)
					// The generated interface types don't include parameter names
					// and so we just use synthetic parameter names here.
					name := fmt.Sprintf("a%d", i)
					genType := typeRef(param.Type().String(), shortName, pkgName)
					if i > 0 {
						buf.WriteString(", ")
					}
					buf.WriteString(name)
					buf.WriteString(" ")
					buf.WriteString(genType)
				}
				fmt.Fprintf(&buf, ")")
				if sig.Results().Len() > 1 {
					buf.WriteString("(")
				}
				for i := 0; i < sig.Results().Len(); i++ {
					result := sig.Results().At(i)
					genType := typeRef(result.Type().String(), shortName, pkgName)
					if i > 0 {
						buf.WriteString(", ")
					}
					buf.WriteString(genType)
				}
				if sig.Results().Len() > 1 {
					buf.WriteString(")")
				}
				// Method bodies differ only in the error-return shape:
				// (error) vs (result, error).
				switch n := sig.Results().Len(); n {
				case 1:
					fmt.Fprintf(&buf, ` {
	impl, err := s.realRPCServer()
	if err != nil {
		return err
	}
`)
				case 2:
					fmt.Fprintf(&buf, ` {
	impl, err := s.realRPCServer()
	if err != nil {
		return nil, err
	}
`)
				default:
					log.Fatalf("don't know how to make a stub for method with %d results", n)
				}
				fmt.Fprintf(&buf, "return impl.%s(", method.Name())
				for i := 0; i < sig.Params().Len(); i++ {
					if i > 0 {
						buf.WriteString(", ")
					}
					fmt.Fprintf(&buf, "a%d", i)
				}
				fmt.Fprintf(&buf, ")\n}\n\n")
			}

			fmt.Fprintf(&buf, `
func (s *%s) ActivateRPCServer(impl %s.%s) {
	s.mu.Lock()
	s.impl = impl
	s.mu.Unlock()
}

func (s *%s) realRPCServer() (%s.%s, error) {
	s.mu.RLock()
	impl := s.impl
	s.mu.RUnlock()
	if impl == nil {
		return nil, unavailableErr
	}
	return impl, nil
}
`, baseName, shortName, ifaceName, baseName, shortName, ifaceName)

			// Pretty-print the generated source; on failure fall back to the
			// unformatted bytes so the compile error is visible in the output.
			src, err := format.Source(buf.Bytes())
			if err != nil {
				//log.Fatalf("formatting %s: %s", filename, err)
				src = buf.Bytes()
			}

			f, err := os.Create(absFilename)
			if err != nil {
				log.Fatal(err)
			}
			_, err = f.Write(src)
			if err != nil {
				log.Fatalf("writing %s: %s", filename, err)
			}
		}
	}
}

// typeRef rewrites a fully-qualified type string from go/types into the
// abbreviated form used in the generated file (aliasing pkg to name).
func typeRef(fullType, name, pkg string) string {
	// The following is specialized to only the parameter types
	// we typically expect to see in a server interface. This
	// might need extra rules if we step outside the design idiom
	// we've used for these services so far.

	// Identifies generic types from google.golang.org/grpc module with 1+ type arguments.
	grpcGenericRe := regexp.MustCompile(`^google\.golang\.org\/grpc\.\w+\[[\w\.\/,\s]+\]`)

	switch {
	case fullType == "context.Context" || fullType == "error":
		return fullType
	case fullType == "interface{}" || fullType == "any":
		return "any"
	case strings.HasPrefix(fullType, "*"+pkg+"."):
		return "*" + name + "." + fullType[len(pkg)+2:]
	case strings.HasPrefix(fullType, pkg+"."):
		return name + "." + fullType[len(pkg)+1:]
	case grpcGenericRe.MatchString(fullType):
		// Handling use of google.golang.org/grpc.Foobar[T...] generic types.
		// Example 1: google.golang.org/grpc.ServerStreamingServer[github.com/hashicorp/terraform/internal/rpcapi/terraform1/dependencies.BuildProviderPluginCache_Event]
		// Example 2: google.golang.org/grpc.ClientStreamingServer[github.com/hashicorp/terraform/internal/rpcapi/terraform1/stacks.OpenStackPlan_RequestItem, github.com/hashicorp/terraform/internal/rpcapi/terraform1/stacks.OpenStackPlan_Response]

		// Pull grpc.Foobar out of fullType string
		grpcGenericRe := regexp.MustCompile(`^google\.golang\.org\/(?P<GrpcType>grpc\.\w+)\[github.com`)
		i := grpcGenericRe.SubexpIndex("GrpcType")
		grpcGeneric := grpcGenericRe.FindStringSubmatch(fullType)[i]

		// Get type argument(s)
		typeRe := regexp.MustCompile(fmt.Sprintf(`%s\.\w+`, name))
		typeArgs := typeRe.FindAllString(fullType, -1)

		// Build string, with potential need for comma separation
		// e.g. grpc.Foobar[pkg1.A, pkg2.B]
		var buf strings.Builder
		buf.WriteString(grpcGeneric + "[")
		for i, arg := range typeArgs {
			buf.WriteString(arg)
			if i+1 != len(typeArgs) {
				buf.WriteString(", ")
			}
		}
		buf.WriteString("]")
		return buf.String()
	default:
		log.Fatalf("don't know what to do with parameter type %s", fullType)
		return ""
	}
}

var firstCapPattern = regexp.MustCompile("(.)([A-Z][a-z]+)")
var otherCapPattern = regexp.MustCompile("([a-z0-9])([A-Z])")

// toFilenameCase converts a CamelCase type name to snake_case for use as an
// output filename.
func toFilenameCase(typeName string) string {
	ret := firstCapPattern.ReplaceAllString(typeName, "${1}_${2}")
	ret = otherCapPattern.ReplaceAllString(ret, "${1}_${2}")
	return strings.ToLower(ret)
}
go
github
https://github.com/hashicorp/terraform
internal/rpcapi/dynrpcserver/generator/main.go
# Copyright 2019 Fortinet, Inc. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <https://www.gnu.org/licenses/>. # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type import os import json import pytest from mock import ANY from ansible.module_utils.network.fortios.fortios import FortiOSHandler try: from ansible.modules.network.fortios import fortios_log_fortianalyzer2_setting except ImportError: pytest.skip("Could not load required modules for testing", allow_module_level=True) @pytest.fixture(autouse=True) def connection_mock(mocker): connection_class_mock = mocker.patch('ansible.modules.network.fortios.fortios_log_fortianalyzer2_setting.Connection') return connection_class_mock fos_instance = FortiOSHandler(connection_mock) def test_log_fortianalyzer2_setting_creation(mocker): schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema') set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200} set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result) input_data = { 'username': 'admin', 'state': 'present', 'log_fortianalyzer2_setting': { '__change_ip': '3', 'certificate': 'test_value_4', 'conn_timeout': '5', 'enc_algorithm': 'high-medium', 'faz_type': '7', 'hmac_algorithm': 'sha256', 'ips_archive': 'enable', 'mgmt_name': 
'test_value_10', 'monitor_failure_retry_period': '11', 'monitor_keepalive_period': '12', 'reliable': 'enable', 'server': '192.168.100.14', 'source_ip': '84.230.14.15', 'ssl_min_proto_version': 'default', 'status': 'enable', 'upload_day': 'test_value_18', 'upload_interval': 'daily', 'upload_option': 'store-and-upload', 'upload_time': 'test_value_21' }, 'vdom': 'root'} is_error, changed, response = fortios_log_fortianalyzer2_setting.fortios_log_fortianalyzer2(input_data, fos_instance) expected_data = { '--change-ip': '3', 'certificate': 'test_value_4', 'conn-timeout': '5', 'enc-algorithm': 'high-medium', 'faz-type': '7', 'hmac-algorithm': 'sha256', 'ips-archive': 'enable', 'mgmt-name': 'test_value_10', 'monitor-failure-retry-period': '11', 'monitor-keepalive-period': '12', 'reliable': 'enable', 'server': '192.168.100.14', 'source-ip': '84.230.14.15', 'ssl-min-proto-version': 'default', 'status': 'enable', 'upload-day': 'test_value_18', 'upload-interval': 'daily', 'upload-option': 'store-and-upload', 'upload-time': 'test_value_21' } set_method_mock.assert_called_with('log.fortianalyzer2', 'setting', data=expected_data, vdom='root') schema_method_mock.assert_not_called() assert not is_error assert changed assert response['status'] == 'success' assert response['http_status'] == 200 def test_log_fortianalyzer2_setting_creation_fails(mocker): schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema') set_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500} set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result) input_data = { 'username': 'admin', 'state': 'present', 'log_fortianalyzer2_setting': { '__change_ip': '3', 'certificate': 'test_value_4', 'conn_timeout': '5', 'enc_algorithm': 'high-medium', 'faz_type': '7', 'hmac_algorithm': 'sha256', 'ips_archive': 'enable', 'mgmt_name': 'test_value_10', 'monitor_failure_retry_period': 
'11', 'monitor_keepalive_period': '12', 'reliable': 'enable', 'server': '192.168.100.14', 'source_ip': '84.230.14.15', 'ssl_min_proto_version': 'default', 'status': 'enable', 'upload_day': 'test_value_18', 'upload_interval': 'daily', 'upload_option': 'store-and-upload', 'upload_time': 'test_value_21' }, 'vdom': 'root'} is_error, changed, response = fortios_log_fortianalyzer2_setting.fortios_log_fortianalyzer2(input_data, fos_instance) expected_data = { '--change-ip': '3', 'certificate': 'test_value_4', 'conn-timeout': '5', 'enc-algorithm': 'high-medium', 'faz-type': '7', 'hmac-algorithm': 'sha256', 'ips-archive': 'enable', 'mgmt-name': 'test_value_10', 'monitor-failure-retry-period': '11', 'monitor-keepalive-period': '12', 'reliable': 'enable', 'server': '192.168.100.14', 'source-ip': '84.230.14.15', 'ssl-min-proto-version': 'default', 'status': 'enable', 'upload-day': 'test_value_18', 'upload-interval': 'daily', 'upload-option': 'store-and-upload', 'upload-time': 'test_value_21' } set_method_mock.assert_called_with('log.fortianalyzer2', 'setting', data=expected_data, vdom='root') schema_method_mock.assert_not_called() assert is_error assert not changed assert response['status'] == 'error' assert response['http_status'] == 500 def test_log_fortianalyzer2_setting_idempotent(mocker): schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema') set_method_result = {'status': 'error', 'http_method': 'DELETE', 'http_status': 404} set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result) input_data = { 'username': 'admin', 'state': 'present', 'log_fortianalyzer2_setting': { '__change_ip': '3', 'certificate': 'test_value_4', 'conn_timeout': '5', 'enc_algorithm': 'high-medium', 'faz_type': '7', 'hmac_algorithm': 'sha256', 'ips_archive': 'enable', 'mgmt_name': 'test_value_10', 'monitor_failure_retry_period': '11', 'monitor_keepalive_period': '12', 'reliable': 
'enable', 'server': '192.168.100.14', 'source_ip': '84.230.14.15', 'ssl_min_proto_version': 'default', 'status': 'enable', 'upload_day': 'test_value_18', 'upload_interval': 'daily', 'upload_option': 'store-and-upload', 'upload_time': 'test_value_21' }, 'vdom': 'root'} is_error, changed, response = fortios_log_fortianalyzer2_setting.fortios_log_fortianalyzer2(input_data, fos_instance) expected_data = { '--change-ip': '3', 'certificate': 'test_value_4', 'conn-timeout': '5', 'enc-algorithm': 'high-medium', 'faz-type': '7', 'hmac-algorithm': 'sha256', 'ips-archive': 'enable', 'mgmt-name': 'test_value_10', 'monitor-failure-retry-period': '11', 'monitor-keepalive-period': '12', 'reliable': 'enable', 'server': '192.168.100.14', 'source-ip': '84.230.14.15', 'ssl-min-proto-version': 'default', 'status': 'enable', 'upload-day': 'test_value_18', 'upload-interval': 'daily', 'upload-option': 'store-and-upload', 'upload-time': 'test_value_21' } set_method_mock.assert_called_with('log.fortianalyzer2', 'setting', data=expected_data, vdom='root') schema_method_mock.assert_not_called() assert not is_error assert not changed assert response['status'] == 'error' assert response['http_status'] == 404 def test_log_fortianalyzer2_setting_filter_foreign_attributes(mocker): schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema') set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200} set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result) input_data = { 'username': 'admin', 'state': 'present', 'log_fortianalyzer2_setting': { 'random_attribute_not_valid': 'tag', '__change_ip': '3', 'certificate': 'test_value_4', 'conn_timeout': '5', 'enc_algorithm': 'high-medium', 'faz_type': '7', 'hmac_algorithm': 'sha256', 'ips_archive': 'enable', 'mgmt_name': 'test_value_10', 'monitor_failure_retry_period': '11', 'monitor_keepalive_period': '12', 
'reliable': 'enable', 'server': '192.168.100.14', 'source_ip': '84.230.14.15', 'ssl_min_proto_version': 'default', 'status': 'enable', 'upload_day': 'test_value_18', 'upload_interval': 'daily', 'upload_option': 'store-and-upload', 'upload_time': 'test_value_21' }, 'vdom': 'root'} is_error, changed, response = fortios_log_fortianalyzer2_setting.fortios_log_fortianalyzer2(input_data, fos_instance) expected_data = { '--change-ip': '3', 'certificate': 'test_value_4', 'conn-timeout': '5', 'enc-algorithm': 'high-medium', 'faz-type': '7', 'hmac-algorithm': 'sha256', 'ips-archive': 'enable', 'mgmt-name': 'test_value_10', 'monitor-failure-retry-period': '11', 'monitor-keepalive-period': '12', 'reliable': 'enable', 'server': '192.168.100.14', 'source-ip': '84.230.14.15', 'ssl-min-proto-version': 'default', 'status': 'enable', 'upload-day': 'test_value_18', 'upload-interval': 'daily', 'upload-option': 'store-and-upload', 'upload-time': 'test_value_21' } set_method_mock.assert_called_with('log.fortianalyzer2', 'setting', data=expected_data, vdom='root') schema_method_mock.assert_not_called() assert not is_error assert changed assert response['status'] == 'success' assert response['http_status'] == 200
unknown
codeparrot/codeparrot-clean
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_serialization import jsonutils as json
import six
from six.moves.urllib import parse as urllib

from tempest.lib.common import rest_client
from tempest.lib import exceptions as lib_exc
from tempest.lib.services.volume import base_client


class VolumesClient(base_client.BaseClient):
    """Client class to send CRUD Volume V2 API requests"""
    # Selects the Block Storage v2 endpoint in the service catalog.
    api_version = "v2"

    def _prepare_params(self, params):
        """Prepares params for use in get or _ext_get methods.

        If params is a string it will be left as it is, but if it's not it
        will be urlencoded.
        """
        if isinstance(params, six.string_types):
            # Caller already urlencoded the query string themselves.
            return params
        return urllib.urlencode(params)

    def list_volumes(self, detail=False, params=None):
        """List all the volumes created.

        Params can be a string (must be urlencoded) or a dictionary.
        For a full list of available parameters, please refer to the official
        API reference:
        http://developer.openstack.org/api-ref/block-storage/v2/#list-volumes-with-details
        http://developer.openstack.org/api-ref/block-storage/v2/#list-volumes
        """
        url = 'volumes'
        if detail:
            url += '/detail'
        if params:
            url += '?%s' % self._prepare_params(params)

        resp, body = self.get(url)
        body = json.loads(body)
        self.expected_success(200, resp.status)
        return rest_client.ResponseBody(resp, body)

    def show_volume(self, volume_id):
        """Returns the details of a single volume."""
        url = "volumes/%s" % volume_id
        resp, body = self.get(url)
        body = json.loads(body)
        self.expected_success(200, resp.status)
        return rest_client.ResponseBody(resp, body)

    def create_volume(self, **kwargs):
        """Creates a new Volume.

        For a full list of available parameters, please refer to the official
        API reference:
        http://developer.openstack.org/api-ref/block-storage/v2/#create-volume
        """
        post_body = json.dumps({'volume': kwargs})
        resp, body = self.post('volumes', post_body)
        body = json.loads(body)
        self.expected_success(202, resp.status)
        return rest_client.ResponseBody(resp, body)

    def update_volume(self, volume_id, **kwargs):
        """Updates the Specified Volume.

        For a full list of available parameters, please refer to the official
        API reference:
        http://developer.openstack.org/api-ref/block-storage/v2/#update-volume
        """
        put_body = json.dumps({'volume': kwargs})
        resp, body = self.put('volumes/%s' % volume_id, put_body)
        body = json.loads(body)
        self.expected_success(200, resp.status)
        return rest_client.ResponseBody(resp, body)

    def delete_volume(self, volume_id, **params):
        """Deletes the Specified Volume.

        For a full list of available parameters, please refer to the official
        API reference:
        https://developer.openstack.org/api-ref/block-storage/v2/#delete-volume
        """
        url = 'volumes/%s' % volume_id
        if params:
            url += '?%s' % urllib.urlencode(params)
        resp, body = self.delete(url)
        self.expected_success(202, resp.status)
        return rest_client.ResponseBody(resp, body)

    def upload_volume(self, volume_id, **kwargs):
        """Uploads a volume in Glance."""
        post_body = json.dumps({'os-volume_upload_image': kwargs})
        url = 'volumes/%s/action' % (volume_id)
        resp, body = self.post(url, post_body)
        body = json.loads(body)
        self.expected_success(202, resp.status)
        return rest_client.ResponseBody(resp, body)

    def attach_volume(self, volume_id, **kwargs):
        """Attaches a volume to a given instance on a given mountpoint.

        For a full list of available parameters, please refer to the official
        API reference:
        http://developer.openstack.org/api-ref/block-storage/v2/#attach-volume-to-server
        """
        post_body = json.dumps({'os-attach': kwargs})
        url = 'volumes/%s/action' % (volume_id)
        resp, body = self.post(url, post_body)
        self.expected_success(202, resp.status)
        return rest_client.ResponseBody(resp, body)

    def set_bootable_volume(self, volume_id, **kwargs):
        """Set a bootable flag for a volume - true or false.

        For a full list of available parameters, please refer to the official
        API reference:
        http://developer.openstack.org/api-ref/block-storage/v2/#update-volume-bootable-status
        """
        post_body = json.dumps({'os-set_bootable': kwargs})
        url = 'volumes/%s/action' % (volume_id)
        resp, body = self.post(url, post_body)
        self.expected_success(200, resp.status)
        return rest_client.ResponseBody(resp, body)

    def detach_volume(self, volume_id):
        """Detaches a volume from an instance."""
        post_body = json.dumps({'os-detach': {}})
        url = 'volumes/%s/action' % (volume_id)
        resp, body = self.post(url, post_body)
        self.expected_success(202, resp.status)
        return rest_client.ResponseBody(resp, body)

    def reserve_volume(self, volume_id):
        """Reserves a volume."""
        post_body = json.dumps({'os-reserve': {}})
        url = 'volumes/%s/action' % (volume_id)
        resp, body = self.post(url, post_body)
        self.expected_success(202, resp.status)
        return rest_client.ResponseBody(resp, body)

    def unreserve_volume(self, volume_id):
        """Restore a reserved volume ."""
        post_body = json.dumps({'os-unreserve': {}})
        url = 'volumes/%s/action' % (volume_id)
        resp, body = self.post(url, post_body)
        self.expected_success(202, resp.status)
        return rest_client.ResponseBody(resp, body)

    def is_resource_deleted(self, id):
        """Check the specified resource is deleted or not.

        :param id: A checked resource id
        :raises lib_exc.DeleteErrorException: If the specified resource is on
            the status the delete was failed.
        """
        # Used by tempest's generic waiter: NotFound means fully deleted,
        # "error_deleting" means the delete failed and won't progress.
        try:
            volume = self.show_volume(id)
        except lib_exc.NotFound:
            return True
        if volume["volume"]["status"] == "error_deleting":
            raise lib_exc.DeleteErrorException(resource_id=id)
        return False

    @property
    def resource_type(self):
        """Returns the primary type of resource this client works with."""
        return 'volume'

    def extend_volume(self, volume_id, **kwargs):
        """Extend a volume.

        For a full list of available parameters, please refer to the official
        API reference:
        http://developer.openstack.org/api-ref/block-storage/v2/#extend-volume-size
        """
        post_body = json.dumps({'os-extend': kwargs})
        url = 'volumes/%s/action' % (volume_id)
        resp, body = self.post(url, post_body)
        self.expected_success(202, resp.status)
        return rest_client.ResponseBody(resp, body)

    def reset_volume_status(self, volume_id, **kwargs):
        """Reset the Specified Volume's Status.

        For a full list of available parameters, please refer to the official
        API reference:
        http://developer.openstack.org/api-ref/block-storage/v2/#reset-volume-statuses
        """
        post_body = json.dumps({'os-reset_status': kwargs})
        resp, body = self.post('volumes/%s/action' % volume_id, post_body)
        self.expected_success(202, resp.status)
        return rest_client.ResponseBody(resp, body)

    def update_volume_readonly(self, volume_id, **kwargs):
        """Update the Specified Volume readonly."""
        post_body = json.dumps({'os-update_readonly_flag': kwargs})
        url = 'volumes/%s/action' % (volume_id)
        resp, body = self.post(url, post_body)
        self.expected_success(202, resp.status)
        return rest_client.ResponseBody(resp, body)

    def force_delete_volume(self, volume_id):
        """Force Delete Volume."""
        post_body = json.dumps({'os-force_delete': {}})
        resp, body = self.post('volumes/%s/action' % volume_id, post_body)
        self.expected_success(202, resp.status)
        return rest_client.ResponseBody(resp, body)

    def create_volume_metadata(self, volume_id, metadata):
        """Create metadata for the volume.

        For a full list of available parameters, please refer to the official
        API reference:
        http://developer.openstack.org/api-ref/block-storage/v2/#create-volume-metadata
        """
        put_body = json.dumps({'metadata': metadata})
        url = "volumes/%s/metadata" % volume_id
        resp, body = self.post(url, put_body)
        body = json.loads(body)
        self.expected_success(200, resp.status)
        return rest_client.ResponseBody(resp, body)

    def show_volume_metadata(self, volume_id):
        """Get metadata of the volume."""
        url = "volumes/%s/metadata" % volume_id
        resp, body = self.get(url)
        body = json.loads(body)
        self.expected_success(200, resp.status)
        return rest_client.ResponseBody(resp, body)

    def update_volume_metadata(self, volume_id, metadata):
        """Update metadata for the volume.

        For a full list of available parameters, please refer to the official
        API reference:
        http://developer.openstack.org/api-ref/block-storage/v2/#update-volume-metadata
        """
        put_body = json.dumps({'metadata': metadata})
        url = "volumes/%s/metadata" % volume_id
        resp, body = self.put(url, put_body)
        body = json.loads(body)
        self.expected_success(200, resp.status)
        return rest_client.ResponseBody(resp, body)

    def show_volume_metadata_item(self, volume_id, id):
        """Show metadata item for the volume."""
        url = "volumes/%s/metadata/%s" % (volume_id, id)
        resp, body = self.get(url)
        body = json.loads(body)
        self.expected_success(200, resp.status)
        return rest_client.ResponseBody(resp, body)

    def update_volume_metadata_item(self, volume_id, id, meta_item):
        """Update metadata item for the volume."""
        put_body = json.dumps({'meta': meta_item})
        url = "volumes/%s/metadata/%s" % (volume_id, id)
        resp, body = self.put(url, put_body)
        body = json.loads(body)
        self.expected_success(200, resp.status)
        return rest_client.ResponseBody(resp, body)

    def delete_volume_metadata_item(self, volume_id, id):
        """Delete metadata item for the volume."""
        url = "volumes/%s/metadata/%s" % (volume_id, id)
        resp, body = self.delete(url)
        self.expected_success(200, resp.status)
        return
rest_client.ResponseBody(resp, body) def retype_volume(self, volume_id, **kwargs): """Updates volume with new volume type. For a full list of available parameters, please refer to the official API reference: https://developer.openstack.org/api-ref/block-storage/v2/#retype-volume """ post_body = json.dumps({'os-retype': kwargs}) resp, body = self.post('volumes/%s/action' % volume_id, post_body) self.expected_success(202, resp.status) return rest_client.ResponseBody(resp, body) def force_detach_volume(self, volume_id, **kwargs): """Force detach a volume. For a full list of available parameters, please refer to the official API reference: https://developer.openstack.org/api-ref/block-storage/v2/#force-detach-volume """ post_body = json.dumps({'os-force_detach': kwargs}) url = 'volumes/%s/action' % volume_id resp, body = self.post(url, post_body) self.expected_success(202, resp.status) return rest_client.ResponseBody(resp, body) def update_volume_image_metadata(self, volume_id, **kwargs): """Update image metadata for the volume. 
For a full list of available parameters, please refer to the official API reference: http://developer.openstack.org/api-ref/block-storage/v2/#set-image-metadata-for-volume """ post_body = json.dumps({'os-set_image_metadata': {'metadata': kwargs}}) url = "volumes/%s/action" % (volume_id) resp, body = self.post(url, post_body) body = json.loads(body) self.expected_success(200, resp.status) return rest_client.ResponseBody(resp, body) def delete_volume_image_metadata(self, volume_id, key_name): """Delete image metadata item for the volume.""" post_body = json.dumps({'os-unset_image_metadata': {'key': key_name}}) url = "volumes/%s/action" % (volume_id) resp, body = self.post(url, post_body) self.expected_success(200, resp.status) return rest_client.ResponseBody(resp, body) def show_volume_image_metadata(self, volume_id): """Show image metadata for the volume.""" post_body = json.dumps({'os-show_image_metadata': {}}) url = "volumes/%s/action" % volume_id resp, body = self.post(url, post_body) body = json.loads(body) self.expected_success(200, resp.status) return rest_client.ResponseBody(resp, body) def unmanage_volume(self, volume_id): """Unmanage volume. For a full list of available parameters, please refer to the official API reference: https://developer.openstack.org/api-ref/block-storage/v2/#unmanage-volume """ post_body = json.dumps({'os-unmanage': {}}) resp, body = self.post('volumes/%s/action' % volume_id, post_body) self.expected_success(202, resp.status) return rest_client.ResponseBody(resp, body)
unknown
codeparrot/codeparrot-clean
"""Test that lldb command 'process signal SIGUSR1' to send a signal to the inferior works.""" import lldb from lldbsuite.test.decorators import * from lldbsuite.test.lldbtest import * from lldbsuite.test import lldbutil class SendSignalTestCase(TestBase): mydir = TestBase.compute_mydir(__file__) def setUp(self): # Call super's setUp(). TestBase.setUp(self) # Find the line number to break inside main(). self.line = line_number('main.c', 'Put breakpoint here') @expectedFailureNetBSD(bugnumber='llvm.org/pr43959') @skipIfWindows # Windows does not support signals @skipIfReproducer # FIXME: Unexpected packet during (active) replay def test_with_run_command(self): """Test that lldb command 'process signal SIGUSR1' sends a signal to the inferior process.""" self.build() exe = self.getBuildArtifact("a.out") # Create a target by the debugger. target = self.dbg.CreateTarget(exe) self.assertTrue(target, VALID_TARGET) # Now create a breakpoint on main.c by name 'c'. breakpoint = target.BreakpointCreateByLocation('main.c', self.line) self.assertTrue(breakpoint and breakpoint.GetNumLocations() == 1, VALID_BREAKPOINT) # Get the breakpoint location from breakpoint after we verified that, # indeed, it has one location. location = breakpoint.GetLocationAtIndex(0) self.assertTrue(location and location.IsEnabled(), VALID_BREAKPOINT_LOCATION) # Now launch the process, no arguments & do not stop at entry point. launch_info = target.GetLaunchInfo() launch_info.SetWorkingDirectory(self.get_process_working_directory()) process_listener = lldb.SBListener("signal_test_listener") launch_info.SetListener(process_listener) error = lldb.SBError() process = target.Launch(launch_info, error) self.assertTrue(process, PROCESS_IS_VALID) self.runCmd("process handle -n False -p True -s True SIGUSR1") thread = lldbutil.get_stopped_thread( process, lldb.eStopReasonBreakpoint) self.assertTrue(thread.IsValid(), "We hit the first breakpoint.") # After resuming the process, send it a SIGUSR1 signal. 
self.setAsync(True) self.assertTrue( process_listener.IsValid(), "Got a good process listener") # Disable our breakpoint, we don't want to hit it anymore... breakpoint.SetEnabled(False) # Now continue: process.Continue() # If running remote test, there should be a connected event if lldb.remote_platform: self.match_state(process_listener, lldb.eStateConnected) self.match_state(process_listener, lldb.eStateRunning) # Now signal the process, and make sure it stops: process.Signal(lldbutil.get_signal_number('SIGUSR1')) self.match_state(process_listener, lldb.eStateStopped) # Now make sure the thread was stopped with a SIGUSR1: threads = lldbutil.get_stopped_threads(process, lldb.eStopReasonSignal) self.assertEquals(len(threads), 1, "One thread stopped for a signal.") thread = threads[0] self.assertTrue( thread.GetStopReasonDataCount() >= 1, "There was data in the event.") self.assertEqual( thread.GetStopReasonDataAtIndex(0), lldbutil.get_signal_number('SIGUSR1'), "The stop signal was SIGUSR1") def match_state(self, process_listener, expected_state): num_seconds = 5 broadcaster = self.process().GetBroadcaster() event_type_mask = lldb.SBProcess.eBroadcastBitStateChanged event = lldb.SBEvent() got_event = process_listener.WaitForEventForBroadcasterWithType( num_seconds, broadcaster, event_type_mask, event) self.assertTrue(got_event, "Got an event") state = lldb.SBProcess.GetStateFromEvent(event) self.assertEquals(state, expected_state, "It was the %s state." % lldb.SBDebugger_StateAsCString(expected_state))
unknown
codeparrot/codeparrot-clean
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Composite GitHub Action: approves a workflow run that is waiting for
# approval (e.g. one submitted by a non-committer) via the GitHub REST API.
---
name: "Approve Workflow Run"
description: "Approve a Workflow run that has been submitted by a non-committer"
inputs:
  gh-token:
    description: "The GitHub token for use with the CLI"
    required: true
  repository:
    description: "The GitHub repository"
    required: true
    default: "apache/kafka"
  run_id:
    description: "The Workflow Run ID"
    required: true
  pr_number:
    description: "The Pull Request number"
    required: true
  commit_sha:
    description: "The SHA of the commit the run is for"
    required: true
runs:
  using: "composite"
  steps:
    - name: Approve Workflow Run
      shell: bash
      # Inputs are passed through the environment so the shell snippet does
      # not interpolate untrusted values directly into the script text.
      env:
        GH_TOKEN: ${{ inputs.gh-token }}
        REPO: ${{ inputs.repository }}
        RUN_ID: ${{ inputs.run_id }}
        PR_NUMBER: ${{ inputs.pr_number }}
        COMMIT_SHA: ${{ inputs.commit_sha }}
      run: |
        echo "Approving workflow run $RUN_ID for PR $PR_NUMBER at SHA $COMMIT_SHA";
        gh api --method POST \
          -H 'Accept: application/vnd.github+json' \
          -H 'X-GitHub-Api-Version: 2022-11-28' \
          /repos/$REPO/actions/runs/$RUN_ID/approve
unknown
github
https://github.com/apache/kafka
.github/actions/gh-api-approve-run/action.yml
# Prepare component for translation

To prepare your project for translation, complete the following actions.

- Use the `i18n` attribute to mark text in component templates
- Use the `i18n-` attribute to mark attribute text strings in component templates
- Use the `$localize` tagged message string to mark text strings in component code

## Mark text in component template

In a component template, the i18n metadata is the value of the `i18n` attribute.

```html
<element i18n="{i18n_metadata}">{string_to_translate}</element>
```

Use the `i18n` attribute to mark a static text message in your component templates for translation.
Place it on every element tag that contains fixed text you want to translate.

HELPFUL: The `i18n` attribute is a custom attribute that the Angular tools and compilers recognize.

### `i18n` example

The following `<h1>` tag displays a simple English language greeting, "Hello i18n!".

<docs-code header="app.component.html" path="adev/src/content/examples/i18n/doc-files/app.component.html" region="greeting"/>

To mark the greeting for translation, add the `i18n` attribute to the `<h1>` tag.

<docs-code header="app.component.html" path="adev/src/content/examples/i18n/doc-files/app.component.html" region="i18n-attribute"/>

### Using a conditional statement with `i18n`

The following `<div>` tag will display translated text as part of `div` and `aria-label` based on the toggle status.

<docs-code-multifile>
    <docs-code header="app.component.html" path="adev/src/content/examples/i18n/src/app/app.component.html" region="i18n-conditional"/>
    <docs-code header="app.component.ts" path="adev/src/content/examples/i18n/src/app/app.component.ts" visibleLines="[[14,21],[33,37]]"/>
</docs-code-multifile>

### Translate inline text without HTML element

Use the `<ng-container>` element to associate a translation behavior for specific text without changing the way text is displayed.

HELPFUL: Each HTML element creates a new DOM element.
To avoid creating a new DOM element, wrap the text in an `<ng-container>` element. The following example shows the `<ng-container>` element transformed into a non-displayed HTML comment. <docs-code path="adev/src/content/examples/i18n/src/app/app.component.html" region="i18n-ng-container"/> ## Mark element attributes for translations In a component template, the i18n metadata is the value of the `i18n-{attribute_name}` attribute. ```html <element i18n-{attribute_name}="{i18n_metadata}" {attribute_name}="{attribute_value}" /> ``` The attributes of HTML elements include text that should be translated along with the rest of the displayed text in the component template. Use `i18n-{attribute_name}` with any attribute of any element and replace `{attribute_name}` with the name of the attribute. Use the following syntax to assign a meaning, description, and custom ID. ```html i18n-{attribute_name}="{meaning}|{description}@@{id}" ``` ### `i18n-title` example To translate the title of an image, review this example. The following example displays an image with a `title` attribute. <docs-code header="app.component.html" path="adev/src/content/examples/i18n/doc-files/app.component.html" region="i18n-title"/> To mark the title attribute for translation, complete the following action. Add the `i18n-title` attribute The following example displays how to mark the `title` attribute on the `img` tag by adding `i18n-title`. <docs-code header="app.component.html" path="adev/src/content/examples/i18n/src/app/app.component.html" region="i18n-title-translate"/> ## Mark text in component code In component code, the translation source text and the metadata are surrounded by backtick \(<code>&#96;</code>\) characters. Use the [`$localize`][ApiLocalizeInitLocalize] tagged message string to mark a string in your code for translation. ```ts $localize`string_to_translate`; ``` The i18n metadata is surrounded by colon \(`:`\) characters and prepends the translation source text. 
```ts $localize`:{i18n_metadata}:string_to_translate`; ``` ### Include interpolated text Include [interpolations](guide/templates/binding#render-dynamic-text-with-text-interpolation) in a [`$localize`][ApiLocalizeInitLocalize] tagged message string. ```ts $localize`string_to_translate ${variable_name}`; ``` ### Name the interpolation placeholder ```ts $localize`string_to_translate ${variable_name}:placeholder_name:`; ``` ### Conditional syntax for translations ```ts return this.show ? $localize`Show Tabs` : $localize`Hide tabs`; ``` ## i18n metadata for translation ```html {meaning}|{description}@@{custom_id} ``` The following parameters provide context and additional information to reduce confusion for your translator. | Metadata parameter | Details | | :----------------- | :-------------------------------------------------------------------- | | Custom ID | Provide a custom identifier | | Description | Provide additional information or context | | Meaning | Provide the meaning or intent of the text within the specific context | For additional information about custom IDs, see [Manage marked text with custom IDs][GuideI18nOptionalManageMarkedText]. ### Add helpful descriptions and meanings To translate a text message accurately, provide additional information or context for the translator. Add a _description_ of the text message as the value of the `i18n` attribute or [`$localize`][ApiLocalizeInitLocalize] tagged message string. The following example shows the value of the `i18n` attribute. <docs-code header="app.component.html" path="adev/src/content/examples/i18n/doc-files/app.component.html" region="i18n-attribute-desc"/> The following example shows the value of the [`$localize`][ApiLocalizeInitLocalize] tagged message string with a description. 
```ts $localize`:An introduction header for this sample:Hello i18n!`; ``` The translator may also need to know the meaning or intent of the text message within this particular application context, in order to translate it the same way as other text with the same meaning. Start the `i18n` attribute value with the _meaning_ and separate it from the _description_ with the `|` character: `{meaning}|{description}`. #### `h1` example For example, you may want to specify that the `<h1>` tag is a site header that you need translated the same way, whether it is used as a header or referenced in another section of text. The following example shows how to specify that the `<h1>` tag must be translated as a header or referenced elsewhere. <docs-code header="app.component.html" path="adev/src/content/examples/i18n/doc-files/app.component.html" region="i18n-attribute-meaning"/> The result is any text marked with `site header`, as the _meaning_ is translated exactly the same way. The following code example shows the value of the [`$localize`][ApiLocalizeInitLocalize] tagged message string with a meaning and a description. ```ts $localize`:site header|An introduction header for this sample:Hello i18n!`; ``` <docs-callout title="How meanings control text extraction and merges"> The Angular extraction tool generates a translation unit entry for each `i18n` attribute in a template. The Angular extraction tool assigns each translation unit a unique ID based on the _meaning_ and _description_. HELPFUL: For more information about the Angular extraction tool, see [Work with translation files](guide/i18n/translation-files). The same text elements with different _meanings_ are extracted with different IDs. For example, if the word "right" uses the following two definitions in two different locations, the word is translated differently and merged back into the application as different translation entries. 
- `correct` as in "you are right" - `direction` as in "turn right" If the same text elements meet the following conditions, the text elements are extracted only once and use the same ID. - Same meaning or definition - Different descriptions That one translation entry is merged back into the application wherever the same text elements appear. </docs-callout> ## ICU expressions ICU expressions help you mark alternate text in component templates to meet conditions. An ICU expression includes a component property, an ICU clause, and the case statements surrounded by open curly brace \(`{`\) and close curly brace \(`}`\) characters. ```html { component_property, icu_clause, case_statements } ``` The component property defines the variable. An ICU clause defines the type of conditional text. | ICU clause | Details | | :------------------------------------------------------------------- | :------------------------------------------------------------------ | | [`plural`][GuideI18nCommonPrepareMarkPlurals] | Mark the use of plural numbers | | [`select`][GuideI18nCommonPrepareMarkAlternatesAndNestedExpressions] | Mark choices for alternate text based on your defined string values | To simplify translation, use International Components for Unicode clauses \(ICU clauses\) with regular expressions. HELPFUL: The ICU clauses adhere to the [ICU Message Format][GithubUnicodeOrgIcuUserguideFormatParseMessages] specified in the [CLDR pluralization rules][UnicodeCldrIndexCldrSpecPluralRules]. ### Mark plurals Different languages have different pluralization rules that increase the difficulty of translation. Because other locales express cardinality differently, you may need to set pluralization categories that do not align with English. Use the `plural` clause to mark expressions that may not be meaningful if translated word-for-word. 
```html { component_property, plural, pluralization_categories } ``` After the pluralization category, enter the default text \(English\) surrounded by open curly brace \(`{`\) and close curly brace \(`}`\) characters. ```html pluralization_category { } ``` The following pluralization categories are available for English and may change based on the locale. | Pluralization category | Details | Example | | :--------------------- | :------------------------- | :------------------------- | | `zero` | Quantity is zero | `=0 { }` <br /> `zero { }` | | `one` | Quantity is 1 | `=1 { }` <br /> `one { }` | | `two` | Quantity is 2 | `=2 { }` <br /> `two { }` | | `few` | Quantity is 2 or more | `few { }` | | `many` | Quantity is a large number | `many { }` | | `other` | The default quantity | `other { }` | If none of the pluralization categories match, Angular uses `other` to match the standard fallback for a missing category. ```html other { default_quantity } ``` HELPFUL: For more information about pluralization categories, see [Choosing plural category names][UnicodeCldrIndexCldrSpecPluralRulesTocChoosingPluralCategoryNames] in the [CLDR - Unicode Common Locale Data Repository][UnicodeCldrMain]. <docs-callout header='Background: Locales may not support some pluralization categories'> Many locales don't support some of the pluralization categories. The default locale \(`en-US`\) uses a very simple `plural()` function that doesn't support the `few` pluralization category. Another locale with a simple `plural()` function is `es`. The following code example shows the [en-US `plural()`][GithubAngularAngularBlobEcffc3557fe1bff9718c01277498e877ca44588dPackagesCoreSrcI18nLocaleEnTsL14L18] function. <docs-code path="adev/src/content/examples/i18n/doc-files/locale_plural_function.ts" class="no-box" hideCopy/> The `plural()` function only returns 1 \(`one`\) or 5 \(`other`\). The `few` category never matches. 
</docs-callout> #### `minutes` example If you want to display the following phrase in English, where `x` is a number. <!--todo: replace output docs-code with screen capture image ---> ```html updated x minutes ago ``` And you also want to display the following phrases based on the cardinality of `x`. <!--todo: replace output docs-code with screen capture image ---> ```html updated just now ``` <!--todo: replace output docs-code with screen capture image ---> ```html updated one minute ago ``` Use HTML markup and [interpolations](guide/templates/binding#render-dynamic-text-with-text-interpolation). The following code example shows how to use the `plural` clause to express the previous three situations in a `<span>` element. <docs-code header="app.component.html" path="adev/src/content/examples/i18n/src/app/app.component.html" region="i18n-plural"/> Review the following details in the previous code example. | Parameters | Details | | :-------------------------------- | :-------------------------------------------------------------------------------------------------------------------- | | `minutes` | The first parameter specifies the component property is `minutes` and determines the number of minutes. | | `plural` | The second parameter specifies the ICU clause is `plural`. | | `=0 {just now}` | For zero minutes, the pluralization category is `=0`. The value is `just now`. | | `=1 {one minute}` | For one minute, the pluralization category is `=1`. The value is `one minute`. | | `other {{{minutes}} minutes ago}` | For any unmatched cardinality, the default pluralization category is `other`. The value is `{{minutes}} minutes ago`. | `{{minutes}}` is an [interpolation](guide/templates/binding#render-dynamic-text-with-text-interpolation). ### Mark alternates and nested expressions The `select` clause marks choices for alternate text based on your defined string values. 
```html
{ component_property, select, selection_categories }
```

Translate all of the alternates to display alternate text based on the value of a variable.

After the selection category, enter the text \(English\) surrounded by open curly brace \(`{`\) and close curly brace \(`}`\) characters.

```html
selection_category { text }
```

Different locales have different grammatical constructions that increase the difficulty of translation.
Use HTML markup.

If none of the selection categories match, Angular uses `other` to match the standard fallback for a missing category.

```html
other { default_value }
```

#### `gender` example

If you want to display the following phrase in English.

<!--todo: replace output docs-code with screen capture image --->

```html
The author is other
```

And you also want to display the following phrases based on the `gender` property of the component.

<!--todo: replace output docs-code with screen capture image --->

```html
The author is female
```

<!--todo: replace output docs-code with screen capture image --->

```html
The author is male
```

The following code example shows how to bind the `gender` property of the component and use the `select` clause to express the previous three situations in a `<span>` element.

The `gender` property binds the outputs to each of the following string values.

| Value  | English value |
| :----- | :------------ |
| female | `female`      |
| male   | `male`        |
| other  | `other`       |

The `select` clause maps the values to the appropriate translations.
The following code example shows `gender` property used with the select clause.

<docs-code header="app.component.html" path="adev/src/content/examples/i18n/src/app/app.component.html" region="i18n-select"/>

#### `gender` and `minutes` example

Combine different clauses together, such as the `plural` and `select` clauses.
The following code example shows nested clauses based on the `gender` and `minutes` examples.
<docs-code header="app.component.html" path="adev/src/content/examples/i18n/src/app/app.component.html" region="i18n-nested"/> ## What's next <docs-pill-row> <docs-pill href="guide/i18n/translation-files" title="Work with translation files"/> </docs-pill-row> [ApiLocalizeInitLocalize]: api/localize/init/$localize '$localize | init - localize - API | Angular' [GuideI18nCommonPrepareMarkAlternatesAndNestedExpressions]: guide/i18n/prepare#mark-alternates-and-nested-expressions 'Mark alternates and nested expressions - Prepare templates for translation | Angular' [GuideI18nCommonPrepareMarkPlurals]: guide/i18n/prepare#mark-plurals 'Mark plurals - Prepare component for translation | Angular' [GuideI18nOptionalManageMarkedText]: guide/i18n/manage-marked-text 'Manage marked text with custom IDs | Angular' [GithubAngularAngularBlobEcffc3557fe1bff9718c01277498e877ca44588dPackagesCoreSrcI18nLocaleEnTsL14L18]: https://github.com/angular/angular/blob/ecffc3557fe1bff9718c01277498e877ca44588d/packages/core/src/i18n/locale_en.ts#L14-L18 'Line 14 to 18 - angular/packages/core/src/i18n/locale_en.ts | angular/angular | GitHub' [GithubUnicodeOrgIcuUserguideFormatParseMessages]: https://unicode-org.github.io/icu/userguide/format_parse/messages 'ICU Message Format - ICU Documentation | Unicode | GitHub' [UnicodeCldrMain]: https://cldr.unicode.org 'Unicode CLDR Project' [UnicodeCldrIndexCldrSpecPluralRules]: http://cldr.unicode.org/index/cldr-spec/plural-rules 'Plural Rules | CLDR - Unicode Common Locale Data Repository | Unicode' [UnicodeCldrIndexCldrSpecPluralRulesTocChoosingPluralCategoryNames]: http://cldr.unicode.org/index/cldr-spec/plural-rules#TOC-Choosing-Plural-Category-Names 'Choosing Plural Category Names - Plural Rules | CLDR - Unicode Common Locale Data Repository | Unicode'
unknown
github
https://github.com/angular/angular
adev/src/content/guide/i18n/prepare.md
from django.contrib.gis.db.models import Collect, Count, Extent, F, MakeLine, Q, Union
from django.contrib.gis.db.models.functions import Centroid
from django.contrib.gis.geos import GEOSGeometry, MultiPoint, Point
from django.db import NotSupportedError, connection
from django.test import TestCase, skipUnlessDBFeature
from django.test.utils import override_settings
from django.utils import timezone

from ..utils import skipUnlessGISLookup
from .models import Article, Author, Book, City, DirectoryEntry, Event, Location, Parcel


class RelatedGeoModelTest(TestCase):
    """
    Tests for ORM features (select_related, aggregates, values, defer, ...)
    when traversing relations that involve geographic fields. Backed by the
    "initial" fixture; several tests are gated on spatial-backend features
    via @skipUnlessDBFeature / @skipUnlessGISLookup.
    """

    fixtures = ["initial"]

    def test02_select_related(self):
        "Testing `select_related` on geographic models (see #7126)."
        # Same queryset three ways: no select_related, blanket, and explicit.
        # All three must hydrate the related Location's point identically.
        qs1 = City.objects.order_by("id")
        qs2 = City.objects.order_by("id").select_related()
        qs3 = City.objects.order_by("id").select_related("location")

        # Reference data for what's in the fixtures.
        cities = (
            ("Aurora", "TX", -97.516111, 33.058333),
            ("Roswell", "NM", -104.528056, 33.387222),
            ("Kecksburg", "PA", -79.460734, 40.18476),
        )

        for qs in (qs1, qs2, qs3):
            for ref, c in zip(cities, qs):
                nm, st, lon, lat = ref
                self.assertEqual(nm, c.name)
                self.assertEqual(st, c.state)
                self.assertAlmostEqual(lon, c.location.point.x, 6)
                self.assertAlmostEqual(lat, c.location.point.y, 6)

    @skipUnlessDBFeature("supports_extent_aggr")
    def test_related_extent_aggregate(self):
        "Testing the `Extent` aggregate on related geographic models."
        # This combines the Extent and Union aggregates into one query
        aggs = City.objects.aggregate(Extent("location__point"))

        # One for all locations, one that excludes New Mexico (Roswell).
        all_extent = (-104.528056, 29.763374, -79.460734, 40.18476)
        txpa_extent = (-97.516111, 29.763374, -79.460734, 40.18476)
        e1 = City.objects.aggregate(Extent("location__point"))[
            "location__point__extent"
        ]
        e2 = City.objects.exclude(state="NM").aggregate(Extent("location__point"))[
            "location__point__extent"
        ]
        e3 = aggs["location__point__extent"]

        # The tolerance value is to four decimal places because of differences
        # between the Oracle and PostGIS spatial backends on the extent
        # calculation.
        tol = 4
        for ref, e in [(all_extent, e1), (txpa_extent, e2), (all_extent, e3)]:
            for ref_val, e_val in zip(ref, e):
                self.assertAlmostEqual(ref_val, e_val, tol)

    @skipUnlessDBFeature("supports_extent_aggr")
    def test_related_extent_annotate(self):
        """
        Test annotation with Extent GeoAggregate.
        """
        cities = City.objects.annotate(
            points_extent=Extent("location__point")
        ).order_by("name")
        tol = 4
        # A single point's extent degenerates to (x, y, x, y).
        self.assertAlmostEqual(
            cities[0].points_extent, (-97.516111, 33.058333, -97.516111, 33.058333), tol
        )

    @skipUnlessDBFeature("supports_union_aggr")
    def test_related_union_aggregate(self):
        "Testing the `Union` aggregate on related geographic models."
        # This combines the Extent and Union aggregates into one query
        aggs = City.objects.aggregate(Union("location__point"))

        # These are the points that are components of the aggregate geographic
        # union that is returned. Each point corresponds to a City PK.
        p1 = Point(-104.528056, 33.387222)
        p2 = Point(-97.516111, 33.058333)
        p3 = Point(-79.460734, 40.18476)
        p4 = Point(-96.801611, 32.782057)
        p5 = Point(-95.363151, 29.763374)

        # The second union aggregate is for a union
        # query that includes limiting information in the WHERE clause (in
        # other words a `.filter()` precedes the call to `.aggregate(Union()`).
        ref_u1 = MultiPoint(p1, p2, p4, p5, p3, srid=4326)
        ref_u2 = MultiPoint(p2, p3, srid=4326)

        u1 = City.objects.aggregate(Union("location__point"))["location__point__union"]
        u2 = City.objects.exclude(
            name__in=("Roswell", "Houston", "Dallas", "Fort Worth"),
        ).aggregate(Union("location__point"))["location__point__union"]
        u3 = aggs["location__point__union"]
        self.assertEqual(type(u1), MultiPoint)
        self.assertEqual(type(u3), MultiPoint)

        # Ordering of points in the result of the union is not defined and
        # implementation-dependent (DB backend, GEOS version).
        tests = [
            (u1, ref_u1),
            (u2, ref_u2),
            (u3, ref_u1),
        ]
        for union, ref in tests:
            # Sort both sides so the comparison is order-independent.
            for point, ref_point in zip(sorted(union), sorted(ref), strict=True):
                self.assertIs(point.equals_exact(ref_point, tolerance=6), True)

    def test05_select_related_fk_to_subclass(self):
        """
        select_related on a query over a model with an FK to a model subclass.
        """
        # Regression test for #9752.
        list(DirectoryEntry.objects.select_related())

    @skipUnlessGISLookup("within")
    def test06_f_expressions(self):
        "Testing F() expressions on GeometryFields."
        # Constructing a dummy parcel border and getting the City instance for
        # assigning the FK.
        b1 = GEOSGeometry(
            "POLYGON((-97.501205 33.052520,-97.501205 33.052576,"
            "-97.501150 33.052576,-97.501150 33.052520,-97.501205 33.052520))",
            srid=4326,
        )
        pcity = City.objects.get(name="Aurora")

        # First parcel has incorrect center point that is equal to the City;
        # it also has a second border that is different from the first as a
        # 100ft buffer around the City.
        c1 = pcity.location.point
        c2 = c1.transform(2276, clone=True)
        b2 = c2.buffer(100)
        Parcel.objects.create(
            name="P1", city=pcity, center1=c1, center2=c2, border1=b1, border2=b2
        )

        # Now creating a second Parcel where the borders are the same, just
        # in different coordinate systems. The center points are also the
        # same (but in different coordinate systems), and this time they
        # actually correspond to the centroid of the border.
        c1 = b1.centroid
        c2 = c1.transform(2276, clone=True)
        b2 = (
            b1
            if connection.features.supports_transform
            else b1.transform(2276, clone=True)
        )
        Parcel.objects.create(
            name="P2", city=pcity, center1=c1, center2=c2, border1=b1, border2=b2
        )

        # Should return the second Parcel, which has the center within the
        # border.
        qs = Parcel.objects.filter(center1__within=F("border1"))
        self.assertEqual(1, len(qs))
        self.assertEqual("P2", qs[0].name)

        # This time center2 is in a different coordinate system and needs to be
        # wrapped in transformation SQL.
        qs = Parcel.objects.filter(center2__within=F("border1"))
        if connection.features.supports_transform:
            self.assertEqual("P2", qs.get().name)
        else:
            msg = "This backend doesn't support the Transform function."
            with self.assertRaisesMessage(NotSupportedError, msg):
                list(qs)

        # Should return the first Parcel, which has the center point equal
        # to the point in the City ForeignKey.
        qs = Parcel.objects.filter(center1=F("city__location__point"))
        self.assertEqual(1, len(qs))
        self.assertEqual("P1", qs[0].name)

        # This time the city column should be wrapped in transformation SQL.
        qs = Parcel.objects.filter(border2__contains=F("city__location__point"))
        if connection.features.supports_transform:
            self.assertEqual("P1", qs.get().name)
        else:
            msg = "This backend doesn't support the Transform function."
            with self.assertRaisesMessage(NotSupportedError, msg):
                list(qs)

    def test07_values(self):
        "Testing values() and values_list()."
        gqs = Location.objects.all()
        gvqs = Location.objects.values()
        gvlqs = Location.objects.values_list()

        # Incrementing through each of the models, dictionaries, and tuples
        # returned by each QuerySet.
        for m, d, t in zip(gqs, gvqs, gvlqs):
            # The values should be Geometry objects and not raw strings
            # returned by the spatial database.
            self.assertIsInstance(d["point"], GEOSGeometry)
            self.assertIsInstance(t[1], GEOSGeometry)
            self.assertEqual(m.point, d["point"])
            self.assertEqual(m.point, t[1])

    @override_settings(USE_TZ=True)
    def test_07b_values(self):
        "Testing values() and values_list() with aware datetime. See #21565."
        Event.objects.create(name="foo", when=timezone.now())
        list(Event.objects.values_list("when"))

    def test08_defer_only(self):
        "Testing defer() and only() on Geographic models."
        qs = Location.objects.all().order_by("pk")
        def_qs = Location.objects.defer("point").order_by("pk")
        for loc, def_loc in zip(qs, def_qs):
            # Accessing the deferred field must load the same geometry.
            self.assertEqual(loc.point, def_loc.point)

    def test09_pk_relations(self):
        """
        Ensuring correct primary key column is selected across relations. See
        #10757.
        """
        # The expected ID values -- notice the last two location IDs
        # are out of order. Dallas and Houston have location IDs that differ
        # from their PKs -- this is done to ensure that the related location
        # ID column is selected instead of ID column for the city.
        city_ids = (1, 2, 3, 4, 5)
        loc_ids = (1, 2, 3, 5, 4)
        ids_qs = City.objects.order_by("id").values("id", "location__id")
        for val_dict, c_id, l_id in zip(ids_qs, city_ids, loc_ids):
            self.assertEqual(val_dict["id"], c_id)
            self.assertEqual(val_dict["location__id"], l_id)

    @skipUnlessGISLookup("within")
    def test10_combine(self):
        "Testing the combination of two QuerySets (#10807)."
        buf1 = City.objects.get(name="Aurora").location.point.buffer(0.1)
        buf2 = City.objects.get(name="Kecksburg").location.point.buffer(0.1)
        qs1 = City.objects.filter(location__point__within=buf1)
        qs2 = City.objects.filter(location__point__within=buf2)
        combined = qs1 | qs2
        names = [c.name for c in combined]
        self.assertEqual(2, len(names))
        self.assertIn("Aurora", names)
        self.assertIn("Kecksburg", names)

    @skipUnlessDBFeature("allows_group_by_lob")
    def test12a_count(self):
        "Testing `Count` aggregate on geo-fields."
        # The City, 'Fort Worth' uses the same location as Dallas.
        dallas = City.objects.get(name="Dallas")

        # Count annotation should be 2 for the Dallas location now.
        loc = Location.objects.annotate(num_cities=Count("city")).get(
            id=dallas.location.id
        )
        self.assertEqual(2, loc.num_cities)

    def test12b_count(self):
        "Testing `Count` aggregate on non geo-fields."
        # Should only be one author (Trevor Paglen) returned by this query, and
        # the annotation should have 3 for the number of books, see #11087.
        # Also testing with a values(), see #11489.
        qs = Author.objects.annotate(num_books=Count("books")).filter(num_books__gt=1)
        vqs = (
            Author.objects.values("name")
            .annotate(num_books=Count("books"))
            .filter(num_books__gt=1)
        )
        self.assertEqual(1, len(qs))
        self.assertEqual(3, qs[0].num_books)
        self.assertEqual(1, len(vqs))
        self.assertEqual(3, vqs[0]["num_books"])

    @skipUnlessDBFeature("allows_group_by_lob")
    def test13c_count(self):
        "Testing `Count` aggregate with `.values()`. See #15305."
        qs = (
            Location.objects.filter(id=5)
            .annotate(num_cities=Count("city"))
            .values("id", "point", "num_cities")
        )
        self.assertEqual(1, len(qs))
        self.assertEqual(2, qs[0]["num_cities"])
        self.assertIsInstance(qs[0]["point"], GEOSGeometry)

    def test13_select_related_null_fk(self):
        "Testing `select_related` on a nullable ForeignKey."
        Book.objects.create(title="Without Author")
        b = Book.objects.select_related("author").get(title="Without Author")
        # Should be `None`, and not a 'dummy' model.
        self.assertIsNone(b.author)

    @skipUnlessDBFeature("supports_collect_aggr")
    def test_collect(self):
        """
        Testing the `Collect` aggregate.
        """
        # Reference query:
        # SELECT AsText(ST_Collect("relatedapp_location"."point"))
        # FROM "relatedapp_city"
        # LEFT OUTER JOIN
        #     "relatedapp_location" ON (
        #         "relatedapp_city"."location_id" = "relatedapp_location"."id"
        #     )
        # WHERE "relatedapp_city"."state" = 'TX';
        ref_geom = GEOSGeometry(
            "MULTIPOINT(-97.516111 33.058333,-96.801611 32.782057,"
            "-95.363151 29.763374,-96.801611 32.782057)"
        )

        coll = City.objects.filter(state="TX").aggregate(Collect("location__point"))[
            "location__point__collect"
        ]
        # Even though Dallas and Ft. Worth share same point, Collect doesn't
        # consolidate -- that's why 4 points in MultiPoint.
        self.assertEqual(4, len(coll))
        self.assertTrue(ref_geom.equals(coll))

    @skipUnlessDBFeature("supports_collect_aggr")
    def test_collect_filter(self):
        """Collect with a filter= argument: matching, empty, and single-row."""
        qs = City.objects.annotate(
            parcel_center=Collect(
                "parcel__center1",
                filter=~Q(parcel__name__icontains="ignore"),
            ),
            parcel_center_nonexistent=Collect(
                "parcel__center1",
                filter=Q(parcel__name__icontains="nonexistent"),
            ),
            parcel_center_single=Collect(
                "parcel__center1",
                filter=Q(parcel__name__contains="Alpha"),
            ),
        )
        city = qs.get(name="Aurora")
        self.assertEqual(
            city.parcel_center.wkt,
            GEOSGeometry("MULTIPOINT (1.7128 -2.006, 4.7128 5.006)"),
        )
        # A filter matching no rows yields a NULL aggregate.
        self.assertIsNone(city.parcel_center_nonexistent)
        self.assertIn(
            city.parcel_center_single.wkt,
            [
                GEOSGeometry("MULTIPOINT (1.7128 -2.006)"),
                GEOSGeometry("POINT (1.7128 -2.006)"),  # SpatiaLite collapse to POINT.
            ],
        )

    @skipUnlessDBFeature("has_Centroid_function", "supports_collect_aggr")
    def test_centroid_collect_filter(self):
        """Centroid of a filtered Collect aggregate."""
        qs = City.objects.annotate(
            parcel_centroid=Centroid(
                Collect(
                    "parcel__center1",
                    filter=~Q(parcel__name__icontains="ignore"),
                )
            )
        )
        city = qs.get(name="Aurora")
        if connection.ops.mariadb:
            # MariaDB returns NULL for Centroid over a collected geometry.
            self.assertIsNone(city.parcel_centroid)
        else:
            self.assertIsInstance(city.parcel_centroid, Point)
            self.assertAlmostEqual(city.parcel_centroid[0], 3.2128, 4)
            self.assertAlmostEqual(city.parcel_centroid[1], 1.5, 4)

    @skipUnlessDBFeature("supports_make_line_aggr")
    def test_make_line_filter(self):
        """MakeLine with a filter= argument: matching and empty cases."""
        qs = City.objects.annotate(
            parcel_line=MakeLine(
                "parcel__center1",
                filter=~Q(parcel__name__icontains="ignore"),
            ),
            parcel_line_nonexistent=MakeLine(
                "parcel__center1",
                filter=Q(parcel__name__icontains="nonexistent"),
            ),
        )
        city = qs.get(name="Aurora")
        self.assertIn(
            city.parcel_line.wkt,
            # The default ordering is flaky, so check both.
            [
                "LINESTRING (1.7128 -2.006, 4.7128 5.006)",
                "LINESTRING (4.7128 5.006, 1.7128 -2.006)",
            ],
        )
        self.assertIsNone(city.parcel_line_nonexistent)

    @skipUnlessDBFeature("supports_extent_aggr")
    def test_extent_filter(self):
        """Extent with a filter= argument: matching, empty, and unfiltered."""
        qs = City.objects.annotate(
            parcel_border=Extent(
                "parcel__border1",
                filter=~Q(parcel__name__icontains="ignore"),
            ),
            parcel_border_nonexistent=Extent(
                "parcel__border1",
                filter=Q(parcel__name__icontains="nonexistent"),
            ),
            parcel_border_no_filter=Extent("parcel__border1"),
        )
        city = qs.get(name="Aurora")
        self.assertEqual(city.parcel_border, (0.0, 0.0, 22.0, 22.0))
        self.assertIsNone(city.parcel_border_nonexistent)
        self.assertEqual(city.parcel_border_no_filter, (0.0, 0.0, 32.0, 32.0))

    @skipUnlessDBFeature("supports_union_aggr")
    def test_union_filter(self):
        """Union with a filter= argument: matching, empty, and single-row."""
        qs = City.objects.annotate(
            parcel_point_union=Union(
                "parcel__center2",
                filter=~Q(parcel__name__icontains="ignore"),
            ),
            parcel_point_nonexistent=Union(
                "parcel__center2",
                filter=Q(parcel__name__icontains="nonexistent"),
            ),
            parcel_point_union_single=Union(
                "parcel__center2",
                filter=Q(parcel__name__contains="Alpha"),
            ),
        )
        city = qs.get(name="Aurora")
        self.assertIn(
            city.parcel_point_union.wkt,
            [
                GEOSGeometry("MULTIPOINT (12.75 10.05, 3.7128 -5.006)"),
                GEOSGeometry("MULTIPOINT (3.7128 -5.006, 12.75 10.05)"),
            ],
        )
        self.assertIsNone(city.parcel_point_nonexistent)
        self.assertEqual(city.parcel_point_union_single.wkt, "POINT (3.7128 -5.006)")

    def test15_invalid_select_related(self):
        """
        select_related on the related name manager of a unique FK.
        """
        qs = Article.objects.select_related("author__article")
        # This triggers TypeError when `get_default_columns` has no
        # `local_only` keyword. The TypeError is swallowed if QuerySet is
        # actually evaluated as list generation swallows TypeError in CPython.
        str(qs.query)

    def test16_annotated_date_queryset(self):
        """
        Ensure annotated date querysets work if spatial backend is used. See
        #14648.
        """
        birth_years = [
            dt.year
            for dt in list(
                Author.objects.annotate(num_books=Count("books")).dates("dob", "year")
            )
        ]
        birth_years.sort()
        self.assertEqual([1950, 1974], birth_years)

    # TODO: Related tests for KML, GML, and distance lookups.
python
github
https://github.com/django/django
tests/gis_tests/relatedapp/tests.py
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

from openerp.addons.crm import crm
from openerp.osv import fields, osv
from openerp import tools


class crm_opportunity_report(osv.Model):
    """CRM Opportunity Analysis.

    Read-only reporting model backed by a PostgreSQL view (``_auto = False``)
    over ``crm_lead``; :meth:`init` (re)creates the view on module
    install/update. All columns are read-only because rows come from the view,
    never from ORM writes.
    """
    _name = "crm.opportunity.report"
    _auto = False
    _description = "CRM Opportunity Analysis"
    _rec_name = 'date_deadline'
    _inherit = ["crm.tracking.mixin"]
    _columns = {
        'date_deadline': fields.date('Exp. Closing', readonly=True, help="Expected Closing"),
        'create_date': fields.datetime('Creation Date', readonly=True),
        'opening_date': fields.datetime('Assignation Date', readonly=True),
        'date_closed': fields.datetime('Close Date', readonly=True),
        'date_last_stage_update': fields.datetime('Last Stage Update', readonly=True),
        'nbr_cases': fields.integer("# of Cases", readonly=True),

        # Durations in days; averaged (not summed) when rows are grouped.
        'delay_open': fields.float('Delay to Assign', digits=(16, 2), readonly=True, group_operator="avg", help="Number of Days to open the case"),
        'delay_close': fields.float('Delay to Close', digits=(16, 2), readonly=True, group_operator="avg", help="Number of Days to close the case"),
        'delay_expected': fields.float('Overpassed Deadline', digits=(16, 2), readonly=True, group_operator="avg"),

        'user_id': fields.many2one('res.users', 'User', readonly=True),
        'section_id': fields.many2one('crm.case.section', 'Sales Team', readonly=True),
        'country_id': fields.many2one('res.country', 'Country', readonly=True),
        # Fix: 'company_id' used to appear twice in this dict with identical
        # definitions; the duplicate key silently overwrote the first. A
        # single declaration keeps behavior identical.
        'company_id': fields.many2one('res.company', 'Company', readonly=True),
        'probability': fields.float('Probability', digits=(16, 2), readonly=True, group_operator="avg"),
        'total_revenue': fields.float('Total Revenue', digits=(16, 2), readonly=True),
        'expected_revenue': fields.float('Expected Revenue', digits=(16, 2), readonly=True),
        'stage_id': fields.many2one('crm.case.stage', 'Stage', readonly=True, domain="[('section_ids', '=', section_id)]"),
        'partner_id': fields.many2one('res.partner', 'Partner', readonly=True),
        'priority': fields.selection(crm.AVAILABLE_PRIORITIES, 'Priority'),
        'type': fields.selection([
            ('lead', 'Lead'),
            ('opportunity', 'Opportunity'),
        ], 'Type', help="Type is used to separate Leads and Opportunities"),
    }

    def init(self, cr):
        """Drop and (re)create the SQL view backing this reporting model.

        :param cr: database cursor provided by the ORM at install/update time.
        """
        tools.drop_view_if_exists(cr, 'crm_opportunity_report')
        cr.execute("""
            CREATE OR REPLACE VIEW crm_opportunity_report AS (
                SELECT
                    id,
                    c.date_deadline,
                    count(id) as nbr_cases,
                    c.date_open as opening_date,
                    c.date_closed as date_closed,
                    c.date_last_stage_update as date_last_stage_update,
                    c.user_id,
                    c.probability,
                    c.stage_id,
                    c.type,
                    c.company_id,
                    c.priority,
                    c.section_id,
                    c.campaign_id,
                    c.source_id,
                    c.medium_id,
                    c.partner_id,
                    c.country_id,
                    c.planned_revenue as total_revenue,
                    c.planned_revenue*(c.probability/100) as expected_revenue,
                    c.create_date as create_date,
                    extract('epoch' from (c.date_closed-c.create_date))/(3600*24) as delay_close,
                    abs(extract('epoch' from (c.date_deadline - c.date_closed))/(3600*24)) as delay_expected,
                    extract('epoch' from (c.date_open-c.create_date))/(3600*24) as delay_open
                FROM crm_lead c
                WHERE c.active = 'true'
                GROUP BY c.id
            )""")
unknown
codeparrot/codeparrot-clean
/*
 * Copyright 2012-present the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.boot.autoconfigure.condition;

import org.junit.jupiter.api.Test;

import org.springframework.boot.test.context.assertj.AssertableApplicationContext;
import org.springframework.boot.test.context.runner.ApplicationContextRunner;
import org.springframework.boot.test.context.runner.ContextConsumer;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Condition;
import org.springframework.context.annotation.ConditionContext;
import org.springframework.context.annotation.Conditional;
import org.springframework.context.annotation.Configuration;
import org.springframework.core.type.AnnotatedTypeMetadata;

import static org.assertj.core.api.Assertions.assertThat;

/**
 * Tests for {@link AnyNestedCondition}.
 *
 * @author Phillip Webb
 * @author Dave Syer
 */
class AnyNestedConditionTests {

	private final ApplicationContextRunner contextRunner = new ApplicationContextRunner();

	// Neither property is set: no nested condition matches, so 'myBean' must
	// not be registered.
	@Test
	void neither() {
		this.contextRunner.withUserConfiguration(Config.class).run(match(false));
	}

	// Only property 'a' is set: one matching nested condition is enough.
	@Test
	void propertyA() {
		this.contextRunner.withUserConfiguration(Config.class).withPropertyValues("a:a").run(match(true));
	}

	// Only property 'b' is set: the other nested condition matches.
	@Test
	void propertyB() {
		this.contextRunner.withUserConfiguration(Config.class).withPropertyValues("b:b").run(match(true));
	}

	// Both properties set: still a match (OR semantics, not XOR).
	@Test
	void both() {
		this.contextRunner.withUserConfiguration(Config.class).withPropertyValues("a:a", "b:b").run(match(true));
	}

	// Asserts presence or absence of the conditionally-registered bean.
	private ContextConsumer<AssertableApplicationContext> match(boolean expected) {
		return (context) -> {
			if (expected) {
				assertThat(context).hasBean("myBean");
			}
			else {
				assertThat(context).doesNotHaveBean("myBean");
			}
		};
	}

	// Registers 'myBean' only when the composite condition below matches.
	@Configuration(proxyBeanMethods = false)
	@Conditional(OnPropertyAorBCondition.class)
	static class Config {

		@Bean
		String myBean() {
			return "myBean";
		}

	}

	// Composite condition: matches when ANY nested member class's conditions
	// match. SubclassC never matches (see NonSpringBootCondition), which
	// verifies that a non-matching member does not veto the others.
	static class OnPropertyAorBCondition extends AnyNestedCondition {

		OnPropertyAorBCondition() {
			super(ConfigurationPhase.PARSE_CONFIGURATION);
		}

		@ConditionalOnProperty("a")
		static class HasPropertyA {

		}

		// Two annotations on one member: both must hold for this member.
		@ConditionalOnExpression("true")
		@ConditionalOnProperty("b")
		static class HasPropertyB {

		}

		@Conditional(NonSpringBootCondition.class)
		static class SubclassC {

		}

	}

	// Plain Spring Condition (not a SpringBootCondition) that never matches.
	static class NonSpringBootCondition implements Condition {

		@Override
		public boolean matches(ConditionContext context, AnnotatedTypeMetadata metadata) {
			return false;
		}

	}

}
java
github
https://github.com/spring-projects/spring-boot
core/spring-boot-autoconfigure/src/test/java/org/springframework/boot/autoconfigure/condition/AnyNestedConditionTests.java
""" iri2uri Converts an IRI to a URI. """ __author__ = "Joe Gregorio (joe@bitworking.org)" __copyright__ = "Copyright 2006, Joe Gregorio" __contributors__ = [] __version__ = "1.0.0" __license__ = "MIT" __history__ = """ """ import urllib.parse # Convert an IRI to a URI following the rules in RFC 3987 # # The characters we need to enocde and escape are defined in the spec: # # iprivate = %xE000-F8FF / %xF0000-FFFFD / %x100000-10FFFD # ucschar = %xA0-D7FF / %xF900-FDCF / %xFDF0-FFEF # / %x10000-1FFFD / %x20000-2FFFD / %x30000-3FFFD # / %x40000-4FFFD / %x50000-5FFFD / %x60000-6FFFD # / %x70000-7FFFD / %x80000-8FFFD / %x90000-9FFFD # / %xA0000-AFFFD / %xB0000-BFFFD / %xC0000-CFFFD # / %xD0000-DFFFD / %xE1000-EFFFD escape_range = [ (0xA0, 0xD7FF), (0xE000, 0xF8FF), (0xF900, 0xFDCF), (0xFDF0, 0xFFEF), (0x10000, 0x1FFFD), (0x20000, 0x2FFFD), (0x30000, 0x3FFFD), (0x40000, 0x4FFFD), (0x50000, 0x5FFFD), (0x60000, 0x6FFFD), (0x70000, 0x7FFFD), (0x80000, 0x8FFFD), (0x90000, 0x9FFFD), (0xA0000, 0xAFFFD), (0xB0000, 0xBFFFD), (0xC0000, 0xCFFFD), (0xD0000, 0xDFFFD), (0xE1000, 0xEFFFD), (0xF0000, 0xFFFFD), (0x100000, 0x10FFFD), ] def encode(c): retval = c i = ord(c) for low, high in escape_range: if i < low: break if i >= low and i <= high: retval = "".join(["%%%2X" % o for o in c.encode('utf-8')]) break return retval def iri2uri(uri): """Convert an IRI to a URI. Note that IRIs must be passed in a unicode strings. That is, do not utf-8 encode the IRI before passing it into the function.""" if isinstance(uri ,str): (scheme, authority, path, query, fragment) = urllib.parse.urlsplit(uri) authority = authority.encode('idna').decode('utf-8') # For each character in 'ucschar' or 'iprivate' # 1. encode as utf-8 # 2. 
then %-encode each octet of that utf-8 uri = urllib.parse.urlunsplit((scheme, authority, path, query, fragment)) uri = "".join([encode(c) for c in uri]) return uri if __name__ == "__main__": import unittest class Test(unittest.TestCase): def test_uris(self): """Test that URIs are invariant under the transformation.""" invariant = [ "ftp://ftp.is.co.za/rfc/rfc1808.txt", "http://www.ietf.org/rfc/rfc2396.txt", "ldap://[2001:db8::7]/c=GB?objectClass?one", "mailto:John.Doe@example.com", "news:comp.infosystems.www.servers.unix", "tel:+1-816-555-1212", "telnet://192.0.2.16:80/", "urn:oasis:names:specification:docbook:dtd:xml:4.1.2" ] for uri in invariant: self.assertEqual(uri, iri2uri(uri)) def test_iri(self): """ Test that the right type of escaping is done for each part of the URI.""" self.assertEqual("http://xn--o3h.com/%E2%98%84", iri2uri("http://\N{COMET}.com/\N{COMET}")) self.assertEqual("http://bitworking.org/?fred=%E2%98%84", iri2uri("http://bitworking.org/?fred=\N{COMET}")) self.assertEqual("http://bitworking.org/#%E2%98%84", iri2uri("http://bitworking.org/#\N{COMET}")) self.assertEqual("#%E2%98%84", iri2uri("#\N{COMET}")) self.assertEqual("/fred?bar=%E2%98%9A#%E2%98%84", iri2uri("/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}")) self.assertEqual("/fred?bar=%E2%98%9A#%E2%98%84", iri2uri(iri2uri("/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}"))) self.assertNotEqual("/fred?bar=%E2%98%9A#%E2%98%84", iri2uri("/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}".encode('utf-8'))) unittest.main()
unknown
codeparrot/codeparrot-clean
{ "name": "antora", "lockfileVersion": 3, "requires": true, "packages": { "": { "dependencies": { "@antora/atlas-extension": "1.0.0-alpha.5", "@antora/cli": "3.2.0-alpha.11", "@antora/site-generator": "3.2.0-alpha.11", "@asciidoctor/tabs": "1.0.0-beta.6", "@springio/antora-extensions": "1.14.7", "@springio/antora-xref-extension": "1.0.0-alpha.5", "@springio/antora-zip-contents-collector-extension": "1.0.0-alpha.10", "@springio/asciidoctor-extensions": "1.0.0-alpha.17" } }, "node_modules/@antora/asciidoc-loader": { "version": "3.2.0-alpha.11", "resolved": "https://registry.npmjs.org/@antora/asciidoc-loader/-/asciidoc-loader-3.2.0-alpha.11.tgz", "integrity": "sha512-P70cQvjAzovXOuvFrL6/h5Js/9pzdU9fxwK+kA6IGcwuQ2FOb50TEJ0Y291IcrMIEu8lbF7w/yxyDLqsZRr7Ng==", "license": "MPL-2.0", "dependencies": { "@antora/logger": "3.2.0-alpha.11", "@antora/user-require-helper": "~3.0", "@asciidoctor/core": "~2.2" }, "engines": { "node": ">=18.0.0" } }, "node_modules/@antora/atlas-extension": { "version": "1.0.0-alpha.5", "resolved": "https://registry.npmjs.org/@antora/atlas-extension/-/atlas-extension-1.0.0-alpha.5.tgz", "integrity": "sha512-zuewa8wp4AqgU+oug/GK2n0k/l40Pd72rF6pkUlNnAcY11nUUbtWAGWNbLWFalKJmZ5BTpLUnlCu1QluLiBcuw==", "license": "MPL-2.0", "dependencies": { "@antora/expand-path-helper": "~3.0", "cache-directory": "~2.0", "node-gzip": "~1.1", "simple-get": "~4.0" }, "engines": { "node": ">=16.0.0" } }, "node_modules/@antora/atlas-extension/node_modules/@antora/expand-path-helper": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/@antora/expand-path-helper/-/expand-path-helper-3.0.0.tgz", "integrity": "sha512-7PdEIhk97v85/CSm3HynCsX14TR6oIVz1s233nNLsiWubE8tTnpPt4sNRJR+hpmIZ6Bx9c6QDp3XIoiyu/WYYA==", "license": "MPL-2.0", "engines": { "node": ">=16.0.0" } }, "node_modules/@antora/cli": { "version": "3.2.0-alpha.11", "resolved": "https://registry.npmjs.org/@antora/cli/-/cli-3.2.0-alpha.11.tgz", "integrity": 
"sha512-hyCVmJT/esg6raPFyR1aIpUuGwXbe/0cAGPlQ86Ag/m7CUP80mQEZaVN1NxyVOK/KmtF5E7qpNvksDetBtGvcw==", "license": "MPL-2.0", "dependencies": { "@antora/logger": "3.2.0-alpha.11", "@antora/playbook-builder": "3.2.0-alpha.11", "@antora/user-require-helper": "~3.0", "commander": "~12.1" }, "bin": { "antora": "bin/antora" }, "engines": { "node": ">=18.0.0" } }, "node_modules/@antora/content-aggregator": { "version": "3.2.0-alpha.11", "resolved": "https://registry.npmjs.org/@antora/content-aggregator/-/content-aggregator-3.2.0-alpha.11.tgz", "integrity": "sha512-3LH5Got0075nXbR3cVFG3mOcfzka+v/oqc55RGP6VOLGq5fOvE0cfi4auJIselQv6wzK2jGsiAzyInbDLWlk4w==", "license": "MPL-2.0", "dependencies": { "@antora/expand-path-helper": "~3.0", "@antora/logger": "3.2.0-alpha.11", "@antora/user-require-helper": "~3.0", "braces": "~3.0", "cache-directory": "~2.0", "fast-glob": "~3.3", "hpagent": "~1.2", "isomorphic-git": "~1.25", "js-yaml": "~4.1", "multi-progress": "~4.0", "picomatch": "~4.0", "progress": "~2.0", "should-proxy": "~1.0", "simple-get": "~4.0", "vinyl": "~3.0" }, "engines": { "node": ">=18.0.0" } }, "node_modules/@antora/content-aggregator/node_modules/@antora/expand-path-helper": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/@antora/expand-path-helper/-/expand-path-helper-3.0.0.tgz", "integrity": "sha512-7PdEIhk97v85/CSm3HynCsX14TR6oIVz1s233nNLsiWubE8tTnpPt4sNRJR+hpmIZ6Bx9c6QDp3XIoiyu/WYYA==", "license": "MPL-2.0", "engines": { "node": ">=16.0.0" } }, "node_modules/@antora/content-classifier": { "version": "3.2.0-alpha.11", "resolved": "https://registry.npmjs.org/@antora/content-classifier/-/content-classifier-3.2.0-alpha.11.tgz", "integrity": "sha512-6TKNWGWdOJkop0hGYFgSLTdJjK1RJVlwuUye2n+fPGMJH2k3T53i3OIJxsQwoKhVkdA3n383tdsjsf+ptoJAmw==", "license": "MPL-2.0", "dependencies": { "@antora/asciidoc-loader": "3.2.0-alpha.11", "@antora/logger": "3.2.0-alpha.11", "mime-types": "~2.1", "vinyl": "~3.0" }, "engines": { "node": ">=18.0.0" } }, 
"node_modules/@antora/document-converter": { "version": "3.2.0-alpha.11", "resolved": "https://registry.npmjs.org/@antora/document-converter/-/document-converter-3.2.0-alpha.11.tgz", "integrity": "sha512-yfjqxeXrTFY5EQMFk6I1ITZySgerwVcN/4G30dcrTTUUYZyjSospKuKwlnIr0qP+TUXf27ArzL5+Wpj8+X5XZQ==", "license": "MPL-2.0", "dependencies": { "@antora/asciidoc-loader": "3.2.0-alpha.11" }, "engines": { "node": ">=18.0.0" } }, "node_modules/@antora/expand-path-helper": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/@antora/expand-path-helper/-/expand-path-helper-2.0.0.tgz", "integrity": "sha512-CSMBGC+tI21VS2kGW3PV7T2kQTM5eT3f2GTPVLttwaNYbNxDve08en/huzszHJfxo11CcEs26Ostr0F2c1QqeA==", "engines": { "node": ">=10.17.0" } }, "node_modules/@antora/file-publisher": { "version": "3.2.0-alpha.11", "resolved": "https://registry.npmjs.org/@antora/file-publisher/-/file-publisher-3.2.0-alpha.11.tgz", "integrity": "sha512-2XcyCR148aUCQsNPMBkBRD0SUBwpx1ZpgyshNnSuvE/cHP1pSkBjxMxqdsF8SZTL2za/k67Ge1vaTND5PZRMIw==", "license": "MPL-2.0", "dependencies": { "@antora/expand-path-helper": "~3.0", "@antora/user-require-helper": "~3.0", "vinyl": "~3.0", "yazl": "~2.5" }, "engines": { "node": ">=18.0.0" } }, "node_modules/@antora/file-publisher/node_modules/@antora/expand-path-helper": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/@antora/expand-path-helper/-/expand-path-helper-3.0.0.tgz", "integrity": "sha512-7PdEIhk97v85/CSm3HynCsX14TR6oIVz1s233nNLsiWubE8tTnpPt4sNRJR+hpmIZ6Bx9c6QDp3XIoiyu/WYYA==", "license": "MPL-2.0", "engines": { "node": ">=16.0.0" } }, "node_modules/@antora/logger": { "version": "3.2.0-alpha.11", "resolved": "https://registry.npmjs.org/@antora/logger/-/logger-3.2.0-alpha.11.tgz", "integrity": "sha512-reiqsBqpllqRsuJhcPqbe7roOR3Ja+LfysTzyDzgrXS7liu5BexlwHCPOn9MqScT3A+fhUYajnck8t+w6+fcgQ==", "license": "MPL-2.0", "dependencies": { "@antora/expand-path-helper": "~3.0", "pino": "~9.2", "pino-pretty": "~11.2", "sonic-boom": "~4.0" }, "engines": { "node": 
">=18.0.0" } }, "node_modules/@antora/logger/node_modules/@antora/expand-path-helper": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/@antora/expand-path-helper/-/expand-path-helper-3.0.0.tgz", "integrity": "sha512-7PdEIhk97v85/CSm3HynCsX14TR6oIVz1s233nNLsiWubE8tTnpPt4sNRJR+hpmIZ6Bx9c6QDp3XIoiyu/WYYA==", "license": "MPL-2.0", "engines": { "node": ">=16.0.0" } }, "node_modules/@antora/navigation-builder": { "version": "3.2.0-alpha.11", "resolved": "https://registry.npmjs.org/@antora/navigation-builder/-/navigation-builder-3.2.0-alpha.11.tgz", "integrity": "sha512-59+Uev7/Dk7/Is5EbL4DmsNJs/ju0MVZzb7qzRLu/3C5rRWZqG3l536Bb1/mStGfSbP6xHHSlzZWMjr7DZpCnw==", "license": "MPL-2.0", "dependencies": { "@antora/asciidoc-loader": "3.2.0-alpha.11" }, "engines": { "node": ">=18.0.0" } }, "node_modules/@antora/page-composer": { "version": "3.2.0-alpha.11", "resolved": "https://registry.npmjs.org/@antora/page-composer/-/page-composer-3.2.0-alpha.11.tgz", "integrity": "sha512-04JKTx5fui605PDh5HJPTmhDsMszwYST5xP6pXSpujEaeKCuHa6z9pmlxbesc9WYIkMqEdAXPrxtzXA/DdFpEA==", "license": "MPL-2.0", "dependencies": { "@antora/logger": "3.2.0-alpha.11", "handlebars": "~4.7", "require-from-string": "~2.0" }, "engines": { "node": ">=18.0.0" } }, "node_modules/@antora/playbook-builder": { "version": "3.2.0-alpha.11", "resolved": "https://registry.npmjs.org/@antora/playbook-builder/-/playbook-builder-3.2.0-alpha.11.tgz", "integrity": "sha512-rI707tLA3sGigNTEaoQzCDBn1pIyQa35ktf7OAO2kTYcUF9eLwf8gU3u68BqTwdh4ewuFG2uvHDEx6/6pTciNQ==", "license": "MPL-2.0", "dependencies": { "@iarna/toml": "~2.2", "convict": "~6.2", "js-yaml": "~4.1", "json5": "~2.2", "yargs-parser": "~20.2" }, "engines": { "node": ">=18.0.0" } }, "node_modules/@antora/redirect-producer": { "version": "3.2.0-alpha.11", "resolved": "https://registry.npmjs.org/@antora/redirect-producer/-/redirect-producer-3.2.0-alpha.11.tgz", "integrity": 
"sha512-kMjA79P+3tzd1Ao5MAygIBoZqSYRG6yn2/P4VUURw7IIwh/kAbqwgJVXTFTIAL0u6PuDsGWtrOnO+eteT7WpwQ==", "license": "MPL-2.0", "dependencies": { "vinyl": "~3.0" }, "engines": { "node": ">=18.0.0" } }, "node_modules/@antora/site-generator": { "version": "3.2.0-alpha.11", "resolved": "https://registry.npmjs.org/@antora/site-generator/-/site-generator-3.2.0-alpha.11.tgz", "integrity": "sha512-m8CGC4dRxv3gUSK56p5KXHHEGOO6EOA8QlMi7gfiwtK/5+O/qRC6thJeet7zQPATuwESnpI1ASViHhOI2aSKpA==", "license": "MPL-2.0", "dependencies": { "@antora/asciidoc-loader": "3.2.0-alpha.11", "@antora/content-aggregator": "3.2.0-alpha.11", "@antora/content-classifier": "3.2.0-alpha.11", "@antora/document-converter": "3.2.0-alpha.11", "@antora/file-publisher": "3.2.0-alpha.11", "@antora/logger": "3.2.0-alpha.11", "@antora/navigation-builder": "3.2.0-alpha.11", "@antora/page-composer": "3.2.0-alpha.11", "@antora/playbook-builder": "3.2.0-alpha.11", "@antora/redirect-producer": "3.2.0-alpha.11", "@antora/site-mapper": "3.2.0-alpha.11", "@antora/site-publisher": "3.2.0-alpha.11", "@antora/ui-loader": "3.2.0-alpha.11", "@antora/user-require-helper": "~3.0" }, "engines": { "node": ">=18.0.0" } }, "node_modules/@antora/site-mapper": { "version": "3.2.0-alpha.11", "resolved": "https://registry.npmjs.org/@antora/site-mapper/-/site-mapper-3.2.0-alpha.11.tgz", "integrity": "sha512-CWvsfjVXSRbdGSN9UmGeiVJa+UDDSYLH15O5eKeEzvMp8mVXPVcSvWyW0h1js20ki+PNLlBqwP8O524v7/Mkzg==", "license": "MPL-2.0", "dependencies": { "@antora/content-classifier": "3.2.0-alpha.11", "vinyl": "~3.0" }, "engines": { "node": ">=18.0.0" } }, "node_modules/@antora/site-publisher": { "version": "3.2.0-alpha.11", "resolved": "https://registry.npmjs.org/@antora/site-publisher/-/site-publisher-3.2.0-alpha.11.tgz", "integrity": "sha512-gxiPpQHioCp+y9xYPlJFl2yLnZhjb1Hzr+e4YgmDsc4KvOB+IGR4oGiaOhUfBb63X0VsKFHzv/uq2Mhohazj3Q==", "license": "MPL-2.0", "dependencies": { "@antora/file-publisher": "3.2.0-alpha.11" }, "engines": { "node": ">=18.0.0" } }, 
"node_modules/@antora/ui-loader": { "version": "3.2.0-alpha.11", "resolved": "https://registry.npmjs.org/@antora/ui-loader/-/ui-loader-3.2.0-alpha.11.tgz", "integrity": "sha512-LHK8wo9mlXg8RmumBW2yCwIZXXoNgcYUh5ln94vf46p8gnO4cm+5i8REa6JgFPIO95FGVVQlE4lRhr86I7GPgw==", "license": "MPL-2.0", "dependencies": { "@antora/expand-path-helper": "~3.0", "braces": "~3.0", "cache-directory": "~2.0", "fast-glob": "~3.3", "hpagent": "~1.2", "js-yaml": "~4.1", "picomatch": "~4.0", "should-proxy": "~1.0", "simple-get": "~4.0", "vinyl": "~3.0", "yauzl": "~3.1" }, "engines": { "node": ">=18.0.0" } }, "node_modules/@antora/ui-loader/node_modules/@antora/expand-path-helper": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/@antora/expand-path-helper/-/expand-path-helper-3.0.0.tgz", "integrity": "sha512-7PdEIhk97v85/CSm3HynCsX14TR6oIVz1s233nNLsiWubE8tTnpPt4sNRJR+hpmIZ6Bx9c6QDp3XIoiyu/WYYA==", "license": "MPL-2.0", "engines": { "node": ">=16.0.0" } }, "node_modules/@antora/ui-loader/node_modules/yauzl": { "version": "3.1.3", "resolved": "https://registry.npmjs.org/yauzl/-/yauzl-3.1.3.tgz", "integrity": "sha512-JCCdmlJJWv7L0q/KylOekyRaUrdEoUxWkWVcgorosTROCFWiS9p2NNPE9Yb91ak7b1N5SxAZEliWpspbZccivw==", "license": "MIT", "dependencies": { "buffer-crc32": "~0.2.3", "pend": "~1.2.0" }, "engines": { "node": ">=12" } }, "node_modules/@antora/user-require-helper": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/@antora/user-require-helper/-/user-require-helper-3.0.0.tgz", "integrity": "sha512-KIXb8WYhnrnwH7Jj21l1w+et9k5GvcgcqvLOwxqWLEd0uVZOiMFdqFjqbVm3M+zcrs1JXWMeh2LLvxBbQs3q/Q==", "license": "MPL-2.0", "dependencies": { "@antora/expand-path-helper": "~3.0" }, "engines": { "node": ">=16.0.0" } }, "node_modules/@antora/user-require-helper/node_modules/@antora/expand-path-helper": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/@antora/expand-path-helper/-/expand-path-helper-3.0.0.tgz", "integrity": 
"sha512-7PdEIhk97v85/CSm3HynCsX14TR6oIVz1s233nNLsiWubE8tTnpPt4sNRJR+hpmIZ6Bx9c6QDp3XIoiyu/WYYA==", "license": "MPL-2.0", "engines": { "node": ">=16.0.0" } }, "node_modules/@asciidoctor/core": { "version": "2.2.8", "resolved": "https://registry.npmjs.org/@asciidoctor/core/-/core-2.2.8.tgz", "integrity": "sha512-oozXk7ZO1RAd/KLFLkKOhqTcG4GO3CV44WwOFg2gMcCsqCUTarvMT7xERIoWW2WurKbB0/ce+98r01p8xPOlBw==", "license": "MIT", "dependencies": { "asciidoctor-opal-runtime": "0.3.3", "unxhr": "1.0.1" }, "engines": { "node": ">=8.11", "npm": ">=5.0.0", "yarn": ">=1.1.0" } }, "node_modules/@asciidoctor/tabs": { "version": "1.0.0-beta.6", "resolved": "https://registry.npmjs.org/@asciidoctor/tabs/-/tabs-1.0.0-beta.6.tgz", "integrity": "sha512-gGZnW7UfRXnbiyKNd9PpGKtSuD8+DsqaaTSbQ1dHVkZ76NaolLhdQg8RW6/xqN3pX1vWZEcF4e81+Oe9rNRWxg==", "engines": { "node": ">=16.0.0" } }, "node_modules/@babel/runtime": { "version": "7.26.10", "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.26.10.tgz", "integrity": "sha512-2WJMeRQPHKSPemqk/awGrAiuFfzBmOIPXKizAsVhWH9YJqLZ0H+HS4c8loHGgW6utJ3E/ejXQUsiGaQy2NZ9Fw==", "license": "MIT", "dependencies": { "regenerator-runtime": "^0.14.0" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@iarna/toml": { "version": "2.2.5", "resolved": "https://registry.npmjs.org/@iarna/toml/-/toml-2.2.5.tgz", "integrity": "sha512-trnsAYxU3xnS1gPHPyU961coFyLkh4gAD/0zQ5mymY4yOZ+CYvsPqUbOFSw0aDM4y0tV7tiFxL/1XfXPNC6IPg==", "license": "ISC" }, "node_modules/@nodelib/fs.scandir": { "version": "2.1.5", "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz", "integrity": "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==", "license": "MIT", "dependencies": { "@nodelib/fs.stat": "2.0.5", "run-parallel": "^1.1.9" }, "engines": { "node": ">= 8" } }, "node_modules/@nodelib/fs.stat": { "version": "2.0.5", "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz", "integrity": 
"sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==", "license": "MIT", "engines": { "node": ">= 8" } }, "node_modules/@nodelib/fs.walk": { "version": "1.2.8", "resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz", "integrity": "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==", "license": "MIT", "dependencies": { "@nodelib/fs.scandir": "2.1.5", "fastq": "^1.6.0" }, "engines": { "node": ">= 8" } }, "node_modules/@springio/antora-extensions": { "version": "1.14.7", "resolved": "https://registry.npmjs.org/@springio/antora-extensions/-/antora-extensions-1.14.7.tgz", "integrity": "sha512-lu++qtzSTAAznFvUEegoyaDfSJXQK1Ftk8NHxWhqzvPAx0t8oO3kNQE1C/5m1qs97QRtKh0UbJ26qEfEQDEcpQ==", "license": "ASL-2.0", "dependencies": { "@antora/expand-path-helper": "~2.0", "archiver": "^5.3.1", "asciinema-player": "^3.6.1", "cache-directory": "~2.0", "ci": "^2.3.0", "decompress": "4.2.1", "fast-xml-parser": "^4.5.2", "handlebars": "latest" }, "engines": { "node": ">=16.0.0" } }, "node_modules/@springio/antora-xref-extension": { "version": "1.0.0-alpha.5", "resolved": "https://registry.npmjs.org/@springio/antora-xref-extension/-/antora-xref-extension-1.0.0-alpha.5.tgz", "integrity": "sha512-X7gjjMffHYr/Bz9CR1Dpc44NjkOFLTAzNHzxOxj+RUZHhrbbcnj4h3tBDxSUffiddKcqQ/AEHV+SoAngW/pavA==", "license": "Apache-2.0", "engines": { "node": ">=18.0.0" } }, "node_modules/@springio/antora-zip-contents-collector-extension": { "version": "1.0.0-alpha.10", "resolved": "https://registry.npmjs.org/@springio/antora-zip-contents-collector-extension/-/antora-zip-contents-collector-extension-1.0.0-alpha.10.tgz", "integrity": "sha512-0O4eksIv7Y6N6VvGq6r92AyjpPfauLFOkXU283Y1r0RwfvohuSGO8OmnAv9h+t7v+G3ILwDrRV4vJ8oHh0gErw==", "license": "Apache-2.0", "dependencies": { "@antora/expand-path-helper": "~2.0", "cache-directory": "~2.0", "glob-stream": "~7.0", "isomorphic-git": "~1.21", "js-yaml": "~4.1" }, 
"engines": { "node": ">=20.0.0" } }, "node_modules/@springio/antora-zip-contents-collector-extension/node_modules/isomorphic-git": { "version": "1.21.0", "resolved": "https://registry.npmjs.org/isomorphic-git/-/isomorphic-git-1.21.0.tgz", "integrity": "sha512-ZqCAUM63CYepA3fB8H7NVyPSiOkgzIbQ7T+QPrm9xtYgQypN9JUJ5uLMjB5iTfomdJf3mdm6aSxjZwnT6ubvEA==", "dependencies": { "async-lock": "^1.1.0", "clean-git-ref": "^2.0.1", "crc-32": "^1.2.0", "diff3": "0.0.3", "ignore": "^5.1.4", "minimisted": "^2.0.0", "pako": "^1.0.10", "pify": "^4.0.1", "readable-stream": "^3.4.0", "sha.js": "^2.4.9", "simple-get": "^4.0.1" }, "bin": { "isogit": "cli.cjs" }, "engines": { "node": ">=12" } }, "node_modules/@springio/antora-zip-contents-collector-extension/node_modules/pify": { "version": "4.0.1", "resolved": "https://registry.npmjs.org/pify/-/pify-4.0.1.tgz", "integrity": "sha512-uB80kBFb/tfd68bVleG9T5GGsGPjJrLAUpR5PZIrhBnIaRTQRjqdJSsIKkOP6OAIFbj7GOrcudc5pNjZ+geV2g==", "engines": { "node": ">=6" } }, "node_modules/@springio/asciidoctor-extensions": { "version": "1.0.0-alpha.17", "resolved": "https://registry.npmjs.org/@springio/asciidoctor-extensions/-/asciidoctor-extensions-1.0.0-alpha.17.tgz", "integrity": "sha512-mvVEKZNdGQu1+raOF+sy1DKWZrq1bB0dM4ZVlIIFV+jJ/mengXByq7YQk63nMOFsue6fGlgb3nQUte8EbvoQAw==", "license": "ASL-2.0", "dependencies": { "js-yaml": "~4.1" }, "engines": { "node": ">=16.0.0" } }, "node_modules/abort-controller": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/abort-controller/-/abort-controller-3.0.0.tgz", "integrity": "sha512-h8lQ8tacZYnR3vNQTgibj+tODHI5/+l06Au2Pcriv/Gmet0eaj4TwWH41sO9wnHDiQsEj19q0drzdWdeAHtweg==", "license": "MIT", "dependencies": { "event-target-shim": "^5.0.0" }, "engines": { "node": ">=6.5" } }, "node_modules/archiver": { "version": "5.3.2", "resolved": "https://registry.npmjs.org/archiver/-/archiver-5.3.2.tgz", "integrity": "sha512-+25nxyyznAXF7Nef3y0EbBeqmGZgeN/BxHX29Rs39djAfaFalmQ89SE6CWyDCHzGL0yt/ycBtNOmGTW0FyGWNw==", 
"dependencies": { "archiver-utils": "^2.1.0", "async": "^3.2.4", "buffer-crc32": "^0.2.1", "readable-stream": "^3.6.0", "readdir-glob": "^1.1.2", "tar-stream": "^2.2.0", "zip-stream": "^4.1.0" }, "engines": { "node": ">= 10" } }, "node_modules/archiver-utils": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/archiver-utils/-/archiver-utils-2.1.0.tgz", "integrity": "sha512-bEL/yUb/fNNiNTuUz979Z0Yg5L+LzLxGJz8x79lYmR54fmTIb6ob/hNQgkQnIUDWIFjZVQwl9Xs356I6BAMHfw==", "dependencies": { "glob": "^7.1.4", "graceful-fs": "^4.2.0", "lazystream": "^1.0.0", "lodash.defaults": "^4.2.0", "lodash.difference": "^4.5.0", "lodash.flatten": "^4.4.0", "lodash.isplainobject": "^4.0.6", "lodash.union": "^4.6.0", "normalize-path": "^3.0.0", "readable-stream": "^2.0.0" }, "engines": { "node": ">= 6" } }, "node_modules/archiver-utils/node_modules/readable-stream": { "version": "2.3.8", "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.8.tgz", "integrity": "sha512-8p0AUk4XODgIewSi0l8Epjs+EVnWiK7NoDIEGU0HhE7+ZyY8D1IMY7odu5lRrFXGg71L15KG8QrPmum45RTtdA==", "dependencies": { "core-util-is": "~1.0.0", "inherits": "~2.0.3", "isarray": "~1.0.0", "process-nextick-args": "~2.0.0", "safe-buffer": "~5.1.1", "string_decoder": "~1.1.1", "util-deprecate": "~1.0.1" } }, "node_modules/archiver-utils/node_modules/safe-buffer": { "version": "5.1.2", "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==" }, "node_modules/archiver-utils/node_modules/string_decoder": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", "dependencies": { "safe-buffer": "~5.1.0" } }, "node_modules/argparse": { "version": "2.0.1", "resolved": 
"https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==" }, "node_modules/asciidoctor-opal-runtime": { "version": "0.3.3", "resolved": "https://registry.npmjs.org/asciidoctor-opal-runtime/-/asciidoctor-opal-runtime-0.3.3.tgz", "integrity": "sha512-/CEVNiOia8E5BMO9FLooo+Kv18K4+4JBFRJp8vUy/N5dMRAg+fRNV4HA+o6aoSC79jVU/aT5XvUpxSxSsTS8FQ==", "license": "MIT", "dependencies": { "glob": "7.1.3", "unxhr": "1.0.1" }, "engines": { "node": ">=8.11" } }, "node_modules/asciidoctor-opal-runtime/node_modules/glob": { "version": "7.1.3", "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.3.tgz", "integrity": "sha512-vcfuiIxogLV4DlGBHIUOwI0IbrJ8HWPc4MU7HzviGeNho/UJDfi6B5p3sHeWIQ0KGIU0Jpxi5ZHxemQfLkkAwQ==", "deprecated": "Glob versions prior to v9 are no longer supported", "license": "ISC", "dependencies": { "fs.realpath": "^1.0.0", "inflight": "^1.0.4", "inherits": "2", "minimatch": "^3.0.4", "once": "^1.3.0", "path-is-absolute": "^1.0.0" }, "engines": { "node": "*" } }, "node_modules/asciinema-player": { "version": "3.7.1", "resolved": "https://registry.npmjs.org/asciinema-player/-/asciinema-player-3.7.1.tgz", "integrity": "sha512-zDJteGjBzNQhHEnD0aG7GqV3E53sOyKb1WCxKNRm2PquU70Lq3s4xxb91wyDS0hBJ3J/TB8aY3y8gjGPN+T23A==", "dependencies": { "@babel/runtime": "^7.21.0", "solid-js": "^1.3.0" } }, "node_modules/async": { "version": "3.2.5", "resolved": "https://registry.npmjs.org/async/-/async-3.2.5.tgz", "integrity": "sha512-baNZyqaaLhyLVKm/DlvdW051MSgO6b8eVfIezl9E5PqWxFgzLm/wQntEW4zOytVburDEr0JlALEpdOFwvErLsg==" }, "node_modules/async-lock": { "version": "1.4.1", "resolved": "https://registry.npmjs.org/async-lock/-/async-lock-1.4.1.tgz", "integrity": "sha512-Az2ZTpuytrtqENulXwO3GGv1Bztugx6TT37NIo7imr/Qo0gsYiGtSdBa2B6fsXhTpVZDNfu1Qn3pk531e3q+nQ==" }, "node_modules/atomic-sleep": { "version": "1.0.0", "resolved": 
"https://registry.npmjs.org/atomic-sleep/-/atomic-sleep-1.0.0.tgz", "integrity": "sha512-kNOjDqAh7px0XWNI+4QbzoiR/nTkHAWNud2uvnJquD1/x5a7EQZMJT0AczqK0Qn67oY/TTQ1LbUKajZpp3I9tQ==", "license": "MIT", "engines": { "node": ">=8.0.0" } }, "node_modules/available-typed-arrays": { "version": "1.0.7", "resolved": "https://registry.npmjs.org/available-typed-arrays/-/available-typed-arrays-1.0.7.tgz", "integrity": "sha512-wvUjBtSGN7+7SjNpq/9M2Tg350UZD3q62IFZLbRAR1bSMlCo1ZaeW+BJ+D090e4hIIZLBcTDWe4Mh4jvUDajzQ==", "license": "MIT", "dependencies": { "possible-typed-array-names": "^1.0.0" }, "engines": { "node": ">= 0.4" }, "funding": { "url": "https://github.com/sponsors/ljharb" } }, "node_modules/b4a": { "version": "1.7.3", "resolved": "https://registry.npmjs.org/b4a/-/b4a-1.7.3.tgz", "integrity": "sha512-5Q2mfq2WfGuFp3uS//0s6baOJLMoVduPYVeNmDYxu5OUA1/cBfvr2RIS7vi62LdNj/urk1hfmj867I3qt6uZ7Q==", "license": "Apache-2.0", "peerDependencies": { "react-native-b4a": "*" }, "peerDependenciesMeta": { "react-native-b4a": { "optional": true } } }, "node_modules/balanced-match": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==" }, "node_modules/bare-events": { "version": "2.8.2", "resolved": "https://registry.npmjs.org/bare-events/-/bare-events-2.8.2.tgz", "integrity": "sha512-riJjyv1/mHLIPX4RwiK+oW9/4c3TEUeORHKefKAKnZ5kyslbN+HXowtbaVEqt4IMUB7OXlfixcs6gsFeo/jhiQ==", "license": "Apache-2.0", "peerDependencies": { "bare-abort-controller": "*" }, "peerDependenciesMeta": { "bare-abort-controller": { "optional": true } } }, "node_modules/base64-js": { "version": "1.5.1", "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz", "integrity": "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==", "funding": [ { "type": "github", "url": "https://github.com/sponsors/feross" 
}, { "type": "patreon", "url": "https://www.patreon.com/feross" }, { "type": "consulting", "url": "https://feross.org/support" } ] }, "node_modules/bl": { "version": "4.1.0", "resolved": "https://registry.npmjs.org/bl/-/bl-4.1.0.tgz", "integrity": "sha512-1W07cM9gS6DcLperZfFSj+bWLtaPGSOHWhPiGzXmvVJbRLdG82sH/Kn8EtW1VqWVA54AKf2h5k5BbnIbwF3h6w==", "dependencies": { "buffer": "^5.5.0", "inherits": "^2.0.4", "readable-stream": "^3.4.0" } }, "node_modules/brace-expansion": { "version": "1.1.12", "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", "license": "MIT", "dependencies": { "balanced-match": "^1.0.0", "concat-map": "0.0.1" } }, "node_modules/braces": { "version": "3.0.3", "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz", "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==", "license": "MIT", "dependencies": { "fill-range": "^7.1.1" }, "engines": { "node": ">=8" } }, "node_modules/buffer": { "version": "5.7.1", "resolved": "https://registry.npmjs.org/buffer/-/buffer-5.7.1.tgz", "integrity": "sha512-EHcyIPBQ4BSGlvjB16k5KgAJ27CIsHY/2JBmCRReo48y9rQ3MaUzWX3KVlBa4U7MyX02HdVj0K7C3WaB3ju7FQ==", "funding": [ { "type": "github", "url": "https://github.com/sponsors/feross" }, { "type": "patreon", "url": "https://www.patreon.com/feross" }, { "type": "consulting", "url": "https://feross.org/support" } ], "dependencies": { "base64-js": "^1.3.1", "ieee754": "^1.1.13" } }, "node_modules/buffer-alloc": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/buffer-alloc/-/buffer-alloc-1.2.0.tgz", "integrity": "sha512-CFsHQgjtW1UChdXgbyJGtnm+O/uLQeZdtbDo8mfUgYXCHSM1wgrVxXm6bSyrUuErEb+4sYVGCzASBRot7zyrow==", "dependencies": { "buffer-alloc-unsafe": "^1.1.0", "buffer-fill": "^1.0.0" } }, "node_modules/buffer-alloc-unsafe": { "version": "1.1.0", "resolved": 
"https://registry.npmjs.org/buffer-alloc-unsafe/-/buffer-alloc-unsafe-1.1.0.tgz", "integrity": "sha512-TEM2iMIEQdJ2yjPJoSIsldnleVaAk1oW3DBVUykyOLsEsFmEc9kn+SFFPz+gl54KQNxlDnAwCXosOS9Okx2xAg==" }, "node_modules/buffer-crc32": { "version": "0.2.13", "resolved": "https://registry.npmjs.org/buffer-crc32/-/buffer-crc32-0.2.13.tgz", "integrity": "sha512-VO9Ht/+p3SN7SKWqcrgEzjGbRSJYTx+Q1pTQC0wrWqHx0vpJraQ6GtHx8tvcg1rlK1byhU5gccxgOgj7B0TDkQ==", "engines": { "node": "*" } }, "node_modules/buffer-fill": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/buffer-fill/-/buffer-fill-1.0.0.tgz", "integrity": "sha512-T7zexNBwiiaCOGDg9xNX9PBmjrubblRkENuptryuI64URkXDFum9il/JGL8Lm8wYfAXpredVXXZz7eMHilimiQ==" }, "node_modules/cache-directory": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/cache-directory/-/cache-directory-2.0.0.tgz", "integrity": "sha512-7YKEapH+2Uikde8hySyfobXBqPKULDyHNl/lhKm7cKf/GJFdG/tU/WpLrOg2y9aUrQrWUilYqawFIiGJPS6gDA==", "dependencies": { "xdg-basedir": "^3.0.0" }, "engines": { "node": ">=4" } }, "node_modules/call-bind": { "version": "1.0.8", "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.8.tgz", "integrity": "sha512-oKlSFMcMwpUg2ednkhQ454wfWiU/ul3CkJe/PEHcTKuiX6RpbehUiFMXu13HalGZxfUwCQzZG747YXBn1im9ww==", "license": "MIT", "dependencies": { "call-bind-apply-helpers": "^1.0.0", "es-define-property": "^1.0.0", "get-intrinsic": "^1.2.4", "set-function-length": "^1.2.2" }, "engines": { "node": ">= 0.4" }, "funding": { "url": "https://github.com/sponsors/ljharb" } }, "node_modules/call-bind-apply-helpers": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz", "integrity": "sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==", "license": "MIT", "dependencies": { "es-errors": "^1.3.0", "function-bind": "^1.1.2" }, "engines": { "node": ">= 0.4" } }, "node_modules/call-bound": { "version": "1.0.4", "resolved": 
"https://registry.npmjs.org/call-bound/-/call-bound-1.0.4.tgz", "integrity": "sha512-+ys997U96po4Kx/ABpBCqhA9EuxJaQWDQg7295H4hBphv3IZg0boBKuwYpt4YXp6MZ5AmZQnU/tyMTlRpaSejg==", "license": "MIT", "dependencies": { "call-bind-apply-helpers": "^1.0.2", "get-intrinsic": "^1.3.0" }, "engines": { "node": ">= 0.4" }, "funding": { "url": "https://github.com/sponsors/ljharb" } }, "node_modules/ci": { "version": "2.3.0", "resolved": "https://registry.npmjs.org/ci/-/ci-2.3.0.tgz", "integrity": "sha512-0MGXkzJKkwV3enG7RUxjJKdiAkbaZ7visCjitfpCN2BQjv02KGRMxCHLv4RPokkjJ4xR33FLMAXweS+aQ0pFSQ==", "bin": { "ci": "dist/cli.js" }, "funding": { "url": "https://github.com/privatenumber/ci?sponsor=1" } }, "node_modules/clean-git-ref": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/clean-git-ref/-/clean-git-ref-2.0.1.tgz", "integrity": "sha512-bLSptAy2P0s6hU4PzuIMKmMJJSE6gLXGH1cntDu7bWJUksvuM+7ReOK61mozULErYvP6a15rnYl0zFDef+pyPw==" }, "node_modules/clone": { "version": "2.1.2", "resolved": "https://registry.npmjs.org/clone/-/clone-2.1.2.tgz", "integrity": "sha512-3Pe/CF1Nn94hyhIYpjtiLhdCoEoz0DqQ+988E9gmeEdQZlojxnOb74wctFyuwWQHzqyf9X7C7MG8juUpqBJT8w==", "license": "MIT", "engines": { "node": ">=0.8" } }, "node_modules/colorette": { "version": "2.0.20", "resolved": "https://registry.npmjs.org/colorette/-/colorette-2.0.20.tgz", "integrity": "sha512-IfEDxwoWIjkeXL1eXcDiow4UbKjhLdq6/EuSVR9GMN7KVH3r9gQ83e73hsz1Nd1T3ijd5xv1wcWRYO+D6kCI2w==", "license": "MIT" }, "node_modules/commander": { "version": "12.1.0", "resolved": "https://registry.npmjs.org/commander/-/commander-12.1.0.tgz", "integrity": "sha512-Vw8qHK3bZM9y/P10u3Vib8o/DdkvA2OtPtZvD871QKjy74Wj1WSKFILMPRPSdUSx5RFK1arlJzEtA4PkFgnbuA==", "license": "MIT", "engines": { "node": ">=18" } }, "node_modules/compress-commons": { "version": "4.1.2", "resolved": "https://registry.npmjs.org/compress-commons/-/compress-commons-4.1.2.tgz", "integrity": 
"sha512-D3uMHtGc/fcO1Gt1/L7i1e33VOvD4A9hfQLP+6ewd+BvG/gQ84Yh4oftEhAdjSMgBgwGL+jsppT7JYNpo6MHHg==", "dependencies": { "buffer-crc32": "^0.2.13", "crc32-stream": "^4.0.2", "normalize-path": "^3.0.0", "readable-stream": "^3.6.0" }, "engines": { "node": ">= 10" } }, "node_modules/concat-map": { "version": "0.0.1", "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==" }, "node_modules/convict": { "version": "6.2.4", "resolved": "https://registry.npmjs.org/convict/-/convict-6.2.4.tgz", "integrity": "sha512-qN60BAwdMVdofckX7AlohVJ2x9UvjTNoKVXCL2LxFk1l7757EJqf1nySdMkPQer0bt8kQ5lQiyZ9/2NvrFBuwQ==", "license": "Apache-2.0", "dependencies": { "lodash.clonedeep": "^4.5.0", "yargs-parser": "^20.2.7" }, "engines": { "node": ">=6" } }, "node_modules/core-util-is": { "version": "1.0.3", "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.3.tgz", "integrity": "sha512-ZQBvi1DcpJ4GDqanjucZ2Hj3wEO5pZDS89BWbkcrvdxksJorwUDDZamX9ldFkp9aw2lmBDLgkObEA4DWNJ9FYQ==" }, "node_modules/crc-32": { "version": "1.2.2", "resolved": "https://registry.npmjs.org/crc-32/-/crc-32-1.2.2.tgz", "integrity": "sha512-ROmzCKrTnOwybPcJApAA6WBWij23HVfGVNKqqrZpuyZOHqK2CwHSvpGuyt/UNNvaIjEd8X5IFGp4Mh+Ie1IHJQ==", "bin": { "crc32": "bin/crc32.njs" }, "engines": { "node": ">=0.8" } }, "node_modules/crc32-stream": { "version": "4.0.3", "resolved": "https://registry.npmjs.org/crc32-stream/-/crc32-stream-4.0.3.tgz", "integrity": "sha512-NT7w2JVU7DFroFdYkeq8cywxrgjPHWkdX1wjpRQXPX5Asews3tA+Ght6lddQO5Mkumffp3X7GEqku3epj2toIw==", "dependencies": { "crc-32": "^1.2.0", "readable-stream": "^3.4.0" }, "engines": { "node": ">= 10" } }, "node_modules/csstype": { "version": "3.1.3", "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.1.3.tgz", "integrity": "sha512-M1uQkMl8rQK/szD0LNhtqxIPLpimGm8sOBwU7lLnCpSbTyY3yeU1Vc7l4KT5zT4s/yOxHH5O7tIuuLOCnLADRw==" }, 
"node_modules/dateformat": { "version": "4.6.3", "resolved": "https://registry.npmjs.org/dateformat/-/dateformat-4.6.3.tgz", "integrity": "sha512-2P0p0pFGzHS5EMnhdxQi7aJN+iMheud0UhG4dlE1DLAlvL8JHjJJTX/CSm4JXwV0Ka5nGk3zC5mcb5bUQUxxMA==", "license": "MIT", "engines": { "node": "*" } }, "node_modules/decompress": { "version": "4.2.1", "resolved": "https://registry.npmjs.org/decompress/-/decompress-4.2.1.tgz", "integrity": "sha512-e48kc2IjU+2Zw8cTb6VZcJQ3lgVbS4uuB1TfCHbiZIP/haNXm+SVyhu+87jts5/3ROpd82GSVCoNs/z8l4ZOaQ==", "dependencies": { "decompress-tar": "^4.0.0", "decompress-tarbz2": "^4.0.0", "decompress-targz": "^4.0.0", "decompress-unzip": "^4.0.1", "graceful-fs": "^4.1.10", "make-dir": "^1.0.0", "pify": "^2.3.0", "strip-dirs": "^2.0.0" }, "engines": { "node": ">=4" } }, "node_modules/decompress-response": { "version": "6.0.0", "resolved": "https://registry.npmjs.org/decompress-response/-/decompress-response-6.0.0.tgz", "integrity": "sha512-aW35yZM6Bb/4oJlZncMH2LCoZtJXTRxES17vE3hoRiowU2kWHaJKFkSBDnDR+cm9J+9QhXmREyIfv0pji9ejCQ==", "dependencies": { "mimic-response": "^3.1.0" }, "engines": { "node": ">=10" }, "funding": { "url": "https://github.com/sponsors/sindresorhus" } }, "node_modules/decompress-tar": { "version": "4.1.1", "resolved": "https://registry.npmjs.org/decompress-tar/-/decompress-tar-4.1.1.tgz", "integrity": "sha512-JdJMaCrGpB5fESVyxwpCx4Jdj2AagLmv3y58Qy4GE6HMVjWz1FeVQk1Ct4Kye7PftcdOo/7U7UKzYBJgqnGeUQ==", "dependencies": { "file-type": "^5.2.0", "is-stream": "^1.1.0", "tar-stream": "^1.5.2" }, "engines": { "node": ">=4" } }, "node_modules/decompress-tar/node_modules/bl": { "version": "1.2.3", "resolved": "https://registry.npmjs.org/bl/-/bl-1.2.3.tgz", "integrity": "sha512-pvcNpa0UU69UT341rO6AYy4FVAIkUHuZXRIWbq+zHnsVcRzDDjIAhGuuYoi0d//cwIwtt4pkpKycWEfjdV+vww==", "dependencies": { "readable-stream": "^2.3.5", "safe-buffer": "^5.1.1" } }, "node_modules/decompress-tar/node_modules/readable-stream": { "version": "2.3.8", "resolved": 
"https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.8.tgz", "integrity": "sha512-8p0AUk4XODgIewSi0l8Epjs+EVnWiK7NoDIEGU0HhE7+ZyY8D1IMY7odu5lRrFXGg71L15KG8QrPmum45RTtdA==", "dependencies": { "core-util-is": "~1.0.0", "inherits": "~2.0.3", "isarray": "~1.0.0", "process-nextick-args": "~2.0.0", "safe-buffer": "~5.1.1", "string_decoder": "~1.1.1", "util-deprecate": "~1.0.1" } }, "node_modules/decompress-tar/node_modules/safe-buffer": { "version": "5.1.2", "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==" }, "node_modules/decompress-tar/node_modules/string_decoder": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", "dependencies": { "safe-buffer": "~5.1.0" } }, "node_modules/decompress-tar/node_modules/tar-stream": { "version": "1.6.2", "resolved": "https://registry.npmjs.org/tar-stream/-/tar-stream-1.6.2.tgz", "integrity": "sha512-rzS0heiNf8Xn7/mpdSVVSMAWAoy9bfb1WOTYC78Z0UQKeKa/CWS8FOq0lKGNa8DWKAn9gxjCvMLYc5PGXYlK2A==", "dependencies": { "bl": "^1.0.0", "buffer-alloc": "^1.2.0", "end-of-stream": "^1.0.0", "fs-constants": "^1.0.0", "readable-stream": "^2.3.0", "to-buffer": "^1.1.1", "xtend": "^4.0.0" }, "engines": { "node": ">= 0.8.0" } }, "node_modules/decompress-tarbz2": { "version": "4.1.1", "resolved": "https://registry.npmjs.org/decompress-tarbz2/-/decompress-tarbz2-4.1.1.tgz", "integrity": "sha512-s88xLzf1r81ICXLAVQVzaN6ZmX4A6U4z2nMbOwobxkLoIIfjVMBg7TeguTUXkKeXni795B6y5rnvDw7rxhAq9A==", "dependencies": { "decompress-tar": "^4.1.0", "file-type": "^6.1.0", "is-stream": "^1.1.0", "seek-bzip": "^1.0.5", "unbzip2-stream": "^1.0.9" }, "engines": { "node": ">=4" } }, "node_modules/decompress-tarbz2/node_modules/file-type": { "version": "6.2.0", "resolved": 
"https://registry.npmjs.org/file-type/-/file-type-6.2.0.tgz", "integrity": "sha512-YPcTBDV+2Tm0VqjybVd32MHdlEGAtuxS3VAYsumFokDSMG+ROT5wawGlnHDoz7bfMcMDt9hxuXvXwoKUx2fkOg==", "engines": { "node": ">=4" } }, "node_modules/decompress-targz": { "version": "4.1.1", "resolved": "https://registry.npmjs.org/decompress-targz/-/decompress-targz-4.1.1.tgz", "integrity": "sha512-4z81Znfr6chWnRDNfFNqLwPvm4db3WuZkqV+UgXQzSngG3CEKdBkw5jrv3axjjL96glyiiKjsxJG3X6WBZwX3w==", "dependencies": { "decompress-tar": "^4.1.1", "file-type": "^5.2.0", "is-stream": "^1.1.0" }, "engines": { "node": ">=4" } }, "node_modules/decompress-unzip": { "version": "4.0.1", "resolved": "https://registry.npmjs.org/decompress-unzip/-/decompress-unzip-4.0.1.tgz", "integrity": "sha512-1fqeluvxgnn86MOh66u8FjbtJpAFv5wgCT9Iw8rcBqQcCo5tO8eiJw7NNTrvt9n4CRBVq7CstiS922oPgyGLrw==", "dependencies": { "file-type": "^3.8.0", "get-stream": "^2.2.0", "pify": "^2.3.0", "yauzl": "^2.4.2" }, "engines": { "node": ">=4" } }, "node_modules/decompress-unzip/node_modules/file-type": { "version": "3.9.0", "resolved": "https://registry.npmjs.org/file-type/-/file-type-3.9.0.tgz", "integrity": "sha512-RLoqTXE8/vPmMuTI88DAzhMYC99I8BWv7zYP4A1puo5HIjEJ5EX48ighy4ZyKMG9EDXxBgW6e++cn7d1xuFghA==", "engines": { "node": ">=0.10.0" } }, "node_modules/define-data-property": { "version": "1.1.4", "resolved": "https://registry.npmjs.org/define-data-property/-/define-data-property-1.1.4.tgz", "integrity": "sha512-rBMvIzlpA8v6E+SJZoo++HAYqsLrkg7MSfIinMPFhmkorw7X+dOXVJQs+QT69zGkzMyfDnIMN2Wid1+NbL3T+A==", "dependencies": { "es-define-property": "^1.0.0", "es-errors": "^1.3.0", "gopd": "^1.0.1" }, "engines": { "node": ">= 0.4" }, "funding": { "url": "https://github.com/sponsors/ljharb" } }, "node_modules/diff3": { "version": "0.0.3", "resolved": "https://registry.npmjs.org/diff3/-/diff3-0.0.3.tgz", "integrity": "sha512-iSq8ngPOt0K53A6eVr4d5Kn6GNrM2nQZtC740pzIriHtn4pOQ2lyzEXQMBeVcWERN0ye7fhBsk9PbLLQOnUx/g==" }, "node_modules/dunder-proto": { "version": 
"1.0.1", "resolved": "https://registry.npmjs.org/dunder-proto/-/dunder-proto-1.0.1.tgz", "integrity": "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==", "license": "MIT", "dependencies": { "call-bind-apply-helpers": "^1.0.1", "es-errors": "^1.3.0", "gopd": "^1.2.0" }, "engines": { "node": ">= 0.4" } }, "node_modules/duplexify": { "version": "4.1.3", "resolved": "https://registry.npmjs.org/duplexify/-/duplexify-4.1.3.tgz", "integrity": "sha512-M3BmBhwJRZsSx38lZyhE53Csddgzl5R7xGJNk7CVddZD6CcmwMCH8J+7AprIrQKH7TonKxaCjcv27Qmf+sQ+oA==", "dependencies": { "end-of-stream": "^1.4.1", "inherits": "^2.0.3", "readable-stream": "^3.1.1", "stream-shift": "^1.0.2" } }, "node_modules/end-of-stream": { "version": "1.4.4", "resolved": "https://registry.npmjs.org/end-of-stream/-/end-of-stream-1.4.4.tgz", "integrity": "sha512-+uw1inIHVPQoaVuHzRyXd21icM+cnt4CzD5rW+NC1wjOUSTOs+Te7FOv7AhN7vS9x/oIyhLP5PR1H+phQAHu5Q==", "dependencies": { "once": "^1.4.0" } }, "node_modules/es-define-property": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.1.tgz", "integrity": "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==", "license": "MIT", "engines": { "node": ">= 0.4" } }, "node_modules/es-errors": { "version": "1.3.0", "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz", "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==", "engines": { "node": ">= 0.4" } }, "node_modules/es-object-atoms": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.1.1.tgz", "integrity": "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==", "license": "MIT", "dependencies": { "es-errors": "^1.3.0" }, "engines": { "node": ">= 0.4" } }, "node_modules/event-target-shim": { "version": "5.0.1", "resolved": 
"https://registry.npmjs.org/event-target-shim/-/event-target-shim-5.0.1.tgz", "integrity": "sha512-i/2XbnSz/uxRCU6+NdVJgKWDTM427+MqYbkQzD321DuCQJUqOuJKIA0IM2+W2xtYHdKOmZ4dR6fExsd4SXL+WQ==", "license": "MIT", "engines": { "node": ">=6" } }, "node_modules/events": { "version": "3.3.0", "resolved": "https://registry.npmjs.org/events/-/events-3.3.0.tgz", "integrity": "sha512-mQw+2fkQbALzQ7V0MY0IqdnXNOeTtP4r0lN9z7AAawCXgqea7bDii20AYrIBrFd/Hx0M2Ocz6S111CaFkUcb0Q==", "license": "MIT", "engines": { "node": ">=0.8.x" } }, "node_modules/events-universal": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/events-universal/-/events-universal-1.0.1.tgz", "integrity": "sha512-LUd5euvbMLpwOF8m6ivPCbhQeSiYVNb8Vs0fQ8QjXo0JTkEHpz8pxdQf0gStltaPpw0Cca8b39KxvK9cfKRiAw==", "license": "Apache-2.0", "dependencies": { "bare-events": "^2.7.0" } }, "node_modules/extend": { "version": "3.0.2", "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz", "integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==" }, "node_modules/fast-copy": { "version": "3.0.2", "resolved": "https://registry.npmjs.org/fast-copy/-/fast-copy-3.0.2.tgz", "integrity": "sha512-dl0O9Vhju8IrcLndv2eU4ldt1ftXMqqfgN4H1cpmGV7P6jeB9FwpN9a2c8DPGE1Ys88rNUJVYDHq73CGAGOPfQ==", "license": "MIT" }, "node_modules/fast-fifo": { "version": "1.3.2", "resolved": "https://registry.npmjs.org/fast-fifo/-/fast-fifo-1.3.2.tgz", "integrity": "sha512-/d9sfos4yxzpwkDkuN7k2SqFKtYNmCTzgfEpz82x34IM9/zc8KGxQoXg1liNC/izpRM/MBdt44Nmx41ZWqk+FQ==", "license": "MIT" }, "node_modules/fast-glob": { "version": "3.3.3", "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.3.3.tgz", "integrity": "sha512-7MptL8U0cqcFdzIzwOTHoilX9x5BrNqye7Z/LuC7kCMRio1EMSyqRK3BEAUD7sXRq4iT4AzTVuZdhgQ2TCvYLg==", "license": "MIT", "dependencies": { "@nodelib/fs.stat": "^2.0.2", "@nodelib/fs.walk": "^1.2.3", "glob-parent": "^5.1.2", "merge2": "^1.3.0", "micromatch": "^4.0.8" }, "engines": { 
"node": ">=8.6.0" } }, "node_modules/fast-glob/node_modules/glob-parent": { "version": "5.1.2", "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", "license": "ISC", "dependencies": { "is-glob": "^4.0.1" }, "engines": { "node": ">= 6" } }, "node_modules/fast-redact": { "version": "3.5.0", "resolved": "https://registry.npmjs.org/fast-redact/-/fast-redact-3.5.0.tgz", "integrity": "sha512-dwsoQlS7h9hMeYUq1W++23NDcBLV4KqONnITDV9DjfS3q1SgDGVrBdvvTLUotWtPSD7asWDV9/CmsZPy8Hf70A==", "license": "MIT", "engines": { "node": ">=6" } }, "node_modules/fast-safe-stringify": { "version": "2.1.1", "resolved": "https://registry.npmjs.org/fast-safe-stringify/-/fast-safe-stringify-2.1.1.tgz", "integrity": "sha512-W+KJc2dmILlPplD/H4K9l9LcAHAfPtP6BY84uVLXQ6Evcz9Lcg33Y2z1IVblT6xdY54PXYVHEv+0Wpq8Io6zkA==", "license": "MIT" }, "node_modules/fast-xml-parser": { "version": "4.5.3", "resolved": "https://registry.npmjs.org/fast-xml-parser/-/fast-xml-parser-4.5.3.tgz", "integrity": "sha512-RKihhV+SHsIUGXObeVy9AXiBbFwkVk7Syp8XgwN5U3JV416+Gwp/GO9i0JYKmikykgz/UHRrrV4ROuZEo/T0ig==", "funding": [ { "type": "github", "url": "https://github.com/sponsors/NaturalIntelligence" } ], "license": "MIT", "dependencies": { "strnum": "^1.1.1" }, "bin": { "fxparser": "src/cli/cli.js" } }, "node_modules/fastq": { "version": "1.19.1", "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.19.1.tgz", "integrity": "sha512-GwLTyxkCXjXbxqIhTsMI2Nui8huMPtnxg7krajPJAjnEG/iiOS7i+zCtWGZR9G0NBKbXKh6X9m9UIsYX/N6vvQ==", "license": "ISC", "dependencies": { "reusify": "^1.0.4" } }, "node_modules/fd-slicer": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/fd-slicer/-/fd-slicer-1.1.0.tgz", "integrity": "sha512-cE1qsB/VwyQozZ+q1dGxR8LBYNZeofhEdUNGSMbQD3Gw2lAzX9Zb3uIU6Ebc/Fmyjo9AWWfnn0AUCHqtevs/8g==", "dependencies": { "pend": "~1.2.0" } }, "node_modules/file-type": { "version": 
"5.2.0", "resolved": "https://registry.npmjs.org/file-type/-/file-type-5.2.0.tgz", "integrity": "sha512-Iq1nJ6D2+yIO4c8HHg4fyVb8mAJieo1Oloy1mLLaB2PvezNedhBVm+QU7g0qM42aiMbRXTxKKwGD17rjKNJYVQ==", "engines": { "node": ">=4" } }, "node_modules/fill-range": { "version": "7.1.1", "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz", "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==", "license": "MIT", "dependencies": { "to-regex-range": "^5.0.1" }, "engines": { "node": ">=8" } }, "node_modules/for-each": { "version": "0.3.5", "resolved": "https://registry.npmjs.org/for-each/-/for-each-0.3.5.tgz", "integrity": "sha512-dKx12eRCVIzqCxFGplyFKJMPvLEWgmNtUrpTiJIR5u97zEhRG8ySrtboPHZXx7daLxQVrl643cTzbab2tkQjxg==", "license": "MIT", "dependencies": { "is-callable": "^1.2.7" }, "engines": { "node": ">= 0.4" }, "funding": { "url": "https://github.com/sponsors/ljharb" } }, "node_modules/fs-constants": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/fs-constants/-/fs-constants-1.0.0.tgz", "integrity": "sha512-y6OAwoSIf7FyjMIv94u+b5rdheZEjzR63GTyZJm5qh4Bi+2YgwLCcI/fPFZkL5PSixOt6ZNKm+w+Hfp/Bciwow==" }, "node_modules/fs.realpath": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==" }, "node_modules/function-bind": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", "funding": { "url": "https://github.com/sponsors/ljharb" } }, "node_modules/get-intrinsic": { "version": "1.3.0", "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.3.0.tgz", "integrity": "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==", 
"license": "MIT", "dependencies": { "call-bind-apply-helpers": "^1.0.2", "es-define-property": "^1.0.1", "es-errors": "^1.3.0", "es-object-atoms": "^1.1.1", "function-bind": "^1.1.2", "get-proto": "^1.0.1", "gopd": "^1.2.0", "has-symbols": "^1.1.0", "hasown": "^2.0.2", "math-intrinsics": "^1.1.0" }, "engines": { "node": ">= 0.4" }, "funding": { "url": "https://github.com/sponsors/ljharb" } }, "node_modules/get-proto": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/get-proto/-/get-proto-1.0.1.tgz", "integrity": "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==", "license": "MIT", "dependencies": { "dunder-proto": "^1.0.1", "es-object-atoms": "^1.0.0" }, "engines": { "node": ">= 0.4" } }, "node_modules/get-stream": { "version": "2.3.1", "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-2.3.1.tgz", "integrity": "sha512-AUGhbbemXxrZJRD5cDvKtQxLuYaIbNtDTK8YqupCI393Q2KSTreEsLUN3ZxAWFGiKTzL6nKuzfcIvieflUX9qA==", "dependencies": { "object-assign": "^4.0.1", "pinkie-promise": "^2.0.0" }, "engines": { "node": ">=0.10.0" } }, "node_modules/glob": { "version": "7.2.3", "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", "dependencies": { "fs.realpath": "^1.0.0", "inflight": "^1.0.4", "inherits": "2", "minimatch": "^3.1.1", "once": "^1.3.0", "path-is-absolute": "^1.0.0" }, "engines": { "node": "*" }, "funding": { "url": "https://github.com/sponsors/isaacs" } }, "node_modules/glob-parent": { "version": "6.0.2", "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz", "integrity": "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==", "dependencies": { "is-glob": "^4.0.3" }, "engines": { "node": ">=10.13.0" } }, "node_modules/glob-stream": { "version": "7.0.0", "resolved": 
"https://registry.npmjs.org/glob-stream/-/glob-stream-7.0.0.tgz", "integrity": "sha512-evR4kvr6s0Yo5t4CD4H171n4T8XcnPFznvsbeN8K9FPzc0Q0wYqcOWyGtck2qcvJSLXKnU6DnDyfmbDDabYvRQ==", "dependencies": { "extend": "^3.0.2", "glob": "^7.2.0", "glob-parent": "^6.0.2", "is-negated-glob": "^1.0.0", "ordered-read-streams": "^1.0.1", "pumpify": "^2.0.1", "readable-stream": "^3.6.0", "remove-trailing-separator": "^1.1.0", "to-absolute-glob": "^2.0.2", "unique-stream": "^2.3.1" }, "engines": { "node": ">=10.13.0" } }, "node_modules/gopd": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz", "integrity": "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==", "license": "MIT", "engines": { "node": ">= 0.4" }, "funding": { "url": "https://github.com/sponsors/ljharb" } }, "node_modules/graceful-fs": { "version": "4.2.11", "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz", "integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==" }, "node_modules/handlebars": { "version": "4.7.8", "resolved": "https://registry.npmjs.org/handlebars/-/handlebars-4.7.8.tgz", "integrity": "sha512-vafaFqs8MZkRrSX7sFVUdo3ap/eNiLnb4IakshzvP56X5Nr1iGKAIqdX6tMlm6HcNRIkr6AxO5jFEoJzzpT8aQ==", "dependencies": { "minimist": "^1.2.5", "neo-async": "^2.6.2", "source-map": "^0.6.1", "wordwrap": "^1.0.0" }, "bin": { "handlebars": "bin/handlebars" }, "engines": { "node": ">=0.4.7" }, "optionalDependencies": { "uglify-js": "^3.1.4" } }, "node_modules/has-property-descriptors": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/has-property-descriptors/-/has-property-descriptors-1.0.2.tgz", "integrity": "sha512-55JNKuIW+vq4Ke1BjOTjM2YctQIvCT7GFzHwmfZPGo5wnrgkid0YQtnAleFSqumZm4az3n2BS+erby5ipJdgrg==", "dependencies": { "es-define-property": "^1.0.0" }, "funding": { "url": "https://github.com/sponsors/ljharb" } }, "node_modules/has-symbols": { "version": 
"1.1.0", "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.1.0.tgz", "integrity": "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==", "license": "MIT", "engines": { "node": ">= 0.4" }, "funding": { "url": "https://github.com/sponsors/ljharb" } }, "node_modules/has-tostringtag": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.2.tgz", "integrity": "sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==", "license": "MIT", "dependencies": { "has-symbols": "^1.0.3" }, "engines": { "node": ">= 0.4" }, "funding": { "url": "https://github.com/sponsors/ljharb" } }, "node_modules/hasown": { "version": "2.0.2", "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", "dependencies": { "function-bind": "^1.1.2" }, "engines": { "node": ">= 0.4" } }, "node_modules/help-me": { "version": "5.0.0", "resolved": "https://registry.npmjs.org/help-me/-/help-me-5.0.0.tgz", "integrity": "sha512-7xgomUX6ADmcYzFik0HzAxh/73YlKR9bmFzf51CZwR+b6YtzU2m0u49hQCqV6SvlqIqsaxovfwdvbnsw3b/zpg==", "license": "MIT" }, "node_modules/hpagent": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/hpagent/-/hpagent-1.2.0.tgz", "integrity": "sha512-A91dYTeIB6NoXG+PxTQpCCDDnfHsW9kc06Lvpu1TEe9gnd6ZFeiBoRO9JvzEv6xK7EX97/dUE8g/vBMTqTS3CA==", "license": "MIT", "engines": { "node": ">=14" } }, "node_modules/ieee754": { "version": "1.2.1", "resolved": "https://registry.npmjs.org/ieee754/-/ieee754-1.2.1.tgz", "integrity": "sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA==", "funding": [ { "type": "github", "url": "https://github.com/sponsors/feross" }, { "type": "patreon", "url": "https://www.patreon.com/feross" }, { "type": "consulting", "url": "https://feross.org/support" } ] }, 
"node_modules/ignore": { "version": "5.3.1", "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.3.1.tgz", "integrity": "sha512-5Fytz/IraMjqpwfd34ke28PTVMjZjJG2MPn5t7OE4eUCUNf8BAa7b5WUS9/Qvr6mwOQS7Mk6vdsMno5he+T8Xw==", "engines": { "node": ">= 4" } }, "node_modules/inflight": { "version": "1.0.6", "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", "integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==", "dependencies": { "once": "^1.3.0", "wrappy": "1" } }, "node_modules/inherits": { "version": "2.0.4", "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==" }, "node_modules/is-absolute": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/is-absolute/-/is-absolute-1.0.0.tgz", "integrity": "sha512-dOWoqflvcydARa360Gvv18DZ/gRuHKi2NU/wU5X1ZFzdYfH29nkiNZsF3mp4OJ3H4yo9Mx8A/uAGNzpzPN3yBA==", "dependencies": { "is-relative": "^1.0.0", "is-windows": "^1.0.1" }, "engines": { "node": ">=0.10.0" } }, "node_modules/is-callable": { "version": "1.2.7", "resolved": "https://registry.npmjs.org/is-callable/-/is-callable-1.2.7.tgz", "integrity": "sha512-1BC0BVFhS/p0qtw6enp8e+8OD0UrK0oFLztSjNzhcKA3WDuJxxAPXzPuPtKkjEY9UUoEWlX/8fgKeu2S8i9JTA==", "license": "MIT", "engines": { "node": ">= 0.4" }, "funding": { "url": "https://github.com/sponsors/ljharb" } }, "node_modules/is-extglob": { "version": "2.1.1", "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==", "engines": { "node": ">=0.10.0" } }, "node_modules/is-glob": { "version": "4.0.3", "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", "dependencies": { 
"is-extglob": "^2.1.1" }, "engines": { "node": ">=0.10.0" } }, "node_modules/is-natural-number": { "version": "4.0.1", "resolved": "https://registry.npmjs.org/is-natural-number/-/is-natural-number-4.0.1.tgz", "integrity": "sha512-Y4LTamMe0DDQIIAlaer9eKebAlDSV6huy+TWhJVPlzZh2o4tRP5SQWFlLn5N0To4mDD22/qdOq+veo1cSISLgQ==" }, "node_modules/is-negated-glob": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/is-negated-glob/-/is-negated-glob-1.0.0.tgz", "integrity": "sha512-czXVVn/QEmgvej1f50BZ648vUI+em0xqMq2Sn+QncCLN4zj1UAxlT+kw/6ggQTOaZPd1HqKQGEqbpQVtJucWug==", "engines": { "node": ">=0.10.0" } }, "node_modules/is-number": { "version": "7.0.0", "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", "license": "MIT", "engines": { "node": ">=0.12.0" } }, "node_modules/is-relative": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/is-relative/-/is-relative-1.0.0.tgz", "integrity": "sha512-Kw/ReK0iqwKeu0MITLFuj0jbPAmEiOsIwyIXvvbfa6QfmN9pkD1M+8pdk7Rl/dTKbH34/XBFMbgD4iMJhLQbGA==", "dependencies": { "is-unc-path": "^1.0.0" }, "engines": { "node": ">=0.10.0" } }, "node_modules/is-stream": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-1.1.0.tgz", "integrity": "sha512-uQPm8kcs47jx38atAcWTVxyltQYoPT68y9aWYdV6yWXSyW8mzSat0TL6CiWdZeCdF3KrAvpVtnHbTv4RN+rqdQ==", "engines": { "node": ">=0.10.0" } }, "node_modules/is-typed-array": { "version": "1.1.15", "resolved": "https://registry.npmjs.org/is-typed-array/-/is-typed-array-1.1.15.tgz", "integrity": "sha512-p3EcsicXjit7SaskXHs1hA91QxgTw46Fv6EFKKGS5DRFLD8yKnohjF3hxoju94b/OcMZoQukzpPpBE9uLVKzgQ==", "license": "MIT", "dependencies": { "which-typed-array": "^1.1.16" }, "engines": { "node": ">= 0.4" }, "funding": { "url": "https://github.com/sponsors/ljharb" } }, "node_modules/is-unc-path": { "version": "1.0.0", "resolved": 
"https://registry.npmjs.org/is-unc-path/-/is-unc-path-1.0.0.tgz", "integrity": "sha512-mrGpVd0fs7WWLfVsStvgF6iEJnbjDFZh9/emhRDcGWTduTfNHd9CHeUwH3gYIjdbwo4On6hunkztwOaAw0yllQ==", "dependencies": { "unc-path-regex": "^0.1.2" }, "engines": { "node": ">=0.10.0" } }, "node_modules/is-windows": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/is-windows/-/is-windows-1.0.2.tgz", "integrity": "sha512-eXK1UInq2bPmjyX6e3VHIzMLobc4J94i4AWn+Hpq3OU5KkrRC96OAcR3PRJ/pGu6m8TRnBHP9dkXQVsT/COVIA==", "engines": { "node": ">=0.10.0" } }, "node_modules/isarray": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz", "integrity": "sha512-VLghIWNM6ELQzo7zwmcg0NmTVyWKYjvIeM83yjp0wRDTmUnrM678fQbcKBo6n2CJEF0szoG//ytg+TKla89ALQ==" }, "node_modules/isomorphic-git": { "version": "1.25.10", "resolved": "https://registry.npmjs.org/isomorphic-git/-/isomorphic-git-1.25.10.tgz", "integrity": "sha512-IxGiaKBwAdcgBXwIcxJU6rHLk+NrzYaaPKXXQffcA0GW3IUrQXdUPDXDo+hkGVcYruuz/7JlGBiuaeTCgIgivQ==", "license": "MIT", "dependencies": { "async-lock": "^1.4.1", "clean-git-ref": "^2.0.1", "crc-32": "^1.2.0", "diff3": "0.0.3", "ignore": "^5.1.4", "minimisted": "^2.0.0", "pako": "^1.0.10", "pify": "^4.0.1", "readable-stream": "^3.4.0", "sha.js": "^2.4.9", "simple-get": "^4.0.1" }, "bin": { "isogit": "cli.cjs" }, "engines": { "node": ">=12" } }, "node_modules/isomorphic-git/node_modules/pify": { "version": "4.0.1", "resolved": "https://registry.npmjs.org/pify/-/pify-4.0.1.tgz", "integrity": "sha512-uB80kBFb/tfd68bVleG9T5GGsGPjJrLAUpR5PZIrhBnIaRTQRjqdJSsIKkOP6OAIFbj7GOrcudc5pNjZ+geV2g==", "license": "MIT", "engines": { "node": ">=6" } }, "node_modules/joycon": { "version": "3.1.1", "resolved": "https://registry.npmjs.org/joycon/-/joycon-3.1.1.tgz", "integrity": "sha512-34wB/Y7MW7bzjKRjUKTa46I2Z7eV62Rkhva+KkopW7Qvv/OSWBqvkSY7vusOPrNuZcUG3tApvdVgNB8POj3SPw==", "license": "MIT", "engines": { "node": ">=10" } }, "node_modules/js-yaml": { "version": "4.1.1", "resolved": 
"https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.1.tgz", "integrity": "sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA==", "license": "MIT", "dependencies": { "argparse": "^2.0.1" }, "bin": { "js-yaml": "bin/js-yaml.js" } }, "node_modules/json-stable-stringify-without-jsonify": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz", "integrity": "sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==" }, "node_modules/json5": { "version": "2.2.3", "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz", "integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==", "license": "MIT", "bin": { "json5": "lib/cli.js" }, "engines": { "node": ">=6" } }, "node_modules/lazystream": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/lazystream/-/lazystream-1.0.1.tgz", "integrity": "sha512-b94GiNHQNy6JNTrt5w6zNyffMrNkXZb3KTkCZJb2V1xaEGCk093vkZ2jk3tpaeP33/OiXC+WvK9AxUebnf5nbw==", "dependencies": { "readable-stream": "^2.0.5" }, "engines": { "node": ">= 0.6.3" } }, "node_modules/lazystream/node_modules/readable-stream": { "version": "2.3.8", "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.8.tgz", "integrity": "sha512-8p0AUk4XODgIewSi0l8Epjs+EVnWiK7NoDIEGU0HhE7+ZyY8D1IMY7odu5lRrFXGg71L15KG8QrPmum45RTtdA==", "dependencies": { "core-util-is": "~1.0.0", "inherits": "~2.0.3", "isarray": "~1.0.0", "process-nextick-args": "~2.0.0", "safe-buffer": "~5.1.1", "string_decoder": "~1.1.1", "util-deprecate": "~1.0.1" } }, "node_modules/lazystream/node_modules/safe-buffer": { "version": "5.1.2", "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==" }, 
"node_modules/lazystream/node_modules/string_decoder": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", "dependencies": { "safe-buffer": "~5.1.0" } }, "node_modules/lodash.clonedeep": { "version": "4.5.0", "resolved": "https://registry.npmjs.org/lodash.clonedeep/-/lodash.clonedeep-4.5.0.tgz", "integrity": "sha512-H5ZhCF25riFd9uB5UCkVKo61m3S/xZk1x4wA6yp/L3RFP6Z/eHH1ymQcGLo7J3GMPfm0V/7m1tryHuGVxpqEBQ==", "license": "MIT" }, "node_modules/lodash.defaults": { "version": "4.2.0", "resolved": "https://registry.npmjs.org/lodash.defaults/-/lodash.defaults-4.2.0.tgz", "integrity": "sha512-qjxPLHd3r5DnsdGacqOMU6pb/avJzdh9tFX2ymgoZE27BmjXrNy/y4LoaiTeAb+O3gL8AfpJGtqfX/ae2leYYQ==" }, "node_modules/lodash.difference": { "version": "4.5.0", "resolved": "https://registry.npmjs.org/lodash.difference/-/lodash.difference-4.5.0.tgz", "integrity": "sha512-dS2j+W26TQ7taQBGN8Lbbq04ssV3emRw4NY58WErlTO29pIqS0HmoT5aJ9+TUQ1N3G+JOZSji4eugsWwGp9yPA==" }, "node_modules/lodash.flatten": { "version": "4.4.0", "resolved": "https://registry.npmjs.org/lodash.flatten/-/lodash.flatten-4.4.0.tgz", "integrity": "sha512-C5N2Z3DgnnKr0LOpv/hKCgKdb7ZZwafIrsesve6lmzvZIRZRGaZ/l6Q8+2W7NaT+ZwO3fFlSCzCzrDCFdJfZ4g==" }, "node_modules/lodash.isplainobject": { "version": "4.0.6", "resolved": "https://registry.npmjs.org/lodash.isplainobject/-/lodash.isplainobject-4.0.6.tgz", "integrity": "sha512-oSXzaWypCMHkPC3NvBEaPHf0KsA5mvPrOPgQWDsbg8n7orZ290M0BmC/jgRZ4vcJ6DTAhjrsSYgdsW/F+MFOBA==" }, "node_modules/lodash.union": { "version": "4.6.0", "resolved": "https://registry.npmjs.org/lodash.union/-/lodash.union-4.6.0.tgz", "integrity": "sha512-c4pB2CdGrGdjMKYLA+XiRDO7Y0PRQbm/Gzg8qMj+QH+pFVAoTp5sBpO0odL3FjoPCGjK96p6qsP+yQoiLoOBcw==" }, "node_modules/make-dir": { "version": "1.3.0", "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-1.3.0.tgz", 
"integrity": "sha512-2w31R7SJtieJJnQtGc7RVL2StM2vGYVfqUOvUDxH6bC6aJTxPxTF0GnIgCyu7tjockiUWAYQRbxa7vKn34s5sQ==", "dependencies": { "pify": "^3.0.0" }, "engines": { "node": ">=4" } }, "node_modules/make-dir/node_modules/pify": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/pify/-/pify-3.0.0.tgz", "integrity": "sha512-C3FsVNH1udSEX48gGX1xfvwTWfsYWj5U+8/uK15BGzIGrKoUpghX8hWZwa/OFnakBiiVNmBvemTJR5mcy7iPcg==", "engines": { "node": ">=4" } }, "node_modules/math-intrinsics": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz", "integrity": "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==", "license": "MIT", "engines": { "node": ">= 0.4" } }, "node_modules/merge2": { "version": "1.4.1", "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz", "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==", "license": "MIT", "engines": { "node": ">= 8" } }, "node_modules/micromatch": { "version": "4.0.8", "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.8.tgz", "integrity": "sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==", "license": "MIT", "dependencies": { "braces": "^3.0.3", "picomatch": "^2.3.1" }, "engines": { "node": ">=8.6" } }, "node_modules/micromatch/node_modules/picomatch": { "version": "2.3.1", "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", "license": "MIT", "engines": { "node": ">=8.6" }, "funding": { "url": "https://github.com/sponsors/jonschlinkert" } }, "node_modules/mime-db": { "version": "1.52.0", "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", 
"license": "MIT", "engines": { "node": ">= 0.6" } }, "node_modules/mime-types": { "version": "2.1.35", "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", "license": "MIT", "dependencies": { "mime-db": "1.52.0" }, "engines": { "node": ">= 0.6" } }, "node_modules/mimic-response": { "version": "3.1.0", "resolved": "https://registry.npmjs.org/mimic-response/-/mimic-response-3.1.0.tgz", "integrity": "sha512-z0yWI+4FDrrweS8Zmt4Ej5HdJmky15+L2e6Wgn3+iK5fWzb6T3fhNFq2+MeTRb064c6Wr4N/wv0DzQTjNzHNGQ==", "engines": { "node": ">=10" }, "funding": { "url": "https://github.com/sponsors/sindresorhus" } }, "node_modules/minimatch": { "version": "3.1.2", "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", "dependencies": { "brace-expansion": "^1.1.7" }, "engines": { "node": "*" } }, "node_modules/minimist": { "version": "1.2.8", "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.8.tgz", "integrity": "sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==", "funding": { "url": "https://github.com/sponsors/ljharb" } }, "node_modules/minimisted": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/minimisted/-/minimisted-2.0.1.tgz", "integrity": "sha512-1oPjfuLQa2caorJUM8HV8lGgWCc0qqAO1MNv/k05G4qslmsndV/5WdNZrqCiyqiz3wohia2Ij2B7w2Dr7/IyrA==", "dependencies": { "minimist": "^1.2.5" } }, "node_modules/multi-progress": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/multi-progress/-/multi-progress-4.0.0.tgz", "integrity": "sha512-9zcjyOou3FFCKPXsmkbC3ethv51SFPoA4dJD6TscIp2pUmy26kBDZW6h9XofPELrzseSkuD7r0V+emGEeo39Pg==", "license": "MIT", "peerDependencies": { "progress": "^2.0.0" } }, "node_modules/neo-async": { "version": "2.6.2", "resolved": 
"https://registry.npmjs.org/neo-async/-/neo-async-2.6.2.tgz", "integrity": "sha512-Yd3UES5mWCSqR+qNT93S3UoYUkqAZ9lLg8a7g9rimsWmYGK8cVToA4/sF3RrshdyV3sAGMXVUmpMYOw+dLpOuw==" }, "node_modules/node-gzip": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/node-gzip/-/node-gzip-1.1.2.tgz", "integrity": "sha512-ZB6zWpfZHGtxZnPMrJSKHVPrRjURoUzaDbLFj3VO70mpLTW5np96vXyHwft4Id0o+PYIzgDkBUjIzaNHhQ8srw==" }, "node_modules/normalize-path": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", "engines": { "node": ">=0.10.0" } }, "node_modules/object-assign": { "version": "4.1.1", "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", "integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==", "engines": { "node": ">=0.10.0" } }, "node_modules/on-exit-leak-free": { "version": "2.1.2", "resolved": "https://registry.npmjs.org/on-exit-leak-free/-/on-exit-leak-free-2.1.2.tgz", "integrity": "sha512-0eJJY6hXLGf1udHwfNftBqH+g73EU4B504nZeKpz1sYRKafAghwxEJunB2O7rDZkL4PGfsMVnTXZ2EjibbqcsA==", "license": "MIT", "engines": { "node": ">=14.0.0" } }, "node_modules/once": { "version": "1.4.0", "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", "dependencies": { "wrappy": "1" } }, "node_modules/ordered-read-streams": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/ordered-read-streams/-/ordered-read-streams-1.0.1.tgz", "integrity": "sha512-Z87aSjx3r5c0ZB7bcJqIgIRX5bxR7A4aSzvIbaxd0oTkWBCOoKfuGHiKj60CHVUgg1Phm5yMZzBdt8XqRs73Mw==", "dependencies": { "readable-stream": "^2.0.1" } }, "node_modules/ordered-read-streams/node_modules/readable-stream": { "version": "2.3.8", "resolved": 
"https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.8.tgz", "integrity": "sha512-8p0AUk4XODgIewSi0l8Epjs+EVnWiK7NoDIEGU0HhE7+ZyY8D1IMY7odu5lRrFXGg71L15KG8QrPmum45RTtdA==", "dependencies": { "core-util-is": "~1.0.0", "inherits": "~2.0.3", "isarray": "~1.0.0", "process-nextick-args": "~2.0.0", "safe-buffer": "~5.1.1", "string_decoder": "~1.1.1", "util-deprecate": "~1.0.1" } }, "node_modules/ordered-read-streams/node_modules/safe-buffer": { "version": "5.1.2", "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==" }, "node_modules/ordered-read-streams/node_modules/string_decoder": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", "dependencies": { "safe-buffer": "~5.1.0" } }, "node_modules/pako": { "version": "1.0.11", "resolved": "https://registry.npmjs.org/pako/-/pako-1.0.11.tgz", "integrity": "sha512-4hLB8Py4zZce5s4yd9XzopqwVv/yGNhV1Bl8NTmCq1763HeK2+EwVTv+leGeL13Dnh2wfbqowVPXCIO0z4taYw==" }, "node_modules/path-is-absolute": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", "integrity": "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==", "engines": { "node": ">=0.10.0" } }, "node_modules/pend": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/pend/-/pend-1.2.0.tgz", "integrity": "sha512-F3asv42UuXchdzt+xXqfW1OGlVBe+mxa2mqI0pg5yAHZPvFmY3Y6drSf/GQ1A86WgWEN9Kzh/WrgKa6iGcHXLg==" }, "node_modules/picomatch": { "version": "4.0.3", "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz", "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", "license": "MIT", "engines": { 
"node": ">=12" }, "funding": { "url": "https://github.com/sponsors/jonschlinkert" } }, "node_modules/pify": { "version": "2.3.0", "resolved": "https://registry.npmjs.org/pify/-/pify-2.3.0.tgz", "integrity": "sha512-udgsAY+fTnvv7kI7aaxbqwWNb0AHiB0qBO89PZKPkoTmGOgdbrHDKD+0B2X4uTfJ/FT1R09r9gTsjUjNJotuog==", "engines": { "node": ">=0.10.0" } }, "node_modules/pinkie": { "version": "2.0.4", "resolved": "https://registry.npmjs.org/pinkie/-/pinkie-2.0.4.tgz", "integrity": "sha512-MnUuEycAemtSaeFSjXKW/aroV7akBbY+Sv+RkyqFjgAe73F+MR0TBWKBRDkmfWq/HiFmdavfZ1G7h4SPZXaCSg==", "engines": { "node": ">=0.10.0" } }, "node_modules/pinkie-promise": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/pinkie-promise/-/pinkie-promise-2.0.1.tgz", "integrity": "sha512-0Gni6D4UcLTbv9c57DfxDGdr41XfgUjqWZu492f0cIGr16zDU06BWP/RAEvOuo7CQ0CNjHaLlM59YJJFm3NWlw==", "dependencies": { "pinkie": "^2.0.0" }, "engines": { "node": ">=0.10.0" } }, "node_modules/pino": { "version": "9.2.0", "resolved": "https://registry.npmjs.org/pino/-/pino-9.2.0.tgz", "integrity": "sha512-g3/hpwfujK5a4oVbaefoJxezLzsDgLcNJeITvC6yrfwYeT9la+edCK42j5QpEQSQCZgTKapXvnQIdgZwvRaZug==", "license": "MIT", "dependencies": { "atomic-sleep": "^1.0.0", "fast-redact": "^3.1.1", "on-exit-leak-free": "^2.1.0", "pino-abstract-transport": "^1.2.0", "pino-std-serializers": "^7.0.0", "process-warning": "^3.0.0", "quick-format-unescaped": "^4.0.3", "real-require": "^0.2.0", "safe-stable-stringify": "^2.3.1", "sonic-boom": "^4.0.1", "thread-stream": "^3.0.0" }, "bin": { "pino": "bin.js" } }, "node_modules/pino-abstract-transport": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/pino-abstract-transport/-/pino-abstract-transport-1.2.0.tgz", "integrity": "sha512-Guhh8EZfPCfH+PMXAb6rKOjGQEoy0xlAIn+irODG5kgfYV+BQ0rGYYWTIel3P5mmyXqkYkPmdIkywsn6QKUR1Q==", "license": "MIT", "dependencies": { "readable-stream": "^4.0.0", "split2": "^4.0.0" } }, "node_modules/pino-abstract-transport/node_modules/buffer": { "version": "6.0.3", 
"resolved": "https://registry.npmjs.org/buffer/-/buffer-6.0.3.tgz", "integrity": "sha512-FTiCpNxtwiZZHEZbcbTIcZjERVICn9yq/pDFkTl95/AxzD1naBctN7YO68riM/gLSDY7sdrMby8hofADYuuqOA==", "funding": [ { "type": "github", "url": "https://github.com/sponsors/feross" }, { "type": "patreon", "url": "https://www.patreon.com/feross" }, { "type": "consulting", "url": "https://feross.org/support" } ], "license": "MIT", "dependencies": { "base64-js": "^1.3.1", "ieee754": "^1.2.1" } }, "node_modules/pino-abstract-transport/node_modules/readable-stream": { "version": "4.7.0", "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-4.7.0.tgz", "integrity": "sha512-oIGGmcpTLwPga8Bn6/Z75SVaH1z5dUut2ibSyAMVhmUggWpmDn2dapB0n7f8nwaSiRtepAsfJyfXIO5DCVAODg==", "license": "MIT", "dependencies": { "abort-controller": "^3.0.0", "buffer": "^6.0.3", "events": "^3.3.0", "process": "^0.11.10", "string_decoder": "^1.3.0" }, "engines": { "node": "^12.22.0 || ^14.17.0 || >=16.0.0" } }, "node_modules/pino-pretty": { "version": "11.2.2", "resolved": "https://registry.npmjs.org/pino-pretty/-/pino-pretty-11.2.2.tgz", "integrity": "sha512-2FnyGir8nAJAqD3srROdrF1J5BIcMT4nwj7hHSc60El6Uxlym00UbCCd8pYIterstVBFlMyF1yFV8XdGIPbj4A==", "license": "MIT", "dependencies": { "colorette": "^2.0.7", "dateformat": "^4.6.3", "fast-copy": "^3.0.2", "fast-safe-stringify": "^2.1.1", "help-me": "^5.0.0", "joycon": "^3.1.1", "minimist": "^1.2.6", "on-exit-leak-free": "^2.1.0", "pino-abstract-transport": "^1.0.0", "pump": "^3.0.0", "readable-stream": "^4.0.0", "secure-json-parse": "^2.4.0", "sonic-boom": "^4.0.1", "strip-json-comments": "^3.1.1" }, "bin": { "pino-pretty": "bin.js" } }, "node_modules/pino-pretty/node_modules/buffer": { "version": "6.0.3", "resolved": "https://registry.npmjs.org/buffer/-/buffer-6.0.3.tgz", "integrity": "sha512-FTiCpNxtwiZZHEZbcbTIcZjERVICn9yq/pDFkTl95/AxzD1naBctN7YO68riM/gLSDY7sdrMby8hofADYuuqOA==", "funding": [ { "type": "github", "url": "https://github.com/sponsors/feross" }, 
{ "type": "patreon", "url": "https://www.patreon.com/feross" }, { "type": "consulting", "url": "https://feross.org/support" } ], "license": "MIT", "dependencies": { "base64-js": "^1.3.1", "ieee754": "^1.2.1" } }, "node_modules/pino-pretty/node_modules/readable-stream": { "version": "4.7.0", "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-4.7.0.tgz", "integrity": "sha512-oIGGmcpTLwPga8Bn6/Z75SVaH1z5dUut2ibSyAMVhmUggWpmDn2dapB0n7f8nwaSiRtepAsfJyfXIO5DCVAODg==", "license": "MIT", "dependencies": { "abort-controller": "^3.0.0", "buffer": "^6.0.3", "events": "^3.3.0", "process": "^0.11.10", "string_decoder": "^1.3.0" }, "engines": { "node": "^12.22.0 || ^14.17.0 || >=16.0.0" } }, "node_modules/pino-std-serializers": { "version": "7.0.0", "resolved": "https://registry.npmjs.org/pino-std-serializers/-/pino-std-serializers-7.0.0.tgz", "integrity": "sha512-e906FRY0+tV27iq4juKzSYPbUj2do2X2JX4EzSca1631EB2QJQUqGbDuERal7LCtOpxl6x3+nvo9NPZcmjkiFA==", "license": "MIT" }, "node_modules/possible-typed-array-names": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/possible-typed-array-names/-/possible-typed-array-names-1.1.0.tgz", "integrity": "sha512-/+5VFTchJDoVj3bhoqi6UeymcD00DAwb1nJwamzPvHEszJ4FpF6SNNbUbOS8yI56qHzdV8eK0qEfOSiodkTdxg==", "license": "MIT", "engines": { "node": ">= 0.4" } }, "node_modules/process": { "version": "0.11.10", "resolved": "https://registry.npmjs.org/process/-/process-0.11.10.tgz", "integrity": "sha512-cdGef/drWFoydD1JsMzuFf8100nZl+GT+yacc2bEced5f9Rjk4z+WtFUTBu9PhOi9j/jfmBPu0mMEY4wIdAF8A==", "license": "MIT", "engines": { "node": ">= 0.6.0" } }, "node_modules/process-nextick-args": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.1.tgz", "integrity": "sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag==" }, "node_modules/process-warning": { "version": "3.0.0", "resolved": 
"https://registry.npmjs.org/process-warning/-/process-warning-3.0.0.tgz", "integrity": "sha512-mqn0kFRl0EoqhnL0GQ0veqFHyIN1yig9RHh/InzORTUiZHFRAur+aMtRkELNwGs9aNwKS6tg/An4NYBPGwvtzQ==", "license": "MIT" }, "node_modules/progress": { "version": "2.0.3", "resolved": "https://registry.npmjs.org/progress/-/progress-2.0.3.tgz", "integrity": "sha512-7PiHtLll5LdnKIMw100I+8xJXR5gW2QwWYkT6iJva0bXitZKa/XMrSbdmg3r2Xnaidz9Qumd0VPaMrZlF9V9sA==", "license": "MIT", "engines": { "node": ">=0.4.0" } }, "node_modules/pump": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/pump/-/pump-3.0.0.tgz", "integrity": "sha512-LwZy+p3SFs1Pytd/jYct4wpv49HiYCqd9Rlc5ZVdk0V+8Yzv6jR5Blk3TRmPL1ft69TxP0IMZGJ+WPFU2BFhww==", "dependencies": { "end-of-stream": "^1.1.0", "once": "^1.3.1" } }, "node_modules/pumpify": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/pumpify/-/pumpify-2.0.1.tgz", "integrity": "sha512-m7KOje7jZxrmutanlkS1daj1dS6z6BgslzOXmcSEpIlCxM3VJH7lG5QLeck/6hgF6F4crFf01UtQmNsJfweTAw==", "dependencies": { "duplexify": "^4.1.1", "inherits": "^2.0.3", "pump": "^3.0.0" } }, "node_modules/queue-microtask": { "version": "1.2.3", "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz", "integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==", "funding": [ { "type": "github", "url": "https://github.com/sponsors/feross" }, { "type": "patreon", "url": "https://www.patreon.com/feross" }, { "type": "consulting", "url": "https://feross.org/support" } ], "license": "MIT" }, "node_modules/quick-format-unescaped": { "version": "4.0.4", "resolved": "https://registry.npmjs.org/quick-format-unescaped/-/quick-format-unescaped-4.0.4.tgz", "integrity": "sha512-tYC1Q1hgyRuHgloV/YXs2w15unPVh8qfu/qCTfhTYamaw7fyhumKa2yGpdSo87vY32rIclj+4fWYQXUMs9EHvg==", "license": "MIT" }, "node_modules/readable-stream": { "version": "3.6.2", "resolved": 
"https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.2.tgz", "integrity": "sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==", "dependencies": { "inherits": "^2.0.3", "string_decoder": "^1.1.1", "util-deprecate": "^1.0.1" }, "engines": { "node": ">= 6" } }, "node_modules/readdir-glob": { "version": "1.1.3", "resolved": "https://registry.npmjs.org/readdir-glob/-/readdir-glob-1.1.3.tgz", "integrity": "sha512-v05I2k7xN8zXvPD9N+z/uhXPaj0sUFCe2rcWZIpBsqxfP7xXFQ0tipAd/wjj1YxWyWtUS5IDJpOG82JKt2EAVA==", "dependencies": { "minimatch": "^5.1.0" } }, "node_modules/readdir-glob/node_modules/brace-expansion": { "version": "2.0.2", "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz", "integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==", "license": "MIT", "dependencies": { "balanced-match": "^1.0.0" } }, "node_modules/readdir-glob/node_modules/minimatch": { "version": "5.1.6", "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-5.1.6.tgz", "integrity": "sha512-lKwV/1brpG6mBUFHtb7NUmtABCb2WZZmm2wNiOA5hAb8VdCS4B3dtMWyvcoViccwAW/COERjXLt0zP1zXUN26g==", "dependencies": { "brace-expansion": "^2.0.1" }, "engines": { "node": ">=10" } }, "node_modules/real-require": { "version": "0.2.0", "resolved": "https://registry.npmjs.org/real-require/-/real-require-0.2.0.tgz", "integrity": "sha512-57frrGM/OCTLqLOAh0mhVA9VBMHd+9U7Zb2THMGdBUoZVOtGbJzjxsYGDJ3A9AYYCP4hn6y1TVbaOfzWtm5GFg==", "license": "MIT", "engines": { "node": ">= 12.13.0" } }, "node_modules/regenerator-runtime": { "version": "0.14.1", "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.14.1.tgz", "integrity": "sha512-dYnhHh0nJoMfnkZs6GmmhFknAGRrLznOu5nc9ML+EJxGvrx6H7teuevqVqCuPcPK//3eDrrjQhehXVx9cnkGdw==" }, "node_modules/remove-trailing-separator": { "version": "1.1.0", "resolved": 
"https://registry.npmjs.org/remove-trailing-separator/-/remove-trailing-separator-1.1.0.tgz", "integrity": "sha512-/hS+Y0u3aOfIETiaiirUFwDBDzmXPvO+jAfKTitUngIPzdKc6Z0LoFjM/CK5PL4C+eKwHohlHAb6H0VFfmmUsw==" }, "node_modules/replace-ext": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/replace-ext/-/replace-ext-2.0.0.tgz", "integrity": "sha512-UszKE5KVK6JvyD92nzMn9cDapSk6w/CaFZ96CnmDMUqH9oowfxF/ZjRITD25H4DnOQClLA4/j7jLGXXLVKxAug==", "license": "MIT", "engines": { "node": ">= 10" } }, "node_modules/require-from-string": { "version": "2.0.2", "resolved": "https://registry.npmjs.org/require-from-string/-/require-from-string-2.0.2.tgz", "integrity": "sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw==", "license": "MIT", "engines": { "node": ">=0.10.0" } }, "node_modules/reusify": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.1.0.tgz", "integrity": "sha512-g6QUff04oZpHs0eG5p83rFLhHeV00ug/Yf9nZM6fLeUrPguBTkTQOdpAWWspMh55TZfVQDPaN3NQJfbVRAxdIw==", "license": "MIT", "engines": { "iojs": ">=1.0.0", "node": ">=0.10.0" } }, "node_modules/run-parallel": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz", "integrity": "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==", "funding": [ { "type": "github", "url": "https://github.com/sponsors/feross" }, { "type": "patreon", "url": "https://www.patreon.com/feross" }, { "type": "consulting", "url": "https://feross.org/support" } ], "license": "MIT", "dependencies": { "queue-microtask": "^1.2.2" } }, "node_modules/safe-buffer": { "version": "5.2.1", "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", "funding": [ { "type": "github", "url": "https://github.com/sponsors/feross" }, { "type": "patreon", "url": 
"https://www.patreon.com/feross" }, { "type": "consulting", "url": "https://feross.org/support" } ] }, "node_modules/safe-stable-stringify": { "version": "2.5.0", "resolved": "https://registry.npmjs.org/safe-stable-stringify/-/safe-stable-stringify-2.5.0.tgz", "integrity": "sha512-b3rppTKm9T+PsVCBEOUR46GWI7fdOs00VKZ1+9c1EWDaDMvjQc6tUwuFyIprgGgTcWoVHSKrU8H31ZHA2e0RHA==", "license": "MIT", "engines": { "node": ">=10" } }, "node_modules/secure-json-parse": { "version": "2.7.0", "resolved": "https://registry.npmjs.org/secure-json-parse/-/secure-json-parse-2.7.0.tgz", "integrity": "sha512-6aU+Rwsezw7VR8/nyvKTx8QpWH9FrcYiXXlqC4z5d5XQBDRqtbfsRjnwGyqbi3gddNtWHuEk9OANUotL26qKUw==", "license": "BSD-3-Clause" }, "node_modules/seek-bzip": { "version": "1.0.6", "resolved": "https://registry.npmjs.org/seek-bzip/-/seek-bzip-1.0.6.tgz", "integrity": "sha512-e1QtP3YL5tWww8uKaOCQ18UxIT2laNBXHjV/S2WYCiK4udiv8lkG89KRIoCjUagnAmCBurjF4zEVX2ByBbnCjQ==", "dependencies": { "commander": "^2.8.1" }, "bin": { "seek-bunzip": "bin/seek-bunzip", "seek-table": "bin/seek-bzip-table" } }, "node_modules/seek-bzip/node_modules/commander": { "version": "2.20.3", "resolved": "https://registry.npmjs.org/commander/-/commander-2.20.3.tgz", "integrity": "sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ==" }, "node_modules/seroval": { "version": "1.4.2", "resolved": "https://registry.npmjs.org/seroval/-/seroval-1.4.2.tgz", "integrity": "sha512-N3HEHRCZYn3cQbsC4B5ldj9j+tHdf4JZoYPlcI4rRYu0Xy4qN8MQf1Z08EibzB0WpgRG5BGK08FTrmM66eSzKQ==", "license": "MIT", "engines": { "node": ">=10" } }, "node_modules/seroval-plugins": { "version": "1.2.1", "resolved": "https://registry.npmjs.org/seroval-plugins/-/seroval-plugins-1.2.1.tgz", "integrity": "sha512-H5vs53+39+x4Udwp4J5rNZfgFuA+Lt+uU+09w1gYBVWomtAl98B+E9w7yC05Xc81/HgLvJdlyqJbU0fJCKCmdw==", "license": "MIT", "engines": { "node": ">=10" }, "peerDependencies": { "seroval": "^1.0" } }, "node_modules/set-function-length": { 
"version": "1.2.2", "resolved": "https://registry.npmjs.org/set-function-length/-/set-function-length-1.2.2.tgz", "integrity": "sha512-pgRc4hJ4/sNjWCSS9AmnS40x3bNMDTknHgL5UaMBTMyJnU90EgWh1Rz+MC9eFu4BuN/UwZjKQuY/1v3rM7HMfg==", "dependencies": { "define-data-property": "^1.1.4", "es-errors": "^1.3.0", "function-bind": "^1.1.2", "get-intrinsic": "^1.2.4", "gopd": "^1.0.1", "has-property-descriptors": "^1.0.2" }, "engines": { "node": ">= 0.4" } }, "node_modules/sha.js": { "version": "2.4.12", "resolved": "https://registry.npmjs.org/sha.js/-/sha.js-2.4.12.tgz", "integrity": "sha512-8LzC5+bvI45BjpfXU8V5fdU2mfeKiQe1D1gIMn7XUlF3OTUrpdJpPPH4EMAnF0DsHHdSZqCdSss5qCmJKuiO3w==", "license": "(MIT AND BSD-3-Clause)", "dependencies": { "inherits": "^2.0.4", "safe-buffer": "^5.2.1", "to-buffer": "^1.2.0" }, "bin": { "sha.js": "bin.js" }, "engines": { "node": ">= 0.10" }, "funding": { "url": "https://github.com/sponsors/ljharb" } }, "node_modules/should-proxy": { "version": "1.0.4", "resolved": "https://registry.npmjs.org/should-proxy/-/should-proxy-1.0.4.tgz", "integrity": "sha512-RPQhIndEIVUCjkfkQ6rs6sOR6pkxJWCNdxtfG5pP0RVgUYbK5911kLTF0TNcCC0G3YCGd492rMollFT2aTd9iQ==", "license": "MIT" }, "node_modules/simple-concat": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/simple-concat/-/simple-concat-1.0.1.tgz", "integrity": "sha512-cSFtAPtRhljv69IK0hTVZQ+OfE9nePi/rtJmw5UjHeVyVroEqJXP1sFztKUy1qU+xvz3u/sfYJLa947b7nAN2Q==", "funding": [ { "type": "github", "url": "https://github.com/sponsors/feross" }, { "type": "patreon", "url": "https://www.patreon.com/feross" }, { "type": "consulting", "url": "https://feross.org/support" } ] }, "node_modules/simple-get": { "version": "4.0.1", "resolved": "https://registry.npmjs.org/simple-get/-/simple-get-4.0.1.tgz", "integrity": "sha512-brv7p5WgH0jmQJr1ZDDfKDOSeWWg+OVypG99A/5vYGPqJ6pxiaHLy8nxtFjBA7oMa01ebA9gfh1uMCFqOuXxvA==", "funding": [ { "type": "github", "url": "https://github.com/sponsors/feross" }, { "type": "patreon", "url": 
"https://www.patreon.com/feross" }, { "type": "consulting", "url": "https://feross.org/support" } ], "dependencies": { "decompress-response": "^6.0.0", "once": "^1.3.1", "simple-concat": "^1.0.0" } }, "node_modules/solid-js": { "version": "1.9.5", "resolved": "https://registry.npmjs.org/solid-js/-/solid-js-1.9.5.tgz", "integrity": "sha512-ogI3DaFcyn6UhYhrgcyRAMbu/buBJitYQASZz5WzfQVPP10RD2AbCoRZ517psnezrasyCbWzIxZ6kVqet768xw==", "license": "MIT", "dependencies": { "csstype": "^3.1.0", "seroval": "^1.1.0", "seroval-plugins": "^1.1.0" } }, "node_modules/sonic-boom": { "version": "4.0.1", "resolved": "https://registry.npmjs.org/sonic-boom/-/sonic-boom-4.0.1.tgz", "integrity": "sha512-hTSD/6JMLyT4r9zeof6UtuBDpjJ9sO08/nmS5djaA9eozT9oOlNdpXSnzcgj4FTqpk3nkLrs61l4gip9r1HCrQ==", "license": "MIT", "dependencies": { "atomic-sleep": "^1.0.0" } }, "node_modules/source-map": { "version": "0.6.1", "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", "engines": { "node": ">=0.10.0" } }, "node_modules/split2": { "version": "4.2.0", "resolved": "https://registry.npmjs.org/split2/-/split2-4.2.0.tgz", "integrity": "sha512-UcjcJOWknrNkF6PLX83qcHM6KHgVKNkV62Y8a5uYDVv9ydGQVwAHMKqHdJje1VTWpljG0WYpCDhrCdAOYH4TWg==", "license": "ISC", "engines": { "node": ">= 10.x" } }, "node_modules/stream-shift": { "version": "1.0.3", "resolved": "https://registry.npmjs.org/stream-shift/-/stream-shift-1.0.3.tgz", "integrity": "sha512-76ORR0DO1o1hlKwTbi/DM3EXWGf3ZJYO8cXX5RJwnul2DEg2oyoZyjLNoQM8WsvZiFKCRfC1O0J7iCvie3RZmQ==" }, "node_modules/streamx": { "version": "2.23.0", "resolved": "https://registry.npmjs.org/streamx/-/streamx-2.23.0.tgz", "integrity": "sha512-kn+e44esVfn2Fa/O0CPFcex27fjIL6MkVae0Mm6q+E6f0hWv578YCERbv+4m02cjxvDsPKLnmxral/rR6lBMAg==", "license": "MIT", "dependencies": { "events-universal": "^1.0.0", "fast-fifo": "^1.3.2", "text-decoder": "^1.1.0" } }, 
"node_modules/string_decoder": { "version": "1.3.0", "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz", "integrity": "sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==", "dependencies": { "safe-buffer": "~5.2.0" } }, "node_modules/strip-dirs": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/strip-dirs/-/strip-dirs-2.1.0.tgz", "integrity": "sha512-JOCxOeKLm2CAS73y/U4ZeZPTkE+gNVCzKt7Eox84Iej1LT/2pTWYpZKJuxwQpvX1LiZb1xokNR7RLfuBAa7T3g==", "dependencies": { "is-natural-number": "^4.0.1" } }, "node_modules/strip-json-comments": { "version": "3.1.1", "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", "license": "MIT", "engines": { "node": ">=8" }, "funding": { "url": "https://github.com/sponsors/sindresorhus" } }, "node_modules/strnum": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/strnum/-/strnum-1.1.2.tgz", "integrity": "sha512-vrN+B7DBIoTTZjnPNewwhx6cBA/H+IS7rfW68n7XxC1y7uoiGQBxaKzqucGUgavX15dJgiGztLJ8vxuEzwqBdA==", "funding": [ { "type": "github", "url": "https://github.com/sponsors/NaturalIntelligence" } ], "license": "MIT" }, "node_modules/tar-stream": { "version": "2.2.0", "resolved": "https://registry.npmjs.org/tar-stream/-/tar-stream-2.2.0.tgz", "integrity": "sha512-ujeqbceABgwMZxEJnk2HDY2DlnUZ+9oEcb1KzTVfYHio0UE6dG71n60d8D2I4qNvleWrrXpmjpt7vZeF1LnMZQ==", "dependencies": { "bl": "^4.0.3", "end-of-stream": "^1.4.1", "fs-constants": "^1.0.0", "inherits": "^2.0.3", "readable-stream": "^3.1.1" }, "engines": { "node": ">=6" } }, "node_modules/teex": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/teex/-/teex-1.0.1.tgz", "integrity": "sha512-eYE6iEI62Ni1H8oIa7KlDU6uQBtqr4Eajni3wX7rpfXD8ysFx8z0+dri+KWEPWpBsxXfxu58x/0jvTVT1ekOSg==", "license": "MIT", "dependencies": { "streamx": "^2.12.5" } }, 
"node_modules/text-decoder": { "version": "1.2.3", "resolved": "https://registry.npmjs.org/text-decoder/-/text-decoder-1.2.3.tgz", "integrity": "sha512-3/o9z3X0X0fTupwsYvR03pJ/DjWuqqrfwBgTQzdWDiQSm9KitAyz/9WqsT2JQW7KV2m+bC2ol/zqpW37NHxLaA==", "license": "Apache-2.0", "dependencies": { "b4a": "^1.6.4" } }, "node_modules/thread-stream": { "version": "3.1.0", "resolved": "https://registry.npmjs.org/thread-stream/-/thread-stream-3.1.0.tgz", "integrity": "sha512-OqyPZ9u96VohAyMfJykzmivOrY2wfMSf3C5TtFJVgN+Hm6aj+voFhlK+kZEIv2FBh1X6Xp3DlnCOfEQ3B2J86A==", "license": "MIT", "dependencies": { "real-require": "^0.2.0" } }, "node_modules/through": { "version": "2.3.8", "resolved": "https://registry.npmjs.org/through/-/through-2.3.8.tgz", "integrity": "sha512-w89qg7PI8wAdvX60bMDP+bFoD5Dvhm9oLheFp5O4a2QF0cSBGsBX4qZmadPMvVqlLJBBci+WqGGOAPvcDeNSVg==" }, "node_modules/through2": { "version": "2.0.5", "resolved": "https://registry.npmjs.org/through2/-/through2-2.0.5.tgz", "integrity": "sha512-/mrRod8xqpA+IHSLyGCQ2s8SPHiCDEeQJSep1jqLYeEUClOFG2Qsh+4FU6G9VeqpZnGW/Su8LQGc4YKni5rYSQ==", "dependencies": { "readable-stream": "~2.3.6", "xtend": "~4.0.1" } }, "node_modules/through2-filter": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/through2-filter/-/through2-filter-3.0.0.tgz", "integrity": "sha512-jaRjI2WxN3W1V8/FMZ9HKIBXixtiqs3SQSX4/YGIiP3gL6djW48VoZq9tDqeCWs3MT8YY5wb/zli8VW8snY1CA==", "dependencies": { "through2": "~2.0.0", "xtend": "~4.0.0" } }, "node_modules/through2/node_modules/readable-stream": { "version": "2.3.8", "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.8.tgz", "integrity": "sha512-8p0AUk4XODgIewSi0l8Epjs+EVnWiK7NoDIEGU0HhE7+ZyY8D1IMY7odu5lRrFXGg71L15KG8QrPmum45RTtdA==", "dependencies": { "core-util-is": "~1.0.0", "inherits": "~2.0.3", "isarray": "~1.0.0", "process-nextick-args": "~2.0.0", "safe-buffer": "~5.1.1", "string_decoder": "~1.1.1", "util-deprecate": "~1.0.1" } }, "node_modules/through2/node_modules/safe-buffer": { 
"version": "5.1.2", "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==" }, "node_modules/through2/node_modules/string_decoder": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", "dependencies": { "safe-buffer": "~5.1.0" } }, "node_modules/to-absolute-glob": { "version": "2.0.2", "resolved": "https://registry.npmjs.org/to-absolute-glob/-/to-absolute-glob-2.0.2.tgz", "integrity": "sha512-rtwLUQEwT8ZeKQbyFJyomBRYXyE16U5VKuy0ftxLMK/PZb2fkOsg5r9kHdauuVDbsNdIBoC/HCthpidamQFXYA==", "dependencies": { "is-absolute": "^1.0.0", "is-negated-glob": "^1.0.0" }, "engines": { "node": ">=0.10.0" } }, "node_modules/to-buffer": { "version": "1.2.1", "resolved": "https://registry.npmjs.org/to-buffer/-/to-buffer-1.2.1.tgz", "integrity": "sha512-tB82LpAIWjhLYbqjx3X4zEeHN6M8CiuOEy2JY8SEQVdYRe3CCHOFaqrBW1doLDrfpWhplcW7BL+bO3/6S3pcDQ==", "license": "MIT", "dependencies": { "isarray": "^2.0.5", "safe-buffer": "^5.2.1", "typed-array-buffer": "^1.0.3" }, "engines": { "node": ">= 0.4" } }, "node_modules/to-buffer/node_modules/isarray": { "version": "2.0.5", "resolved": "https://registry.npmjs.org/isarray/-/isarray-2.0.5.tgz", "integrity": "sha512-xHjhDr3cNBK0BzdUJSPXZntQUx/mwMS5Rw4A7lPJ90XGAO6ISP/ePDNuo0vhqOZU+UD5JoodwCAAoZQd3FeAKw==", "license": "MIT" }, "node_modules/to-regex-range": { "version": "5.0.1", "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", "license": "MIT", "dependencies": { "is-number": "^7.0.0" }, "engines": { "node": ">=8.0" } }, "node_modules/typed-array-buffer": { "version": "1.0.3", "resolved": 
"https://registry.npmjs.org/typed-array-buffer/-/typed-array-buffer-1.0.3.tgz", "integrity": "sha512-nAYYwfY3qnzX30IkA6AQZjVbtK6duGontcQm1WSG1MD94YLqK0515GNApXkoxKOWMusVssAHWLh9SeaoefYFGw==", "license": "MIT", "dependencies": { "call-bound": "^1.0.3", "es-errors": "^1.3.0", "is-typed-array": "^1.1.14" }, "engines": { "node": ">= 0.4" } }, "node_modules/uglify-js": { "version": "3.17.4", "resolved": "https://registry.npmjs.org/uglify-js/-/uglify-js-3.17.4.tgz", "integrity": "sha512-T9q82TJI9e/C1TAxYvfb16xO120tMVFZrGA3f9/P4424DNu6ypK103y0GPFVa17yotwSyZW5iYXgjYHkGrJW/g==", "optional": true, "bin": { "uglifyjs": "bin/uglifyjs" }, "engines": { "node": ">=0.8.0" } }, "node_modules/unbzip2-stream": { "version": "1.4.3", "resolved": "https://registry.npmjs.org/unbzip2-stream/-/unbzip2-stream-1.4.3.tgz", "integrity": "sha512-mlExGW4w71ebDJviH16lQLtZS32VKqsSfk80GCfUlwT/4/hNRFsoscrF/c++9xinkMzECL1uL9DDwXqFWkruPg==", "dependencies": { "buffer": "^5.2.1", "through": "^2.3.8" } }, "node_modules/unc-path-regex": { "version": "0.1.2", "resolved": "https://registry.npmjs.org/unc-path-regex/-/unc-path-regex-0.1.2.tgz", "integrity": "sha512-eXL4nmJT7oCpkZsHZUOJo8hcX3GbsiDOa0Qu9F646fi8dT3XuSVopVqAcEiVzSKKH7UoDti23wNX3qGFxcW5Qg==", "engines": { "node": ">=0.10.0" } }, "node_modules/unique-stream": { "version": "2.3.1", "resolved": "https://registry.npmjs.org/unique-stream/-/unique-stream-2.3.1.tgz", "integrity": "sha512-2nY4TnBE70yoxHkDli7DMazpWiP7xMdCYqU2nBRO0UB+ZpEkGsSija7MvmvnZFUeC+mrgiUfcHSr3LmRFIg4+A==", "dependencies": { "json-stable-stringify-without-jsonify": "^1.0.1", "through2-filter": "^3.0.0" } }, "node_modules/unxhr": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/unxhr/-/unxhr-1.0.1.tgz", "integrity": "sha512-MAhukhVHyaLGDjyDYhy8gVjWJyhTECCdNsLwlMoGFoNJ3o79fpQhtQuzmAE4IxCMDwraF4cW8ZjpAV0m9CRQbg==", "license": "MIT", "engines": { "node": ">=8.11" } }, "node_modules/util-deprecate": { "version": "1.0.2", "resolved": 
"https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==" }, "node_modules/vinyl": { "version": "3.0.1", "resolved": "https://registry.npmjs.org/vinyl/-/vinyl-3.0.1.tgz", "integrity": "sha512-0QwqXteBNXgnLCdWdvPQBX6FXRHtIH3VhJPTd5Lwn28tJXc34YqSCWUmkOvtJHBmB3gGoPtrOKk3Ts8/kEZ9aA==", "license": "MIT", "dependencies": { "clone": "^2.1.2", "remove-trailing-separator": "^1.1.0", "replace-ext": "^2.0.0", "teex": "^1.0.1" }, "engines": { "node": ">=10.13.0" } }, "node_modules/which-typed-array": { "version": "1.1.19", "resolved": "https://registry.npmjs.org/which-typed-array/-/which-typed-array-1.1.19.tgz", "integrity": "sha512-rEvr90Bck4WZt9HHFC4DJMsjvu7x+r6bImz0/BrbWb7A2djJ8hnZMrWnHo9F8ssv0OMErasDhftrfROTyqSDrw==", "license": "MIT", "dependencies": { "available-typed-arrays": "^1.0.7", "call-bind": "^1.0.8", "call-bound": "^1.0.4", "for-each": "^0.3.5", "get-proto": "^1.0.1", "gopd": "^1.2.0", "has-tostringtag": "^1.0.2" }, "engines": { "node": ">= 0.4" }, "funding": { "url": "https://github.com/sponsors/ljharb" } }, "node_modules/wordwrap": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/wordwrap/-/wordwrap-1.0.0.tgz", "integrity": "sha512-gvVzJFlPycKc5dZN4yPkP8w7Dc37BtP1yczEneOb4uq34pXZcvrtRTmWV8W+Ume+XCxKgbjM+nevkyFPMybd4Q==" }, "node_modules/wrappy": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==" }, "node_modules/xdg-basedir": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/xdg-basedir/-/xdg-basedir-3.0.0.tgz", "integrity": "sha512-1Dly4xqlulvPD3fZUQJLY+FUIeqN3N2MM3uqe4rCJftAvOjFa3jFGfctOgluGx4ahPbUCsZkmJILiP0Vi4T6lQ==", "engines": { "node": ">=4" } }, "node_modules/xtend": { "version": "4.0.2", "resolved": "https://registry.npmjs.org/xtend/-/xtend-4.0.2.tgz", 
"integrity": "sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ==", "engines": { "node": ">=0.4" } }, "node_modules/yargs-parser": { "version": "20.2.9", "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-20.2.9.tgz", "integrity": "sha512-y11nGElTIV+CT3Zv9t7VKl+Q3hTQoT9a1Qzezhhl6Rp21gJ/IVTW7Z3y9EWXhuUBC2Shnf+DX0antecpAwSP8w==", "license": "ISC", "engines": { "node": ">=10" } }, "node_modules/yauzl": { "version": "2.10.0", "resolved": "https://registry.npmjs.org/yauzl/-/yauzl-2.10.0.tgz", "integrity": "sha512-p4a9I6X6nu6IhoGmBqAcbJy1mlC4j27vEPZX9F4L4/vZT3Lyq1VkFHw/V/PUcB9Buo+DG3iHkT0x3Qya58zc3g==", "dependencies": { "buffer-crc32": "~0.2.3", "fd-slicer": "~1.1.0" } }, "node_modules/yazl": { "version": "2.5.1", "resolved": "https://registry.npmjs.org/yazl/-/yazl-2.5.1.tgz", "integrity": "sha512-phENi2PLiHnHb6QBVot+dJnaAZ0xosj7p3fWl+znIjBDlnMI2PsZCJZ306BPTFOaHf5qdDEI8x5qFrSOBN5vrw==", "license": "MIT", "dependencies": { "buffer-crc32": "~0.2.3" } }, "node_modules/zip-stream": { "version": "4.1.1", "resolved": "https://registry.npmjs.org/zip-stream/-/zip-stream-4.1.1.tgz", "integrity": "sha512-9qv4rlDiopXg4E69k+vMHjNN63YFMe9sZMrdlvKnCjlCRWeCBswPPMPUfx+ipsAWq1LXHe70RcbaHdJJpS6hyQ==", "dependencies": { "archiver-utils": "^3.0.4", "compress-commons": "^4.1.2", "readable-stream": "^3.6.0" }, "engines": { "node": ">= 10" } }, "node_modules/zip-stream/node_modules/archiver-utils": { "version": "3.0.4", "resolved": "https://registry.npmjs.org/archiver-utils/-/archiver-utils-3.0.4.tgz", "integrity": "sha512-KVgf4XQVrTjhyWmx6cte4RxonPLR9onExufI1jhvw/MQ4BB6IsZD5gT8Lq+u/+pRkWna/6JoHpiQioaqFP5Rzw==", "dependencies": { "glob": "^7.2.3", "graceful-fs": "^4.2.0", "lazystream": "^1.0.0", "lodash.defaults": "^4.2.0", "lodash.difference": "^4.5.0", "lodash.flatten": "^4.4.0", "lodash.isplainobject": "^4.0.6", "lodash.union": "^4.6.0", "normalize-path": "^3.0.0", "readable-stream": "^3.6.0" }, "engines": { "node": ">= 10" } } } 
}
json
github
https://github.com/spring-projects/spring-boot
antora/package-lock.json
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2016, René Moser <mail@renemoser.net>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

ANSIBLE_METADATA = {'status': ['stableinterface'],
                    'supported_by': 'community',
                    'version': '1.0'}

DOCUMENTATION = '''
---
module: cs_zone
short_description: Manages zones on Apache CloudStack based clouds.
description:
  - Create, update and remove zones.
version_added: "2.1"
author: "René Moser (@resmo)"
options:
  name:
    description:
      - Name of the zone.
    required: true
  id:
    description:
      - uuid of the exising zone.
    default: null
    required: false
  state:
    description:
      - State of the zone.
    required: false
    default: 'present'
    choices: [ 'present', 'enabled', 'disabled', 'absent' ]
  domain:
    description:
      - Domain the zone is related to.
      - Zone is a public zone if not set.
    required: false
    default: null
  network_domain:
    description:
      - Network domain for the zone.
    required: false
    default: null
  network_type:
    description:
      - Network type of the zone.
    required: false
    default: basic
    choices: [ 'basic', 'advanced' ]
  dns1:
    description:
      - First DNS for the zone.
      - Required if C(state=present)
    required: false
    default: null
  dns2:
    description:
      - Second DNS for the zone.
    required: false
    default: null
  internal_dns1:
    description:
      - First internal DNS for the zone.
      - If not set C(dns1) will be used on C(state=present).
    required: false
    default: null
  internal_dns2:
    description:
      - Second internal DNS for the zone.
    required: false
    default: null
  dns1_ipv6:
    description:
      - First DNS for IPv6 for the zone.
    required: false
    default: null
  dns2_ipv6:
    description:
      - Second DNS for IPv6 for the zone.
    required: false
    default: null
  guest_cidr_address:
    description:
      - Guest CIDR address for the zone.
    required: false
    default: null
  dhcp_provider:
    description:
      - DHCP provider for the Zone.
    required: false
    default: null
extends_documentation_fragment: cloudstack
'''

EXAMPLES = '''
# Ensure a zone is present
- local_action:
    module: cs_zone
    name: ch-zrh-ix-01
    dns1: 8.8.8.8
    dns2: 8.8.4.4
    network_type: basic

# Ensure a zone is disabled
- local_action:
    module: cs_zone
    name: ch-zrh-ix-01
    state: disabled

# Ensure a zone is enabled
- local_action:
    module: cs_zone
    name: ch-zrh-ix-01
    state: enabled

# Ensure a zone is absent
- local_action:
    module: cs_zone
    name: ch-zrh-ix-01
    state: absent
'''

RETURN = '''
---
id:
  description: UUID of the zone.
  returned: success
  type: string
  sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6
name:
  description: Name of the zone.
  returned: success
  type: string
  sample: zone01
dns1:
  description: First DNS for the zone.
  returned: success
  type: string
  sample: 8.8.8.8
dns2:
  description: Second DNS for the zone.
  returned: success
  type: string
  sample: 8.8.4.4
internal_dns1:
  description: First internal DNS for the zone.
  returned: success
  type: string
  sample: 8.8.8.8
internal_dns2:
  description: Second internal DNS for the zone.
  returned: success
  type: string
  sample: 8.8.4.4
dns1_ipv6:
  description: First IPv6 DNS for the zone.
  returned: success
  type: string
  sample: "2001:4860:4860::8888"
dns2_ipv6:
  description: Second IPv6 DNS for the zone.
  returned: success
  type: string
  sample: "2001:4860:4860::8844"
allocation_state:
  description: State of the zone.
  returned: success
  type: string
  sample: Enabled
domain:
  description: Domain the zone is related to.
  returned: success
  type: string
  sample: ROOT
network_domain:
  description: Network domain for the zone.
  returned: success
  type: string
  sample: example.com
network_type:
  description: Network type for the zone.
  returned: success
  type: string
  sample: basic
local_storage_enabled:
  description: Local storage offering enabled.
  returned: success
  type: bool
  sample: false
securitygroups_enabled:
  description: Security groups support is enabled.
  returned: success
  type: bool
  sample: false
guest_cidr_address:
  description: Guest CIDR address for the zone
  returned: success
  type: string
  sample: 10.1.1.0/24
dhcp_provider:
  description: DHCP provider for the zone
  returned: success
  type: string
  sample: VirtualRouter
zone_token:
  description: Zone token
  returned: success
  type: string
  sample: ccb0a60c-79c8-3230-ab8b-8bdbe8c45bb7
tags:
  description: List of resource tags associated with the zone.
  returned: success
  type: dict
  sample: [ { "key": "foo", "value": "bar" } ]
'''

# import cloudstack common
from ansible.module_utils.cloudstack import *


class AnsibleCloudStackZone(AnsibleCloudStack):
    """Manage CloudStack zones: create, update, enable/disable and remove."""

    def __init__(self, module):
        super(AnsibleCloudStackZone, self).__init__(module)
        # Map CloudStack API result keys to the names exposed in RETURN.
        self.returns = {
            'dns1': 'dns1',
            'dns2': 'dns2',
            'internaldns1': 'internal_dns1',
            'internaldns2': 'internal_dns2',
            'ipv6dns1': 'dns1_ipv6',
            'ipv6dns2': 'dns2_ipv6',
            'domain': 'network_domain',
            'networktype': 'network_type',
            'securitygroupsenabled': 'securitygroups_enabled',
            'localstorageenabled': 'local_storage_enabled',
            'guestcidraddress': 'guest_cidr_address',
            'dhcpprovider': 'dhcp_provider',
            'allocationstate': 'allocation_state',
            'zonetoken': 'zone_token',
        }
        # Cached zone lookup result; filled lazily by get_zone().
        self.zone = None

    def _get_common_zone_args(self):
        """Build the argument dict shared by createZone and updateZone."""
        args = {}
        args['name'] = self.module.params.get('name')
        args['dns1'] = self.module.params.get('dns1')
        args['dns2'] = self.module.params.get('dns2')
        # Internal DNS servers default to the public ones when unset.
        args['internaldns1'] = self.get_or_fallback('internal_dns1', 'dns1')
        args['internaldns2'] = self.get_or_fallback('internal_dns2', 'dns2')
        args['ipv6dns1'] = self.module.params.get('dns1_ipv6')
        args['ipv6dns2'] = self.module.params.get('dns2_ipv6')
        args['networktype'] = self.module.params.get('network_type')
        args['domain'] = self.module.params.get('network_domain')
        args['localstorageenabled'] = self.module.params.get('local_storage_enabled')
        args['guestcidraddress'] = self.module.params.get('guest_cidr_address')
        args['dhcpprovider'] = self.module.params.get('dhcp_provider')
        state = self.module.params.get('state')
        # enabled/disabled are passed to the API as Enabled/Disabled.
        if state in ['enabled', 'disabled']:
            args['allocationstate'] = state.capitalize()
        return args

    def get_zone(self):
        """Return the existing zone (by uuid, then by name) or None.

        The result is cached in self.zone for subsequent calls.
        """
        if not self.zone:
            uuid = self.module.params.get('id')
            if uuid:
                zones = self.cs.listZones(id=uuid)
                if zones:
                    self.zone = zones['zone'][0]
                    return self.zone
            # Fall back to a lookup by name. Use a fresh query here so a
            # failed uuid lookup does not leak the stale id into this call
            # (the original code queried with both id and name, which made
            # the name fallback unreachable when the uuid was wrong).
            zones = self.cs.listZones(name=self.module.params.get('name'))
            if zones:
                self.zone = zones['zone'][0]
        return self.zone

    def present_zone(self):
        """Ensure the zone exists, creating or updating it as needed."""
        zone = self.get_zone()
        if zone:
            zone = self._update_zone()
        else:
            zone = self._create_zone()
        return zone

    def _create_zone(self):
        """Create a new zone; dns1 is mandatory for creation."""
        required_params = [
            'dns1',
        ]
        self.module.fail_on_missing_params(required_params=required_params)

        self.result['changed'] = True

        args = self._get_common_zone_args()
        args['domainid'] = self.get_domain(key='id')
        # Note: the API parameter is singular ('securitygroupenabled').
        args['securitygroupenabled'] = self.module.params.get('securitygroups_enabled')

        zone = None
        if not self.module.check_mode:
            res = self.cs.createZone(**args)
            if 'errortext' in res:
                self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
            zone = res['zone']
        return zone

    def _update_zone(self):
        """Update the existing zone if any argument differs."""
        zone = self.get_zone()

        args = self._get_common_zone_args()
        args['id'] = zone['id']

        if self.has_changed(args, zone):
            self.result['changed'] = True

            if not self.module.check_mode:
                res = self.cs.updateZone(**args)
                if 'errortext' in res:
                    self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
                zone = res['zone']
        return zone

    def absent_zone(self):
        """Delete the zone if it exists; return the pre-deletion data."""
        zone = self.get_zone()
        if zone:
            self.result['changed'] = True

            args = {}
            args['id'] = zone['id']

            if not self.module.check_mode:
                res = self.cs.deleteZone(**args)
                if 'errortext' in res:
                    self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
        return zone


def main():
    """Module entry point: dispatch on the requested state."""
    argument_spec = cs_argument_spec()
    argument_spec.update(dict(
        id=dict(default=None),
        name=dict(required=True),
        dns1=dict(default=None),
        dns2=dict(default=None),
        internal_dns1=dict(default=None),
        internal_dns2=dict(default=None),
        dns1_ipv6=dict(default=None),
        dns2_ipv6=dict(default=None),
        # Capitalized variants kept for backward compatibility, though the
        # DOCUMENTATION block only advertises the lowercase forms.
        network_type=dict(default='basic', choices=['Basic', 'basic', 'Advanced', 'advanced']),
        network_domain=dict(default=None),
        guest_cidr_address=dict(default=None),
        dhcp_provider=dict(default=None),
        local_storage_enabled=dict(default=None),
        securitygroups_enabled=dict(default=None),
        state=dict(choices=['present', 'enabled', 'disabled', 'absent'], default='present'),
        domain=dict(default=None),
    ))

    module = AnsibleModule(
        argument_spec=argument_spec,
        required_together=cs_required_together(),
        supports_check_mode=True
    )

    try:
        acs_zone = AnsibleCloudStackZone(module)

        state = module.params.get('state')
        if state in ['absent']:
            zone = acs_zone.absent_zone()
        else:
            zone = acs_zone.present_zone()

        result = acs_zone.get_result(zone)

    except CloudStackException as e:
        module.fail_json(msg='CloudStackException: %s' % str(e))

    module.exit_json(**result)

# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
    main()
unknown
codeparrot/codeparrot-clean
name: Release Sources permissions: contents: read on: workflow_dispatch: inputs: release-version: description: Release Version required: true type: string workflow_call: inputs: release-version: description: Release Version required: true type: string secrets: RELEASE_TASKS_USER_TOKEN: description: "Secret used to check user permissions." required: false # Run on pull_requests for testing purposes. pull_request: paths: - '.github/workflows/release-sources.yml' - 'llvm/utils/release/export.sh' types: - opened - synchronize - reopened # When a PR is closed, we still start this workflow, but then skip # all the jobs, which makes it effectively a no-op. The reason to # do this is that it allows us to take advantage of concurrency groups # to cancel in progress CI jobs whenever the PR is closed. - closed concurrency: group: ${{ github.workflow }}-${{ inputs.release-version || github.event.pull_request.number }} cancel-in-progress: True jobs: inputs: name: Collect Job Inputs if: >- github.repository_owner == 'llvm' && github.event.action != 'closed' outputs: ref: ${{ steps.inputs.outputs.ref }} export-args: ${{ steps.inputs.outputs.export-args }} runs-on: ubuntu-24.04 steps: - id: inputs run: | ref=${{ (inputs.release-version && format('llvmorg-{0}', inputs.release-version)) || github.sha }} if [ -n "${{ inputs.release-version }}" ]; then export_args="-release ${{ inputs.release-version }} -final" else export_args="-git-ref ${{ github.sha }}" fi echo "ref=$ref" >> $GITHUB_OUTPUT echo "export-args=$export_args" >> $GITHUB_OUTPUT release-sources: name: Package Release Sources if: github.repository_owner == 'llvm' runs-on: ubuntu-24.04 outputs: digest: ${{ steps.digest.outputs.digest }} artifact-id: ${{ steps.artifact-upload.outputs.artifact-id }} needs: - inputs steps: - name: Checkout LLVM uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: ref: ${{ needs.inputs.outputs.ref }} fetch-tags: true - name: Install Dependencies run: | pip install 
--require-hashes -r ./llvm/utils/git/requirements.txt - name: Create Tarballs run: | ./llvm/utils/release/export.sh ${{ needs.inputs.outputs.export-args }} - name: Generate sha256 digest for sources id: digest run: | echo "digest=$(cat *.xz | sha256sum | cut -d ' ' -f 1)" >> $GITHUB_OUTPUT - name: Release Sources Artifact id: artifact-upload uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0 with: name: ${{ needs.inputs.outputs.ref }}-sources path: | *.xz attest-release-sources: name: Attest Release Sources runs-on: ubuntu-24.04 if: github.event_name != 'pull_request' needs: - inputs - release-sources permissions: id-token: write attestations: write steps: - name: Checkout Release Scripts uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: sparse-checkout: | .github/workflows/upload-release-artifact llvm/utils/release/github-upload-release.py llvm/utils/git/requirements.txt sparse-checkout-cone-mode: false - name: Upload Artifacts uses: ./.github/workflows/upload-release-artifact with: artifact-id: ${{ needs.release-sources.outputs.artifact-id }} attestation-name: ${{ needs.inputs.outputs.ref }}-sources-attestation digest: ${{ needs.release-sources.outputs.digest }} upload: false
unknown
github
https://github.com/llvm/llvm-project
.github/workflows/release-sources.yml
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.template.defaultfilters import slugify from django.test import SimpleTestCase from django.utils import six from django.utils.encoding import force_text from django.utils.functional import lazy from django.utils.safestring import mark_safe from ..utils import setup class SlugifyTests(SimpleTestCase): """ Running slugify on a pre-escaped string leads to odd behavior, but the result is still safe. """ @setup({'slugify01': '{% autoescape off %}{{ a|slugify }} {{ b|slugify }}{% endautoescape %}'}) def test_slugify01(self): output = self.engine.render_to_string('slugify01', {'a': 'a & b', 'b': mark_safe('a &amp; b')}) self.assertEqual(output, 'a-b a-amp-b') @setup({'slugify02': '{{ a|slugify }} {{ b|slugify }}'}) def test_slugify02(self): output = self.engine.render_to_string('slugify02', {'a': 'a & b', 'b': mark_safe('a &amp; b')}) self.assertEqual(output, 'a-b a-amp-b') class FunctionTests(SimpleTestCase): def test_slugify(self): self.assertEqual( slugify(' Jack & Jill like numbers 1,2,3 and 4 and silly characters ?%.$!/'), 'jack-jill-like-numbers-123-and-4-and-silly-characters', ) def test_unicode(self): self.assertEqual( slugify("Un \xe9l\xe9phant \xe0 l'or\xe9e du bois"), 'un-elephant-a-loree-du-bois', ) def test_non_string_input(self): self.assertEqual(slugify(123), '123') def test_slugify_lazy_string(self): lazy_str = lazy(lambda string: force_text(string), six.text_type) self.assertEqual( slugify(lazy_str(' Jack & Jill like numbers 1,2,3 and 4 and silly characters ?%.$!/')), 'jack-jill-like-numbers-123-and-4-and-silly-characters', )
unknown
codeparrot/codeparrot-clean
/* * Copyright 2002-present the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.springframework.docs.dataaccess.jdbc.jdbccomplextypes import org.springframework.jdbc.core.SqlOutParameter import org.springframework.jdbc.`object`.StoredProcedure import java.sql.CallableStatement import java.sql.Struct import java.sql.Types import java.util.Date import javax.sql.DataSource @Suppress("unused") class TestItemStoredProcedure(dataSource: DataSource) : StoredProcedure(dataSource, "get_item") { init { declareParameter(SqlOutParameter("item",Types.STRUCT,"ITEM_TYPE") { cs: CallableStatement, colIndx: Int, _: Int, _: String? -> val struct = cs.getObject(colIndx) as Struct val attr = struct.attributes TestItem( (attr[0] as Number).toLong(), attr[1] as String, attr[2] as Date ) }) // ... } }
kotlin
github
https://github.com/spring-projects/spring-framework
framework-docs/src/main/kotlin/org/springframework/docs/dataaccess/jdbc/jdbccomplextypes/TestItemStoredProcedure.kt
# -*- coding: utf-8 -*- ''' Exodus Add-on Copyright (C) 2016 Exodus This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. ''' from resources.lib.modules import trakt from resources.lib.modules import cleangenre from resources.lib.modules import cleantitle from resources.lib.modules import control from resources.lib.modules import client from resources.lib.modules import cache from resources.lib.modules import metacache from resources.lib.modules import playcount from resources.lib.modules import workers from resources.lib.modules import views import os,sys,re,json,urllib,urlparse,datetime params = dict(urlparse.parse_qsl(sys.argv[2].replace('?',''))) action = params.get('action') control.moderator() class movies: def __init__(self): self.list = [] self.imdb_link = 'http://www.imdb.com' self.trakt_link = 'http://api-v2launch.trakt.tv' self.datetime = (datetime.datetime.utcnow() - datetime.timedelta(hours = 5)) self.systime = (self.datetime).strftime('%Y%m%d%H%M%S%f') self.trakt_user = control.setting('trakt.user').strip() self.imdb_user = control.setting('imdb.user').replace('ur', '') self.lang = control.apiLanguage()['trakt'] self.search_link = 'http://api-v2launch.trakt.tv/search?type=movie&limit=20&page=1&query=' self.imdb_info_link = 'http://www.omdbapi.com/?i=%s&plot=full&r=json' self.trakt_info_link = 'http://api-v2launch.trakt.tv/movies/%s' self.trakt_lang_link = 
'http://api-v2launch.trakt.tv/movies/%s/translations/%s' self.fanart_tv_art_link = 'http://webservice.fanart.tv/v3/movies/%s' self.fanart_tv_level_link = 'http://webservice.fanart.tv/v3/level' self.persons_link = 'http://www.imdb.com/search/name?count=100&name=' self.personlist_link = 'http://www.imdb.com/search/name?count=100&gender=male,female' self.popular_link = 'http://www.imdb.com/search/title?title_type=feature,tv_movie&languages=en&num_votes=1000,&production_status=released&groups=top_1000&sort=moviemeter,asc&count=40&start=1' self.views_link = 'http://www.imdb.com/search/title?title_type=feature,tv_movie&languages=en&num_votes=1000,&production_status=released&sort=num_votes,desc&count=40&start=1' self.featured_link = 'http://www.imdb.com/search/title?title_type=feature,tv_movie&languages=en&num_votes=1000,&production_status=released&release_date=date[365],date[60]&sort=moviemeter,asc&count=40&start=1' self.person_link = 'http://www.imdb.com/search/title?title_type=feature,tv_movie&production_status=released&role=%s&sort=year,desc&count=40&start=1' self.genre_link = 'http://www.imdb.com/search/title?title_type=feature,tv_movie&languages=en&num_votes=100,&release_date=date[730],date[30]&genres=%s&sort=moviemeter,asc&count=40&start=1' self.language_link = 'http://www.imdb.com/search/title?title_type=feature,tv_movie&num_votes=100,&production_status=released&languages=%s&sort=moviemeter,asc&count=40&start=1' self.certification_link = 'http://www.imdb.com/search/title?title_type=feature,tv_movie&languages=en&num_votes=100,&production_status=released&certificates=us:%s&sort=moviemeter,asc&count=40&start=1' self.year_link = 'http://www.imdb.com/search/title?title_type=feature,tv_movie&languages=en&num_votes=100,&production_status=released&year=%s,%s&sort=moviemeter,asc&count=40&start=1' self.boxoffice_link = 'http://www.imdb.com/search/title?title_type=feature,tv_movie&languages=en&production_status=released&sort=boxoffice_gross_us,desc&count=40&start=1' 
self.oscars_link = 'http://www.imdb.com/search/title?title_type=feature,tv_movie&languages=en&production_status=released&groups=oscar_best_picture_winners&sort=year,desc&count=40&start=1' self.theaters_link = 'http://www.imdb.com/search/title?title_type=feature&languages=en&num_votes=1000,&release_date=date[365],date[0]&sort=release_date_us,desc&count=40&start=1' self.trending_link = 'http://api-v2launch.trakt.tv/movies/trending?limit=40&page=1' self.traktlists_link = 'http://api-v2launch.trakt.tv/users/me/lists' self.traktlikedlists_link = 'http://api-v2launch.trakt.tv/users/likes/lists?limit=1000000' self.traktlist_link = 'http://api-v2launch.trakt.tv/users/%s/lists/%s/items' self.traktcollection_link = 'http://api-v2launch.trakt.tv/users/me/collection/movies' self.traktwatchlist_link = 'http://api-v2launch.trakt.tv/users/me/watchlist/movies' self.traktfeatured_link = 'http://api-v2launch.trakt.tv/recommendations/movies?limit=40' self.trakthistory_link = 'http://api-v2launch.trakt.tv/users/me/history/movies?limit=40&page=1' self.imdblists_link = 'http://www.imdb.com/user/ur%s/lists?tab=all&sort=modified:desc&filter=titles' % self.imdb_user self.imdblist_link = 'http://www.imdb.com/list/%s/?view=detail&sort=title:asc&title_type=feature,short,tv_movie,tv_special,video,documentary,game&start=1' self.imdblist2_link = 'http://www.imdb.com/list/%s/?view=detail&sort=created:desc&title_type=feature,short,tv_movie,tv_special,video,documentary,game&start=1' self.imdbwatchlist_link = 'http://www.imdb.com/user/ur%s/watchlist?sort=alpha,asc' % self.imdb_user self.imdbwatchlist2_link = 'http://www.imdb.com/user/ur%s/watchlist?sort=date_added,desc' % self.imdb_user def get(self, url, idx=True): try: try: url = getattr(self, url + '_link') except: pass try: u = urlparse.urlparse(url).netloc.lower() except: pass if u in self.trakt_link and '/users/' in url: try: if url == self.trakthistory_link: raise Exception() if not '/users/me/' in url: raise Exception() if 
trakt.getActivity() > cache.timeout(self.trakt_list, url, self.trakt_user): raise Exception() self.list = cache.get(self.trakt_list, 720, url, self.trakt_user) except: self.list = cache.get(self.trakt_list, 0, url, self.trakt_user) if '/users/me/' in url and not '/watchlist/' in url: self.list = sorted(self.list, key=lambda k: re.sub('(^the |^a )', '', k['title'].lower())) if idx == True: self.worker() elif u in self.trakt_link and self.search_link in url: self.list = cache.get(self.trakt_list, 1, url, self.trakt_user) if idx == True: self.worker(level=0) elif u in self.trakt_link: self.list = cache.get(self.trakt_list, 24, url, self.trakt_user) if idx == True: self.worker() ; self.list = [i for i in self.list if not i['poster'] == '0'] elif u in self.imdb_link and ('/user/' in url or '/list/' in url): self.list = cache.get(self.imdb_list, 0, url) if idx == True: self.worker() elif u in self.imdb_link: self.list = cache.get(self.imdb_list, 24, url) if idx == True: self.worker() ; self.list = [i for i in self.list if not i['poster'] == '0'] if idx == True: self.movieDirectory(self.list) return self.list except: pass def widget(self): setting = control.setting('movie.widget') if setting == '2': self.get(self.trending_link) elif setting == '3': self.get(self.popular_link) elif setting == '4': self.get(self.theaters_link) else: self.get(self.featured_link) def search(self): try: control.idle() t = control.lang(32010).encode('utf-8') k = control.keyboard('', t) ; k.doModal() q = k.getText() if k.isConfirmed() else None if (q == None or q == ''): return url = self.search_link + urllib.quote_plus(q) url = '%s?action=moviePage&url=%s' % (sys.argv[0], urllib.quote_plus(url)) control.execute('Container.Update(%s)' % url) except: return def person(self): try: control.idle() t = control.lang(32010).encode('utf-8') k = control.keyboard('', t) ; k.doModal() q = k.getText() if k.isConfirmed() else None if (q == None or q == ''): return url = self.persons_link + 
urllib.quote_plus(q) url = '%s?action=moviePersons&url=%s' % (sys.argv[0], urllib.quote_plus(url)) control.execute('Container.Update(%s)' % url) except: return def genres(self): genres = [ ('Action', 'action'), ('Adventure', 'adventure'), ('Animation', 'animation'), ('Biography', 'biography'), ('Comedy', 'comedy'), ('Crime', 'crime'), ('Drama', 'drama'), ('Family', 'family'), ('Fantasy', 'fantasy'), ('History', 'history'), ('Horror', 'horror'), ('Music ', 'music'), ('Musical', 'musical'), ('Mystery', 'mystery'), ('Romance', 'romance'), ('Science Fiction', 'sci_fi'), ('Sport', 'sport'), ('Thriller', 'thriller'), ('War', 'war'), ('Western', 'western') ] for i in genres: self.list.append({'name': cleangenre.lang(i[0], self.lang), 'url': self.genre_link % i[1], 'image': 'genres.png', 'action': 'movies'}) self.addDirectory(self.list) return self.list def languages(self): languages = [ ('Arabic', 'ar'), ('Bulgarian', 'bg'), ('Chinese', 'zh'), ('Croatian', 'hr'), ('Dutch', 'nl'), ('English', 'en'), ('Finnish', 'fi'), ('French', 'fr'), ('German', 'de'), ('Greek', 'el'), ('Hebrew', 'he'), ('Hindi ', 'hi'), ('Hungarian', 'hu'), ('Icelandic', 'is'), ('Italian', 'it'), ('Japanese', 'ja'), ('Korean', 'ko'), ('Norwegian', 'no'), ('Persian', 'fa'), ('Polish', 'pl'), ('Portuguese', 'pt'), ('Punjabi', 'pa'), ('Romanian', 'ro'), ('Russian', 'ru'), ('Spanish', 'es'), ('Swedish', 'sv'), ('Turkish', 'tr'), ('Ukrainian', 'uk') ] for i in languages: self.list.append({'name': str(i[0]), 'url': self.language_link % i[1], 'image': 'languages.png', 'action': 'movies'}) self.addDirectory(self.list) return self.list def certifications(self): certificates = ['G', 'PG', 'PG-13', 'R', 'NC-17'] for i in certificates: self.list.append({'name': str(i), 'url': self.certification_link % str(i).replace('-', '_').lower(), 'image': 'certificates.png', 'action': 'movies'}) self.addDirectory(self.list) return self.list def years(self): year = (self.datetime.strftime('%Y')) for i in range(int(year)-0, 
int(year)-50, -1): self.list.append({'name': str(i), 'url': self.year_link % (str(i), str(i)), 'image': 'years.png', 'action': 'movies'}) self.addDirectory(self.list) return self.list def persons(self, url): if url == None: self.list = cache.get(self.imdb_person_list, 24, self.personlist_link) else: self.list = cache.get(self.imdb_person_list, 1, url) for i in range(0, len(self.list)): self.list[i].update({'action': 'movies'}) self.addDirectory(self.list) return self.list def userlists(self): try: userlists = [] if trakt.getTraktCredentialsInfo() == False: raise Exception() activity = trakt.getActivity() except: pass try: if trakt.getTraktCredentialsInfo() == False: raise Exception() try: if activity > cache.timeout(self.trakt_user_list, self.traktlists_link, self.trakt_user): raise Exception() userlists += cache.get(self.trakt_user_list, 720, self.traktlists_link, self.trakt_user) except: userlists += cache.get(self.trakt_user_list, 0, self.traktlists_link, self.trakt_user) except: pass try: self.list = [] if self.imdb_user == '': raise Exception() userlists += cache.get(self.imdb_user_list, 0, self.imdblists_link) except: pass try: self.list = [] if trakt.getTraktCredentialsInfo() == False: raise Exception() try: if activity > cache.timeout(self.trakt_user_list, self.traktlikedlists_link, self.trakt_user): raise Exception() userlists += cache.get(self.trakt_user_list, 720, self.traktlikedlists_link, self.trakt_user) except: userlists += cache.get(self.trakt_user_list, 0, self.traktlikedlists_link, self.trakt_user) except: pass self.list = userlists for i in range(0, len(self.list)): self.list[i].update({'image': 'userlists.png', 'action': 'movies'}) self.addDirectory(self.list, queue=True) return self.list def trakt_list(self, url, user): try: q = dict(urlparse.parse_qsl(urlparse.urlsplit(url).query)) q.update({'extended': 'full,images'}) q = (urllib.urlencode(q)).replace('%2C', ',') u = url.replace('?' + urlparse.urlparse(url).query, '') + '?' 
+ q result = trakt.getTrakt(u) result = json.loads(result) items = [] for i in result: try: items.append(i['movie']) except: pass if len(items) == 0: items = result except: return try: q = dict(urlparse.parse_qsl(urlparse.urlsplit(url).query)) p = str(int(q['page']) + 1) if p == '5': raise Exception() q.update({'page': p}) q = (urllib.urlencode(q)).replace('%2C', ',') next = url.replace('?' + urlparse.urlparse(url).query, '') + '?' + q next = next.encode('utf-8') except: next = '' for item in items: try: title = item['title'] title = client.replaceHTMLCodes(title) title = title.encode('utf-8') year = item['year'] year = re.sub('[^0-9]', '', str(year)) year = year.encode('utf-8') if int(year) > int((self.datetime).strftime('%Y')): raise Exception() imdb = item['ids']['imdb'] if imdb == None or imdb == '': raise Exception() imdb = 'tt' + re.sub('[^0-9]', '', str(imdb)) imdb = imdb.encode('utf-8') try: premiered = item['released'] except: premiered = '0' try: premiered = re.compile('(\d{4}-\d{2}-\d{2})').findall(premiered)[0] except: premiered = '0' premiered = premiered.encode('utf-8') try: genre = item['genres'] except: genre = '0' genre = [i.title() for i in genre] if genre == []: genre = '0' genre = ' / '.join(genre) genre = genre.encode('utf-8') try: duration = str(item['runtime']) except: duration = '0' if duration == None: duration = '0' duration = duration.encode('utf-8') try: rating = str(item['rating']) except: rating = '0' if rating == None or rating == '0.0': rating = '0' rating = rating.encode('utf-8') try: votes = str(item['votes']) except: votes = '0' try: votes = str(format(int(votes),',d')) except: pass if votes == None: votes = '0' votes = votes.encode('utf-8') try: mpaa = item['certification'] except: mpaa = '0' if mpaa == None: mpaa = '0' mpaa = mpaa.encode('utf-8') try: plot = item['overview'] except: plot = '0' if plot == None: plot = '0' plot = client.replaceHTMLCodes(plot) plot = plot.encode('utf-8') self.list.append({'title': title, 
'originaltitle': title, 'year': year, 'premiered': premiered, 'genre': genre, 'duration': duration, 'rating': rating, 'votes': votes, 'mpaa': mpaa, 'plot': plot, 'imdb': imdb, 'tvdb': '0', 'poster': '0', 'next': next}) except: pass return self.list def trakt_user_list(self, url, user): try: result = trakt.getTrakt(url) items = json.loads(result) except: pass for item in items: try: try: name = item['list']['name'] except: name = item['name'] name = client.replaceHTMLCodes(name) name = name.encode('utf-8') try: url = (trakt.slug(item['list']['user']['username']), item['list']['ids']['slug']) except: url = ('me', item['ids']['slug']) url = self.traktlist_link % url url = url.encode('utf-8') self.list.append({'name': name, 'url': url, 'context': url}) except: pass self.list = sorted(self.list, key=lambda k: re.sub('(^the |^a )', '', k['name'].lower())) return self.list def imdb_list(self, url): try: for i in re.findall('date\[(\d+)\]', url): url = url.replace('date[%s]' % i, (self.datetime - datetime.timedelta(days = int(i))).strftime('%Y-%m-%d')) def imdb_watchlist_id(url): return client.parseDOM(client.request(url).decode('iso-8859-1').encode('utf-8'), 'meta', ret='content', attrs = {'property': 'pageId'})[0] if url == self.imdbwatchlist_link: url = cache.get(imdb_watchlist_id, 8640, url) url = self.imdblist_link % url elif url == self.imdbwatchlist2_link: url = cache.get(imdb_watchlist_id, 8640, url) url = self.imdblist2_link % url result = client.request(url) result = result.replace('\n','') result = result.decode('iso-8859-1').encode('utf-8') items = client.parseDOM(result, 'div', attrs = {'class': 'lister-item mode-advanced'}) items += client.parseDOM(result, 'div', attrs = {'class': 'list_item.+?'}) except: return try: next = client.parseDOM(result, 'a', ret='href', attrs = {'class': 'lister-page-next.+?'}) if len(next) == 0: next = client.parseDOM(result, 'div', attrs = {'class': 'pagination'})[0] next = zip(client.parseDOM(next, 'a', ret='href'), 
client.parseDOM(next, 'a')) next = [i[0] for i in next if 'Next' in i[1]] next = url.replace(urlparse.urlparse(url).query, urlparse.urlparse(next[0]).query) next = client.replaceHTMLCodes(next) next = next.encode('utf-8') except: next = '' for item in items: try: title = client.parseDOM(item, 'a')[1] title = client.replaceHTMLCodes(title) title = title.encode('utf-8') year = client.parseDOM(item, 'span', attrs = {'class': 'lister-item-year.+?'}) year += client.parseDOM(item, 'span', attrs = {'class': 'year_type'}) year = re.findall('(\d{4})', year[0])[0] year = year.encode('utf-8') if int(year) > int((self.datetime).strftime('%Y')): raise Exception() imdb = client.parseDOM(item, 'a', ret='href')[0] imdb = re.findall('(tt\d*)', imdb)[0] imdb = imdb.encode('utf-8') try: poster = client.parseDOM(item, 'img', ret='loadlate')[0] except: poster = '0' if '/nopicture/' in poster: poster = '0' poster = re.sub('(?:_SX|_SY|_UX|_UY|_CR|_AL)(?:\d+|_).+?\.', '_SX500.', poster) poster = client.replaceHTMLCodes(poster) poster = poster.encode('utf-8') try: genre = client.parseDOM(item, 'span', attrs = {'class': 'genre'})[0] except: genre = '0' genre = ' / '.join([i.strip() for i in genre.split(',')]) if genre == '': genre = '0' genre = client.replaceHTMLCodes(genre) genre = genre.encode('utf-8') try: duration = re.findall('(\d+?) 
min(?:s|)', item)[-1] except: duration = '0' duration = duration.encode('utf-8') rating = '0' try: rating = client.parseDOM(item, 'span', attrs = {'class': 'rating-rating'})[0] except: pass try: rating = client.parseDOM(rating, 'span', attrs = {'class': 'value'})[0] except: rating = '0' try: rating = client.parseDOM(item, 'div', ret='data-value', attrs = {'class': '.*?imdb-rating'})[0] except: pass if rating == '' or rating == '-': rating = '0' rating = client.replaceHTMLCodes(rating) rating = rating.encode('utf-8') try: votes = client.parseDOM(item, 'div', ret='title', attrs = {'class': '.*?rating-list'})[0] except: votes = '0' try: votes = re.findall('\((.+?) vote(?:s|)\)', votes)[0] except: votes = '0' if votes == '': votes = '0' votes = client.replaceHTMLCodes(votes) votes = votes.encode('utf-8') try: mpaa = client.parseDOM(item, 'span', attrs = {'class': 'certificate'})[0] except: mpaa = '0' if mpaa == '' or mpaa == 'NOT_RATED': mpaa = '0' mpaa = mpaa.replace('_', '-') mpaa = client.replaceHTMLCodes(mpaa) mpaa = mpaa.encode('utf-8') try: director = re.findall('Director(?:s|):(.+?)(?:\||</div>)', item)[0] except: director = '0' director = client.parseDOM(director, 'a') director = ' / '.join(director) if director == '': director = '0' director = client.replaceHTMLCodes(director) director = director.encode('utf-8') try: cast = re.findall('Stars(?:s|):(.+?)(?:\||</div>)', item)[0] except: cast = '0' cast = client.replaceHTMLCodes(cast) cast = cast.encode('utf-8') cast = client.parseDOM(cast, 'a') if cast == []: cast = '0' plot = '0' try: plot = client.parseDOM(item, 'p', attrs = {'class': 'text-muted'})[0] except: pass try: plot = client.parseDOM(item, 'div', attrs = {'class': 'item_description'})[0] except: pass plot = plot.rsplit('<span>', 1)[0].strip() plot = re.sub('<.+?>|</.+?>', '', plot) if plot == '': plot = '0' plot = client.replaceHTMLCodes(plot) plot = plot.encode('utf-8') self.list.append({'title': title, 'originaltitle': title, 'year': year, 'genre': 
genre, 'duration': duration, 'rating': rating, 'votes': votes, 'mpaa': mpaa, 'director': director, 'cast': cast, 'plot': plot, 'imdb': imdb, 'tvdb': '0', 'poster': poster, 'next': next}) except: pass return self.list def imdb_person_list(self, url): try: result = client.request(url) result = result.decode('iso-8859-1').encode('utf-8') items = client.parseDOM(result, 'tr', attrs = {'class': '.+? detailed'}) except: return for item in items: try: name = client.parseDOM(item, 'a', ret='title')[0] name = client.replaceHTMLCodes(name) name = name.encode('utf-8') url = client.parseDOM(item, 'a', ret='href')[0] url = re.findall('(nm\d*)', url, re.I)[0] url = self.person_link % url url = client.replaceHTMLCodes(url) url = url.encode('utf-8') image = client.parseDOM(item, 'img', ret='src')[0] if not ('._SX' in image or '._SY' in image): raise Exception() image = re.sub('(?:_SX|_SY|_UX|_UY|_CR|_AL)(?:\d+|_).+?\.', '_SX500.', image) image = client.replaceHTMLCodes(image) image = image.encode('utf-8') self.list.append({'name': name, 'url': url, 'image': image}) except: pass return self.list def imdb_user_list(self, url): try: result = client.request(url) result = result.decode('iso-8859-1').encode('utf-8') items = client.parseDOM(result, 'div', attrs = {'class': 'list_name'}) except: pass for item in items: try: name = client.parseDOM(item, 'a')[0] name = client.replaceHTMLCodes(name) name = name.encode('utf-8') url = client.parseDOM(item, 'a', ret='href')[0] url = url.split('/list/', 1)[-1].replace('/', '') url = self.imdblist_link % url url = client.replaceHTMLCodes(url) url = url.encode('utf-8') self.list.append({'name': name, 'url': url, 'context': url}) except: pass self.list = sorted(self.list, key=lambda k: re.sub('(^the |^a )', '', k['name'].lower())) return self.list def worker(self, level=1): self.meta = [] total = len(self.list) self.fanart_tv_headers = {} fanart_tv_level = 'user' fanart_tv_user = control.setting('fanart.tv.user') 
self.fanart_tv_headers.update({'api-key': 'YTc2MGMyMTEzYTM1OTk5NzFiN2FjMWU0OWUzMTAyMGQ='.decode('base64')}) if level == 1 and not fanart_tv_user == '': self.fanart_tv_headers.update({'client-key': fanart_tv_user}) #try: fanart_tv_level = json.loads(client.request(self.fanart_tv_level_link, headers=self.fanart_tv_headers))['level'] #except: pass self.tm_art_link = 'http://api.themoviedb.org/3/movie/%s/images?api_key=' self.tm_img_link = 'https://image.tmdb.org/t/p/w%s%s' tm_user = control.setting('tm.user') if tm_user == '': self.tm_art_link += 'ZjVhMmIyZDc5NTUxOTBmYTczNWI2NzMwYTlmM2JhM2I='.decode('base64') else: self.tm_art_link += tm_user for i in range(0, total): self.list[i].update({'metacache': False}) self.list = metacache.fetch(self.list, self.lang) for r in range(0, total, 40): threads = [] for i in range(r, r+40): if i <= total: threads.append(workers.Thread(self.super_info, i)) [i.start() for i in threads] [i.join() for i in threads] if len(self.meta) > 0: metacache.insert(self.meta) if fanart_tv_level == 'user': for i in self.list: i.update({'clearlogo': '0', 'clearart': '0'}) if not tm_user == '': for i in self.list: i.update({'poster2': '0', 'fanart': '0'}) self.list = [i for i in self.list if not i['imdb'] == '0'] def super_info(self, i): try: if self.list[i]['metacache'] == True: raise Exception() imdb = self.list[i]['imdb'] url = self.imdb_info_link % imdb item = client.request(url, timeout='10') item = json.loads(item) title = item['Title'] title = title.encode('utf-8') year = item['Year'] year = year.encode('utf-8') imdb = item['imdbID'] if imdb == None or imdb == '' or imdb == 'N/A': imdb = '0' imdb = imdb.encode('utf-8') premiered = item['Released'] if premiered == None or premiered == '' or premiered == 'N/A': premiered = '0' premiered = re.findall('(\d*) (.+?) 
(\d*)', premiered) try: premiered = '%s-%s-%s' % (premiered[0][2], {'Jan':'01', 'Feb':'02', 'Mar':'03', 'Apr':'04', 'May':'05', 'Jun':'06', 'Jul':'07', 'Aug':'08', 'Sep':'09', 'Oct':'10', 'Nov':'11', 'Dec':'12'}[premiered[0][1]], premiered[0][0]) except: premiered = '0' premiered = premiered.encode('utf-8') genre = item['Genre'] if genre == None or genre == '' or genre == 'N/A': genre = '0' genre = genre.replace(', ', ' / ') genre = genre.encode('utf-8') duration = item['Runtime'] if duration == None or duration == '' or duration == 'N/A': duration = '0' duration = re.sub('[^0-9]', '', str(duration)) duration = duration.encode('utf-8') rating = item['imdbRating'] if rating == None or rating == '' or rating == 'N/A' or rating == '0.0': rating = '0' rating = rating.encode('utf-8') votes = item['imdbVotes'] try: votes = str(format(int(votes),',d')) except: pass if votes == None or votes == '' or votes == 'N/A': votes = '0' votes = votes.encode('utf-8') mpaa = item['Rated'] if mpaa == None or mpaa == '' or mpaa == 'N/A': mpaa = '0' mpaa = mpaa.encode('utf-8') director = item['Director'] if director == None or director == '' or director == 'N/A': director = '0' director = director.replace(', ', ' / ') director = re.sub(r'\(.*?\)', '', director) director = ' '.join(director.split()) director = director.encode('utf-8') writer = item['Writer'] if writer == None or writer == '' or writer == 'N/A': writer = '0' writer = writer.replace(', ', ' / ') writer = re.sub(r'\(.*?\)', '', writer) writer = ' '.join(writer.split()) writer = writer.encode('utf-8') cast = item['Actors'] if cast == None or cast == '' or cast == 'N/A': cast = '0' cast = [x.strip() for x in cast.split(',') if not x == ''] try: cast = [(x.encode('utf-8'), '') for x in cast] except: cast = [] if cast == []: cast = '0' plot = item['Plot'] if plot == None or plot == '' or plot == 'N/A': plot = '0' plot = client.replaceHTMLCodes(plot) plot = plot.encode('utf-8') poster = item['Poster'] if poster == None or poster 
== '' or poster == 'N/A': poster = '0' if '/nopicture/' in poster: poster = '0' poster = re.sub('(?:_SX|_SY|_UX|_UY|_CR|_AL)(?:\d+|_).+?\.', '_SX500.', poster) if 'poster' in self.list[i] and poster == '0': poster = self.list[i]['poster'] poster = poster.encode('utf-8') artmeta = True art = client.request(self.fanart_tv_art_link % imdb, headers=self.fanart_tv_headers, timeout='10', error=True) try: art = json.loads(art) except: artmeta = False art2meta = True art2 = client.request(self.tm_art_link % imdb, timeout='10', error=True) try: art2 = json.loads(art2) except: art2meta = False try: art2meta = False if 'status_code' in art2 and not art2['status_code'] == 34 else art2meta except: pass try: poster2 = art['movieposter'] poster2 = [x for x in poster2 if x.get('lang') == 'en'][::-1] + [x for x in poster2 if x.get('lang') == '00'][::-1] poster2 = poster2[0]['url'].encode('utf-8') except: poster2 = '0' try: poster3 = art2['posters'] poster3 = [x for x in poster3 if x.get('iso_639_1') == 'en'] + [x for x in poster3 if not x.get('iso_639_1') == 'en'] poster3 = [(x['width'], x['file_path']) for x in poster3] poster3 = [(x[0], x[1]) if x[0] < 300 else ('300', x[1]) for x in poster3] poster3 = self.tm_img_link % poster3[0] poster3 = poster3.encode('utf-8') except: poster3 = '0' try: if 'moviebackground' in art: fanart = art['moviebackground'] else: fanart = art['moviethumb'] fanart = [x for x in fanart if x.get('lang') == 'en'][::-1] + [x for x in fanart if x.get('lang') == '00'][::-1] fanart = fanart[0]['url'].encode('utf-8') except: fanart = '0' try: fanart2 = art2['backdrops'] fanart2 = [x for x in fanart2 if x.get('iso_639_1') == 'en'] + [x for x in fanart2 if not x.get('iso_639_1') == 'en'] fanart2 = [x for x in fanart2 if x.get('width') == 1920] + [x for x in fanart2 if x.get('width') < 1920] fanart2 = [(x['width'], x['file_path']) for x in fanart2] fanart2 = [(x[0], x[1]) if x[0] < 1280 else ('1280', x[1]) for x in fanart2] fanart2 = self.tm_img_link % fanart2[0] 
fanart2 = fanart2.encode('utf-8') except: fanart2 = '0' try: banner = art['moviebanner'] banner = [x for x in banner if x.get('lang') == 'en'][::-1] + [x for x in banner if x.get('lang') == '00'][::-1] banner = banner[0]['url'].encode('utf-8') except: banner = '0' try: if 'hdmovielogo' in art: clearlogo = art['hdmovielogo'] else: clearlogo = art['clearlogo'] clearlogo = [x for x in clearlogo if x.get('lang') == 'en'][::-1] + [x for x in clearlogo if x.get('lang') == '00'][::-1] clearlogo = clearlogo[0]['url'].encode('utf-8') except: clearlogo = '0' try: if 'hdmovieclearart' in art: clearart = art['hdmovieclearart'] else: clearart = art['clearart'] clearart = [x for x in clearart if x.get('lang') == 'en'][::-1] + [x for x in clearart if x.get('lang') == '00'][::-1] clearart = clearart[0]['url'].encode('utf-8') except: clearart = '0' try: if self.lang == 'en': raise Exception() url = self.trakt_lang_link % (imdb, self.lang) item = trakt.getTrakt(url) item = json.loads(item)[0] t = item['title'] if not (t == None or t == ''): title = t try: title = title.encode('utf-8') except: pass t = item['overview'] if not (t == None or t == ''): plot = t try: plot = plot.encode('utf-8') except: pass except: pass item = {'title': title, 'year': year, 'imdb': imdb, 'poster': poster, 'poster2': poster2, 'poster3': poster3, 'banner': banner, 'fanart': fanart, 'fanart2': fanart2, 'clearlogo': clearlogo, 'clearart': clearart, 'premiered': premiered, 'genre': genre, 'duration': duration, 'rating': rating, 'votes': votes, 'mpaa': mpaa, 'director': director, 'writer': writer, 'cast': cast, 'plot': plot} item = dict((k,v) for k, v in item.iteritems() if not v == '0') self.list[i].update(item) if artmeta == False or art2meta == False: raise Exception() meta = {'imdb': imdb, 'tvdb': '0', 'lang': self.lang, 'item': item} self.meta.append(meta) except: pass def movieDirectory(self, items): if items == None or len(items) == 0: control.idle() ; sys.exit() sysaddon = sys.argv[0] syshandle = 
int(sys.argv[1]) addonPoster, addonBanner = control.addonPoster(), control.addonBanner() addonFanart, settingFanart = control.addonFanart(), control.setting('fanart') traktCredentials = trakt.getTraktCredentialsInfo() try: isOld = False ; control.item().getArt('type') except: isOld = True isPlayable = 'true' if not 'plugin' in control.infoLabel('Container.PluginName') else 'false' indicators = playcount.getMovieIndicators(refresh=True) if action == 'movies' else playcount.getMovieIndicators() playbackMenu = control.lang(32063).encode('utf-8') if control.setting('hosts.mode') == '2' else control.lang(32064).encode('utf-8') watchedMenu = control.lang(32068).encode('utf-8') if trakt.getTraktIndicatorsInfo() == True else control.lang(32066).encode('utf-8') unwatchedMenu = control.lang(32069).encode('utf-8') if trakt.getTraktIndicatorsInfo() == True else control.lang(32067).encode('utf-8') queueMenu = control.lang(32065).encode('utf-8') traktManagerMenu = control.lang(32070).encode('utf-8') nextMenu = control.lang(32053).encode('utf-8') for i in items: try: label = '%s (%s)' % (i['title'], i['year']) imdb, title, year = i['imdb'], i['originaltitle'], i['year'] sysname = urllib.quote_plus('%s (%s)' % (title, year)) systitle = urllib.quote_plus(title) meta = dict((k,v) for k, v in i.iteritems() if not v == '0') meta.update({'mediatype': 'movie'}) meta.update({'trailer': '%s?action=trailer&name=%s' % (sysaddon, sysname)}) #meta.update({'trailer': 'plugin://script.extendedinfo/?info=playtrailer&&id=%s' % imdb}) if not 'duration' in i: meta.update({'duration': '120'}) elif i['duration'] == '0': meta.update({'duration': '120'}) try: meta.update({'duration': str(int(meta['duration']) * 60)}) except: pass try: meta.update({'genre': cleangenre.lang(meta['genre'], self.lang)}) except: pass sysmeta = urllib.quote_plus(json.dumps(meta)) url = '%s?action=play&title=%s&year=%s&imdb=%s&meta=%s&t=%s' % (sysaddon, systitle, year, imdb, sysmeta, self.systime) sysurl = 
urllib.quote_plus(url) path = '%s?action=play&title=%s&year=%s&imdb=%s' % (sysaddon, systitle, year, imdb) cm = [] cm.append((queueMenu, 'RunPlugin(%s?action=queueItem)' % sysaddon)) try: overlay = int(playcount.getMovieOverlay(indicators, imdb)) if overlay == 7: cm.append((unwatchedMenu, 'RunPlugin(%s?action=moviePlaycount&imdb=%s&query=6)' % (sysaddon, imdb))) meta.update({'playcount': 1, 'overlay': 7}) else: cm.append((watchedMenu, 'RunPlugin(%s?action=moviePlaycount&imdb=%s&query=7)' % (sysaddon, imdb))) meta.update({'playcount': 0, 'overlay': 6}) except: pass if traktCredentials == True: cm.append((traktManagerMenu, 'RunPlugin(%s?action=traktManager&name=%s&imdb=%s&content=movie)' % (sysaddon, sysname, imdb))) cm.append((playbackMenu, 'RunPlugin(%s?action=alterSources&url=%s&meta=%s)' % (sysaddon, sysurl, sysmeta))) if isOld == True: cm.append((control.lang2(19033).encode('utf-8'), 'Action(Info)')) item = control.item(label=label) art = {} if 'poster3' in i and not i['poster3'] == '0': art.update({'icon': i['poster3'], 'thumb': i['poster3'], 'poster': i['poster3']}) #elif 'poster2' in i and not i['poster2'] == '0': #art.update({'icon': i['poster2'], 'thumb': i['poster2'], 'poster': i['poster2']}) elif 'poster' in i and not i['poster'] == '0': art.update({'icon': i['poster'], 'thumb': i['poster'], 'poster': i['poster']}) else: art.update({'icon': addonPoster, 'thumb': addonPoster, 'poster': addonPoster}) if 'banner' in i and not i['banner'] == '0': art.update({'banner': i['banner']}) else: art.update({'banner': addonBanner}) if 'clearlogo' in i and not i['clearlogo'] == '0': art.update({'clearlogo': i['clearlogo']}) if 'clearart' in i and not i['clearart'] == '0': art.update({'clearart': i['clearart']}) if settingFanart == 'true' and 'fanart2' in i and not i['fanart2'] == '0': item.setProperty('Fanart_Image', i['fanart2']) elif settingFanart == 'true' and 'fanart' in i and not i['fanart'] == '0': item.setProperty('Fanart_Image', i['fanart']) elif not 
addonFanart == None: item.setProperty('Fanart_Image', addonFanart) item.setArt(art) item.addContextMenuItems(cm) item.setProperty('IsPlayable', isPlayable) item.setInfo(type='Video', infoLabels = meta) control.addItem(handle=syshandle, url=url, listitem=item, isFolder=False) except: pass try: url = items[0]['next'] if url == '': raise Exception() icon = control.addonNext() url = '%s?action=moviePage&url=%s' % (sysaddon, urllib.quote_plus(url)) item = control.item(label=nextMenu) item.setArt({'icon': icon, 'thumb': icon, 'poster': icon, 'banner': icon}) if not addonFanart == None: item.setProperty('Fanart_Image', addonFanart) control.addItem(handle=syshandle, url=url, listitem=item, isFolder=True) except: pass control.content(syshandle, 'movies') control.directory(syshandle, cacheToDisc=True) views.setView('movies', {'skin.confluence': 500}) def addDirectory(self, items, queue=False): if items == None or len(items) == 0: control.idle() ; sys.exit() sysaddon = sys.argv[0] syshandle = int(sys.argv[1]) addonFanart, addonThumb, artPath = control.addonFanart(), control.addonThumb(), control.artPath() queueMenu = control.lang(32065).encode('utf-8') for i in items: try: name = i['name'] if i['image'].startswith('http'): thumb = i['image'] elif not artPath == None: thumb = os.path.join(artPath, i['image']) else: thumb = addonThumb url = '%s?action=%s' % (sysaddon, i['action']) try: url += '&url=%s' % urllib.quote_plus(i['url']) except: pass cm = [] if queue == True: cm.append((queueMenu, 'RunPlugin(%s?action=queueItem)' % sysaddon)) item = control.item(label=name) item.setArt({'icon': thumb, 'thumb': thumb}) if not addonFanart == None: item.setProperty('Fanart_Image', addonFanart) item.addContextMenuItems(cm) control.addItem(handle=syshandle, url=url, listitem=item, isFolder=True) except: pass control.directory(syshandle, cacheToDisc=True)
unknown
codeparrot/codeparrot-clean
import random import unittest import uuid import time import yaml import os.path from sync.synchronizer import Synchronizer, es_scan ASSETS_DIR = os.path.join(os.path.dirname(__file__), "..", "assets") def load_config_dict(): with file(os.path.join(ASSETS_DIR, "config.yaml")) as f: return yaml.load(f) class BaseTestCase(unittest.TestCase): # longMessage = True def setUp(self): self.merger = Synchronizer(load_config_dict()) self.merger.checkpoint_reset() self.prepare_ca() self.prepare_es() self._prepare_ca_queries() def _prepare_ca_queries(self): fields = [self.merger.id_field, self.merger.version_field] + self.merger.sync_fields self.ca_ps_select_ids = self.merger.ca_session.prepare( "SELECT %s FROM %s WHERE %s IN ?;" % (",".join(fields), self.merger.ca_table, self.merger.id_field) ) def make_es_query_ids_in(self, ids): return { "filter": { "ids": { "values": ids } } } def generate_doc(self, id=None, version=None): return { "id": id or str(uuid.uuid4()), "version": version or int(time.time() - 1), "data_int": random.randint(0, 1000), "data_float": random.randint(0, 1000), "data_str": str(random.random()), } def prepare_ca(self): self.merger.ca_session.execute("TRUNCATE %s" % self.merger.ca_table) def prepare_es(self): try: self.merger.es_session.indices.delete(self.merger.es_index) except: pass self.merger.es_session.indices.create(self.merger.es_index) self.merger.es_session.cluster.health(index=self.merger.es_index, wait_for_status="yellow") def insert_ca(self, docs): self.merger.ca_batch_insert_with_ts(docs) def insert_es(self, docs): self.merger.es_bulk_insert_versioned(docs) # flush index after bulk insert self.merger.es_session.indices.flush(index=self.merger.es_index) def select_ca(self, ids): cursor = self.merger.ca_session.execute(self.ca_ps_select_ids, (ids,)) return list(cursor) def select_es(self, ids): # flush index before search self.merger.es_session.indices.flush(index=self.merger.es_index) cursor = es_scan(self.merger.es_session, 
index=self.merger.es_index, doc_type=self.merger.es_type, query=self.make_es_query_ids_in(ids)) return [hit["_source"] for hit in cursor] def assertDocsEqual(self, docs1, docs2, msg=None): """ Special function to compare documents OR a collection of documents (in any order). This function also handle type comparisons like UUIDs coming as string from Es and as uuid.UUID from C* """ if isinstance(docs1, dict): docs1 = (docs1,) docs2 = (docs2,) # FIXME: so ugly that it hurts my eyes :x if docs1 and isinstance(docs1[0]["id"], uuid.UUID): docs1 = [dict(list(d.items()) + [("id", str(d["id"]))]) for d in docs1] if docs2 and isinstance(docs2[0]["id"], uuid.UUID): docs2 = [dict(list(d.items()) + [("id", str(d["id"]))]) for d in docs2] docs1 = sorted(docs1, key=lambda d: d["id"]) docs2 = sorted(docs2, key=lambda d: d["id"]) # self.assertEqual(docs1, docs2, msg) # too slow :/ self.assertTrue(docs1 == docs1, msg) class TestCaseCaToEs(BaseTestCase): def test_one(self): doc1 = self.generate_doc() self.insert_ca([doc1]) self.merger.run_once() doc2 = self.select_es([doc1["id"]])[0] self.assertDocsEqual(doc1, doc2) def test_many(self): docs1 = [self.generate_doc() for _ in range(1000)] docs1_ids = [d["id"] for d in docs1] self.insert_ca(docs1) self.merger.run_once() docs2 = self.select_es(docs1_ids) self.assertDocsEqual(docs1, docs2) class TestCaseEsToCa(BaseTestCase): def test_one(self): doc1 = self.generate_doc() self.insert_es([doc1]) self.merger.run_once() doc2 = self.select_ca([doc1["id"]])[0] self.assertDocsEqual(doc1, doc2) def test_many(self): docs1 = [self.generate_doc() for _ in range(1000)] docs1_ids = [d["id"] for d in docs1] self.insert_es(docs1) self.merger.run_once() docs2 = self.select_ca(docs1_ids) self.assertDocsEqual(docs1, docs2) class TestCaseBoth(BaseTestCase): def test_many(self): docs1 = [self.generate_doc() for _ in range(1000)] docs2 = [self.generate_doc() for _ in range(1000)] docs1_ids = [d["id"] for d in docs1] docs2_ids = [d["id"] for d in docs2] 
docs_ids = docs1_ids + docs2_ids self.insert_es(docs1) self.insert_ca(docs2) self.merger.run_once() docs_ca = self.select_ca(docs_ids) docs_es = self.select_ca(docs_ids) self.assertDocsEqual(docs_ca, docs_es) self.assertDocsEqual(docs_ca, docs1 + docs2) import logging logging.basicConfig(level=logging.INFO)
unknown
codeparrot/codeparrot-clean
#!/usr/bin/env python # -*- encoding: utf-8 -*- """ Desc: 如何将原有的《Python Cookbook》3rd edition.pdf文件转换为我自己的cookbook翻译项目格式 1. 首先使用在线PDF文件切割截取出自己想要的pdf文件部分:http://smallpdf.com/split-pdf 2. 安装PDFMiner依赖,然后使用:pdf2txt.py -o pc.txt /home/mango/work/perfect.pdf生成的txt文件 3. 把生成的txt文件放到idea中,去除某些没用的符号,比如'口'字符,全局replace 4. 调用beauty2()函数,去除了页头和页脚的部分 5. 调用convert_cookbook()函数将txt文件转换为cookbook项目所需的格式 """ import re import os from os.path import join import logging logging.basicConfig(level=logging.INFO, format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s', datefmt='%Y-%m-%d %H:%M:%S', handlers=[logging.FileHandler('d:/logs/cookbook.log', 'w', 'utf-8')]) _log = logging.getLogger('app.' + __name__) def beauty(txt_file): with open(txt_file, mode='r+', encoding='utf-8') as f: lines = f.readlines() f.seek(0) for line in lines: if line.startswith('www.it-ebooks.info'): f.seek(f.tell() - 1, 1) if f.readline().startswith('Chapter '): # 回退7位 f.seek(f.tell() - 7, 1) else: f.seek(f.tell() - 5, 1) else: f.write(line) f.truncate() def beauty2(pre_txt, after_txt): with open(pre_txt, mode='r', encoding='utf-8') as f: lines = f.readlines() result_lines = [] for i, line in enumerate(lines): if line.startswith('www.it-ebooks.info'): if result_lines[len(result_lines) - 4].startswith('| '): # 删除7 for k in range(7): result_lines.pop() else: check_str = result_lines[len(result_lines) - 2].strip() if re.match('\d{3}', check_str): # 删除3行 for k in range(3): result_lines.pop() else: result_lines.append(line) # 结果写入 with open(after_txt, mode='w', encoding='utf-8') as f: f.writelines(result_lines) def convert_cookbook(txt_file, base_dir): """演示一下seek方法""" chapter = None # 章 paper = None # 节 write_file = None # 接下来要写入的文件 temp_lines = [] # 临时存放章或节内容 hit_paper = False # 是否命中小节标志 hit_offset = 0 # 命中后行距 with open(txt_file, mode='r', encoding='utf-8') as f: for line in f: c_match = re.match('^CHAPTER (\d+)$', line.strip()) p_match = re.match('^(\d+)\.(\d+)\. 
', line.strip()) a_match = re.match('^APPENDIX A$', line.strip()) if c_match: old_chapter = chapter chapter = int(c_match.group(1)) if old_chapter and chapter - old_chapter != 1: _log.error('章节不连续啊: {}'.format(line.strip())) continue # 开始新的一章了 _log.info('------------------------------------------------------') _log.info('---------开始新的一章了,第{}章!-----------'.format(chapter)) # 前面的给写入文件中 if temp_lines: _log.info('write_file={}'.format(write_file)) with open(write_file, mode='r', encoding='utf-8') as wf: for i in range(7): temp_lines.insert(i, wf.readline()) with open(write_file, mode='w', encoding='utf-8') as wf: wf.writelines(temp_lines) temp_lines.clear() # 首先创建一个章节源码目录 c_dir = join(base_dir, 'cookbook', 'c{:02d}'.format(chapter)) if not os.path.exists(c_dir): os.makedirs(c_dir) # 找到章节文件 chapters_dir = join(base_dir, 'source', 'chapters') onlyfiles = [f for f in os.listdir(chapters_dir) if os.path.isfile(join(chapters_dir, f))] write_file = next(join(chapters_dir, f) for f in onlyfiles if f.startswith('p{:02d}'.format(chapter))) _log.info('找到章节文件:{}'.format(write_file)) elif p_match: hit_paper = True paper = int(p_match.group(2)) hit_offset = 0 elif hit_paper and hit_offset <= 2: if line.strip() == 'Problem': # 说明是新的一节开始了 _log.info('开始新的一节了,第{}章,第{}节!'.format(chapter, paper)) # 前面的给写入文件中 if temp_lines: if 'chapters' not in write_file: _log.info('write_file={}'.format(write_file)) with open(write_file, mode='r', encoding='utf-8') as wf: for i in range(7): temp_lines.insert(i, wf.readline()) with open(write_file, mode='w', encoding='utf-8') as wf: wf.writelines(temp_lines) temp_lines.clear() # 定义接下来要写入的节文件 paper_dir = join(base_dir, 'source', 'c{:02d}'.format(chapter)) pfs = [f for f in os.listdir(paper_dir) if os.path.isfile(join(paper_dir, f))] write_file = next( join(paper_dir, f) for f in pfs if f.startswith('p{:02d}'.format(paper))) _log.info('下次要写的小节文件:{}'.format(write_file)) # 创建小节源码文件 c_dir = join(base_dir, 'cookbook', 'c{:02d}'.format(chapter)) with 
open(join(c_dir, 'p{:02d}_.py'.format(paper)), 'w', encoding='utf-8') as pfile: pfile.write('#!/usr/bin/env python\n') pfile.write('# -*- encoding: utf-8 -*-\n') pfile.write('"""\n') pfile.write('Topic: \n') pfile.write('Desc : \n') pfile.write('"""\n') hit_paper = False hit_offset += 1 if hit_offset > 2: hit_paper = False elif a_match: # 前面的给写入文件中 if temp_lines: _log.info('write_file={}'.format(write_file)) with open(write_file, mode='r', encoding='utf-8') as wf: for i in range(7): temp_lines.insert(i, wf.readline()) with open(write_file, mode='w', encoding='utf-8') as wf: wf.writelines(temp_lines) temp_lines.clear() elif re.match('^Solution$', line.strip()): temp_lines.append('|\n') temp_lines.append('\n') temp_lines.append('----------\n') temp_lines.append('解决方案\n') temp_lines.append('----------\n') elif re.match('^Discussion$', line.strip()): temp_lines.append('|\n') temp_lines.append('\n') temp_lines.append('----------\n') temp_lines.append('讨论\n') temp_lines.append('----------\n') else: temp_lines.append(line) if __name__ == '__main__': convert_cookbook(r'D:\download\20150430\pc_after.txt' , r'D:\work\projects\gitprojects\python3-cookbook')
unknown
codeparrot/codeparrot-clean
//go:build windows package windows import ( "context" "encoding/json" "errors" "fmt" "net" "github.com/containerd/log" "github.com/moby/moby/v2/daemon/libnetwork/datastore" "github.com/moby/moby/v2/daemon/libnetwork/types" ) const ( windowsPrefix = "windows" windowsEndpointPrefix = "windows-endpoint" ) func (d *driver) initStore() error { err := d.populateNetworks() if err != nil { return err } err = d.populateEndpoints() if err != nil { return err } return nil } func (d *driver) populateNetworks() error { kvol, err := d.store.List(&networkConfiguration{Type: d.name}) if err != nil { if errors.Is(err, datastore.ErrKeyNotFound) { // It's normal for network configuration state to be empty. Just return. return nil } return fmt.Errorf("failed to get windows network configurations from store: %v", err) } for _, kvo := range kvol { nwCfg := kvo.(*networkConfiguration) if nwCfg.Type != d.name { continue } d.createNetwork(nwCfg) log.G(context.TODO()).Debugf("Network %v (%.7s) restored", d.name, nwCfg.ID) } return nil } func (d *driver) populateEndpoints() error { kvol, err := d.store.List(&hnsEndpoint{Type: d.name}) if err != nil { if errors.Is(err, datastore.ErrKeyNotFound) { return nil } return fmt.Errorf("failed to get endpoints from store: %v", err) } for _, kvo := range kvol { ep := kvo.(*hnsEndpoint) if ep.Type != d.name { continue } n, ok := d.networks[ep.nid] if !ok { log.G(context.TODO()).Debugf("Network (%.7s) not found for restored endpoint (%.7s)", ep.nid, ep.id) log.G(context.TODO()).Debugf("Deleting stale endpoint (%.7s) from store", ep.id) if err := d.storeDelete(ep); err != nil { log.G(context.TODO()).Debugf("Failed to delete stale endpoint (%.7s) from store", ep.id) } continue } n.endpoints[ep.id] = ep log.G(context.TODO()).Debugf("Endpoint (%.7s) restored to network (%.7s)", ep.id, ep.nid) } return nil } func (d *driver) storeUpdate(kvObject datastore.KVObject) error { if d.store == nil { log.G(context.TODO()).Warnf("store not initialized. 
kv object %s is not added to the store", datastore.Key(kvObject.Key()...)) return nil } if err := d.store.PutObjectAtomic(kvObject); err != nil { return fmt.Errorf("failed to update store for object type %T: %v", kvObject, err) } return nil } func (d *driver) storeDelete(kvObject datastore.KVObject) error { if d.store == nil { log.G(context.TODO()).Debugf("store not initialized. kv object %s is not deleted from store", datastore.Key(kvObject.Key()...)) return nil } return d.store.DeleteObject(kvObject) } func (ncfg *networkConfiguration) MarshalJSON() ([]byte, error) { nMap := make(map[string]any) nMap["ID"] = ncfg.ID nMap["Type"] = ncfg.Type nMap["Name"] = ncfg.Name nMap["HnsID"] = ncfg.HnsID nMap["VLAN"] = ncfg.VLAN nMap["VSID"] = ncfg.VSID nMap["DNSServers"] = ncfg.DNSServers nMap["DNSSuffix"] = ncfg.DNSSuffix nMap["SourceMac"] = ncfg.SourceMac nMap["NetworkAdapterName"] = ncfg.NetworkAdapterName return json.Marshal(nMap) } func (ncfg *networkConfiguration) UnmarshalJSON(b []byte) error { var ( err error nMap map[string]any ) if err = json.Unmarshal(b, &nMap); err != nil { return err } ncfg.ID = nMap["ID"].(string) ncfg.Type = nMap["Type"].(string) ncfg.Name = nMap["Name"].(string) ncfg.HnsID = nMap["HnsID"].(string) ncfg.VLAN = uint(nMap["VLAN"].(float64)) ncfg.VSID = uint(nMap["VSID"].(float64)) ncfg.DNSServers = nMap["DNSServers"].(string) ncfg.DNSSuffix = nMap["DNSSuffix"].(string) ncfg.SourceMac = nMap["SourceMac"].(string) ncfg.NetworkAdapterName = nMap["NetworkAdapterName"].(string) return nil } func (ncfg *networkConfiguration) Key() []string { return []string{windowsPrefix + ncfg.Type, ncfg.ID} } func (ncfg *networkConfiguration) KeyPrefix() []string { return []string{windowsPrefix + ncfg.Type} } func (ncfg *networkConfiguration) Value() []byte { b, err := json.Marshal(ncfg) if err != nil { return nil } return b } func (ncfg *networkConfiguration) SetValue(value []byte) error { return json.Unmarshal(value, ncfg) } func (ncfg *networkConfiguration) 
Index() uint64 { return ncfg.dbIndex } func (ncfg *networkConfiguration) SetIndex(index uint64) { ncfg.dbIndex = index ncfg.dbExists = true } func (ncfg *networkConfiguration) Exists() bool { return ncfg.dbExists } func (ncfg *networkConfiguration) Skip() bool { return false } func (ncfg *networkConfiguration) New() datastore.KVObject { return &networkConfiguration{Type: ncfg.Type} } func (ncfg *networkConfiguration) CopyTo(o datastore.KVObject) error { dstNcfg := o.(*networkConfiguration) *dstNcfg = *ncfg return nil } func (ep *hnsEndpoint) MarshalJSON() ([]byte, error) { epMap := make(map[string]any) epMap["id"] = ep.id epMap["nid"] = ep.nid epMap["Type"] = ep.Type epMap["profileID"] = ep.profileID epMap["MacAddress"] = ep.macAddress.String() if ep.addr.IP != nil { epMap["Addr"] = ep.addr.String() } if ep.gateway != nil { epMap["gateway"] = ep.gateway.String() } epMap["epOption"] = ep.epOption epMap["epConnectivity"] = ep.epConnectivity epMap["PortMapping"] = ep.portMapping return json.Marshal(epMap) } func (ep *hnsEndpoint) UnmarshalJSON(b []byte) error { var ( err error epMap map[string]any ) if err = json.Unmarshal(b, &epMap); err != nil { return fmt.Errorf("Failed to unmarshal to endpoint: %v", err) } if v, ok := epMap["MacAddress"]; ok { if ep.macAddress, err = net.ParseMAC(v.(string)); err != nil { return types.InternalErrorf("failed to decode endpoint MAC address (%s) after json unmarshal: %v", v.(string), err) } } if v, ok := epMap["Addr"]; ok { if ep.addr, err = types.ParseCIDR(v.(string)); err != nil { log.G(context.TODO()).Warnf("failed to decode endpoint IPv4 address (%s) after json unmarshal: %v", v.(string), err) } } if v, ok := epMap["gateway"]; ok { ep.gateway = net.ParseIP(v.(string)) } ep.id = epMap["id"].(string) ep.Type = epMap["Type"].(string) ep.nid = epMap["nid"].(string) ep.profileID = epMap["profileID"].(string) d, _ := json.Marshal(epMap["epOption"]) if err := json.Unmarshal(d, &ep.epOption); err != nil { 
log.G(context.TODO()).Warnf("Failed to decode endpoint container config %v", err) } d, _ = json.Marshal(epMap["epConnectivity"]) if err := json.Unmarshal(d, &ep.epConnectivity); err != nil { log.G(context.TODO()).Warnf("Failed to decode endpoint external connectivity configuration %v", err) } d, _ = json.Marshal(epMap["PortMapping"]) if err := json.Unmarshal(d, &ep.portMapping); err != nil { log.G(context.TODO()).Warnf("Failed to decode endpoint port mapping %v", err) } return nil } func (ep *hnsEndpoint) Key() []string { return []string{windowsEndpointPrefix + ep.Type, ep.id} } func (ep *hnsEndpoint) KeyPrefix() []string { return []string{windowsEndpointPrefix + ep.Type} } func (ep *hnsEndpoint) Value() []byte { b, err := json.Marshal(ep) if err != nil { return nil } return b } func (ep *hnsEndpoint) SetValue(value []byte) error { return json.Unmarshal(value, ep) } func (ep *hnsEndpoint) Index() uint64 { return ep.dbIndex } func (ep *hnsEndpoint) SetIndex(index uint64) { ep.dbIndex = index ep.dbExists = true } func (ep *hnsEndpoint) Exists() bool { return ep.dbExists } func (ep *hnsEndpoint) Skip() bool { return false } func (ep *hnsEndpoint) New() datastore.KVObject { return &hnsEndpoint{Type: ep.Type} } func (ep *hnsEndpoint) CopyTo(o datastore.KVObject) error { dstEp := o.(*hnsEndpoint) *dstEp = *ep return nil }
go
github
https://github.com/moby/moby
daemon/libnetwork/drivers/windows/windows_store.go
/* @file remove_transaction_safe @Copyright Barrett Adair 2015-2017 Distributed under the Boost Software License, Version 1.0. (See accompanying file LICENSE.md or copy at http://boost.org/LICENSE_1_0.txt) */ #ifndef BOOST_CLBL_TRTS_REMOVE_TRANSACTION_SAFE_HPP #define BOOST_CLBL_TRTS_REMOVE_TRANSACTION_SAFE_HPP #include <boost/callable_traits/detail/core.hpp> namespace boost { namespace callable_traits { BOOST_CLBL_TRTS_DEFINE_SFINAE_ERROR_ORIGIN(remove_transaction_safe) BOOST_CLBL_TRTS_SFINAE_MSG(remove_transaction_safe, cannot_remove_transaction_safe_from_this_type) //[ remove_transaction_safe_hpp /*` [section:ref_remove_transaction_safe remove_transaction_safe] [heading Header] ``#include <boost/callable_traits/remove_transaction_safe.hpp>`` [heading Definition] */ template<typename T> using remove_transaction_safe_t = //see below //<- detail::try_but_fail_if_invalid< typename detail::traits<T>::remove_transaction_safe, cannot_remove_transaction_safe_from_this_type>; namespace detail { template<typename T, typename = std::false_type> struct remove_transaction_safe_impl {}; template<typename T> struct remove_transaction_safe_impl <T, typename std::is_same< remove_transaction_safe_t<T>, detail::dummy>::type> { using type = remove_transaction_safe_t<T>; }; } //-> template<typename T> struct remove_transaction_safe : detail::remove_transaction_safe_impl<T> {}; //<- }} // namespace boost::callable_traits //-> /*` [heading Constraints] * `T` must be one of the following: * function type * function pointer type * function reference type * member function pointer type * If `T` is a pointer, it may not be cv/ref qualified [heading Behavior] * A substitution failure occurs if the constraints are violated. * Removes the member `transaction_safe` specifier from `T`, if present. 
[heading Input/Output Examples] [table [[`T`] [`remove_transaction_safe_t<T>`]] [[`int() const transaction_safe`] [`int() const`]] [[`int(*)() transaction_safe`] [`int(*)()`]] [[`int(&)() transaction_safe`] [`int(&)()`]] [[`int(foo::*)() transaction_safe`] [`int(foo::*)()`]] [[`int() const`] [`int() const`]] [[`int(*)()`] [`int(*)()`]] [[`int(&)()`] [`int(&)()`]] [[`int`] [(substitution failure)]] [[`int foo::*`] [(substitution failure)]] [[`int (foo::* const)()`] [(substitution failure)]] ] [heading Example Program] [import ../example/remove_transaction_safe.cpp] [remove_transaction_safe] [endsect] */ //] #endif // #ifndef BOOST_CLBL_TRTS_REMOVE_TRANSACTION_SAFE_HPP
unknown
github
https://github.com/mysql/mysql-server
extra/boost/boost_1_87_0/boost/callable_traits/remove_transaction_safe.hpp
//===--- TestTU.h - Scratch source files for testing -------------*- C++-*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // Many tests for indexing, code completion etc are most naturally expressed // using code examples. // TestTU lets test define these examples in a common way without dealing with // the mechanics of VFS and compiler interactions, and then easily grab the // AST, particular symbols, etc. // //===---------------------------------------------------------------------===// #ifndef LLVM_CLANG_TOOLS_EXTRA_CLANGD_UNITTESTS_TESTTU_H #define LLVM_CLANG_TOOLS_EXTRA_CLANGD_UNITTESTS_TESTTU_H #include "../TidyProvider.h" #include "Compiler.h" #include "FeatureModule.h" #include "ParsedAST.h" #include "TestFS.h" #include "index/Index.h" #include "llvm/ADT/StringMap.h" #include <memory> #include <string> #include <utility> #include <vector> namespace clang { namespace clangd { struct TestTU { static TestTU withCode(llvm::StringRef Code) { TestTU TU; TU.Code = std::string(Code); return TU; } static TestTU withHeaderCode(llvm::StringRef HeaderCode) { TestTU TU; TU.HeaderCode = std::string(HeaderCode); return TU; } // The code to be compiled. std::string Code; std::string Filename = "TestTU.cpp"; // Define contents of a header which will be implicitly included by Code. std::string HeaderCode; std::string HeaderFilename = "TestTU.h"; // Name and contents of each file. llvm::StringMap<std::string> AdditionalFiles; // Extra arguments for the compiler invocation. std::vector<std::string> ExtraArgs; // Predefine macros such as __UINTPTR_TYPE__. bool PredefineMacros = false; TidyProvider ClangTidyProvider = {}; // Index to use when building AST. 
const SymbolIndex *ExternalIndex = nullptr; // Simulate a header guard of the header (using an #import directive). bool ImplicitHeaderGuard = true; // Parse options pass on to the ParseInputs ParseOptions ParseOpts = {}; // Whether to use overlay the TestFS over the real filesystem. This is // required for use of implicit modules.where the module file is written to // disk and later read back. // FIXME: Change the way reading/writing modules work to allow us to keep them // in memory across multiple clang invocations, at least in tests, to // eliminate the need for real file system here. // Please avoid using this for things other than implicit modules. The plan is // to eliminate this option some day. bool OverlayRealFileSystemForModules = false; FeatureModuleSet *FeatureModules = nullptr; // By default, build() will report Error diagnostics as GTest errors. // Suppress this behavior by adding an 'error-ok' comment to the code. // The result will always have getDiagnostics() populated. ParsedAST build() const; std::shared_ptr<const PreambleData> preamble(PreambleParsedCallback PreambleCallback = nullptr) const; ParseInputs inputs(MockFS &FS) const; SymbolSlab headerSymbols() const; RefSlab headerRefs() const; std::unique_ptr<SymbolIndex> index() const; }; // Look up an index symbol by qualified name, which must be unique. const Symbol &findSymbol(const SymbolSlab &, llvm::StringRef QName); // Look up an AST symbol by qualified name, which must be unique and top-level. const NamedDecl &findDecl(ParsedAST &AST, llvm::StringRef QName); // Look up an AST symbol that satisfies \p Filter. const NamedDecl &findDecl(ParsedAST &AST, std::function<bool(const NamedDecl &)> Filter); // Look up an AST symbol by unqualified name, which must be unique. const NamedDecl &findUnqualifiedDecl(ParsedAST &AST, llvm::StringRef Name); } // namespace clangd } // namespace clang #endif // LLVM_CLANG_TOOLS_EXTRA_CLANGD_UNITTESTS_TESTTU_H
c
github
https://github.com/llvm/llvm-project
clang-tools-extra/clangd/unittests/TestTU.h
// Code generated by "enumer -type=RolePathPolicy -text -json -transform=kebab-case"; DO NOT EDIT. package pki_backend import ( "encoding/json" "fmt" ) const _RolePathPolicyName = "RPPUnknownRPPSignVerbatimRPPRole" var _RolePathPolicyIndex = [...]uint8{0, 10, 25, 32} func (i RolePathPolicy) String() string { if i < 0 || i >= RolePathPolicy(len(_RolePathPolicyIndex)-1) { return fmt.Sprintf("RolePathPolicy(%d)", i) } return _RolePathPolicyName[_RolePathPolicyIndex[i]:_RolePathPolicyIndex[i+1]] } var _RolePathPolicyValues = []RolePathPolicy{0, 1, 2} var _RolePathPolicyNameToValueMap = map[string]RolePathPolicy{ _RolePathPolicyName[0:10]: 0, _RolePathPolicyName[10:25]: 1, _RolePathPolicyName[25:32]: 2, } // RolePathPolicyString retrieves an enum value from the enum constants string name. // Throws an error if the param is not part of the enum. func RolePathPolicyString(s string) (RolePathPolicy, error) { if val, ok := _RolePathPolicyNameToValueMap[s]; ok { return val, nil } return 0, fmt.Errorf("%s does not belong to RolePathPolicy values", s) } // RolePathPolicyValues returns all values of the enum func RolePathPolicyValues() []RolePathPolicy { return _RolePathPolicyValues } // IsARolePathPolicy returns "true" if the value is listed in the enum definition. 
"false" otherwise func (i RolePathPolicy) IsARolePathPolicy() bool { for _, v := range _RolePathPolicyValues { if i == v { return true } } return false } // MarshalJSON implements the json.Marshaler interface for RolePathPolicy func (i RolePathPolicy) MarshalJSON() ([]byte, error) { return json.Marshal(i.String()) } // UnmarshalJSON implements the json.Unmarshaler interface for RolePathPolicy func (i *RolePathPolicy) UnmarshalJSON(data []byte) error { var s string if err := json.Unmarshal(data, &s); err != nil { return fmt.Errorf("RolePathPolicy should be a string, got %s", data) } var err error *i, err = RolePathPolicyString(s) return err } // MarshalText implements the encoding.TextMarshaler interface for RolePathPolicy func (i RolePathPolicy) MarshalText() ([]byte, error) { return []byte(i.String()), nil } // UnmarshalText implements the encoding.TextUnmarshaler interface for RolePathPolicy func (i *RolePathPolicy) UnmarshalText(text []byte) error { var err error *i, err = RolePathPolicyString(string(text)) return err }
go
github
https://github.com/hashicorp/vault
builtin/logical/pki/pki_backend/rolepathpolicy_enumer.go
# -*- coding: utf-8 -*- from __future__ import with_statement import itertools from django.conf import settings from django.contrib import admin from django.contrib.auth import get_user_model, get_permission_codename from django.contrib.auth.models import Permission from django.contrib.messages.storage import default_storage from django.core.cache import cache from django.core.exceptions import ImproperlyConfigured from django.db import models from django.http import HttpResponseForbidden, HttpResponse from django.template import TemplateSyntaxError, Template from django.template.context import Context, RequestContext from django.template.loader import get_template from django.test import TestCase, RequestFactory from django.test.utils import override_settings from django.utils.encoding import force_text from django.utils.numberformat import format from djangocms_link.cms_plugins import LinkPlugin from djangocms_text_ckeditor.cms_plugins import TextPlugin from djangocms_text_ckeditor.models import Text from sekizai.context import SekizaiContext from cms import constants from cms.api import add_plugin, create_page, create_title from cms.exceptions import DuplicatePlaceholderWarning from cms.models.fields import PlaceholderField from cms.models.placeholdermodel import Placeholder from cms.plugin_pool import plugin_pool from cms.plugin_rendering import render_placeholder from cms.test_utils.fixtures.fakemlng import FakemlngFixtures from cms.test_utils.project.fakemlng.models import Translations from cms.test_utils.project.objectpermissionsapp.models import UserObjectPermission from cms.test_utils.project.placeholderapp.models import ( DynamicPlaceholderSlotExample, Example1, MultilingualExample1, TwoPlaceholderExample, ) from cms.test_utils.project.sampleapp.models import Category from cms.test_utils.testcases import CMSTestCase from cms.test_utils.util.context_managers import UserLoginContext from cms.test_utils.util.mock import AttributeObject from 
cms.toolbar.toolbar import CMSToolbar from cms.utils.compat.tests import UnittestCompatMixin from cms.utils.conf import get_cms_setting from cms.utils.placeholder import (PlaceholderNoAction, MLNGPlaceholderActions, get_placeholder_conf, get_placeholders, _get_nodelist, _scan_placeholders) from cms.utils.plugins import assign_plugins from cms.utils.urlutils import admin_reverse class PlaceholderTestCase(CMSTestCase, UnittestCompatMixin): def setUp(self): u = self._create_user("test", True, True) self._login_context = self.login_user_context(u) self._login_context.__enter__() def tearDown(self): self._login_context.__exit__(None, None, None) def test_placeholder_scanning_extend(self): placeholders = get_placeholders('placeholder_tests/test_one.html') self.assertEqual(sorted(placeholders), sorted([u'new_one', u'two', u'three'])) def test_placeholder_scanning_sekizai_extend(self): placeholders = get_placeholders('placeholder_tests/test_one_sekizai.html') self.assertEqual(sorted(placeholders), sorted([u'new_one', u'two', u'three'])) def test_placeholder_scanning_include(self): placeholders = get_placeholders('placeholder_tests/test_two.html') self.assertEqual(sorted(placeholders), sorted([u'child', u'three'])) def test_placeholder_scanning_double_extend(self): placeholders = get_placeholders('placeholder_tests/test_three.html') self.assertEqual(sorted(placeholders), sorted([u'new_one', u'two', u'new_three'])) def test_placeholder_scanning_sekizai_double_extend(self): placeholders = get_placeholders('placeholder_tests/test_three_sekizai.html') self.assertEqual(sorted(placeholders), sorted([u'new_one', u'two', u'new_three'])) def test_placeholder_scanning_complex(self): placeholders = get_placeholders('placeholder_tests/test_four.html') self.assertEqual(sorted(placeholders), sorted([u'new_one', u'child', u'four'])) def test_placeholder_scanning_super(self): placeholders = get_placeholders('placeholder_tests/test_five.html') self.assertEqual(sorted(placeholders), 
sorted([u'one', u'extra_one', u'two', u'three'])) def test_placeholder_scanning_nested(self): placeholders = get_placeholders('placeholder_tests/test_six.html') self.assertEqual(sorted(placeholders), sorted([u'new_one', u'new_two', u'new_three'])) def test_placeholder_scanning_duplicate(self): placeholders = self.assertWarns(DuplicatePlaceholderWarning, 'Duplicate {% placeholder "one" %} in template placeholder_tests/test_seven.html.', get_placeholders, 'placeholder_tests/test_seven.html') self.assertEqual(sorted(placeholders), sorted([u'one'])) def test_placeholder_scanning_extend_outside_block(self): placeholders = get_placeholders('placeholder_tests/outside.html') self.assertEqual(sorted(placeholders), sorted([u'new_one', u'two', u'base_outside'])) def test_placeholder_scanning_sekizai_extend_outside_block(self): placeholders = get_placeholders('placeholder_tests/outside_sekizai.html') self.assertEqual(sorted(placeholders), sorted([u'new_one', u'two', u'base_outside'])) def test_placeholder_scanning_extend_outside_block_nested(self): placeholders = get_placeholders('placeholder_tests/outside_nested.html') self.assertEqual(sorted(placeholders), sorted([u'new_one', u'two', u'base_outside'])) def test_placeholder_scanning_sekizai_extend_outside_block_nested(self): placeholders = get_placeholders('placeholder_tests/outside_nested_sekizai.html') self.assertEqual(sorted(placeholders), sorted([u'new_one', u'two', u'base_outside'])) def test_placeholder_scanning_var(self): t = Template('{%load cms_tags %}{% include name %}{% placeholder "a_placeholder" %}') phs = _scan_placeholders(t.nodelist) self.assertListEqual(sorted(phs), sorted([u'a_placeholder'])) t = Template('{% include "placeholder_tests/outside_nested_sekizai.html" %}') phs = _scan_placeholders(t.nodelist) self.assertListEqual(sorted(phs), sorted([u'two', u'new_one', u'base_outside'])) def test_fieldsets_requests(self): response = self.client.get(admin_reverse('placeholderapp_example1_add')) 
self.assertEqual(response.status_code, 200) response = self.client.get(admin_reverse('placeholderapp_twoplaceholderexample_add')) self.assertEqual(response.status_code, 200) def test_page_only_plugins(self): ex = Example1( char_1='one', char_2='two', char_3='tree', char_4='four' ) ex.save() response = self.client.get(admin_reverse('placeholderapp_example1_change', args=(ex.pk,))) self.assertEqual(response.status_code, 200) self.assertNotContains(response, 'InheritPagePlaceholderPlugin') def test_inter_placeholder_plugin_move(self): ex = TwoPlaceholderExample( char_1='one', char_2='two', char_3='tree', char_4='four' ) ex.save() ph1 = ex.placeholder_1 ph2 = ex.placeholder_2 ph1_pl1 = add_plugin(ph1, TextPlugin, 'en', body='ph1 plugin1').cmsplugin_ptr ph1_pl2 = add_plugin(ph1, TextPlugin, 'en', body='ph1 plugin2').cmsplugin_ptr ph1_pl3 = add_plugin(ph1, TextPlugin, 'en', body='ph1 plugin3').cmsplugin_ptr ph2_pl1 = add_plugin(ph2, TextPlugin, 'en', body='ph2 plugin1').cmsplugin_ptr ph2_pl2 = add_plugin(ph2, TextPlugin, 'en', body='ph2 plugin2').cmsplugin_ptr ph2_pl3 = add_plugin(ph2, TextPlugin, 'en', body='ph2 plugin3').cmsplugin_ptr response = self.client.post(admin_reverse('placeholderapp_twoplaceholderexample_move_plugin'), { 'placeholder_id': str(ph2.pk), 'plugin_id': str(ph1_pl2.pk), 'plugin_order[]': [str(p.pk) for p in [ph2_pl3, ph2_pl1, ph2_pl2, ph1_pl2]] }) self.assertEqual(response.status_code, 200) self.assertEqual([ph1_pl1, ph1_pl3], list(ph1.cmsplugin_set.order_by('position'))) self.assertEqual([ph2_pl3, ph2_pl1, ph2_pl2, ph1_pl2, ], list(ph2.cmsplugin_set.order_by('position'))) @override_settings(CMS_PERMISSION=False) def test_nested_plugin_escapejs(self): """ Checks #1366 error condition. 
When adding/editing a plugin whose icon_src() method returns a URL containing an hyphen, the hyphen is escaped by django escapejs resulting in a incorrect URL """ ex = Example1( char_1='one', char_2='two', char_3='tree', char_4='four' ) ex.save() ph1 = ex.placeholder ### # add the test plugin ### test_plugin = add_plugin(ph1, u"EmptyPlugin", u"en") test_plugin.save() pl_url = "%sedit-plugin/%s/" % ( admin_reverse('placeholderapp_example1_change', args=(ex.pk,)), test_plugin.pk) response = self.client.post(pl_url, {}) self.assertContains(response, "CMS.API.Helpers.reloadBrowser") @override_settings(CMS_PERMISSION=False) def test_nested_plugin_escapejs_page(self): """ Sibling test of the above, on a page. #1366 does not apply to placeholder defined in a page """ page = create_page('page', 'col_two.html', 'en') ph1 = page.placeholders.get(slot='col_left') ### # add the test plugin ### test_plugin = add_plugin(ph1, u"EmptyPlugin", u"en") test_plugin.save() pl_url = "%sedit-plugin/%s/" % ( admin_reverse('cms_page_change', args=(page.pk,)), test_plugin.pk) response = self.client.post(pl_url, {}) self.assertContains(response, "CMS.API.Helpers.reloadBrowser") def test_placeholder_scanning_fail(self): self.assertRaises(TemplateSyntaxError, get_placeholders, 'placeholder_tests/test_eleven.html') def test_placeholder_tag(self): template = Template("{% load cms_tags %}{% render_placeholder placeholder %}") ctx = Context() self.assertEqual(template.render(ctx), "") request = self.get_request('/', language=settings.LANGUAGES[0][0]) rctx = RequestContext(request) self.assertEqual(template.render(rctx), "") placeholder = Placeholder.objects.create(slot="test") rctx['placeholder'] = placeholder self.assertEqual(template.render(rctx), "") self.assertEqual(placeholder.cmsplugin_set.count(), 0) add_plugin(placeholder, "TextPlugin", settings.LANGUAGES[0][0], body="test") self.assertEqual(placeholder.cmsplugin_set.count(), 1) rctx = RequestContext(request) placeholder = 
self.reload(placeholder) rctx.update({'placeholder': placeholder}) self.assertEqual(template.render(rctx).strip(), "test") def test_placeholder_tag_language(self): template = Template("{% load cms_tags %}{% render_placeholder placeholder language language %}") placeholder = Placeholder.objects.create(slot="test") add_plugin(placeholder, "TextPlugin", 'en', body="English") add_plugin(placeholder, "TextPlugin", 'de', body="Deutsch") request = self.get_request('/') rctx = RequestContext(request) rctx.update({ 'placeholder': placeholder, 'language': 'en' }) self.assertEqual(template.render(rctx).strip(), "English") del placeholder._plugins_cache rctx.update({ 'placeholder': placeholder, 'language': 'de' }) self.assertEqual(template.render(rctx).strip(), "Deutsch") def test_get_placeholder_conf(self): TEST_CONF = { 'main': { 'name': 'main content', 'plugins': ['TextPlugin', 'LinkPlugin'], 'default_plugins':[ { 'plugin_type':'TextPlugin', 'values':{ 'body':'<p>Some default text</p>' }, }, ], }, 'layout/home.html main': { 'name': u'main content with FilerImagePlugin and limit', 'plugins': ['TextPlugin', 'FilerImagePlugin', 'LinkPlugin',], 'inherit':'main', 'limits': {'global': 1,}, }, 'layout/other.html main': { 'name': u'main content with FilerImagePlugin and no limit', 'inherit':'layout/home.html main', 'limits': {}, }, } with self.settings(CMS_PLACEHOLDER_CONF=TEST_CONF): #test no inheritance returned = get_placeholder_conf('plugins', 'main') self.assertEqual(returned, TEST_CONF['main']['plugins']) #test no inherited value with inheritance enabled returned = get_placeholder_conf('plugins', 'main', 'layout/home.html') self.assertEqual(returned, TEST_CONF['layout/home.html main']['plugins']) #test direct inherited value returned = get_placeholder_conf('plugins', 'main', 'layout/other.html') self.assertEqual(returned, TEST_CONF['layout/home.html main']['plugins']) #test grandparent inherited value returned = get_placeholder_conf('default_plugins', 'main', 
'layout/other.html') self.assertEqual(returned, TEST_CONF['main']['default_plugins']) def test_placeholder_context_leaking(self): TEST_CONF = {'test': {'extra_context': {'extra_width': 10}}} ph = Placeholder.objects.create(slot='test') context = SekizaiContext() context['request'] = self.get_request() with self.settings(CMS_PLACEHOLDER_CONF=TEST_CONF): render_placeholder(ph, context) self.assertFalse('extra_width' in context) ph.render(context, None) self.assertFalse('extra_width' in context) def test_placeholder_scanning_nested_super(self): placeholders = get_placeholders('placeholder_tests/nested_super_level1.html') self.assertEqual(sorted(placeholders), sorted([u'level1', u'level2', u'level3', u'level4'])) def test_placeholder_field_no_related_name(self): self.assertRaises(ValueError, PlaceholderField, 'placeholder', related_name='+') def test_placeholder_field_db_table(self): """ Test for leaking Django 1.7 Model._meta.db_table monkeypatching on sqlite See #3891 This test for a side-effect of the above which prevents placeholder fields to return the """ example = Category.objects.create( name='category', parent=None, depth=1, ) self.assertEqual(example.description._get_attached_fields()[0].model, Category) self.assertEqual(len(example.description._get_attached_fields()), 1) def test_placeholder_field_valid_slotname(self): self.assertRaises(ImproperlyConfigured, PlaceholderField, 10) def test_placeholder_field_dynamic_slot_generation(self): instance = DynamicPlaceholderSlotExample.objects.create(char_1='slot1', char_2='slot2') self.assertEqual(instance.char_1, instance.placeholder_1.slot) self.assertEqual(instance.char_2, instance.placeholder_2.slot) def test_placeholder_field_dynamic_slot_update(self): instance = DynamicPlaceholderSlotExample.objects.create(char_1='slot1', char_2='slot2') # Plugin counts old_placeholder_1_plugin_count = len(instance.placeholder_1.get_plugins()) old_placeholder_2_plugin_count = len(instance.placeholder_2.get_plugins()) # Switch 
around the slot names instance.char_1, instance.char_2 = instance.char_2, instance.char_1 # Store the ids before save, to test that a new placeholder is NOT created. placeholder_1_id = instance.placeholder_1.pk placeholder_2_id = instance.placeholder_2.pk # Save instance instance.save() current_placeholder_1_plugin_count = len(instance.placeholder_1.get_plugins()) current_placeholder_2_plugin_count = len(instance.placeholder_2.get_plugins()) # Now test that the placeholder slots have changed self.assertEqual(instance.char_2, 'slot1') self.assertEqual(instance.char_1, 'slot2') # Test that a new placeholder was never created self.assertEqual(instance.placeholder_1.pk, placeholder_1_id) self.assertEqual(instance.placeholder_2.pk, placeholder_2_id) # And test the plugin counts remain the same self.assertEqual(old_placeholder_1_plugin_count, current_placeholder_1_plugin_count) self.assertEqual(old_placeholder_2_plugin_count, current_placeholder_2_plugin_count) def test_plugins_language_fallback(self): """ Tests language_fallback placeholder configuration """ page_en = create_page('page_en', 'col_two.html', 'en') title_de = create_title("de", "page_de", page_en) placeholder_en = page_en.placeholders.get(slot='col_left') placeholder_de = title_de.page.placeholders.get(slot='col_left') add_plugin(placeholder_en, TextPlugin, 'en', body='en body') context_en = SekizaiContext() context_en['request'] = self.get_request(language="en", page=page_en) context_de = SekizaiContext() context_de['request'] = self.get_request(language="de", page=page_en) # First test the default (fallback) behavior) ## English page should have the text plugin content_en = render_placeholder(placeholder_en, context_en) self.assertRegexpMatches(content_en, "^en body$") ## Deutsch page have text due to fallback content_de = render_placeholder(placeholder_de, context_de) self.assertRegexpMatches(content_de, "^en body$") self.assertEqual(len(content_de), 7) conf = { 'col_left': { 'language_fallback': False, 
}, } # configure non fallback with self.settings(CMS_PLACEHOLDER_CONF=conf): ## Deutsch page should have no text del(placeholder_de._plugins_cache) cache.clear() content_de = render_placeholder(placeholder_de, context_de) ## Deutsch page should inherit english content self.assertNotRegex(content_de, "^en body$") context_de2 = SekizaiContext() request = self.get_request(language="de", page=page_en) request.user = self.get_superuser() request.toolbar = CMSToolbar(request) request.toolbar.edit_mode = True context_de2['request'] = request del(placeholder_de._plugins_cache) cache.clear() content_de2 = render_placeholder(placeholder_de, context_de2) self.assertFalse("en body" in content_de2) # remove the cached plugins instances del(placeholder_de._plugins_cache) cache.clear() # Then we add a plugin to check for proper rendering add_plugin(placeholder_de, TextPlugin, 'de', body='de body') content_de = render_placeholder(placeholder_de, context_de) self.assertRegexpMatches(content_de, "^de body$") def test_nested_plugins_language_fallback(self): """ Tests language_fallback placeholder configuration for nested plugins""" page_en = create_page('page_en', 'col_two.html', 'en') title_de = create_title("de", "page_de", page_en) placeholder_en = page_en.placeholders.get(slot='col_left') placeholder_de = title_de.page.placeholders.get(slot='col_left') link_en = add_plugin(placeholder_en, LinkPlugin, 'en', name='en name', url='http://example.com/en') add_plugin(placeholder_en, TextPlugin, 'en', target=link_en, body='en body') context_en = SekizaiContext() context_en['request'] = self.get_request(language="en", page=page_en) context_de = SekizaiContext() context_de['request'] = self.get_request(language="de", page=page_en) conf = { 'col_left': { 'language_fallback': True, }, } with self.settings(CMS_PLACEHOLDER_CONF=conf): content_de = render_placeholder(placeholder_de, context_de) self.assertRegexpMatches(content_de, "<a href=\"http://example.com/en\">") 
self.assertRegexpMatches(content_de, "en body") context_de2 = SekizaiContext() request = self.get_request(language="de", page=page_en) request.user = self.get_superuser() request.toolbar = CMSToolbar(request) request.toolbar.edit_mode = True context_de2['request'] = request del(placeholder_de._plugins_cache) cache.clear() content_de2 = render_placeholder(placeholder_de, context_de2) self.assertFalse("en body" in content_de2) # remove the cached plugins instances del(placeholder_de._plugins_cache) cache.clear() # Then we add a plugin to check for proper rendering link_de = add_plugin(placeholder_en, LinkPlugin, 'de', name='de name', url='http://example.com/de') add_plugin(placeholder_en, TextPlugin, 'de', target=link_de, body='de body') content_de = render_placeholder(placeholder_de, context_de) self.assertRegexpMatches(content_de, "<a href=\"http://example.com/de\">") self.assertRegexpMatches(content_de, "de body") def test_plugins_non_default_language_fallback(self): """ Tests language_fallback placeholder configuration """ page_en = create_page('page_en', 'col_two.html', 'en') create_title("de", "page_de", page_en) placeholder_en = page_en.placeholders.get(slot='col_left') placeholder_de = page_en.placeholders.get(slot='col_left') add_plugin(placeholder_de, TextPlugin, 'de', body='de body') context_en = SekizaiContext() context_en['request'] = self.get_request(language="en", page=page_en) context_de = SekizaiContext() context_de['request'] = self.get_request(language="de", page=page_en) # First test the default (fallback) behavior) ## Deutsch page should have the text plugin content_de = render_placeholder(placeholder_en, context_de) self.assertRegexpMatches(content_de, "^de body$") del(placeholder_en._plugins_cache) cache.clear() ## English page should have no text content_en = render_placeholder(placeholder_en, context_en) self.assertRegexpMatches(content_en, "^de body$") self.assertEqual(len(content_en), 7) del(placeholder_en._plugins_cache) cache.clear() conf 
= { 'col_left': { 'language_fallback': False, }, } # configure non-fallback with self.settings(CMS_PLACEHOLDER_CONF=conf): ## English page should have deutsch text content_en = render_placeholder(placeholder_en, context_en) self.assertNotRegex(content_en, "^de body$") # remove the cached plugins instances del(placeholder_en._plugins_cache) cache.clear() # Then we add a plugin to check for proper rendering add_plugin(placeholder_en, TextPlugin, 'en', body='en body') content_en = render_placeholder(placeholder_en, context_en) self.assertRegexpMatches(content_en, "^en body$") def test_plugins_discarded_with_language_fallback(self): """ Tests side effect of language fallback: if fallback enabled placeholder existed, it discards all other existing plugins """ page_en = create_page('page_en', 'col_two.html', 'en') create_title("de", "page_de", page_en) placeholder_sidebar_en = page_en.placeholders.get(slot='col_sidebar') placeholder_en = page_en.placeholders.get(slot='col_left') add_plugin(placeholder_sidebar_en, TextPlugin, 'en', body='en body') context_en = SekizaiContext() context_en['request'] = self.get_request(language="en", page=page_en) conf = { 'col_left': { 'language_fallback': True, }, } with self.settings(CMS_PLACEHOLDER_CONF=conf): # call assign plugins first, as this is what is done in real cms life # for all placeholders in a page at once assign_plugins(context_en['request'], [placeholder_sidebar_en, placeholder_en], 'col_two.html') # if the normal, non fallback enabled placeholder still has content content_en = render_placeholder(placeholder_sidebar_en, context_en) self.assertRegexpMatches(content_en, "^en body$") # remove the cached plugins instances del(placeholder_sidebar_en._plugins_cache) cache.clear() def test_plugins_prepopulate(self): """ Tests prepopulate placeholder configuration """ conf = { 'col_left': { 'default_plugins' : [ { 'plugin_type':'TextPlugin', 'values':{'body':'<p>en default body 1</p>'}, }, { 'plugin_type':'TextPlugin', 
'values':{'body':'<p>en default body 2</p>'}, }, ] }, } with self.settings(CMS_PLACEHOLDER_CONF=conf): page = create_page('page_en', 'col_two.html', 'en') placeholder = page.placeholders.get(slot='col_left') context = SekizaiContext() context['request'] = self.get_request(language="en", page=page) # Our page should have "en default body 1" AND "en default body 2" content = render_placeholder(placeholder, context) self.assertRegexpMatches(content, "^<p>en default body 1</p>\s*<p>en default body 2</p>$") def test_plugins_children_prepopulate(self): """ Validate a default textplugin with a nested default link plugin """ conf = { 'col_left': { 'default_plugins': [ { 'plugin_type': 'TextPlugin', 'values': { 'body': '<p>body %(_tag_child_1)s and %(_tag_child_2)s</p>' }, 'children': [ { 'plugin_type': 'LinkPlugin', 'values': { 'name': 'django', 'url': 'https://www.djangoproject.com/' }, }, { 'plugin_type': 'LinkPlugin', 'values': { 'name': 'django-cms', 'url': 'https://www.django-cms.org' }, }, ] }, ] }, } with self.settings(CMS_PLACEHOLDER_CONF=conf): page = create_page('page_en', 'col_two.html', 'en') placeholder = page.placeholders.get(slot='col_left') context = SekizaiContext() context['request'] = self.get_request(language="en", page=page) render_placeholder(placeholder, context) plugins = placeholder.get_plugins_list() self.assertEqual(len(plugins), 3) self.assertEqual(plugins[0].plugin_type, 'TextPlugin') self.assertEqual(plugins[1].plugin_type, 'LinkPlugin') self.assertEqual(plugins[2].plugin_type, 'LinkPlugin') self.assertTrue(plugins[1].parent == plugins[2].parent and plugins[1].parent == plugins[0]) def test_placeholder_pk_thousands_format(self): page = create_page("page", "nav_playground.html", "en", published=True) for placeholder in page.placeholders.all(): page.placeholders.remove(placeholder) placeholder.pk += 1000 placeholder.save() page.placeholders.add(placeholder) page.reload() for placeholder in page.placeholders.all(): add_plugin(placeholder, 
"TextPlugin", "en", body="body") with self.settings(USE_THOUSAND_SEPARATOR=True, USE_L10N=True): # Superuser user = self.get_superuser() self.client.login(username=getattr(user, get_user_model().USERNAME_FIELD), password=getattr(user, get_user_model().USERNAME_FIELD)) response = self.client.get("/en/?%s" % get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON')) for placeholder in page.placeholders.all(): self.assertContains( response, "'placeholder_id': '%s'" % placeholder.pk) self.assertNotContains( response, "'placeholder_id': '%s'" % format( placeholder.pk, ".", grouping=3, thousand_sep=",")) self.assertNotContains( response, "'plugin_id': '%s'" % format( placeholder.pk, ".", grouping=3, thousand_sep=",")) self.assertNotContains( response, "'clipboard': '%s'" % format( response.context['request'].toolbar.clipboard.pk, ".", grouping=3, thousand_sep=",")) def test_placeholder_languages_model(self): """ Checks the retrieval of filled languages for a placeholder in a django model """ avail_langs = set([u'en', u'de', u'fr']) # Setup instance ex = Example1( char_1='one', char_2='two', char_3='tree', char_4='four' ) ex.save() ### # add the test plugin ### for lang in avail_langs: add_plugin(ex.placeholder, u"EmptyPlugin", lang) # reload instance from database ex = Example1.objects.get(pk=ex.pk) #get languages langs = [lang['code'] for lang in ex.placeholder.get_filled_languages()] self.assertEqual(avail_langs, set(langs)) def test_placeholder_languages_page(self): """ Checks the retrieval of filled languages for a placeholder in a django model """ avail_langs = set([u'en', u'de', u'fr']) # Setup instances page = create_page('test page', 'col_two.html', u'en') for lang in avail_langs: if lang != u'en': create_title(lang, 'test page %s' % lang, page) placeholder = page.placeholders.get(slot='col_sidebar') ### # add the test plugin ### for lang in avail_langs: add_plugin(placeholder, u"EmptyPlugin", lang) # reload placeholder from database placeholder = 
page.placeholders.get(slot='col_sidebar') # get languages langs = [lang['code'] for lang in placeholder.get_filled_languages()] self.assertEqual(avail_langs, set(langs)) @override_settings(TEMPLATE_LOADERS=( ('django.template.loaders.cached.Loader', ( 'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', )),)) def test_cached_template_not_corrupted_by_placeholder_scan(self): """ This is the test for the low-level code that caused the bug: the placeholder scan corrupts the nodelist of the extends node, which is retained by the cached template loader, and future renders of that template will render the super block twice. """ nodelist = _get_nodelist(get_template("placeholder_tests/test_super_extends_2.html")) self.assertNotIn('one', nodelist[0].blocks.keys(), "test_super_extends_1.html contains a block called 'one', " "but _2.html does not.") get_placeholders("placeholder_tests/test_super_extends_2.html") nodelist = _get_nodelist(get_template("placeholder_tests/test_super_extends_2.html")) self.assertNotIn('one', nodelist[0].blocks.keys(), "test_super_extends_1.html still should not contain a block " "called 'one' after rescanning placeholders.") @override_settings(TEMPLATE_LOADERS=( ('django.template.loaders.cached.Loader', ( 'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', )),)) def test_super_extends_not_corrupted_by_placeholder_scan(self): """ This is the test for the symptom of the bug: because the block context now contains two copies of the inherited block, that block will be executed twice, and if it adds content to {{block.super}}, that content will be added twice. 
""" template = get_template("placeholder_tests/test_super_extends_2.html") output = template.render(Context({})) self.assertEqual(['Whee'], [o for o in output.split('\n') if 'Whee' in o]) get_placeholders("placeholder_tests/test_super_extends_2.html") template = get_template("placeholder_tests/test_super_extends_2.html") output = template.render(Context({})) self.assertEqual(['Whee'], [o for o in output.split('\n') if 'Whee' in o]) class PlaceholderActionTests(FakemlngFixtures, CMSTestCase): def test_placeholder_no_action(self): actions = PlaceholderNoAction() self.assertEqual(actions.get_copy_languages(), []) self.assertFalse(actions.copy()) def test_mlng_placeholder_actions_get_copy_languages(self): actions = MLNGPlaceholderActions() fr = Translations.objects.get(language_code='fr') de = Translations.objects.get(language_code='de') en = Translations.objects.get(language_code='en') fieldname = 'placeholder' fr_copy_languages = actions.get_copy_languages( fr.placeholder, Translations, fieldname ) de_copy_languages = actions.get_copy_languages( de.placeholder, Translations, fieldname ) en_copy_languages = actions.get_copy_languages( en.placeholder, Translations, fieldname ) EN = ('en', 'English') FR = ('fr', 'French') self.assertEqual(set(fr_copy_languages), set([EN])) self.assertEqual(set(de_copy_languages), set([EN, FR])) self.assertEqual(set(en_copy_languages), set([FR])) def test_mlng_placeholder_actions_copy(self): actions = MLNGPlaceholderActions() fr = Translations.objects.get(language_code='fr') de = Translations.objects.get(language_code='de') self.assertEqual(fr.placeholder.cmsplugin_set.count(), 1) self.assertEqual(de.placeholder.cmsplugin_set.count(), 0) new_plugins = actions.copy(de.placeholder, 'fr', 'placeholder', Translations, 'de') self.assertEqual(len(new_plugins), 1) de = self.reload(de) fr = self.reload(fr) self.assertEqual(fr.placeholder.cmsplugin_set.count(), 1) self.assertEqual(de.placeholder.cmsplugin_set.count(), 1) def 
test_mlng_placeholder_actions_empty_copy(self): actions = MLNGPlaceholderActions() fr = Translations.objects.get(language_code='fr') de = Translations.objects.get(language_code='de') self.assertEqual(fr.placeholder.cmsplugin_set.count(), 1) self.assertEqual(de.placeholder.cmsplugin_set.count(), 0) new_plugins = actions.copy(fr.placeholder, 'de', 'placeholder', Translations, 'fr') self.assertEqual(len(new_plugins), 0) de = self.reload(de) fr = self.reload(fr) self.assertEqual(fr.placeholder.cmsplugin_set.count(), 1) self.assertEqual(de.placeholder.cmsplugin_set.count(), 0) def test_mlng_placeholder_actions_no_placeholder(self): actions = MLNGPlaceholderActions() Translations.objects.filter(language_code='nl').update(placeholder=None) de = Translations.objects.get(language_code='de') nl = Translations.objects.get(language_code='nl') self.assertEqual(nl.placeholder, None) self.assertEqual(de.placeholder.cmsplugin_set.count(), 0) okay = actions.copy(de.placeholder, 'nl', 'placeholder', Translations, 'de') self.assertEqual(okay, False) de = self.reload(de) nl = self.reload(nl) nl = Translations.objects.get(language_code='nl') de = Translations.objects.get(language_code='de') class PlaceholderModelTests(CMSTestCase): def get_mock_user(self, superuser): return AttributeObject( is_superuser=superuser, has_perm=lambda string: False, ) def get_mock_request(self, superuser=True): return AttributeObject( superuser=superuser, user=self.get_mock_user(superuser) ) def test_check_placeholder_permissions_ok_for_superuser(self): ph = Placeholder.objects.create(slot='test', default_width=300) result = ph.has_change_permission(self.get_mock_request(True)) self.assertTrue(result) def test_check_placeholder_permissions_nok_for_user(self): ph = Placeholder.objects.create(slot='test', default_width=300) result = ph.has_change_permission(self.get_mock_request(False)) self.assertFalse(result) def test_check_unicode_rendering(self): ph = Placeholder.objects.create(slot='test', 
default_width=300) result = force_text(ph) self.assertEqual(result, u'test') def test_request_placeholders_permission_check_model(self): # Setup instance ex = Example1.objects.create( char_1='one', char_2='two', char_3='tree', char_4='four' ) page_en = create_page('page_en', 'col_two.html', 'en') context_en = SekizaiContext() # no user: no placeholders but no error either factory = RequestFactory() context_en['request'] = factory.get(page_en.get_absolute_url()) render_placeholder(ex.placeholder, context_en, use_cache=False) self.assertEqual(len(context_en['request'].placeholders), 0) self.assertNotIn(ex.placeholder, context_en['request'].placeholders) # request.placeholders is populated for superuser context_en['request'] = self.get_request(language="en", page=page_en) context_en['request'].user = self.get_superuser() render_placeholder(ex.placeholder, context_en, use_cache=False) self.assertEqual(len(context_en['request'].placeholders), 1) self.assertIn(ex.placeholder, context_en['request'].placeholders) # request.placeholders is not populated for staff user with no permission user = self.get_staff_user_with_no_permissions() context_en['request'] = self.get_request(language="en", page=page_en) context_en['request'].user = user render_placeholder(ex.placeholder, context_en, use_cache=False) self.assertEqual(len(context_en['request'].placeholders), 0) self.assertNotIn(ex.placeholder, context_en['request'].placeholders) # request.placeholders is populated for staff user with permission on the model user.user_permissions.add(Permission.objects.get(codename=get_permission_codename('change', ex._meta))) context_en['request'] = self.get_request(language="en", page=page_en) context_en['request'].user = get_user_model().objects.get(pk=user.pk) render_placeholder(ex.placeholder, context_en, use_cache=False) self.assertEqual(len(context_en['request'].placeholders), 1) self.assertIn(ex.placeholder, context_en['request'].placeholders) def 
test_request_placeholders_permission_check_page(self): page_en = create_page('page_en', 'col_two.html', 'en') placeholder_en = page_en.placeholders.get(slot='col_left') context_en = SekizaiContext() # request.placeholders is populated for superuser context_en['request'] = self.get_request(language="en", page=page_en) context_en['request'].user = self.get_superuser() render_placeholder(placeholder_en, context_en) self.assertEqual(len(context_en['request'].placeholders), 1) self.assertIn(placeholder_en, context_en['request'].placeholders) # request.placeholders is not populated for staff user with no permission user = self.get_staff_user_with_no_permissions() context_en['request'] = self.get_request(language="en", page=page_en) context_en['request'].user = user render_placeholder(placeholder_en, context_en) self.assertEqual(len(context_en['request'].placeholders), 0) self.assertNotIn(placeholder_en, context_en['request'].placeholders) # request.placeholders is populated for staff user with permission on the model user.user_permissions.add(Permission.objects.get(codename='change_page')) context_en['request'] = self.get_request(language="en", page=page_en) context_en['request'].user = get_user_model().objects.get(pk=user.pk) render_placeholder(placeholder_en, context_en) self.assertEqual(len(context_en['request'].placeholders), 1) self.assertIn(placeholder_en, context_en['request'].placeholders) def test_request_placeholders_permission_check_templatetag(self): """ Tests that {% render_placeholder %} templatetag check for placeholder permission """ page_en = create_page('page_en', 'col_two.html', 'en') ex1 = Example1(char_1="char_1", char_2="char_2", char_3="char_3", char_4="char_4") ex1.save() template = '{% load cms_tags %}{% render_placeholder ex1.placeholder %}' context = {'ex1': ex1} # request.placeholders is populated for superuser request = self.get_request(language="en", page=page_en) request.user = self.get_superuser() self.render_template_obj(template, 
context, request) self.assertEqual(len(request.placeholders), 2) self.assertIn(ex1.placeholder, request.placeholders) # request.placeholders is not populated for staff user with no permission user = self.get_staff_user_with_no_permissions() request = self.get_request(language="en", page=page_en) request.user = user self.render_template_obj(template, context, request) self.assertEqual(len(request.placeholders), 0) self.assertNotIn(ex1.placeholder, request.placeholders) # request.placeholders is populated for staff user with permission on the model user.user_permissions.add(Permission.objects.get(codename='change_example1')) request = self.get_request(language="en", page=page_en) request.user = get_user_model().objects.get(pk=user.pk) self.render_template_obj(template, context, request) self.assertEqual(len(request.placeholders), 2) self.assertIn(ex1.placeholder, request.placeholders) def test_excercise_get_attached_model(self): ph = Placeholder.objects.create(slot='test', default_width=300) result = ph._get_attached_model() self.assertEqual(result, None) # Simple PH - no model def test_excercise_get_attached_field_name(self): ph = Placeholder.objects.create(slot='test', default_width=300) result = ph._get_attached_field_name() self.assertEqual(result, None) # Simple PH - no field name def test_excercise_get_attached_models_notplugins(self): ex = Example1( char_1='one', char_2='two', char_3='tree', char_4='four' ) ex.save() ph = ex.placeholder result = list(ph._get_attached_models()) self.assertEqual(result, [Example1]) # Simple PH - Example1 model add_plugin(ph, TextPlugin, 'en', body='en body') result = list(ph._get_attached_models()) self.assertEqual(result, [Example1]) # Simple PH still one Example1 model def test_excercise_get_attached_fields_notplugins(self): ex = Example1( char_1='one', char_2='two', char_3='tree', char_4='four', ) ex.save() ph = ex.placeholder result = [f.name for f in list(ph._get_attached_fields())] self.assertEqual(result, ['placeholder']) 
# Simple PH - placeholder field name add_plugin(ph, TextPlugin, 'en', body='en body') result = [f.name for f in list(ph._get_attached_fields())] self.assertEqual(result, ['placeholder']) # Simple PH - still one placeholder field name class PlaceholderAdminTestBase(CMSTestCase): def get_placeholder(self): return Placeholder.objects.create(slot='test') def get_admin(self): admin.autodiscover() return admin.site._registry[Example1] def get_post_request(self, data): return self.get_request(post_data=data) class PlaceholderAdminTest(PlaceholderAdminTestBase): placeholderconf = {'test': { 'limits': { 'global': 2, 'TextPlugin': 1, } } } def test_global_limit(self): placeholder = self.get_placeholder() admin_instance = self.get_admin() data = { 'plugin_type': 'LinkPlugin', 'placeholder_id': placeholder.pk, 'plugin_language': 'en', } superuser = self.get_superuser() with UserLoginContext(self, superuser): with self.settings(CMS_PLACEHOLDER_CONF=self.placeholderconf): request = self.get_post_request(data) response = admin_instance.add_plugin(request) # first self.assertEqual(response.status_code, 200) response = admin_instance.add_plugin(request) # second self.assertEqual(response.status_code, 200) response = admin_instance.add_plugin(request) # third self.assertEqual(response.status_code, 400) self.assertEqual(response.content, b"This placeholder already has the maximum number of plugins (2).") def test_type_limit(self): placeholder = self.get_placeholder() admin_instance = self.get_admin() data = { 'plugin_type': 'TextPlugin', 'placeholder_id': placeholder.pk, 'plugin_language': 'en', } superuser = self.get_superuser() with UserLoginContext(self, superuser): with self.settings(CMS_PLACEHOLDER_CONF=self.placeholderconf): request = self.get_post_request(data) response = admin_instance.add_plugin(request) # first self.assertEqual(response.status_code, 200) response = admin_instance.add_plugin(request) # second self.assertEqual(response.status_code, 400) 
self.assertEqual(response.content, b"This placeholder already has the maximum number (1) of allowed Text plugins.") def test_global_limit_on_plugin_move(self): admin_instance = self.get_admin() superuser = self.get_superuser() source_placeholder = Placeholder.objects.create(slot='source') target_placeholder = self.get_placeholder() data = { 'placeholder': source_placeholder, 'plugin_type': 'LinkPlugin', 'language': 'en', } plugin_1 = add_plugin(**data) plugin_2 = add_plugin(**data) plugin_3 = add_plugin(**data) with UserLoginContext(self, superuser): with self.settings(CMS_PLACEHOLDER_CONF=self.placeholderconf): request = self.get_post_request({'placeholder_id': target_placeholder.pk, 'plugin_id': plugin_1.pk}) response = admin_instance.move_plugin(request) # first self.assertEqual(response.status_code, 200) request = self.get_post_request({'placeholder_id': target_placeholder.pk, 'plugin_id': plugin_2.pk}) response = admin_instance.move_plugin(request) # second self.assertEqual(response.status_code, 200) request = self.get_post_request({'placeholder_id': target_placeholder.pk, 'plugin_id': plugin_3.pk}) response = admin_instance.move_plugin(request) # third self.assertEqual(response.status_code, 400) self.assertEqual(response.content, b"This placeholder already has the maximum number of plugins (2).") def test_type_limit_on_plugin_move(self): admin_instance = self.get_admin() superuser = self.get_superuser() source_placeholder = Placeholder.objects.create(slot='source') target_placeholder = self.get_placeholder() data = { 'placeholder': source_placeholder, 'plugin_type': 'TextPlugin', 'language': 'en', } plugin_1 = add_plugin(**data) plugin_2 = add_plugin(**data) with UserLoginContext(self, superuser): with self.settings(CMS_PLACEHOLDER_CONF=self.placeholderconf): request = self.get_post_request({'placeholder_id': target_placeholder.pk, 'plugin_id': plugin_1.pk}) response = admin_instance.move_plugin(request) # first self.assertEqual(response.status_code, 200) 
request = self.get_post_request({'placeholder_id': target_placeholder.pk, 'plugin_id': plugin_2.pk}) response = admin_instance.move_plugin(request) # second self.assertEqual(response.status_code, 400) self.assertEqual(response.content, b"This placeholder already has the maximum number (1) of allowed Text plugins.") def test_no_limit_check_same_placeholder_move(self): admin_instance = self.get_admin() superuser = self.get_superuser() source_placeholder = self.get_placeholder() data = { 'placeholder': source_placeholder, 'plugin_type': 'LinkPlugin', 'language': 'en', } plugin_1 = add_plugin(**data) add_plugin(**data) with UserLoginContext(self, superuser): with self.settings(CMS_PLACEHOLDER_CONF=self.placeholderconf): request = self.get_post_request({'placeholder_id': source_placeholder.pk, 'plugin_id': plugin_1.pk, 'plugin_order': 1, }) response = admin_instance.move_plugin(request) # first self.assertEqual(response.status_code, 200) def test_edit_plugin_and_cancel(self): placeholder = self.get_placeholder() admin_instance = self.get_admin() data = { 'plugin_type': 'TextPlugin', 'placeholder_id': placeholder.pk, 'plugin_language': 'en', } superuser = self.get_superuser() with UserLoginContext(self, superuser): with self.settings(CMS_PLACEHOLDER_CONF=self.placeholderconf): request = self.get_post_request(data) response = admin_instance.add_plugin(request) self.assertEqual(response.status_code, 200) plugin_id = int(str(response.content).split('edit-plugin/')[1].split("/")[0]) data = { 'body': 'Hello World', } request = self.get_post_request(data) response = admin_instance.edit_plugin(request, plugin_id) self.assertEqual(response.status_code, 200) text_plugin = Text.objects.get(pk=plugin_id) self.assertEqual('Hello World', text_plugin.body) # edit again, but this time press cancel data = { 'body': 'Hello World!!', '_cancel': True, } request = self.get_post_request(data) response = admin_instance.edit_plugin(request, plugin_id) self.assertEqual(response.status_code, 
200) text_plugin = Text.objects.get(pk=plugin_id) self.assertEqual('Hello World', text_plugin.body) class PlaceholderPluginPermissionTests(PlaceholderAdminTestBase): def _testuser(self): User = get_user_model() u = User(is_staff=True, is_active=True, is_superuser=False) setattr(u, u.USERNAME_FIELD, "test") u.set_password("test") u.save() return u def _create_example(self): ex = Example1( char_1='one', char_2='two', char_3='tree', char_4='four' ) ex.save() self._placeholder = ex.placeholder self.example_object = ex def _create_plugin(self): self._plugin = add_plugin(self._placeholder, 'TextPlugin', 'en') def _give_permission(self, user, model, permission_type, save=True): codename = '%s_%s' % (permission_type, model._meta.object_name.lower()) user.user_permissions.add(Permission.objects.get(codename=codename)) def _delete_permission(self, user, model, permission_type, save=True): codename = '%s_%s' % (permission_type, model._meta.object_name.lower()) user.user_permissions.remove(Permission.objects.get(codename=codename)) def _give_object_permission(self, user, object, permission_type, save=True): codename = '%s_%s' % (permission_type, object.__class__._meta.object_name.lower()) UserObjectPermission.objects.assign_perm(codename, user=user, obj=object) def _delete_object_permission(self, user, object, permission_type, save=True): codename = '%s_%s' % (permission_type, object.__class__._meta.object_name.lower()) UserObjectPermission.objects.remove_perm(codename, user=user, obj=object) def _post_request(self, user): data = { 'plugin_type': 'TextPlugin', 'placeholder_id': self._placeholder.pk, 'plugin_language': 'en', } request = self.get_post_request(data) request.user = self.reload(user) request._messages = default_storage(request) return request def test_plugin_add_requires_permissions(self): """User wants to add a plugin to the example app placeholder but has no permissions""" self._test_plugin_action_requires_permissions('add') def 
test_plugin_edit_requires_permissions(self): """User wants to edit a plugin to the example app placeholder but has no permissions""" self._test_plugin_action_requires_permissions('change') def _test_plugin_action_requires_permissions(self, key): self._create_example() if key == 'change': self._create_plugin() normal_guy = self._testuser() admin_instance = self.get_admin() # check all combinations of plugin, app and object permission for perms in itertools.product(*[[False, True]]*3): self._set_perms(normal_guy, [Text, Example1, self.example_object], perms, key) request = self._post_request(normal_guy) if key == 'add': response = admin_instance.add_plugin(request) elif key == 'change': response = admin_instance.edit_plugin(request, self._plugin.id) should_pass = perms[0] and (perms[1] or perms[2]) expected_status_code = HttpResponse.status_code if should_pass else HttpResponseForbidden.status_code self.assertEqual(response.status_code, expected_status_code) # cleanup self._set_perms(normal_guy, [Text, Example1, self.example_object], (False,)*3, key) def _set_perms(self, user, objects, perms, key): for obj, perm in zip(objects, perms): action = 'give' if perm else 'delete' object_key = '_object' if isinstance(obj, models.Model) else '' method_name = '_%s%s_permission' % (action, object_key) getattr(self, method_name)(user, obj, key) class PlaceholderConfTests(TestCase): def test_get_all_plugins_single_page(self): page = create_page('page', 'col_two.html', 'en') placeholder = page.placeholders.get(slot='col_left') conf = { 'col_two': { 'plugins': ['TextPlugin', 'LinkPlugin'], }, 'col_two.html col_left': { 'plugins': ['LinkPlugin'], }, } with self.settings(CMS_PLACEHOLDER_CONF=conf): plugins = plugin_pool.get_all_plugins(placeholder, page) self.assertEqual(len(plugins), 1, plugins) self.assertEqual(plugins[0], LinkPlugin) def test_get_all_plugins_inherit(self): parent = create_page('parent', 'col_two.html', 'en') page = create_page('page', 
constants.TEMPLATE_INHERITANCE_MAGIC, 'en', parent=parent) placeholder = page.placeholders.get(slot='col_left') conf = { 'col_two': { 'plugins': ['TextPlugin', 'LinkPlugin'], }, 'col_two.html col_left': { 'plugins': ['LinkPlugin'], }, } with self.settings(CMS_PLACEHOLDER_CONF=conf): plugins = plugin_pool.get_all_plugins(placeholder, page) self.assertEqual(len(plugins), 1, plugins) self.assertEqual(plugins[0], LinkPlugin) class PlaceholderI18NTest(CMSTestCase): def _testuser(self): User = get_user_model() u = User(is_staff=True, is_active=True, is_superuser=True) setattr(u, u.USERNAME_FIELD, "test") u.set_password("test") u.save() return u def test_hvad_tabs(self): ex = MultilingualExample1.objects.language('en').create(char_1='one', char_2='two') self._testuser() self.client.login(username='test', password='test') response = self.client.get('/de/admin/placeholderapp/multilingualexample1/%d/' % ex.pk) self.assertContains(response, '<input type="hidden" class="language_button selected" name="de" />') def test_no_tabs(self): ex = Example1.objects.create( char_1='one', char_2='two', char_3='one', char_4='two', ) self._testuser() self.client.login(username='test', password='test') response = self.client.get('/de/admin/placeholderapp/example1/%d/' % ex.pk) self.assertNotContains(response, '<input type="hidden" class="language_button selected" name="de" />') def test_placeholder_tabs(self): ex = TwoPlaceholderExample.objects.create( char_1='one', char_2='two', char_3='one', char_4='two', ) self._testuser() self.client.login(username='test', password='test') response = self.client.get('/de/admin/placeholderapp/twoplaceholderexample/%d/' % ex.pk) self.assertNotContains(response, """<input type="button" onclick="trigger_lang_button(this,'./?language=en');" class="language_button selected" id="debutton" name="en" value="English">""")
unknown
codeparrot/codeparrot-clean
# -*- coding: utf-8 -*- """ flask.ext.wtf ~~~~~~~~~~~~ Flask-WTF extension :copyright: (c) 2010 by Dan Jacob. :license: BSD, see LICENSE for more details. """ try: import sqlalchemy _is_sqlalchemy = True except ImportError: _is_sqlalchemy = False from wtforms import fields, widgets, validators from wtforms.fields import * from wtforms.validators import * from wtforms.widgets import * from wtforms import ValidationError from . import html5 from .form import Form from . import recaptcha from .recaptcha.fields import RecaptchaField from .recaptcha.widgets import RecaptchaWidget from .recaptcha.validators import Recaptcha fields.RecaptchaField = RecaptchaField widgets.RecaptchaWidget = RecaptchaWidget validators.Recaptcha = Recaptcha from .file import FileField from .file import FileAllowed, FileRequired, file_allowed, file_required fields.FileField = FileField validators.file_allowed = file_allowed validators.file_required = file_required validators.FileAllowed = FileAllowed validators.FileRequired = FileRequired __all__ = ['Form', 'ValidationError', 'fields', 'validators', 'widgets', 'html5'] __all__ += [str(v) for v in validators.__all__ ] __all__ += [str(v) for v in (fields.__all__ if hasattr(fields, '__all__') else fields.core.__all__) ] __all__ += [str(v) for v in (widgets.__all__ if hasattr(widgets, '__all__') else widgets.core.__all__)] __all__ += recaptcha.__all__ if _is_sqlalchemy: from wtforms.ext.sqlalchemy.fields import QuerySelectField, \ QuerySelectMultipleField __all__ += ['QuerySelectField', 'QuerySelectMultipleField'] for field in (QuerySelectField, QuerySelectMultipleField): setattr(fields, field.__name__, field)
unknown
codeparrot/codeparrot-clean
#!/usr/bin/python2 import requests import os import getpass import argparse from os.path import expanduser def main(): home = expanduser("~") config = os.path.join(home,'.wa_api_key') parser = argparse.ArgumentParser() parser.add_argument('-s','--short', action="store_true",help="Return just the answer.") parser.add_argument('-l','--long', action="store_false",help="Default. Return full text") parser.add_argument("query", help="Wolfram Alpha query") args = parser.parse_args() if os.path.exists(config): api_key = open(config,"r") key = api_key.readline().strip() api_key.close() else: print "API Key not found. Please enter API key" key= getpass.getpass() api_key = open(config,"w") api_key.write(key) api_key.close() query = args.query r = requests.get("http://api.wolframalpha.com/v2/query?input={}&appid={}&format=plaintext&output=json".format(query,key)) j = r.json()['queryresult'] if j['success']: if args.short: print_short(j) else: print_long(j) else: print 'Query failed. Check spelling/formating' def print_long(j): for field in j['pods']: if 'title' in field: print '\x1b[1;34;40m'+field['title']+'\x1b[0m' for subpod in field['subpods']: if 'plaintext' in subpod: print subpod['plaintext'] def print_short(j): for field in j['pods']: if 'title' in field: if field['title'] == 'Result': for subpod in field['subpods']: if 'plaintext' in subpod: print subpod['plaintext'] if __name__ == '__main__': main()
unknown
codeparrot/codeparrot-clean
# Fabric install task for termcast: installs system dependencies, renders
# the termcast/termcast-play wrapper scripts from templates, links them
# into /usr/bin and registers termcast-play as the inetd telnet handler.
from fabric import colors
from fabric import api as fab
from fabric import decorators
from fabric.contrib import files
from termcastTemplates import termcastTemplate, termcastPlayTemplate
import os, getpass

fab.env.colors = True

# Shell commands run once before installation (Debian/Ubuntu specific).
COMMANDS = ('sudo apt-get install ttyrec screen', )
# Names of the generated wrapper scripts, relative to the install dir.
SCRIPTS = ('termcast', 'termcast-play')

# /etc/inetd.conf entries: serve termcast-play on the telnet port for
# both IPv4 and IPv6.  {user} is filled in at install time.
inetdConfTemplate = '''
telnet stream tcp4 nowait {user} /usr/sbin/tcpd /usr/bin/termcast-play
telnet stream tcp6 nowait {user} /usr/sbin/tcpd /usr/bin/termcast-play
'''


def write_file(filename, text, use_sudo=False):
    # Delegates to fabric's files.append, which appends only lines that
    # are not already present in the target file.
    files.append(filename, text, use_sudo=use_sudo)


def write_sudo_file(filename, text):
    # Convenience wrapper for writing root-owned files (e.g. /etc/inetd.conf).
    write_file(filename, text, use_sudo=True)


@fab.task
@decorators.hosts(['localhost'])
def install():
    """Install termcast on the local machine (interactive; needs sudo)."""
    user = getpass.getuser()
    installDir = os.getcwd()
    for command in COMMANDS:
        fab.local(command)
    casts = fab.prompt(colors.cyan('Specify directory where you want the '
                                   'casts to be stored'),
                       default=os.path.join(installDir, 'casts'))
    # Substitution values shared by the script templates and inetd config.
    values = {'user': user,
              'installDir': installDir,
              'termcastPath': os.path.join(installDir, 'termcast'),
              'termcastPlayPath': os.path.join(installDir, 'termcast-play'),
              'casts': casts}
    # Remove stale copies first; warn_only lets the rm fail harmlessly
    # when the scripts do not exist yet (fresh install).
    with fab.settings(warn_only=True):
        fab.local('rm {}'.format(values['termcastPath']))
        fab.local('rm {}'.format(values['termcastPlayPath']))
    write_file(values['termcastPath'], termcastTemplate.format(**values))
    write_file(values['termcastPlayPath'], termcastPlayTemplate.format(**values))
    linkCommand = 'sudo ln -s {} /usr/bin'
    for script in SCRIPTS:
        # warn_only again: ln -s fails if the symlink already exists.
        with fab.settings(warn_only=True):
            fab.local(linkCommand.format(os.path.join(installDir, script)))
    inetdConfText = inetdConfTemplate.format(**values)
    write_sudo_file('/etc/inetd.conf', inetdConfText)
    fab.local('sudo /etc/init.d/openbsd-inetd restart')
    fab.local('chmod a+x {}'.format(values['termcastPath']))
    fab.local('chmod a+x {}'.format(values['termcastPlayPath']))
unknown
codeparrot/codeparrot-clean
""" urllib3 - Thread-safe connection pooling and re-using. """ from __future__ import absolute_import import warnings from .connectionpool import ( HTTPConnectionPool, HTTPSConnectionPool, connection_from_url ) from . import exceptions from .filepost import encode_multipart_formdata from .poolmanager import PoolManager, ProxyManager, proxy_from_url from .response import HTTPResponse from .util.request import make_headers from .util.url import get_host from .util.timeout import Timeout from .util.retry import Retry # Set default logging handler to avoid "No handler found" warnings. import logging try: # Python 2.7+ from logging import NullHandler except ImportError: class NullHandler(logging.Handler): def emit(self, record): pass __author__ = 'Andrey Petrov (andrey.petrov@shazow.net)' __license__ = 'MIT' __version__ = 'dev' __all__ = ( 'HTTPConnectionPool', 'HTTPSConnectionPool', 'PoolManager', 'ProxyManager', 'HTTPResponse', 'Retry', 'Timeout', 'add_stderr_logger', 'connection_from_url', 'disable_warnings', 'encode_multipart_formdata', 'get_host', 'make_headers', 'proxy_from_url', ) logging.getLogger(__name__).addHandler(NullHandler()) def add_stderr_logger(level=logging.DEBUG): """ Helper for quickly adding a StreamHandler to the logger. Useful for debugging. Returns the handler after adding it. """ # This method needs to be in this __init__.py to get the __name__ correct # even if urllib3 is vendored within another package. logger = logging.getLogger(__name__) handler = logging.StreamHandler() handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s')) logger.addHandler(handler) logger.setLevel(level) logger.debug('Added a stderr logging handler to logger: %s', __name__) return handler # ... Clean up. del NullHandler # SecurityWarning's always go off by default. 
warnings.simplefilter('always', exceptions.SecurityWarning, append=True) # SubjectAltNameWarning's should go off once per host warnings.simplefilter('default', exceptions.SubjectAltNameWarning) # InsecurePlatformWarning's don't vary between requests, so we keep it default. warnings.simplefilter('default', exceptions.InsecurePlatformWarning, append=True) # SNIMissingWarnings should go off only once. warnings.simplefilter('default', exceptions.SNIMissingWarning) def disable_warnings(category=exceptions.HTTPWarning): """ Helper for quickly disabling all urllib3 warnings. """ warnings.simplefilter('ignore', category)
unknown
codeparrot/codeparrot-clean
# frozen_string_literal: true

# :markup: markdown

module AbstractController
  module Caching
    # # Abstract Controller Caching Fragments
    #
    # Fragment caching is used for caching various blocks within views without
    # caching the entire action as a whole. This is useful when certain elements of
    # an action change frequently or depend on complicated state while other parts
    # rarely change or can be shared amongst multiple parties. The caching is done
    # using the `cache` helper available in the Action View. See
    # ActionView::Helpers::CacheHelper for more information.
    #
    # While it's strongly recommended that you use key-based cache expiration (see
    # links in CacheHelper for more information), it is also possible to manually
    # expire caches. For example:
    #
    #     expire_fragment('name_of_cache')
    module Fragments
      extend ActiveSupport::Concern

      included do
        # class_attribute is preferred (per-subclass overridable); mattr_writer
        # is the fallback when the including class does not provide it.
        if respond_to?(:class_attribute)
          class_attribute :fragment_cache_keys
        else
          mattr_writer :fragment_cache_keys
        end

        self.fragment_cache_keys = []

        if respond_to?(:helper_method)
          helper_method :combined_fragment_cache_key
        end
      end

      module ClassMethods
        # Allows you to specify controller-wide key prefixes for cache fragments. Pass
        # either a constant `value`, or a block which computes a value each time a cache
        # key is generated.
        #
        # For example, you may want to prefix all fragment cache keys with a global
        # version identifier, so you can easily invalidate all caches.
        #
        #     class ApplicationController
        #       fragment_cache_key "v1"
        #     end
        #
        # When it's time to invalidate all fragments, simply change the string constant.
        # Or, progressively roll out the cache invalidation using a computed value:
        #
        #     class ApplicationController
        #       fragment_cache_key do
        #         @account.id.odd? ? "v1" : "v2"
        #       end
        #     end
        def fragment_cache_key(value = nil, &key)
          # A constant value is normalized to a lambda so every entry in
          # fragment_cache_keys is callable.
          self.fragment_cache_keys += [key || -> { value }]
        end
      end

      # Given a key (as described in `expire_fragment`), returns a key array suitable
      # for use in reading, writing, or expiring a cached fragment. All keys begin
      # with `:views`, followed by `ENV["RAILS_CACHE_ID"]` or
      # `ENV["RAILS_APP_VERSION"]` if set, followed by any controller-wide key prefix
      # values, ending with the specified `key` value.
      def combined_fragment_cache_key(key)
        # Prefix blocks run in the controller instance so they can read ivars.
        head = self.class.fragment_cache_keys.map { |k| instance_exec(&k) }
        # Hash keys are implicit url_for calls; the scheme is stripped so the
        # cache key is scheme-agnostic.
        tail = key.is_a?(Hash) ? url_for(key).split("://").last : key

        cache_key = [:views, ENV["RAILS_CACHE_ID"] || ENV["RAILS_APP_VERSION"], head, tail]
        # Flatten only one level, then drop nils (e.g. unset ENV entries).
        cache_key.flatten!(1)
        cache_key.compact!
        cache_key
      end

      # Writes `content` to the location signified by `key` (see `expire_fragment` for
      # acceptable formats).
      def write_fragment(key, content, options = nil)
        # Without a configured cache store this is a no-op that still
        # returns the content, so callers can render it regardless.
        return content unless cache_configured?

        key = combined_fragment_cache_key(key)
        instrument_fragment_cache :write_fragment, key do
          content = content.to_str
          cache_store.write(key, content, options)
        end
        content
      end

      # Reads a cached fragment from the location signified by `key` (see
      # `expire_fragment` for acceptable formats).
      def read_fragment(key, options = nil)
        return unless cache_configured?

        key = combined_fragment_cache_key(key)
        instrument_fragment_cache :read_fragment, key do
          result = cache_store.read(key, options)
          # Stored fragments were rendered HTML; restore html_safe-ness
          # when the returned object supports it.
          result.respond_to?(:html_safe) ? result.html_safe : result
        end
      end

      # Check if a cached fragment from the location signified by `key` exists (see
      # `expire_fragment` for acceptable formats).
      def fragment_exist?(key, options = nil)
        return unless cache_configured?
        key = combined_fragment_cache_key(key)

        instrument_fragment_cache :exist_fragment?, key do
          cache_store.exist?(key, options)
        end
      end

      # Removes fragments from the cache.
      #
      # `key` can take one of three forms:
      #
      # *   String - This would normally take the form of a path, like
      #     `pages/45/notes`.
      # *   Hash - Treated as an implicit call to `url_for`, like `{ controller:
      #     'pages', action: 'notes', id: 45}`
      # *   Regexp - Will remove any fragment that matches, so `%r{pages/\d*/notes}`
      #     might remove all notes. Make sure you don't use anchors in the regex (`^`
      #     or `$`) because the actual filename matched looks like
      #     `./cache/filename/path.cache`. Note: Regexp expiration is only supported
      #     on caches that can iterate over all keys (unlike memcached).
      #
      # `options` is passed through to the cache store's `delete` method (or
      # `delete_matched`, for Regexp keys).
      def expire_fragment(key, options = nil)
        return unless cache_configured?
        # Regexp keys are matched against raw store keys, so they must not
        # be rewritten through combined_fragment_cache_key.
        key = combined_fragment_cache_key(key) unless key.is_a?(Regexp)

        instrument_fragment_cache :expire_fragment, key do
          if key.is_a?(Regexp)
            cache_store.delete_matched(key, options)
          else
            cache_store.delete(key, options)
          end
        end
      end

      def instrument_fragment_cache(name, key, &block) # :nodoc:
        # Emits e.g. "expire_fragment.action_controller" so subscribers can
        # observe cache activity per instrumenter.
        ActiveSupport::Notifications.instrument("#{name}.#{instrument_name}", instrument_payload(key), &block)
      end
    end
  end
end
ruby
github
https://github.com/rails/rails
actionpack/lib/abstract_controller/caching/fragments.rb
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings

from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling

from .. import models as _models

if TYPE_CHECKING:
    # pylint: disable=unused-import,ungrouped-imports
    from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union

    T = TypeVar('T')
    ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]

class ApplicationGatewayPrivateEndpointConnectionsOperations(object):
    """ApplicationGatewayPrivateEndpointConnectionsOperations operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.network.v2020_05_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    def _delete_initial(
        self,
        resource_group_name,  # type: str
        application_gateway_name,  # type: str
        connection_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        # Sends the initial DELETE request of the long-running operation;
        # begin_delete() wraps this with LRO polling.
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-05-01"
        accept = "application/json"

        # Construct URL
        url = self._delete_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
            'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 200/202 indicate accepted LRO states; 204 means already gone.
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})

    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}/privateEndpointConnections/{connectionName}'}  # type: ignore

    def begin_delete(
        self,
        resource_group_name,  # type: str
        application_gateway_name,  # type: str
        connection_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller[None]
        """Deletes the specified private endpoint connection on application gateway.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param application_gateway_name: The name of the application gateway.
        :type application_gateway_name: str
        :param connection_name: The name of the application gateway private endpoint connection.
        :type connection_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: Pass in True if you'd like the ARMPolling polling method,
         False for no polling, or your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # cls=lambda keeps the raw PipelineResponse so the poller can
            # drive the operation from the initial response.
            raw_result = self._delete_initial(
                resource_group_name=resource_group_name,
                application_gateway_name=application_gateway_name,
                connection_name=connection_name,
                cls=lambda x,y,z: x,
                **kwargs
            )

        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            if cls:
                return cls(pipeline_response, None, {})

        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
            'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }

        # Delete polls the Location header for its final state.
        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}/privateEndpointConnections/{connectionName}'}  # type: ignore

    def _update_initial(
        self,
        resource_group_name,  # type: str
        application_gateway_name,  # type: str
        connection_name,  # type: str
        parameters,  # type: "_models.ApplicationGatewayPrivateEndpointConnection"
        **kwargs  # type: Any
    ):
        # type: (...) -> Optional["_models.ApplicationGatewayPrivateEndpointConnection"]
        # Sends the initial PUT request of the long-running update operation;
        # begin_update() wraps this with LRO polling.
        cls = kwargs.pop('cls', None)  # type: ClsType[Optional["_models.ApplicationGatewayPrivateEndpointConnection"]]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-05-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self._update_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
            'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'ApplicationGatewayPrivateEndpointConnection')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        # Only a 200 carries the updated resource body; a 202 returns None
        # and the final value is fetched by the poller.
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('ApplicationGatewayPrivateEndpointConnection', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    _update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}/privateEndpointConnections/{connectionName}'}  # type: ignore

    def begin_update(
        self,
        resource_group_name,  # type: str
        application_gateway_name,  # type: str
        connection_name,  # type: str
        parameters,  # type: "_models.ApplicationGatewayPrivateEndpointConnection"
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller["_models.ApplicationGatewayPrivateEndpointConnection"]
        """Updates the specified private endpoint connection on application gateway.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param application_gateway_name: The name of the application gateway.
        :type application_gateway_name: str
        :param connection_name: The name of the application gateway private endpoint connection.
        :type connection_name: str
        :param parameters: Parameters supplied to update application gateway private endpoint
         connection operation.
        :type parameters: ~azure.mgmt.network.v2020_05_01.models.ApplicationGatewayPrivateEndpointConnection
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: Pass in True if you'd like the ARMPolling polling method,
         False for no polling, or your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either ApplicationGatewayPrivateEndpointConnection or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_05_01.models.ApplicationGatewayPrivateEndpointConnection]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ApplicationGatewayPrivateEndpointConnection"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            raw_result = self._update_initial(
                resource_group_name=resource_group_name,
                application_gateway_name=application_gateway_name,
                connection_name=connection_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )

        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            deserialized = self._deserialize('ApplicationGatewayPrivateEndpointConnection', pipeline_response)

            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
            'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }

        # Update polls the Azure-AsyncOperation header (unlike delete, which
        # uses Location).
        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}/privateEndpointConnections/{connectionName}'}  # type: ignore

    def get(
        self,
        resource_group_name,  # type: str
        application_gateway_name,  # type: str
        connection_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.ApplicationGatewayPrivateEndpointConnection"
        """Gets the specified private endpoint connection on application gateway.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param application_gateway_name: The name of the application gateway.
        :type application_gateway_name: str
        :param connection_name: The name of the application gateway private endpoint connection.
        :type connection_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ApplicationGatewayPrivateEndpointConnection, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2020_05_01.models.ApplicationGatewayPrivateEndpointConnection
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ApplicationGatewayPrivateEndpointConnection"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-05-01"
        accept = "application/json"

        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
            'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('ApplicationGatewayPrivateEndpointConnection', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}/privateEndpointConnections/{connectionName}'}  # type: ignore

    def list(
        self,
        resource_group_name,  # type: str
        application_gateway_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable["_models.ApplicationGatewayPrivateEndpointConnectionListResult"]
        """Lists all private endpoint connections on an application gateway.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param application_gateway_name: The name of the application gateway.
        :type application_gateway_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ApplicationGatewayPrivateEndpointConnectionListResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_05_01.models.ApplicationGatewayPrivateEndpointConnectionListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ApplicationGatewayPrivateEndpointConnectionListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-05-01"
        accept = "application/json"

        def prepare_request(next_link=None):
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # First page: build the URL from the operation template.
                url = self.list.metadata['url']  # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # Subsequent pages: the service-provided next_link is complete.
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        def extract_data(pipeline_response):
            deserialized = self._deserialize('ApplicationGatewayPrivateEndpointConnectionListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return ItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}/privateEndpointConnections'}  # type: ignore
unknown
codeparrot/codeparrot-clean
# Emulates GDAL's gdal_polygonize.py import argparse import logging import subprocess import sys import fiona import numpy as np import rasterio from rasterio.features import shapes logging.basicConfig(stream=sys.stderr, level=logging.INFO) logger = logging.getLogger('rasterio_polygonize') def main(raster_file, vector_file, driver, mask_value): with rasterio.drivers(): with rasterio.open(raster_file) as src: image = src.read_band(1) if mask_value is not None: mask = image == mask_value else: mask = None results = ( {'properties': {'raster_val': v}, 'geometry': s} for i, (s, v) in enumerate( shapes(image, mask=mask, transform=src.affine))) with fiona.open( vector_file, 'w', driver=driver, crs=src.crs, schema={'properties': [('raster_val', 'int')], 'geometry': 'Polygon'}) as dst: dst.writerecords(results) return dst.name if __name__ == '__main__': parser = argparse.ArgumentParser( description="Writes shapes of raster features to a vector file") parser.add_argument( 'input', metavar='INPUT', help="Input file name") parser.add_argument( 'output', metavar='OUTPUT', help="Output file name") parser.add_argument( '--output-driver', metavar='OUTPUT DRIVER', help="Output vector driver name") parser.add_argument( '--mask-value', default=None, type=int, metavar='MASK VALUE', help="Value to mask") args = parser.parse_args() name = main(args.input, args.output, args.output_driver, args.mask_value) print subprocess.check_output( ['ogrinfo', '-so', args.output, name])
unknown
codeparrot/codeparrot-clean
import * as ts from "../../_namespaces/ts.js";
import { dedent } from "../../_namespaces/Utils.js";
import { jsonToReadableText } from "../helpers.js";
import { verifyAlternateResultScenario } from "../helpers/alternateResult.js";
import { solutionBuildWithBaseline } from "../helpers/solutionBuilder.js";
import {
    baselineTsserverLogs,
    forEachTscWatchEdit,
    openFilesForSession,
    protocolTextSpanFromSubstring,
    TestSession,
    verifyGetErrRequest,
} from "../helpers/tsserver.js";
import {
    File,
    TestServerHost,
} from "../helpers/virtualFileSystemWithWatch.js";

describe("unittests:: tsserver:: moduleResolution::", () => {
    describe("package json file is edited", () => {
        // Builds a Node16-resolution project whose module format is governed by
        // the root package.json, so edits to that file must trigger re-resolution
        // of fileA's import of fileB.mjs.
        function setup(packageFileContents: string) {
            const configFile: File = {
                path: `/user/username/projects/myproject/src/tsconfig.json`,
                content: jsonToReadableText({
                    compilerOptions: {
                        target: "es2016",
                        module: "Node16",
                        outDir: "../out",
                        traceResolution: true,
                    },
                }),
            };
            const packageFile: File = {
                path: `/user/username/projects/myproject/package.json`,
                content: packageFileContents,
            };
            const fileA: File = {
                path: `/user/username/projects/myproject/src/fileA.ts`,
                content: dedent`
                    import { foo } from "./fileB.mjs";
                    foo();
                `,
            };
            const fileB: File = {
                path: `/user/username/projects/myproject/src/fileB.mts`,
                content: dedent`
                    export function foo() {
                    }
                `,
            };
            const host = TestServerHost.createServerHost([
                configFile,
                fileA,
                fileB,
                packageFile,
            ]);
            const session = new TestSession(host);
            openFilesForSession([fileA], session);
            return {
                host,
                session,
                packageFile,
                verifyErr: () => verifyGetErrRequest({ files: [fileA], session }),
            };
        }
        it("package json file is edited", () => {
            const { host, session, packageFile, verifyErr } = setup(jsonToReadableText({ name: "app", version: "1.0.0" }));

            session.logger.info("Modify package json file to add type module");
            host.modifyFile(
                packageFile.path,
                jsonToReadableText({
                    name: "app",
                    version: "1.0.0",
                    type: "module",
                }),
                { ignoreWatches: true },
            );
            host.invokeFsWatches(packageFile.path, "rename", packageFile.path, /*useTildeSuffix*/ undefined); // Create event instead of change
            host.runQueuedTimeoutCallbacks(); // Failed lookup updates
            host.runQueuedTimeoutCallbacks(); // Actual update
            verifyErr();

            session.logger.info("Modify package json file to remove type module");
            host.writeFile(packageFile.path, packageFile.content);
            host.runQueuedTimeoutCallbacks(); // Failed lookup updates
            host.runQueuedTimeoutCallbacks(); // Actual update
            verifyErr();

            session.logger.info("Delete package.json");
            host.deleteFile(packageFile.path);
            host.runQueuedTimeoutCallbacks(); // Failed lookup updates
            host.runQueuedTimeoutCallbacks(); // Actual update
            verifyErr();

            session.logger.info("Modify package json file to add type module");
            host.writeFile(
                packageFile.path,
                jsonToReadableText({
                    name: "app",
                    version: "1.0.0",
                    type: "module",
                }),
            );
            host.runQueuedTimeoutCallbacks(); // Failed lookup updates
            host.runQueuedTimeoutCallbacks(); // Actual update
            verifyErr();

            session.logger.info("Delete package.json");
            host.deleteFile(packageFile.path);
            host.runQueuedTimeoutCallbacks(); // Failed lookup updates
            host.runQueuedTimeoutCallbacks(); // Actual update
            verifyErr();

            baselineTsserverLogs("moduleResolution", "package json file is edited", session);
        });
        it("package json file is edited when package json with type module exists", () => {
            // Same scenario as above but starting from type: "module", so each
            // edit toggles the module format in the opposite direction.
            const { host, session, packageFile, verifyErr } = setup(jsonToReadableText({
                name: "app",
                version: "1.0.0",
                type: "module",
            }));

            session.logger.info("Modify package json file to remove type module");
            host.writeFile(packageFile.path, jsonToReadableText({ name: "app", version: "1.0.0" }));
            host.runQueuedTimeoutCallbacks(); // Failed lookup updates
            host.runQueuedTimeoutCallbacks(); // Actual update
            verifyErr();

            session.logger.info("Modify package json file to add type module");
            host.writeFile(packageFile.path, packageFile.content);
            host.runQueuedTimeoutCallbacks(); // Failed lookup updates
            host.runQueuedTimeoutCallbacks(); // Actual update
            verifyErr();

            session.logger.info("Delete package.json");
            host.deleteFile(packageFile.path);
            host.runQueuedTimeoutCallbacks(); // Failed lookup updates
            host.runQueuedTimeoutCallbacks(); // Actual update
            verifyErr();

            session.logger.info("Modify package json file to without type module");
            host.writeFile(packageFile.path, jsonToReadableText({ name: "app", version: "1.0.0" }));
            host.runQueuedTimeoutCallbacks(); // Failed lookup updates
            host.runQueuedTimeoutCallbacks(); // Actual update
            verifyErr();

            session.logger.info("Delete package.json");
            host.deleteFile(packageFile.path);
            host.runQueuedTimeoutCallbacks(); // Failed lookup updates
            host.runQueuedTimeoutCallbacks(); // Actual update
            verifyErr();

            baselineTsserverLogs("moduleResolution", "package json file is edited when package json with type module exists", session);
        });
    });

    verifyAlternateResultScenario(
        /*forTsserver*/ true,
        (scenario, getHost, edits) => {
            it(scenario, () => {
                const host = getHost();
                const indexFile = "/home/src/projects/project/index.mts";
                const session = new TestSession(host);
                openFilesForSession([indexFile], session);
                verifyGetErrRequest({ files: [indexFile], session });
                forEachTscWatchEdit(session, edits(), () => verifyGetErrRequest({ session, files: [indexFile] }));
                baselineTsserverLogs("moduleResolution", scenario, session);
            });
        },
    );

    describe("using referenced project", () => {
        it("not built", () => {
            verify();
        });
        it("built", () => {
            verify(/*built*/ true);
        });
        // Two symlinked workspace packages where package-b references package-a;
        // verifies resolution both before and after a solution build.
        function verify(built?: boolean) {
            const indexContent = dedent`
                import { FOO } from "package-a";
                console.log(FOO);
            `;
            const host = TestServerHost.createServerHost({
                "/home/src/projects/project/packages/package-a/package.json": getPackageJson("package-a"),
                "/home/src/projects/project/packages/package-a/tsconfig.json": getTsConfig(),
                "/home/src/projects/project/packages/package-a/src/index.ts": `export * from "./subfolder";`,
                "/home/src/projects/project/packages/package-a/src/subfolder/index.ts": `export const FOO = "bar";`,
                "/home/src/projects/project/packages/package-b/package.json": getPackageJson("package-b"),
                "/home/src/projects/project/packages/package-b/tsconfig.json": getTsConfig([{ path: "../package-a" }]),
                "/home/src/projects/project/packages/package-b/src/index.ts": indexContent,
                "/home/src/projects/project/node_modules/package-a": { symLink: "/home/src/projects/project/packages/package-a" },
                "/home/src/projects/project/node_modules/package-b": { symLink: "/home/src/projects/project/packages/package-b" },
            });
            if (built) {
                solutionBuildWithBaseline(host, ["/home/src/projects/project/packages/package-b"]);
                host.clearOutput();
            }
            const session = new TestSession(host);
            openFilesForSession(["/home/src/projects/project/packages/package-b/src/index.ts"], session);
            verifyGetErrRequest({
                session,
                files: ["/home/src/projects/project/packages/package-b/src/index.ts"],
            });

            // Break the import specifier ("package-a" -> "package-aX") ...
            const { end } = protocolTextSpanFromSubstring(indexContent, "package-a");
            session.executeCommandSeq<ts.server.protocol.UpdateOpenRequest>({
                command: ts.server.protocol.CommandTypes.UpdateOpen,
                arguments: {
                    changedFiles: [{
                        fileName: "/home/src/projects/project/packages/package-b/src/index.ts",
                        textChanges: [{
                            start: end,
                            end,
                            newText: "X",
                        }],
                    }],
                },
            });
            verifyGetErrRequest({
                session,
                files: ["/home/src/projects/project/packages/package-b/src/index.ts"],
            });

            // ... then restore it by deleting the inserted character.
            session.executeCommandSeq<ts.server.protocol.UpdateOpenRequest>({
                command: ts.server.protocol.CommandTypes.UpdateOpen,
                arguments: {
                    changedFiles: [{
                        fileName: "/home/src/projects/project/packages/package-b/src/index.ts",
                        textChanges: [{
                            start: end,
                            end: { ...end, offset: end.offset + 1 },
                            newText: "",
                        }],
                    }],
                },
            });
            verifyGetErrRequest({
                session,
                files: ["/home/src/projects/project/packages/package-b/src/index.ts"],
            });
            baselineTsserverLogs("moduleResolution", `using referenced project${built ? " built" : ""}`, session);
        }
        function getPackageJson(packageName: string) {
            return jsonToReadableText({
                name: packageName,
                version: "1.0.0",
                type: "module",
                main: "build/index.js",
                exports: {
                    ".": "./build/index.js",
                    "./package.json": "./package.json",
                    "./*": ["./build/*/index.js", "./build/*.js"],
                },
            });
        }
        function getTsConfig(references?: object[]) {
            return jsonToReadableText({
                compilerOptions: {
                    allowSyntheticDefaultImports: true,
                    baseUrl: "./",
                    composite: true,
                    declarationMap: true,
                    esModuleInterop: true,
                    lib: ["es2021"],
                    module: "esnext",
                    moduleResolution: "bundler",
                    outDir: "build",
                    rootDir: "./src",
                    target: "ES2021",
                    traceResolution: true,
                    tsBuildInfoFile: "./build/tsconfig.tsbuildinfo",
                },
                include: ["./src/**/*.ts"],
                references,
            });
        }
    });
});
typescript
github
https://github.com/microsoft/TypeScript
src/testRunner/unittests/tsserver/moduleResolution.ts
"""Extract, format and print information about Python stack traces.""" import linecache import sys import types __all__ = ['extract_stack', 'extract_tb', 'format_exception', 'format_exception_only', 'format_list', 'format_stack', 'format_tb', 'print_exc', 'format_exc', 'print_exception', 'print_last', 'print_stack', 'print_tb', 'tb_lineno'] def _print(file, str='', terminator='\n'): file.write(str+terminator) def print_list(extracted_list, file=None): """Print the list of tuples as returned by extract_tb() or extract_stack() as a formatted stack trace to the given file.""" if file is None: file = sys.stderr for filename, lineno, name, line in extracted_list: _print(file, ' File "%s", line %d, in %s' % (filename,lineno,name)) if line: _print(file, ' %s' % line.strip()) def format_list(extracted_list): """Format a list of traceback entry tuples for printing. Given a list of tuples as returned by extract_tb() or extract_stack(), return a list of strings ready for printing. Each string in the resulting list corresponds to the item with the same index in the argument list. Each string ends in a newline; the strings may contain internal newlines as well, for those items whose source text line is not None. """ list = [] for filename, lineno, name, line in extracted_list: item = ' File "%s", line %d, in %s\n' % (filename,lineno,name) if line: item = item + ' %s\n' % line.strip() list.append(item) return list def print_tb(tb, limit=None, file=None): """Print up to 'limit' stack trace entries from the traceback 'tb'. If 'limit' is omitted or None, all entries are printed. If 'file' is omitted or None, the output goes to sys.stderr; otherwise 'file' should be an open file or file-like object with a write() method. 
""" if file is None: file = sys.stderr if limit is None: if hasattr(sys, 'tracebacklimit'): limit = sys.tracebacklimit n = 0 while tb is not None and (limit is None or n < limit): f = tb.tb_frame lineno = tb.tb_lineno co = f.f_code filename = co.co_filename name = co.co_name _print(file, ' File "%s", line %d, in %s' % (filename, lineno, name)) linecache.checkcache(filename) line = linecache.getline(filename, lineno, f.f_globals) if line: _print(file, ' ' + line.strip()) tb = tb.tb_next n = n+1 def format_tb(tb, limit = None): """A shorthand for 'format_list(extract_stack(f, limit)).""" return format_list(extract_tb(tb, limit)) def extract_tb(tb, limit = None): """Return list of up to limit pre-processed entries from traceback. This is useful for alternate formatting of stack traces. If 'limit' is omitted or None, all entries are extracted. A pre-processed stack trace entry is a quadruple (filename, line number, function name, text) representing the information that is usually printed for a stack trace. The text is a string with leading and trailing whitespace stripped; if the source is not available it is None. """ if limit is None: if hasattr(sys, 'tracebacklimit'): limit = sys.tracebacklimit list = [] n = 0 while tb is not None and (limit is None or n < limit): f = tb.tb_frame lineno = tb.tb_lineno co = f.f_code filename = co.co_filename name = co.co_name linecache.checkcache(filename) line = linecache.getline(filename, lineno, f.f_globals) if line: line = line.strip() else: line = None list.append((filename, lineno, name, line)) tb = tb.tb_next n = n+1 return list def print_exception(etype, value, tb, limit=None, file=None): """Print exception up to 'limit' stack trace entries from 'tb' to 'file'. 
This differs from print_tb() in the following ways: (1) if traceback is not None, it prints a header "Traceback (most recent call last):"; (2) it prints the exception type and value after the stack trace; (3) if type is SyntaxError and value has the appropriate format, it prints the line where the syntax error occurred with a caret on the next line indicating the approximate position of the error. """ if file is None: file = sys.stderr if tb: _print(file, 'Traceback (most recent call last):') print_tb(tb, limit, file) lines = format_exception_only(etype, value) for line in lines: _print(file, line, '') def format_exception(etype, value, tb, limit = None): """Format a stack trace and the exception information. The arguments have the same meaning as the corresponding arguments to print_exception(). The return value is a list of strings, each ending in a newline and some containing internal newlines. When these lines are concatenated and printed, exactly the same text is printed as does print_exception(). """ if tb: list = ['Traceback (most recent call last):\n'] list = list + format_tb(tb, limit) else: list = [] list = list + format_exception_only(etype, value) return list def format_exception_only(etype, value): """Format the exception part of a traceback. The arguments are the exception type and value such as given by sys.last_type and sys.last_value. The return value is a list of strings, each ending in a newline. Normally, the list contains a single string; however, for SyntaxError exceptions, it contains several lines that (when printed) display detailed information about where the syntax error occurred. The message indicating which exception occurred is always the last string in the list. 
""" # An instance should not have a meaningful value parameter, but # sometimes does, particularly for string exceptions, such as # >>> raise string1, string2 # deprecated # # Clear these out first because issubtype(string1, SyntaxError) # would raise another exception and mask the original problem. if (isinstance(etype, BaseException) or isinstance(etype, types.InstanceType) or etype is None or type(etype) is str): return [_format_final_exc_line(etype, value)] stype = etype.__name__ if not issubclass(etype, SyntaxError): return [_format_final_exc_line(stype, value)] # It was a syntax error; show exactly where the problem was found. lines = [] try: msg, (filename, lineno, offset, badline) = value.args except Exception: pass else: filename = filename or "<string>" lines.append(' File "%s", line %d\n' % (filename, lineno)) if badline is not None: lines.append(' %s\n' % badline.strip()) if offset is not None: caretspace = badline.rstrip('\n')[:offset].lstrip() # non-space whitespace (likes tabs) must be kept for alignment caretspace = ((c.isspace() and c or ' ') for c in caretspace) # only three spaces to account for offset1 == pos 0 lines.append(' %s^\n' % ''.join(caretspace)) value = msg lines.append(_format_final_exc_line(stype, value)) return lines def _format_final_exc_line(etype, value): """Return a list of a single line -- normal case for format_exception_only""" valuestr = _some_str(value) if value is None or not valuestr: line = "%s\n" % etype else: line = "%s: %s\n" % (etype, valuestr) return line def _some_str(value): try: return str(value) except Exception: pass try: value = unicode(value) return value.encode("ascii", "backslashreplace") except Exception: pass return '<unprintable %s object>' % type(value).__name__ def print_exc(limit=None, file=None): """Shorthand for 'print_exception(sys.exc_type, sys.exc_value, sys.exc_traceback, limit, file)'. 
(In fact, it uses sys.exc_info() to retrieve the same information in a thread-safe way.)""" if file is None: file = sys.stderr try: etype, value, tb = sys.exc_info() print_exception(etype, value, tb, limit, file) finally: etype = value = tb = None def format_exc(limit=None): """Like print_exc() but return a string.""" try: etype, value, tb = sys.exc_info() return ''.join(format_exception(etype, value, tb, limit)) finally: etype = value = tb = None def print_last(limit=None, file=None): """This is a shorthand for 'print_exception(sys.last_type, sys.last_value, sys.last_traceback, limit, file)'.""" if not hasattr(sys, "last_type"): raise ValueError("no last exception") if file is None: file = sys.stderr print_exception(sys.last_type, sys.last_value, sys.last_traceback, limit, file) def print_stack(f=None, limit=None, file=None): """Print a stack trace from its invocation point. The optional 'f' argument can be used to specify an alternate stack frame at which to start. The optional 'limit' and 'file' arguments have the same meaning as for print_exception(). """ if f is None: try: raise ZeroDivisionError except ZeroDivisionError: f = sys.exc_info()[2].tb_frame.f_back print_list(extract_stack(f, limit), file) def format_stack(f=None, limit=None): """Shorthand for 'format_list(extract_stack(f, limit))'.""" if f is None: try: raise ZeroDivisionError except ZeroDivisionError: f = sys.exc_info()[2].tb_frame.f_back return format_list(extract_stack(f, limit)) def extract_stack(f=None, limit = None): """Extract the raw traceback from the current stack frame. The return value has the same format as for extract_tb(). The optional 'f' and 'limit' arguments have the same meaning as for print_stack(). Each item in the list is a quadruple (filename, line number, function name, text), and the entries are in order from oldest to newest stack frame. 
""" if f is None: try: raise ZeroDivisionError except ZeroDivisionError: f = sys.exc_info()[2].tb_frame.f_back if limit is None: if hasattr(sys, 'tracebacklimit'): limit = sys.tracebacklimit list = [] n = 0 while f is not None and (limit is None or n < limit): lineno = f.f_lineno co = f.f_code filename = co.co_filename name = co.co_name linecache.checkcache(filename) line = linecache.getline(filename, lineno, f.f_globals) if line: line = line.strip() else: line = None list.append((filename, lineno, name, line)) f = f.f_back n = n+1 list.reverse() return list def tb_lineno(tb): """Calculate correct line number of traceback given in tb. Obsolete in 2.3. """ return tb.tb_lineno
unknown
codeparrot/codeparrot-clean
# -*- coding: utf-8 -*-
"""Validation and index-creation tests for MongoEngine geo fields.

Covers GeoPointField plus the GeoJSON field types (Point, LineString,
Polygon, MultiPoint, MultiLineString, MultiPolygon): each test feeds a
set of malformed coordinates and asserts the exact ValidationError
message, then validates a known-good value.  The index tests check that
'2d' / '2dsphere' indexes are declared (and created) automatically.

NOTE(review): setUp connects to a live local MongoDB ('mongoenginetest');
these are integration tests, not pure unit tests.
"""
import sys
sys.path[0:0] = [""]

import unittest

from mongoengine import *
from mongoengine.connection import get_db

__all__ = ("GeoFieldTest", )


class GeoFieldTest(unittest.TestCase):

    def setUp(self):
        # Requires a running mongod; all tests share the same test DB.
        connect(db='mongoenginetest')
        self.db = get_db()

    def _test_for_expected_error(self, Cls, loc, expected):
        """Assert that validating ``Cls(loc=loc)`` fails with exactly
        ``expected`` as the error text recorded for the ``loc`` field."""
        try:
            Cls(loc=loc).validate()
            self.fail('Should not validate the location {0}'.format(loc))
        except ValidationError as e:
            # Compare against the per-field message, not the whole error.
            self.assertEqual(expected, e.to_dict()['loc'])

    def test_geopoint_validation(self):
        """GeoPointField rejects non-sequences, wrong arity and non-numeric values."""
        class Location(Document):
            loc = GeoPointField()

        # Not a tuple/list at all.
        invalid_coords = [{"x": 1, "y": 2}, 5, "a"]
        expected = 'GeoPointField can only accept tuples or lists of (x, y)'
        for coord in invalid_coords:
            self._test_for_expected_error(Location, coord, expected)

        # Wrong number of elements.
        invalid_coords = [[], [1], [1, 2, 3]]
        for coord in invalid_coords:
            expected = "Value (%s) must be a two-dimensional point" % repr(coord)
            self._test_for_expected_error(Location, coord, expected)

        # Right arity but non-numeric members.
        invalid_coords = [[{}, {}], ("a", "b")]
        for coord in invalid_coords:
            expected = "Both values (%s) in point must be float or int" % repr(coord)
            self._test_for_expected_error(Location, coord, expected)

    def test_point_validation(self):
        """PointField accepts [x, y] or a GeoJSON Point dict; rejects everything else."""
        class Location(Document):
            loc = PointField()

        invalid_coords = {"x": 1, "y": 2}
        expected = 'PointField can only accept a valid GeoJson dictionary or lists of (x, y)'
        self._test_for_expected_error(Location, invalid_coords, expected)

        # Dict form must carry type == "Point".
        invalid_coords = {"type": "MadeUp", "coordinates": []}
        expected = 'PointField type must be "Point"'
        self._test_for_expected_error(Location, invalid_coords, expected)

        invalid_coords = {"type": "Point", "coordinates": [1, 2, 3]}
        expected = "Value ([1, 2, 3]) must be a two-dimensional point"
        self._test_for_expected_error(Location, invalid_coords, expected)

        invalid_coords = [5, "a"]
        expected = "PointField can only accept lists of [x, y]"
        for coord in invalid_coords:
            self._test_for_expected_error(Location, coord, expected)

        invalid_coords = [[], [1], [1, 2, 3]]
        for coord in invalid_coords:
            expected = "Value (%s) must be a two-dimensional point" % repr(coord)
            self._test_for_expected_error(Location, coord, expected)

        invalid_coords = [[{}, {}], ("a", "b")]
        for coord in invalid_coords:
            expected = "Both values (%s) in point must be float or int" % repr(coord)
            self._test_for_expected_error(Location, coord, expected)

        # Both accepted spellings of a valid point.
        Location(loc=[1, 2]).validate()
        Location(loc={
            "type": "Point",
            "coordinates": [
                81.4471435546875,
                23.61432859499169
            ]}).validate()

    def test_linestring_validation(self):
        """LineStringField requires a list of valid two-dimensional points."""
        class Location(Document):
            loc = LineStringField()

        invalid_coords = {"x": 1, "y": 2}
        expected = 'LineStringField can only accept a valid GeoJson dictionary or lists of (x, y)'
        self._test_for_expected_error(Location, invalid_coords, expected)

        invalid_coords = {"type": "MadeUp", "coordinates": [[]]}
        expected = 'LineStringField type must be "LineString"'
        self._test_for_expected_error(Location, invalid_coords, expected)

        invalid_coords = {"type": "LineString", "coordinates": [[1, 2, 3]]}
        expected = "Invalid LineString:\nValue ([1, 2, 3]) must be a two-dimensional point"
        self._test_for_expected_error(Location, invalid_coords, expected)

        invalid_coords = [5, "a"]
        expected = "Invalid LineString must contain at least one valid point"
        self._test_for_expected_error(Location, invalid_coords, expected)

        invalid_coords = [[1]]
        expected = "Invalid LineString:\nValue (%s) must be a two-dimensional point" % repr(invalid_coords[0])
        self._test_for_expected_error(Location, invalid_coords, expected)

        invalid_coords = [[1, 2, 3]]
        expected = "Invalid LineString:\nValue (%s) must be a two-dimensional point" % repr(invalid_coords[0])
        self._test_for_expected_error(Location, invalid_coords, expected)

        invalid_coords = [[[{}, {}]], [("a", "b")]]
        for coord in invalid_coords:
            expected = "Invalid LineString:\nBoth values (%s) in point must be float or int" % repr(coord[0])
            self._test_for_expected_error(Location, coord, expected)

        # A closed four-point line string is valid.
        Location(loc=[[1, 2], [3, 4], [5, 6], [1,2]]).validate()

    def test_polygon_validation(self):
        """PolygonField requires closed linestrings of valid points."""
        class Location(Document):
            loc = PolygonField()

        invalid_coords = {"x": 1, "y": 2}
        expected = 'PolygonField can only accept a valid GeoJson dictionary or lists of (x, y)'
        self._test_for_expected_error(Location, invalid_coords, expected)

        invalid_coords = {"type": "MadeUp", "coordinates": [[]]}
        expected = 'PolygonField type must be "Polygon"'
        self._test_for_expected_error(Location, invalid_coords, expected)

        invalid_coords = {"type": "Polygon", "coordinates": [[[1, 2, 3]]]}
        expected = "Invalid Polygon:\nValue ([1, 2, 3]) must be a two-dimensional point"
        self._test_for_expected_error(Location, invalid_coords, expected)

        invalid_coords = [[[5, "a"]]]
        expected = "Invalid Polygon:\nBoth values ([5, 'a']) in point must be float or int"
        self._test_for_expected_error(Location, invalid_coords, expected)

        invalid_coords = [[[]]]
        expected = "Invalid Polygon must contain at least one valid linestring"
        self._test_for_expected_error(Location, invalid_coords, expected)

        invalid_coords = [[[1, 2, 3]]]
        expected = "Invalid Polygon:\nValue ([1, 2, 3]) must be a two-dimensional point"
        self._test_for_expected_error(Location, invalid_coords, expected)

        # Multiple bad rings produce a comma-joined combined message.
        invalid_coords = [[[{}, {}]], [("a", "b")]]
        expected = "Invalid Polygon:\nBoth values ([{}, {}]) in point must be float or int, Both values (('a', 'b')) in point must be float or int"
        self._test_for_expected_error(Location, invalid_coords, expected)

        # Rings must be closed (first point == last point).
        invalid_coords = [[[1, 2], [3, 4]]]
        expected = "Invalid Polygon:\nLineStrings must start and end at the same point"
        self._test_for_expected_error(Location, invalid_coords, expected)

        Location(loc=[[[1, 2], [3, 4], [5, 6], [1, 2]]]).validate()

    def test_multipoint_validation(self):
        """MultiPointField requires a non-empty list of valid points."""
        class Location(Document):
            loc = MultiPointField()

        invalid_coords = {"x": 1, "y": 2}
        expected = 'MultiPointField can only accept a valid GeoJson dictionary or lists of (x, y)'
        self._test_for_expected_error(Location, invalid_coords, expected)

        invalid_coords = {"type": "MadeUp", "coordinates": [[]]}
        expected = 'MultiPointField type must be "MultiPoint"'
        self._test_for_expected_error(Location, invalid_coords, expected)

        invalid_coords = {"type": "MultiPoint", "coordinates": [[1, 2, 3]]}
        expected = "Value ([1, 2, 3]) must be a two-dimensional point"
        self._test_for_expected_error(Location, invalid_coords, expected)

        invalid_coords = [[]]
        expected = "Invalid MultiPoint must contain at least one valid point"
        self._test_for_expected_error(Location, invalid_coords, expected)

        invalid_coords = [[[1]], [[1, 2, 3]]]
        for coord in invalid_coords:
            expected = "Value (%s) must be a two-dimensional point" % repr(coord[0])
            self._test_for_expected_error(Location, coord, expected)

        invalid_coords = [[[{}, {}]], [("a", "b")]]
        for coord in invalid_coords:
            expected = "Both values (%s) in point must be float or int" % repr(coord[0])
            self._test_for_expected_error(Location, coord, expected)

        Location(loc=[[1, 2]]).validate()
        Location(loc={
            "type": "MultiPoint",
            "coordinates": [
                [1, 2],
                [81.4471435546875, 23.61432859499169]
            ]}).validate()

    def test_multilinestring_validation(self):
        """MultiLineStringField requires a list of valid linestrings."""
        class Location(Document):
            loc = MultiLineStringField()

        invalid_coords = {"x": 1, "y": 2}
        expected = 'MultiLineStringField can only accept a valid GeoJson dictionary or lists of (x, y)'
        self._test_for_expected_error(Location, invalid_coords, expected)

        invalid_coords = {"type": "MadeUp", "coordinates": [[]]}
        expected = 'MultiLineStringField type must be "MultiLineString"'
        self._test_for_expected_error(Location, invalid_coords, expected)

        invalid_coords = {"type": "MultiLineString", "coordinates": [[[1, 2, 3]]]}
        expected = "Invalid MultiLineString:\nValue ([1, 2, 3]) must be a two-dimensional point"
        self._test_for_expected_error(Location, invalid_coords, expected)

        invalid_coords = [5, "a"]
        expected = "Invalid MultiLineString must contain at least one valid linestring"
        self._test_for_expected_error(Location, invalid_coords, expected)

        invalid_coords = [[[1]]]
        expected = "Invalid MultiLineString:\nValue (%s) must be a two-dimensional point" % repr(invalid_coords[0][0])
        self._test_for_expected_error(Location, invalid_coords, expected)

        invalid_coords = [[[1, 2, 3]]]
        expected = "Invalid MultiLineString:\nValue (%s) must be a two-dimensional point" % repr(invalid_coords[0][0])
        self._test_for_expected_error(Location, invalid_coords, expected)

        invalid_coords = [[[[{}, {}]]], [[("a", "b")]]]
        for coord in invalid_coords:
            expected = "Invalid MultiLineString:\nBoth values (%s) in point must be float or int" % repr(coord[0][0])
            self._test_for_expected_error(Location, coord, expected)

        Location(loc=[[[1, 2], [3, 4], [5, 6], [1,2]]]).validate()

    def test_multipolygon_validation(self):
        """MultiPolygonField requires a list of valid closed polygons."""
        class Location(Document):
            loc = MultiPolygonField()

        invalid_coords = {"x": 1, "y": 2}
        expected = 'MultiPolygonField can only accept a valid GeoJson dictionary or lists of (x, y)'
        self._test_for_expected_error(Location, invalid_coords, expected)

        invalid_coords = {"type": "MadeUp", "coordinates": [[]]}
        expected = 'MultiPolygonField type must be "MultiPolygon"'
        self._test_for_expected_error(Location, invalid_coords, expected)

        invalid_coords = {"type": "MultiPolygon", "coordinates": [[[[1, 2, 3]]]]}
        expected = "Invalid MultiPolygon:\nValue ([1, 2, 3]) must be a two-dimensional point"
        self._test_for_expected_error(Location, invalid_coords, expected)

        invalid_coords = [[[[5, "a"]]]]
        expected = "Invalid MultiPolygon:\nBoth values ([5, 'a']) in point must be float or int"
        self._test_for_expected_error(Location, invalid_coords, expected)

        invalid_coords = [[[[]]]]
        expected = "Invalid MultiPolygon must contain at least one valid Polygon"
        self._test_for_expected_error(Location, invalid_coords, expected)

        invalid_coords = [[[[1, 2, 3]]]]
        expected = "Invalid MultiPolygon:\nValue ([1, 2, 3]) must be a two-dimensional point"
        self._test_for_expected_error(Location, invalid_coords, expected)

        # Multiple bad polygons produce a comma-joined combined message.
        invalid_coords = [[[[{}, {}]]], [[("a", "b")]]]
        expected = "Invalid MultiPolygon:\nBoth values ([{}, {}]) in point must be float or int, Both values (('a', 'b')) in point must be float or int"
        self._test_for_expected_error(Location, invalid_coords, expected)

        invalid_coords = [[[[1, 2], [3, 4]]]]
        expected = "Invalid MultiPolygon:\nLineStrings must start and end at the same point"
        self._test_for_expected_error(Location, invalid_coords, expected)

        Location(loc=[[[[1, 2], [3, 4], [5, 6], [1, 2]]]]).validate()

    def test_indexes_geopoint(self):
        """Ensure that indexes are created automatically for GeoPointFields.
        """
        class Event(Document):
            title = StringField()
            location = GeoPointField()

        geo_indicies = Event._geo_indices()
        self.assertEqual(geo_indicies, [{'fields': [('location', '2d')]}])

    def test_geopoint_embedded_indexes(self):
        """Ensure that indexes are created automatically for GeoPointFields on
        embedded documents.
        """
        class Venue(EmbeddedDocument):
            location = GeoPointField()
            name = StringField()

        class Event(Document):
            title = StringField()
            venue = EmbeddedDocumentField(Venue)

        # Index path is dotted through the embedded field.
        geo_indicies = Event._geo_indices()
        self.assertEqual(geo_indicies, [{'fields': [('venue.location', '2d')]}])

    def test_indexes_2dsphere(self):
        """Ensure that indexes are created automatically for GeoPointFields.
        """
        class Event(Document):
            title = StringField()
            point = PointField()
            line = LineStringField()
            polygon = PolygonField()

        geo_indicies = Event._geo_indices()
        self.assertTrue({'fields': [('line', '2dsphere')]} in geo_indicies)
        self.assertTrue({'fields': [('polygon', '2dsphere')]} in geo_indicies)
        self.assertTrue({'fields': [('point', '2dsphere')]} in geo_indicies)

    def test_indexes_2dsphere_embedded(self):
        """Ensure that indexes are created automatically for GeoPointFields.
        """
        class Venue(EmbeddedDocument):
            name = StringField()
            point = PointField()
            line = LineStringField()
            polygon = PolygonField()

        class Event(Document):
            title = StringField()
            venue = EmbeddedDocumentField(Venue)

        geo_indicies = Event._geo_indices()
        self.assertTrue({'fields': [('venue.line', '2dsphere')]} in geo_indicies)
        self.assertTrue({'fields': [('venue.polygon', '2dsphere')]} in geo_indicies)
        self.assertTrue({'fields': [('venue.point', '2dsphere')]} in geo_indicies)

    def test_geo_indexes_recursion(self):
        """Geo indexes are not followed through ReferenceFields (no recursion)."""
        class Location(Document):
            name = StringField()
            location = GeoPointField()

        class Parent(Document):
            name = StringField()
            location = ReferenceField(Location)

        Location.drop_collection()
        Parent.drop_collection()

        Parent(name='Berlin').save()

        # Parent must NOT inherit the referenced document's geo index.
        info = Parent._get_collection().index_information()
        self.assertFalse('location_2d' in info)
        info = Location._get_collection().index_information()
        self.assertTrue('location_2d' in info)

        self.assertEqual(len(Parent._geo_indices()), 0)
        self.assertEqual(len(Location._geo_indices()), 1)

    def test_geo_indexes_auto_index(self):
        """auto_index=False suppresses the automatic index so a compound one
        declared in meta['indexes'] is used instead."""

        # Test just listing the fields
        class Log(Document):
            location = PointField(auto_index=False)
            datetime = DateTimeField()

            meta = {
                'indexes': [[("location", "2dsphere"), ("datetime", 1)]]
            }

        # No automatic geo index should be declared.
        self.assertEqual([], Log._geo_indices())

        Log.drop_collection()
        Log.ensure_indexes()

        info = Log._get_collection().index_information()
        self.assertEqual(info["location_2dsphere_datetime_1"]["key"],
                         [('location', '2dsphere'), ('datetime', 1)])

        # Test listing explicitly
        class Log(Document):
            location = PointField(auto_index=False)
            datetime = DateTimeField()

            meta = {
                'indexes': [
                    {'fields': [("location", "2dsphere"), ("datetime", 1)]}
                ]
            }

        self.assertEqual([], Log._geo_indices())

        Log.drop_collection()
        Log.ensure_indexes()

        info = Log._get_collection().index_information()
        self.assertEqual(info["location_2dsphere_datetime_1"]["key"],
                         [('location', '2dsphere'), ('datetime', 1)])


if __name__ == '__main__':
    unittest.main()
unknown
codeparrot/codeparrot-clean
# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys import unittest import pytest import six import hyperledger class BaseTestCase(unittest.TestCase): def assertIn(self, object, collection): if six.PY2 and sys.version_info[1] <= 6: return self.assertTrue(object in collection) return super(BaseTestCase, self).assertIn(object, collection) def requires_api_version(version): return pytest.mark.skipif( hyperledger.utils.version_lt( hyperledger.constants.DEFAULT_HYPERLEDGER_API_VERSION, version ), reason="API version is too low (< {0})".format(version) ) class Cleanup(object): if sys.version_info < (2, 7): # Provide a basic implementation of addCleanup for Python < 2.7 def __init__(self, *args, **kwargs): super(Cleanup, self).__init__(*args, **kwargs) self._cleanups = [] def tearDown(self): super(Cleanup, self).tearDown() ok = True while self._cleanups: fn, args, kwargs = self._cleanups.pop(-1) try: fn(*args, **kwargs) except KeyboardInterrupt: raise except: ok = False if not ok: raise def addCleanup(self, function, *args, **kwargs): self._cleanups.append((function, args, kwargs))
unknown
codeparrot/codeparrot-clean
#!/usr/bin/python # -*- coding: utf-8 -*- DOCUMENTATION = ''' --- module: campfire version_added: "1.2" short_description: Send a message to Campfire description: - Send a message to Campfire. - Messages with newlines will result in a "Paste" message being sent. version_added: "1.2" options: subscription: description: - The subscription name to use. required: true token: description: - API token. required: true room: description: - Room number to which the message should be sent. required: true msg: description: - The message body. required: true notify: description: - Send a notification sound before the message. required: false choices: ["56k", "bell", "bezos", "bueller", "clowntown", "cottoneyejoe", "crickets", "dadgummit", "dangerzone", "danielsan", "deeper", "drama", "greatjob", "greyjoy", "guarantee", "heygirl", "horn", "horror", "inconceivable", "live", "loggins", "makeitso", "noooo", "nyan", "ohmy", "ohyeah", "pushit", "rimshot", "rollout", "rumble", "sax", "secret", "sexyback", "story", "tada", "tmyk", "trololo", "trombone", "unix", "vuvuzela", "what", "whoomp", "yeah", "yodel"] # informational: requirements for nodes requirements: [ urllib2, cgi ] author: '"Adam Garside (@fabulops)" <adam.garside@gmail.com>' ''' EXAMPLES = ''' - campfire: subscription=foo token=12345 room=123 msg="Task completed." - campfire: subscription=foo token=12345 room=123 notify=loggins msg="Task completed ... with feeling." 
''' def main(): try: import urllib2 except ImportError: module.fail_json(msg="urllib2 is required") try: import cgi except ImportError: module.fail_json(msg="cgi is required") module = AnsibleModule( argument_spec=dict( subscription=dict(required=True), token=dict(required=True), room=dict(required=True), msg=dict(required=True), notify=dict(required=False, choices=["56k", "bell", "bezos", "bueller", "clowntown", "cottoneyejoe", "crickets", "dadgummit", "dangerzone", "danielsan", "deeper", "drama", "greatjob", "greyjoy", "guarantee", "heygirl", "horn", "horror", "inconceivable", "live", "loggins", "makeitso", "noooo", "nyan", "ohmy", "ohyeah", "pushit", "rimshot", "rollout", "rumble", "sax", "secret", "sexyback", "story", "tada", "tmyk", "trololo", "trombone", "unix", "vuvuzela", "what", "whoomp", "yeah", "yodel"]), ), supports_check_mode=False ) subscription = module.params["subscription"] token = module.params["token"] room = module.params["room"] msg = module.params["msg"] notify = module.params["notify"] URI = "https://%s.campfirenow.com" % subscription NSTR = "<message><type>SoundMessage</type><body>%s</body></message>" MSTR = "<message><body>%s</body></message>" AGENT = "Ansible/1.2" try: # Setup basic auth using token as the username pm = urllib2.HTTPPasswordMgrWithDefaultRealm() pm.add_password(None, URI, token, 'X') # Setup Handler and define the opener for the request handler = urllib2.HTTPBasicAuthHandler(pm) opener = urllib2.build_opener(handler) target_url = '%s/room/%s/speak.xml' % (URI, room) # Send some audible notification if requested if notify: req = urllib2.Request(target_url, NSTR % cgi.escape(notify)) req.add_header('Content-Type', 'application/xml') req.add_header('User-agent', AGENT) response = opener.open(req) # Send the message req = urllib2.Request(target_url, MSTR % cgi.escape(msg)) req.add_header('Content-Type', 'application/xml') req.add_header('User-agent', AGENT) response = opener.open(req) except urllib2.HTTPError, e: if not (200 <= 
e.code < 300): module.fail_json(msg="unable to send msg: '%s', campfire api" " returned error code: '%s'" % (msg, e.code)) except Exception, e: module.fail_json(msg="unable to send msg: %s" % msg) module.exit_json(changed=True, room=room, msg=msg, notify=notify) # import module snippets from ansible.module_utils.basic import * main()
unknown
codeparrot/codeparrot-clean
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import time from openerp.osv import fields, osv from openerp.tools.translate import _ class hr_timesheet_invoice_factor(osv.osv): _name = "hr_timesheet_invoice.factor" _description = "Invoice Rate" _order = 'factor' _columns = { 'name': fields.char('Internal Name', required=True, translate=True), 'customer_name': fields.char('Name', help="Label for the customer"), 'factor': fields.float('Discount (%)', required=True, help="Discount in percentage"), } _defaults = { 'factor': lambda *a: 0.0, } class account_analytic_account(osv.osv): def _invoiced_calc(self, cr, uid, ids, name, arg, context=None): obj_invoice = self.pool.get('account.invoice') res = {} cr.execute('SELECT account_id as account_id, l.invoice_id ' 'FROM hr_analytic_timesheet h LEFT JOIN account_analytic_line l ' 'ON (h.line_id=l.id) ' 'WHERE l.account_id = ANY(%s)', (ids,)) account_to_invoice_map = {} for rec in cr.dictfetchall(): account_to_invoice_map.setdefault(rec['account_id'], []).append(rec['invoice_id']) for account in self.browse(cr, uid, ids, context=context): invoice_ids 
= filter(None, list(set(account_to_invoice_map.get(account.id, [])))) for invoice in obj_invoice.browse(cr, uid, invoice_ids, context=context): res.setdefault(account.id, 0.0) res[account.id] += invoice.amount_untaxed for id in ids: res[id] = round(res.get(id, 0.0),2) return res _inherit = "account.analytic.account" _columns = { 'pricelist_id': fields.many2one('product.pricelist', 'Pricelist', help="The product to invoice is defined on the employee form, the price will be deducted by this pricelist on the product."), 'amount_max': fields.float('Max. Invoice Price', help="Keep empty if this contract is not limited to a total fixed price."), 'amount_invoiced': fields.function(_invoiced_calc, string='Invoiced Amount', help="Total invoiced"), 'to_invoice': fields.many2one('hr_timesheet_invoice.factor', 'Timesheet Invoicing Ratio', help="You usually invoice 100% of the timesheets. But if you mix fixed price and timesheet invoicing, you may use another ratio. For instance, if you do a 20% advance invoice (fixed price, based on a sales order), you should invoice the rest on timesheet with a 80% ratio."), } def on_change_partner_id(self, cr, uid, ids, partner_id, name, context=None): res = super(account_analytic_account, self).on_change_partner_id(cr, uid, ids, partner_id, name, context=context) if partner_id: part = self.pool.get('res.partner').browse(cr, uid, partner_id, context=context) pricelist = part.property_product_pricelist and part.property_product_pricelist.id or False if pricelist: res['value']['pricelist_id'] = pricelist return res def set_close(self, cr, uid, ids, context=None): return self.write(cr, uid, ids, {'state': 'close'}, context=context) def set_cancel(self, cr, uid, ids, context=None): return self.write(cr, uid, ids, {'state': 'cancelled'}, context=context) def set_open(self, cr, uid, ids, context=None): return self.write(cr, uid, ids, {'state': 'open'}, context=context) def set_pending(self, cr, uid, ids, context=None): return self.write(cr, uid, 
ids, {'state': 'pending'}, context=context) class account_analytic_line(osv.osv): _inherit = 'account.analytic.line' _columns = { 'invoice_id': fields.many2one('account.invoice', 'Invoice', ondelete="set null", copy=False), 'to_invoice': fields.many2one('hr_timesheet_invoice.factor', 'Invoiceable', help="It allows to set the discount while making invoice, keep empty if the activities should not be invoiced."), } def _default_journal(self, cr, uid, context=None): proxy = self.pool.get('hr.employee') record_ids = proxy.search(cr, uid, [('user_id', '=', uid)], context=context) if record_ids: employee = proxy.browse(cr, uid, record_ids[0], context=context) return employee.journal_id and employee.journal_id.id or False return False def _default_general_account(self, cr, uid, context=None): proxy = self.pool.get('hr.employee') record_ids = proxy.search(cr, uid, [('user_id', '=', uid)], context=context) if record_ids: employee = proxy.browse(cr, uid, record_ids[0], context=context) if employee.product_id and employee.product_id.property_account_income: return employee.product_id.property_account_income.id return False _defaults = { 'journal_id' : _default_journal, 'general_account_id' : _default_general_account, } def write(self, cr, uid, ids, vals, context=None): self._check_inv(cr, uid, ids, vals) return super(account_analytic_line,self).write(cr, uid, ids, vals, context=context) def _check_inv(self, cr, uid, ids, vals): select = ids if isinstance(select, (int, long)): select = [ids] if ( not vals.has_key('invoice_id')) or vals['invoice_id' ] == False: for line in self.browse(cr, uid, select): if line.invoice_id: raise osv.except_osv(_('Error!'), _('You cannot modify an invoiced analytic line!')) return True def _get_invoice_price(self, cr, uid, account, product_id, user_id, qty, context = {}): pro_price_obj = self.pool.get('product.pricelist') if account.pricelist_id: pl = account.pricelist_id.id price = pro_price_obj.price_get(cr,uid,[pl], product_id, qty or 1.0, 
account.partner_id.id, context=context)[pl] else: price = 0.0 return price def _prepare_cost_invoice(self, cr, uid, partner, company_id, currency_id, analytic_lines, context=None): """ returns values used to create main invoice from analytic lines""" account_payment_term_obj = self.pool['account.payment.term'] invoice_name = analytic_lines[0].account_id.name date_due = False if partner.property_payment_term: pterm_list = account_payment_term_obj.compute(cr, uid, partner.property_payment_term.id, value=1, date_ref=time.strftime('%Y-%m-%d')) if pterm_list: pterm_list = [line[0] for line in pterm_list] pterm_list.sort() date_due = pterm_list[-1] return { 'name': "%s - %s" % (time.strftime('%d/%m/%Y'), invoice_name), 'partner_id': partner.id, 'company_id': company_id, 'payment_term': partner.property_payment_term.id or False, 'account_id': partner.property_account_receivable.id, 'currency_id': currency_id, 'date_due': date_due, 'fiscal_position': partner.property_account_position.id } def _prepare_cost_invoice_line(self, cr, uid, invoice_id, product_id, uom, user_id, factor_id, account, analytic_lines, journal_type, data, context=None): product_obj = self.pool['product.product'] uom_context = dict(context or {}, uom=uom) total_price = sum(l.amount for l in analytic_lines) total_qty = sum(l.unit_amount for l in analytic_lines) if data.get('product'): # force product, use its public price if isinstance(data['product'], (tuple, list)): product_id = data['product'][0] else: product_id = data['product'] unit_price = self._get_invoice_price(cr, uid, account, product_id, user_id, total_qty, uom_context) elif journal_type == 'general' and product_id: # timesheets, use sale price unit_price = self._get_invoice_price(cr, uid, account, product_id, user_id, total_qty, uom_context) else: # expenses, using price from amount field unit_price = total_price*-1.0 / total_qty factor = self.pool['hr_timesheet_invoice.factor'].browse(cr, uid, factor_id, context=uom_context) factor_name = 
factor.customer_name or '' curr_invoice_line = { 'price_unit': unit_price, 'quantity': total_qty, 'product_id': product_id, 'discount': factor.factor, 'invoice_id': invoice_id, 'name': factor_name, 'uos_id': uom, 'account_analytic_id': account.id, } if product_id: product = product_obj.browse(cr, uid, product_id, context=uom_context) factor_name = product_obj.name_get(cr, uid, [product_id], context=uom_context)[0][1] if factor.customer_name: factor_name += ' - ' + factor.customer_name general_account = product.property_account_income or product.categ_id.property_account_income_categ if not general_account: raise osv.except_osv(_('Error!'), _("Configuration Error!") + '\n' + _("Please define income account for product '%s'.") % product.name) taxes = product.taxes_id or general_account.tax_ids tax = self.pool['account.fiscal.position'].map_tax(cr, uid, account.partner_id.property_account_position, taxes) curr_invoice_line.update({ 'invoice_line_tax_id': [(6, 0, tax)], 'name': factor_name, 'invoice_line_tax_id': [(6, 0, tax)], 'account_id': general_account.id, }) note = [] for line in analytic_lines: # set invoice_line_note details = [] if data.get('date', False): details.append(line['date']) if data.get('time', False): if line['product_uom_id']: details.append("%s %s" % (line.unit_amount, line.product_uom_id.name)) else: details.append("%s" % (line['unit_amount'], )) if data.get('name', False): details.append(line['name']) if details: note.append(u' - '.join(map(lambda x: unicode(x) or '', details))) if note: curr_invoice_line['name'] += "\n" + ("\n".join(map(lambda x: unicode(x) or '', note))) return curr_invoice_line def invoice_cost_create(self, cr, uid, ids, data=None, context=None): invoice_obj = self.pool.get('account.invoice') invoice_line_obj = self.pool.get('account.invoice.line') analytic_line_obj = self.pool.get('account.analytic.line') invoices = [] if context is None: context = {} if data is None: data = {} # use key (partner/account, company, currency) 
# creates one invoice per key invoice_grouping = {} currency_id = False # prepare for iteration on journal and accounts for line in self.browse(cr, uid, ids, context=context): key = (line.account_id.id, line.account_id.company_id.id, line.account_id.pricelist_id.currency_id.id) invoice_grouping.setdefault(key, []).append(line) for (key_id, company_id, currency_id), analytic_lines in invoice_grouping.items(): # key_id is an account.analytic.account account = analytic_lines[0].account_id partner = account.partner_id # will be the same for every line if (not partner) or not (currency_id): raise osv.except_osv(_('Error!'), _('Contract incomplete. Please fill in the Customer and Pricelist fields for %s.') % (account.name)) curr_invoice = self._prepare_cost_invoice(cr, uid, partner, company_id, currency_id, analytic_lines, context=context) invoice_context = dict(context, lang=partner.lang, force_company=company_id, # set force_company in context so the correct product properties are selected (eg. 
income account) company_id=company_id) # set company_id in context, so the correct default journal will be selected last_invoice = invoice_obj.create(cr, uid, curr_invoice, context=invoice_context) invoices.append(last_invoice) # use key (product, uom, user, invoiceable, analytic account, journal type) # creates one invoice line per key invoice_lines_grouping = {} for analytic_line in analytic_lines: account = analytic_line.account_id if not analytic_line.to_invoice: raise osv.except_osv(_('Error!'), _('Trying to invoice non invoiceable line for %s.') % (analytic_line.product_id.name)) key = (analytic_line.product_id.id, analytic_line.product_uom_id.id, analytic_line.user_id.id, analytic_line.to_invoice.id, analytic_line.account_id, analytic_line.journal_id.type) # We want to retrieve the data in the partner language for the invoice creation analytic_line = analytic_line_obj.browse(cr, uid , [line.id for line in analytic_line], context=invoice_context) invoice_lines_grouping.setdefault(key, []).append(analytic_line) # finally creates the invoice line for (product_id, uom, user_id, factor_id, account, journal_type), lines_to_invoice in invoice_lines_grouping.items(): curr_invoice_line = self._prepare_cost_invoice_line(cr, uid, last_invoice, product_id, uom, user_id, factor_id, account, lines_to_invoice, journal_type, data, context=invoice_context) invoice_line_obj.create(cr, uid, curr_invoice_line, context=context) self.write(cr, uid, [l.id for l in analytic_lines], {'invoice_id': last_invoice}, context=context) invoice_obj.button_reset_taxes(cr, uid, [last_invoice], context) return invoices class hr_analytic_timesheet(osv.osv): _inherit = "hr.analytic.timesheet" def on_change_account_id(self, cr, uid, ids, account_id, user_id=False): res = {} if not account_id: return res res.setdefault('value',{}) acc = self.pool.get('account.analytic.account').browse(cr, uid, account_id) st = acc.to_invoice.id res['value']['to_invoice'] = st or False if acc.state=='pending': 
res['warning'] = { 'title': _('Warning'), 'message': _('The analytic account is in pending state.\nYou should not work on this account !') } return res class account_invoice(osv.osv): _inherit = "account.invoice" def _get_analytic_lines(self, cr, uid, ids, context=None): iml = super(account_invoice, self)._get_analytic_lines(cr, uid, ids, context=context) inv = self.browse(cr, uid, ids, context=context)[0] if inv.type == 'in_invoice': obj_analytic_account = self.pool.get('account.analytic.account') for il in iml: if il['account_analytic_id']: # *-* browse (or refactor to avoid read inside the loop) to_invoice = obj_analytic_account.read(cr, uid, [il['account_analytic_id']], ['to_invoice'], context=context)[0]['to_invoice'] if to_invoice: il['analytic_lines'][0][2]['to_invoice'] = to_invoice[0] return iml class account_move_line(osv.osv): _inherit = "account.move.line" def create_analytic_lines(self, cr, uid, ids, context=None): res = super(account_move_line, self).create_analytic_lines(cr, uid, ids,context=context) analytic_line_obj = self.pool.get('account.analytic.line') for move_line in self.browse(cr, uid, ids, context=context): #For customer invoice, link analytic line to the invoice so it is not proposed for invoicing in Bill Tasks Work invoice_id = move_line.invoice and move_line.invoice.type in ('out_invoice','out_refund') and move_line.invoice.id or False for line in move_line.analytic_lines: analytic_line_obj.write(cr, uid, line.id, { 'invoice_id': invoice_id, 'to_invoice': line.account_id.to_invoice and line.account_id.to_invoice.id or False }, context=context) return res # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
unknown
codeparrot/codeparrot-clean
# -*- coding: utf-8 -*- from django.test import TestCase from django.test.client import RequestFactory from djangular.core.urlresolvers import get_all_remote_methods, get_current_remote_methods from .urls import RemoteMethodsView class TemplateRemoteMethods(TestCase): urls = 'server.tests.urls' def setUp(self): self.factory = RequestFactory() def test_get_current_remote_methods(self): view = RemoteMethodsView() view.request = self.factory.get('/straight_methods/') remote_methods = get_current_remote_methods(view) self.assertDictEqual({'foo': {'url': '/straight_methods/', 'headers': {'DjNg-Remote-Method': 'foo'}, 'method': 'auto'}, 'bar': {'url': '/straight_methods/', 'headers': {'DjNg-Remote-Method': 'bar'}, 'method': 'auto'}}, remote_methods) def test_get_all_remote_methods(self): remote_methods = get_all_remote_methods() self.assertDictEqual(remote_methods, {'urlresolvertags': {'blah': {u'url': '/url_resolvers/', u'headers': {u'DjNg-Remote-Method': 'blah'}, u'method': 'auto'}}, 'submethods': {'sub': {'app': {'foo': {u'url': '/sub_methods/sub/app/', u'headers': {u'DjNg-Remote-Method': 'foo'}, u'method': 'auto'}, 'bar': {u'url': '/sub_methods/sub/app/', u'headers': {u'DjNg-Remote-Method': 'bar'}, u'method': 'auto'}}}}})
unknown
codeparrot/codeparrot-clean
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.common.telemetry.internals; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import java.time.Instant; import java.util.Collections; import java.util.HashMap; import java.util.Map; import io.opentelemetry.proto.metrics.v1.AggregationTemporality; import io.opentelemetry.proto.metrics.v1.Metric; import io.opentelemetry.proto.metrics.v1.NumberDataPoint; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertTrue; public class SinglePointMetricTest { private MetricKey metricKey; private Instant now; /* Test compares the metric representation from returned builder to ensure that the metric is constructed correctly. 
For example: Gauge metric with name "name" and double value 1.0 at certain time is represented as: name: "name" gauge { data_points { time_unix_nano: 1698063981021420000 as_double: 1.0 } } */ @BeforeEach public void setUp() { metricKey = new MetricKey("name", Collections.emptyMap()); now = Instant.now(); } @Test public void testGaugeWithNumberValue() { SinglePointMetric gaugeNumber = SinglePointMetric.gauge(metricKey, Long.valueOf(1), now, Collections.emptySet()); MetricKey metricKey = gaugeNumber.key(); assertEquals("name", metricKey.name()); Metric metric = gaugeNumber.builder().build(); assertEquals(1, metric.getGauge().getDataPointsCount()); NumberDataPoint point = metric.getGauge().getDataPoints(0); assertEquals(now.getEpochSecond() * Math.pow(10, 9) + now.getNano(), point.getTimeUnixNano()); assertEquals(0, point.getStartTimeUnixNano()); assertEquals(1, point.getAsInt()); assertEquals(0, point.getAttributesCount()); } @Test public void testGaugeWithDoubleValue() { SinglePointMetric gaugeNumber = SinglePointMetric.gauge(metricKey, 1.0, now, Collections.emptySet()); MetricKey metricKey = gaugeNumber.key(); assertEquals("name", metricKey.name()); Metric metric = gaugeNumber.builder().build(); assertEquals(1, metric.getGauge().getDataPointsCount()); NumberDataPoint point = metric.getGauge().getDataPoints(0); assertEquals(now.getEpochSecond() * Math.pow(10, 9) + now.getNano(), point.getTimeUnixNano()); assertEquals(0, point.getStartTimeUnixNano()); assertEquals(1.0, point.getAsDouble()); assertEquals(0, point.getAttributesCount()); } @Test public void testGaugeWithMetricTags() { MetricKey metricKey = new MetricKey("name", Collections.singletonMap("tag", "value")); SinglePointMetric gaugeNumber = SinglePointMetric.gauge(metricKey, 1.0, now, Collections.emptySet()); MetricKey key = gaugeNumber.key(); assertEquals("name", key.name()); Metric metric = gaugeNumber.builder().build(); assertEquals(1, metric.getGauge().getDataPointsCount()); NumberDataPoint point = 
metric.getGauge().getDataPoints(0); assertEquals(now.getEpochSecond() * Math.pow(10, 9) + now.getNano(), point.getTimeUnixNano()); assertEquals(0, point.getStartTimeUnixNano()); assertEquals(1.0, point.getAsDouble()); assertEquals(1, point.getAttributesCount()); assertEquals("tag", point.getAttributes(0).getKey()); assertEquals("value", point.getAttributes(0).getValue().getStringValue()); } @Test public void testGaugeNumberWithExcludeLabels() { Map<String, String> tags = new HashMap<>(); tags.put("tag1", "value1"); tags.put("tag2", "value2"); MetricKey metricKey = new MetricKey("name", tags); SinglePointMetric gaugeNumber = SinglePointMetric.gauge(metricKey, Long.valueOf(1), now, Collections.singleton("random")); Metric metric = gaugeNumber.builder().build(); assertEquals(1, metric.getGauge().getDataPointsCount()); NumberDataPoint point = metric.getGauge().getDataPoints(0); assertEquals(2, point.getAttributesCount()); for (int i = 0; i < point.getAttributesCount(); i++) { assertTrue( point.getAttributes(i).getKey().equals("tag1") || point.getAttributes(i).getKey().equals("tag2")); assertTrue( point.getAttributes(i).getValue().getStringValue().equals("value1") || point.getAttributes(i).getValue().getStringValue().equals("value2")); } gaugeNumber = SinglePointMetric.gauge(metricKey, Long.valueOf(1), now, Collections.singleton("tag1")); metric = gaugeNumber.builder().build(); assertEquals(1, metric.getGauge().getDataPointsCount()); point = metric.getGauge().getDataPoints(0); assertEquals(1, point.getAttributesCount()); assertEquals("tag2", point.getAttributes(0).getKey()); assertEquals("value2", point.getAttributes(0).getValue().getStringValue()); gaugeNumber = SinglePointMetric.gauge(metricKey, Long.valueOf(1), now, tags.keySet()); metric = gaugeNumber.builder().build(); assertEquals(1, metric.getGauge().getDataPointsCount()); point = metric.getGauge().getDataPoints(0); assertEquals(0, point.getAttributesCount()); } @Test public void 
testGaugeDoubleWithExcludeLabels() { Map<String, String> tags = new HashMap<>(); tags.put("tag1", "value1"); tags.put("tag2", "value2"); MetricKey metricKey = new MetricKey("name", tags); SinglePointMetric gaugeNumber = SinglePointMetric.gauge(metricKey, 1.0, now, Collections.singleton("random")); Metric metric = gaugeNumber.builder().build(); assertEquals(1, metric.getGauge().getDataPointsCount()); NumberDataPoint point = metric.getGauge().getDataPoints(0); assertEquals(2, point.getAttributesCount()); for (int i = 0; i < point.getAttributesCount(); i++) { assertTrue( point.getAttributes(i).getKey().equals("tag1") || point.getAttributes(i).getKey().equals("tag2")); assertTrue( point.getAttributes(i).getValue().getStringValue().equals("value1") || point.getAttributes(i).getValue().getStringValue().equals("value2")); } gaugeNumber = SinglePointMetric.gauge(metricKey, 1.0, now, Collections.singleton("tag1")); metric = gaugeNumber.builder().build(); assertEquals(1, metric.getGauge().getDataPointsCount()); point = metric.getGauge().getDataPoints(0); assertEquals(1, point.getAttributesCount()); assertEquals("tag2", point.getAttributes(0).getKey()); assertEquals("value2", point.getAttributes(0).getValue().getStringValue()); gaugeNumber = SinglePointMetric.gauge(metricKey, 1.0, now, tags.keySet()); metric = gaugeNumber.builder().build(); assertEquals(1, metric.getGauge().getDataPointsCount()); point = metric.getGauge().getDataPoints(0); assertEquals(0, point.getAttributesCount()); } @Test public void testSum() { SinglePointMetric sum = SinglePointMetric.sum(metricKey, 1.0, false, now, null, Collections.emptySet()); MetricKey key = sum.key(); assertEquals("name", key.name()); Metric metric = sum.builder().build(); assertFalse(metric.getSum().getIsMonotonic()); assertEquals(AggregationTemporality.AGGREGATION_TEMPORALITY_CUMULATIVE, metric.getSum().getAggregationTemporality()); assertEquals(1, metric.getSum().getDataPointsCount()); NumberDataPoint point = 
metric.getSum().getDataPoints(0); assertEquals(now.getEpochSecond() * Math.pow(10, 9) + now.getNano(), point.getTimeUnixNano()); assertEquals(0, point.getStartTimeUnixNano()); assertEquals(1.0, point.getAsDouble()); assertEquals(0, point.getAttributesCount()); } @Test public void testSumWithStartTimeAndTags() { MetricKey metricKey = new MetricKey("name", Collections.singletonMap("tag", "value")); SinglePointMetric sum = SinglePointMetric.sum(metricKey, 1.0, true, now, now, Collections.emptySet()); MetricKey key = sum.key(); assertEquals("name", key.name()); Metric metric = sum.builder().build(); assertTrue(metric.getSum().getIsMonotonic()); assertEquals(AggregationTemporality.AGGREGATION_TEMPORALITY_CUMULATIVE, metric.getSum().getAggregationTemporality()); assertEquals(1, metric.getSum().getDataPointsCount()); NumberDataPoint point = metric.getSum().getDataPoints(0); assertEquals(now.getEpochSecond() * Math.pow(10, 9) + now.getNano(), point.getTimeUnixNano()); assertEquals(now.getEpochSecond() * Math.pow(10, 9) + now.getNano(), point.getStartTimeUnixNano()); assertEquals(1.0, point.getAsDouble()); assertEquals(1, point.getAttributesCount()); assertEquals("tag", point.getAttributes(0).getKey()); assertEquals("value", point.getAttributes(0).getValue().getStringValue()); } @Test public void testSumWithExcludeLabels() { Map<String, String> tags = new HashMap<>(); tags.put("tag1", "value1"); tags.put("tag2", "value2"); MetricKey metricKey = new MetricKey("name", tags); SinglePointMetric gaugeNumber = SinglePointMetric.sum(metricKey, 1.0, true, now, Collections.singleton("random")); Metric metric = gaugeNumber.builder().build(); assertEquals(1, metric.getSum().getDataPointsCount()); NumberDataPoint point = metric.getSum().getDataPoints(0); assertEquals(2, point.getAttributesCount()); for (int i = 0; i < point.getAttributesCount(); i++) { assertTrue( point.getAttributes(i).getKey().equals("tag1") || point.getAttributes(i).getKey().equals("tag2")); assertTrue( 
point.getAttributes(i).getValue().getStringValue().equals("value1") || point.getAttributes(i).getValue().getStringValue().equals("value2")); } gaugeNumber = SinglePointMetric.sum(metricKey, 1.0, true, now, Collections.singleton("tag1")); metric = gaugeNumber.builder().build(); assertEquals(1, metric.getSum().getDataPointsCount()); point = metric.getSum().getDataPoints(0); assertEquals(1, point.getAttributesCount()); assertEquals("tag2", point.getAttributes(0).getKey()); assertEquals("value2", point.getAttributes(0).getValue().getStringValue()); gaugeNumber = SinglePointMetric.sum(metricKey, 1.0, true, now, tags.keySet()); metric = gaugeNumber.builder().build(); assertEquals(1, metric.getSum().getDataPointsCount()); point = metric.getSum().getDataPoints(0); assertEquals(0, point.getAttributesCount()); } @Test public void testDeltaSum() { SinglePointMetric sum = SinglePointMetric.deltaSum(metricKey, 1.0, true, now, now, Collections.emptySet()); MetricKey key = sum.key(); assertEquals("name", key.name()); Metric metric = sum.builder().build(); assertTrue(metric.getSum().getIsMonotonic()); assertEquals(AggregationTemporality.AGGREGATION_TEMPORALITY_DELTA, metric.getSum().getAggregationTemporality()); assertEquals(1, metric.getSum().getDataPointsCount()); NumberDataPoint point = metric.getSum().getDataPoints(0); assertEquals(now.getEpochSecond() * Math.pow(10, 9) + now.getNano(), point.getTimeUnixNano()); assertEquals(now.getEpochSecond() * Math.pow(10, 9) + now.getNano(), point.getStartTimeUnixNano()); assertEquals(1.0, point.getAsDouble()); assertEquals(0, point.getAttributesCount()); } @Test public void testDeltaSumWithExcludeLabels() { Map<String, String> tags = new HashMap<>(); tags.put("tag1", "value1"); tags.put("tag2", "value2"); MetricKey metricKey = new MetricKey("name", tags); SinglePointMetric gaugeNumber = SinglePointMetric.deltaSum(metricKey, 1.0, true, now, now, Collections.singleton("random")); Metric metric = gaugeNumber.builder().build(); 
assertEquals(1, metric.getSum().getDataPointsCount()); NumberDataPoint point = metric.getSum().getDataPoints(0); assertEquals(2, point.getAttributesCount()); for (int i = 0; i < point.getAttributesCount(); i++) { assertTrue( point.getAttributes(i).getKey().equals("tag1") || point.getAttributes(i).getKey().equals("tag2")); assertTrue( point.getAttributes(i).getValue().getStringValue().equals("value1") || point.getAttributes(i).getValue().getStringValue().equals("value2")); } gaugeNumber = SinglePointMetric.deltaSum(metricKey, 1.0, true, now, now, Collections.singleton("tag1")); metric = gaugeNumber.builder().build(); assertEquals(1, metric.getSum().getDataPointsCount()); point = metric.getSum().getDataPoints(0); assertEquals(1, point.getAttributesCount()); assertEquals("tag2", point.getAttributes(0).getKey()); assertEquals("value2", point.getAttributes(0).getValue().getStringValue()); gaugeNumber = SinglePointMetric.deltaSum(metricKey, 1.0, true, now, now, tags.keySet()); metric = gaugeNumber.builder().build(); assertEquals(1, metric.getSum().getDataPointsCount()); point = metric.getSum().getDataPoints(0); assertEquals(0, point.getAttributesCount()); } }
java
github
https://github.com/apache/kafka
clients/src/test/java/org/apache/kafka/common/telemetry/internals/SinglePointMetricTest.java
import time import traceback from datetime import date, datetime, timedelta from threading import Thread from django.core.exceptions import FieldError from django.db import DatabaseError, IntegrityError, connection from django.test import ( SimpleTestCase, TestCase, TransactionTestCase, ignore_warnings, skipUnlessDBFeature, ) from django.utils.encoding import DjangoUnicodeDecodeError from .models import ( Author, Book, DefaultPerson, ManualPrimaryKeyTest, Person, Profile, Publisher, Tag, Thing, ) class GetOrCreateTests(TestCase): def setUp(self): self.lennon = Person.objects.create( first_name='John', last_name='Lennon', birthday=date(1940, 10, 9) ) def test_get_or_create_method_with_get(self): created = Person.objects.get_or_create( first_name="John", last_name="Lennon", defaults={ "birthday": date(1940, 10, 9) } )[1] self.assertFalse(created) self.assertEqual(Person.objects.count(), 1) def test_get_or_create_method_with_create(self): created = Person.objects.get_or_create( first_name='George', last_name='Harrison', defaults={ 'birthday': date(1943, 2, 25) } )[1] self.assertTrue(created) self.assertEqual(Person.objects.count(), 2) def test_get_or_create_redundant_instance(self): """ If we execute the exact same statement twice, the second time, it won't create a Person. """ Person.objects.get_or_create( first_name='George', last_name='Harrison', defaults={ 'birthday': date(1943, 2, 25) } ) created = Person.objects.get_or_create( first_name='George', last_name='Harrison', defaults={ 'birthday': date(1943, 2, 25) } )[1] self.assertFalse(created) self.assertEqual(Person.objects.count(), 2) def test_get_or_create_invalid_params(self): """ If you don't specify a value or default value for all required fields, you will get an error. """ with self.assertRaises(IntegrityError): Person.objects.get_or_create(first_name="Tom", last_name="Smith") def test_get_or_create_with_pk_property(self): """ Using the pk property of a model is allowed. 
""" Thing.objects.get_or_create(pk=1) def test_get_or_create_on_related_manager(self): p = Publisher.objects.create(name="Acme Publishing") # Create a book through the publisher. book, created = p.books.get_or_create(name="The Book of Ed & Fred") self.assertTrue(created) # The publisher should have one book. self.assertEqual(p.books.count(), 1) # Try get_or_create again, this time nothing should be created. book, created = p.books.get_or_create(name="The Book of Ed & Fred") self.assertFalse(created) # And the publisher should still have one book. self.assertEqual(p.books.count(), 1) # Add an author to the book. ed, created = book.authors.get_or_create(name="Ed") self.assertTrue(created) # The book should have one author. self.assertEqual(book.authors.count(), 1) # Try get_or_create again, this time nothing should be created. ed, created = book.authors.get_or_create(name="Ed") self.assertFalse(created) # And the book should still have one author. self.assertEqual(book.authors.count(), 1) # Add a second author to the book. fred, created = book.authors.get_or_create(name="Fred") self.assertTrue(created) # The book should have two authors now. self.assertEqual(book.authors.count(), 2) # Create an Author not tied to any books. Author.objects.create(name="Ted") # There should be three Authors in total. The book object should have two. self.assertEqual(Author.objects.count(), 3) self.assertEqual(book.authors.count(), 2) # Try creating a book through an author. _, created = ed.books.get_or_create(name="Ed's Recipes", publisher=p) self.assertTrue(created) # Now Ed has two Books, Fred just one. self.assertEqual(ed.books.count(), 2) self.assertEqual(fred.books.count(), 1) # Use the publisher's primary key value instead of a model instance. _, created = ed.books.get_or_create(name='The Great Book of Ed', publisher_id=p.id) self.assertTrue(created) # Try get_or_create again, this time nothing should be created. 
_, created = ed.books.get_or_create(name='The Great Book of Ed', publisher_id=p.id) self.assertFalse(created) # The publisher should have three books. self.assertEqual(p.books.count(), 3) def test_defaults_exact(self): """ If you have a field named defaults and want to use it as an exact lookup, you need to use 'defaults__exact'. """ obj, created = Person.objects.get_or_create( first_name='George', last_name='Harrison', defaults__exact='testing', defaults={ 'birthday': date(1943, 2, 25), 'defaults': 'testing', } ) self.assertTrue(created) self.assertEqual(obj.defaults, 'testing') obj2, created = Person.objects.get_or_create( first_name='George', last_name='Harrison', defaults__exact='testing', defaults={ 'birthday': date(1943, 2, 25), 'defaults': 'testing', } ) self.assertFalse(created) self.assertEqual(obj, obj2) def test_callable_defaults(self): """ Callables in `defaults` are evaluated if the instance is created. """ obj, created = Person.objects.get_or_create( first_name="George", defaults={"last_name": "Harrison", "birthday": lambda: date(1943, 2, 25)}, ) self.assertTrue(created) self.assertEqual(date(1943, 2, 25), obj.birthday) def test_callable_defaults_not_called(self): def raise_exception(): raise AssertionError obj, created = Person.objects.get_or_create( first_name="John", last_name="Lennon", defaults={"birthday": lambda: raise_exception()}, ) class GetOrCreateTestsWithManualPKs(TestCase): def setUp(self): self.first_pk = ManualPrimaryKeyTest.objects.create(id=1, data="Original") def test_create_with_duplicate_primary_key(self): """ If you specify an existing primary key, but different other fields, then you will get an error and data will not be updated. 
""" with self.assertRaises(IntegrityError): ManualPrimaryKeyTest.objects.get_or_create(id=1, data="Different") self.assertEqual(ManualPrimaryKeyTest.objects.get(id=1).data, "Original") def test_get_or_create_raises_IntegrityError_plus_traceback(self): """ get_or_create should raise IntegrityErrors with the full traceback. This is tested by checking that a known method call is in the traceback. We cannot use assertRaises here because we need to inspect the actual traceback. Refs #16340. """ try: ManualPrimaryKeyTest.objects.get_or_create(id=1, data="Different") except IntegrityError: formatted_traceback = traceback.format_exc() self.assertIn('obj.save', formatted_traceback) # MySQL emits a warning when broken data is saved @ignore_warnings(module='django.db.backends.mysql.base') def test_savepoint_rollback(self): """ Regression test for #20463: the database connection should still be usable after a DataError or ProgrammingError in .get_or_create(). """ try: Person.objects.get_or_create( birthday=date(1970, 1, 1), defaults={'first_name': b"\xff", 'last_name': b"\xff"}) except (DatabaseError, DjangoUnicodeDecodeError): Person.objects.create( first_name="Bob", last_name="Ross", birthday=date(1950, 1, 1)) else: self.skipTest("This backend accepts broken utf-8.") def test_get_or_create_empty(self): """ If all the attributes on a model have defaults, get_or_create() doesn't require any arguments. """ DefaultPerson.objects.get_or_create() class GetOrCreateTransactionTests(TransactionTestCase): available_apps = ['get_or_create'] def test_get_or_create_integrityerror(self): """ Regression test for #15117. Requires a TransactionTestCase on databases that delay integrity checks until the end of transactions, otherwise the exception is never raised. 
""" try: Profile.objects.get_or_create(person=Person(id=1)) except IntegrityError: pass else: self.skipTest("This backend does not support integrity checks.") class GetOrCreateThroughManyToMany(TestCase): def test_get_get_or_create(self): tag = Tag.objects.create(text='foo') a_thing = Thing.objects.create(name='a') a_thing.tags.add(tag) obj, created = a_thing.tags.get_or_create(text='foo') self.assertFalse(created) self.assertEqual(obj.pk, tag.pk) def test_create_get_or_create(self): a_thing = Thing.objects.create(name='a') obj, created = a_thing.tags.get_or_create(text='foo') self.assertTrue(created) self.assertEqual(obj.text, 'foo') self.assertIn(obj, a_thing.tags.all()) def test_something(self): Tag.objects.create(text='foo') a_thing = Thing.objects.create(name='a') with self.assertRaises(IntegrityError): a_thing.tags.get_or_create(text='foo') class UpdateOrCreateTests(TestCase): def test_update(self): Person.objects.create( first_name='John', last_name='Lennon', birthday=date(1940, 10, 9) ) p, created = Person.objects.update_or_create( first_name='John', last_name='Lennon', defaults={ 'birthday': date(1940, 10, 10) } ) self.assertFalse(created) self.assertEqual(p.first_name, 'John') self.assertEqual(p.last_name, 'Lennon') self.assertEqual(p.birthday, date(1940, 10, 10)) def test_create(self): p, created = Person.objects.update_or_create( first_name='John', last_name='Lennon', defaults={ 'birthday': date(1940, 10, 10) } ) self.assertTrue(created) self.assertEqual(p.first_name, 'John') self.assertEqual(p.last_name, 'Lennon') self.assertEqual(p.birthday, date(1940, 10, 10)) def test_create_twice(self): params = { 'first_name': 'John', 'last_name': 'Lennon', 'birthday': date(1940, 10, 10), } Person.objects.update_or_create(**params) # If we execute the exact same statement, it won't create a Person. 
p, created = Person.objects.update_or_create(**params) self.assertFalse(created) def test_integrity(self): """ If you don't specify a value or default value for all required fields, you will get an error. """ with self.assertRaises(IntegrityError): Person.objects.update_or_create(first_name="Tom", last_name="Smith") def test_manual_primary_key_test(self): """ If you specify an existing primary key, but different other fields, then you will get an error and data will not be updated. """ ManualPrimaryKeyTest.objects.create(id=1, data="Original") with self.assertRaises(IntegrityError): ManualPrimaryKeyTest.objects.update_or_create(id=1, data="Different") self.assertEqual(ManualPrimaryKeyTest.objects.get(id=1).data, "Original") def test_with_pk_property(self): """ Using the pk property of a model is allowed. """ Thing.objects.update_or_create(pk=1) def test_error_contains_full_traceback(self): """ update_or_create should raise IntegrityErrors with the full traceback. This is tested by checking that a known method call is in the traceback. We cannot use assertRaises/assertRaises here because we need to inspect the actual traceback. Refs #16340. """ try: ManualPrimaryKeyTest.objects.update_or_create(id=1, data="Different") except IntegrityError: formatted_traceback = traceback.format_exc() self.assertIn('obj.save', formatted_traceback) def test_create_with_related_manager(self): """ Should be able to use update_or_create from the related manager to create a book. Refs #23611. """ p = Publisher.objects.create(name="Acme Publishing") book, created = p.books.update_or_create(name="The Book of Ed & Fred") self.assertTrue(created) self.assertEqual(p.books.count(), 1) def test_update_with_related_manager(self): """ Should be able to use update_or_create from the related manager to update a book. Refs #23611. 
""" p = Publisher.objects.create(name="Acme Publishing") book = Book.objects.create(name="The Book of Ed & Fred", publisher=p) self.assertEqual(p.books.count(), 1) name = "The Book of Django" book, created = p.books.update_or_create(defaults={'name': name}, id=book.id) self.assertFalse(created) self.assertEqual(book.name, name) self.assertEqual(p.books.count(), 1) def test_create_with_many(self): """ Should be able to use update_or_create from the m2m related manager to create a book. Refs #23611. """ p = Publisher.objects.create(name="Acme Publishing") author = Author.objects.create(name="Ted") book, created = author.books.update_or_create(name="The Book of Ed & Fred", publisher=p) self.assertTrue(created) self.assertEqual(author.books.count(), 1) def test_update_with_many(self): """ Should be able to use update_or_create from the m2m related manager to update a book. Refs #23611. """ p = Publisher.objects.create(name="Acme Publishing") author = Author.objects.create(name="Ted") book = Book.objects.create(name="The Book of Ed & Fred", publisher=p) book.authors.add(author) self.assertEqual(author.books.count(), 1) name = "The Book of Django" book, created = author.books.update_or_create(defaults={'name': name}, id=book.id) self.assertFalse(created) self.assertEqual(book.name, name) self.assertEqual(author.books.count(), 1) def test_defaults_exact(self): """ If you have a field named defaults and want to use it as an exact lookup, you need to use 'defaults__exact'. 
""" obj, created = Person.objects.update_or_create( first_name='George', last_name='Harrison', defaults__exact='testing', defaults={ 'birthday': date(1943, 2, 25), 'defaults': 'testing', } ) self.assertTrue(created) self.assertEqual(obj.defaults, 'testing') obj, created = Person.objects.update_or_create( first_name='George', last_name='Harrison', defaults__exact='testing', defaults={ 'birthday': date(1943, 2, 25), 'defaults': 'another testing', } ) self.assertFalse(created) self.assertEqual(obj.defaults, 'another testing') def test_create_callable_default(self): obj, created = Person.objects.update_or_create( first_name='George', last_name='Harrison', defaults={'birthday': lambda: date(1943, 2, 25)}, ) self.assertIs(created, True) self.assertEqual(obj.birthday, date(1943, 2, 25)) def test_update_callable_default(self): Person.objects.update_or_create( first_name='George', last_name='Harrison', birthday=date(1942, 2, 25), ) obj, created = Person.objects.update_or_create( first_name='George', defaults={'last_name': lambda: 'NotHarrison'}, ) self.assertIs(created, False) self.assertEqual(obj.last_name, 'NotHarrison') class UpdateOrCreateTransactionTests(TransactionTestCase): available_apps = ['get_or_create'] @skipUnlessDBFeature('has_select_for_update') @skipUnlessDBFeature('supports_transactions') def test_updates_in_transaction(self): """ Objects are selected and updated in a transaction to avoid race conditions. This test forces update_or_create() to hold the lock in another thread for a relatively long time so that it can update while it holds the lock. The updated field isn't a field in 'defaults', so update_or_create() shouldn't have an effect on it. 
""" lock_status = {'has_grabbed_lock': False} def birthday_sleep(): lock_status['has_grabbed_lock'] = True time.sleep(0.5) return date(1940, 10, 10) def update_birthday_slowly(): Person.objects.update_or_create( first_name='John', defaults={'birthday': birthday_sleep} ) # Avoid leaking connection for Oracle connection.close() def lock_wait(): # timeout after ~0.5 seconds for i in range(20): time.sleep(0.025) if lock_status['has_grabbed_lock']: return True return False Person.objects.create(first_name='John', last_name='Lennon', birthday=date(1940, 10, 9)) # update_or_create in a separate thread t = Thread(target=update_birthday_slowly) before_start = datetime.now() t.start() if not lock_wait(): self.skipTest('Database took too long to lock the row') # Update during lock Person.objects.filter(first_name='John').update(last_name='NotLennon') after_update = datetime.now() # Wait for thread to finish t.join() # The update remains and it blocked. updated_person = Person.objects.get(first_name='John') self.assertGreater(after_update - before_start, timedelta(seconds=0.5)) self.assertEqual(updated_person.last_name, 'NotLennon') class InvalidCreateArgumentsTests(SimpleTestCase): msg = "Invalid field name(s) for model Thing: 'nonexistent'." 
def test_get_or_create_with_invalid_defaults(self): with self.assertRaisesMessage(FieldError, self.msg): Thing.objects.get_or_create(name='a', defaults={'nonexistent': 'b'}) def test_get_or_create_with_invalid_kwargs(self): with self.assertRaisesMessage(FieldError, self.msg): Thing.objects.get_or_create(name='a', nonexistent='b') def test_update_or_create_with_invalid_defaults(self): with self.assertRaisesMessage(FieldError, self.msg): Thing.objects.update_or_create(name='a', defaults={'nonexistent': 'b'}) def test_update_or_create_with_invalid_kwargs(self): with self.assertRaisesMessage(FieldError, self.msg): Thing.objects.update_or_create(name='a', nonexistent='b') def test_multiple_invalid_fields(self): with self.assertRaisesMessage(FieldError, "Invalid field name(s) for model Thing: 'invalid', 'nonexistent'"): Thing.objects.update_or_create(name='a', nonexistent='b', defaults={'invalid': 'c'})
unknown
codeparrot/codeparrot-clean
#!/usr/bin/env python2.7
#
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Search Publisher application.

WSGI application for managing search data publishing.
"""

import cgi
import cgitb
import logging
import logging.config
from StringIO import StringIO
import sys
import urlparse

from common import exceptions
from serve import constants
from serve import http_io
from serve.publish.search import search_publish_handler

# Load logging configuration file.
logging.config.fileConfig("/opt/google/gehttpd/conf/ge_logging.conf",
                          disable_existing_loggers=False)

# Get logger.
logger = logging.getLogger("ge_search_publisher")


class SearchPublishApp(object):
  """Search data publish application class.

  Implements WSGI application interface.
  """
  STATUS_OK = "200 OK"
  STATUS_ERROR = "500 Internal Server Error"
  RESPONSE_HEADERS = [("Content-type", "text/html")]

  def __init__(self):
    """Initialises search publish application."""
    self._publish_handler = search_publish_handler.SearchPublishHandler()

  def __call__(self, environ, start_response):
    """Executes an application.

    Parses HTTP requests into internal request object and delegates
    processing to SearchPublishHandler.

    Args:
      environ: WSGI environment.
      start_response: callable that starts response.
    Returns:
      response body.
    """
    request_method = "GET"
    if "REQUEST_METHOD" in environ:
      request_method = environ["REQUEST_METHOD"]

    # Get parameters from HTTP request.
    request = http_io.Request()
    if request_method == "GET":
      # GET: parameters come from the query string via FieldStorage.
      form = cgi.FieldStorage(fp=environ["wsgi.input"], environ=environ)
      for key in form.keys():
        request.SetParameter(key, form.getvalue(key, ""))
    else:
      # POST: read the body (bounded by CONTENT_LENGTH) and parse it as a
      # urlencoded command request.
      try:
        request_body_size = int(environ.get("CONTENT_LENGTH", 0))
      except ValueError:
        request_body_size = 0

      post_input = environ["wsgi.input"].read(request_body_size)
      # Fixed: use the module's named logger ("ge_search_publisher") instead
      # of the root logger for consistency with the rest of this module.
      logger.debug("POST request body: %s", post_input)
      self.__ParsePostInput(post_input, request)

    response = http_io.Response()
    if request.parameters:
      self._publish_handler.DoRequest(request, response)
    else:
      logger.error("Internal Error - Request has no parameters.")
      http_io.ResponseWriter.AddJsonFailureBody(
          response, "Internal Error - Request has no parameters")

    try:
      start_response(SearchPublishApp.STATUS_OK,
                     SearchPublishApp.RESPONSE_HEADERS)
      return response.body
    except Exception:
      # start_response failed; report the error with exc_info per the WSGI
      # spec and return a formatted traceback page as the body.
      exc_info = sys.exc_info()
      start_response(SearchPublishApp.STATUS_ERROR,
                     SearchPublishApp.RESPONSE_HEADERS,
                     exc_info)
      return self.__FormatException(exc_info)

  def __FormatException(self, exc_info):
    """Formats exception info into an HTML page using cgitb.

    Args:
      exc_info: (type, value, traceback) tuple from sys.exc_info().
    Returns:
      single-element list with the HTML-formatted traceback.
    """
    dummy_file = StringIO()
    hook = cgitb.Hook(file=dummy_file)
    hook(*exc_info)
    return [dummy_file.getvalue()]

  def __ParsePostInput(self, post_input, request):
    """Parses a urlencoded POST body into the internal request object.

    Args:
      post_input: raw urlencoded POST body.
      request: http_io.Request to populate with parameters.
    Raises:
      exceptions.SearchPublishServeException: on an unknown command.
    """
    post_dct = urlparse.parse_qs(post_input)
    cmd = post_dct.get(constants.CMD, [""])[0]
    if not cmd:
      return

    # Extract all the parameters for corresponding command.
    if cmd == constants.CMD_ADD_SEARCH_DEF:
      logger.debug("%s: %s",
                   constants.CMD, post_dct.get(constants.CMD, [""])[0])
      logger.debug("%s: %s",
                   constants.SEARCH_DEF_NAME,
                   post_dct.get(constants.SEARCH_DEF_NAME, [""])[0])
      logger.debug("%s: %s",
                   constants.SEARCH_DEF,
                   post_dct.get(constants.SEARCH_DEF, [""])[0])
      request.SetParameter(
          constants.SEARCH_DEF_NAME,
          post_dct.get(constants.SEARCH_DEF_NAME, [""])[0])
      request.SetParameter(
          constants.SEARCH_DEF,
          post_dct.get(constants.SEARCH_DEF, [""])[0])
    else:
      raise exceptions.SearchPublishServeException(
          "Internal Error - Invalid Request Command: %s." % cmd)

    request.SetParameter(constants.CMD, cmd)


# application instance to use by server.
application = SearchPublishApp()


def main():
  pass


# Fixed: the guard previously compared against "main", which is never the
# value of __name__, so main() could not run when executed as a script.
if __name__ == "__main__":
  main()
unknown
codeparrot/codeparrot-clean
{ "groups": [ { "name": "relaxed", "type": "com.example.SourceType" } ], "properties": [ { "name": "test.two", "type": "java.lang.String" }, { "name": "test.four", "type": "java.util.Map<java.lang.String,java.lang.String>" }, { "name": "wrong.one", "deprecation": { "reason": "This is no longer supported.", "level": "error" } }, { "name": "wrong.two", "type": "java.lang.String", "deprecation": { "replacement": "test.two", "level": "error" } }, { "name": "wrong.three", "deprecation": { "level": "error" } }, { "name": "wrong.four.test", "type": "java.lang.String", "deprecation": { "replacement": "test.four.test", "level": "error" } }, { "name": "custom.map-no-replacement", "type": "java.util.Map<java.lang.String,java.lang.String>", "deprecation": { "reason": "This is no longer supported." } }, { "name": "custom.map-with-replacement", "type": "java.util.Map<java.lang.String,java.lang.String>", "deprecation": { "replacement": "custom.the-map-replacement" } }, { "name": "custom.the-map-replacement", "type": "java.util.Map<java.lang.String,java.lang.String>" }, { "name": "relaxed.thisthat-theother", "type": "java.lang.String", "deprecation": { "replacement": "relaxed.this-that-the-other" } }, { "name": "relaxed.this-that-the-other", "type": "java.lang.String", "sourceType": "com.example.SourceType" } ] }
json
github
https://github.com/spring-projects/spring-boot
core/spring-boot-properties-migrator/src/test/resources/metadata/sample-metadata.json
//===--- RelativePointer.h - Relative Pointer Support -----------*- C++ -*-===// // // This source file is part of the Swift.org open source project // // Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors // Licensed under Apache License v2.0 with Runtime Library Exception // // See https://swift.org/LICENSE.txt for license information // See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors // //===----------------------------------------------------------------------===// /// /// \file /// /// Some data structures emitted by the Swift compiler use relative indirect /// addresses in order to minimize startup cost for a process. By referring to /// the offset of the global offset table entry for a symbol, instead of /// directly referring to the symbol, compiler-emitted data structures avoid /// requiring unnecessary relocation at dynamic linking time. This header /// contains types to help dereference these relative addresses. /// /// Theory of references to objects /// ------------------------------- /// /// A reference can be absolute or relative: /// /// - An absolute reference is a pointer to the object. /// /// - A relative reference is a (signed) offset from the address of the /// reference to the address of its direct referent. /// /// A relative reference can be direct, indirect, or symbolic. /// /// In a direct reference, the direct referent is simply the target object. /// Generally, a statically-emitted relative reference can only be direct /// if it can be resolved to a constant offset by the linker, because loaders /// do not support forming relative references. This means that either the /// reference and object must lie within the same linkage unit or the /// difference must be computed at runtime by code. /// /// In a symbolic reference, the direct referent is a string holding the symbol /// name of the object. 
A relative reference can only be symbolic if the /// object actually has a symbol at runtime, which may require exporting /// many internal symbols that would otherwise be strippable. /// /// In an indirect reference, the direct referent is a variable holding an /// absolute reference to the object. An indirect relative reference may /// refer to an arbitrary symbol, be it anonymous within the linkage unit /// or completely external to it, but it requires the introduction of an /// intermediate absolute reference that requires load-time initialization. /// However, this initialization can be shared among all indirect references /// within the linkage unit, and the linker will generally place all such /// references adjacent to one another to improve load-time locality. /// /// A reference can be made a dynamic union of more than one of these options. /// This allows the compiler/linker to use a direct reference when possible /// and a less-efficient option where required. However, it also requires /// the cases to be dynamically distinguished. This can be done by setting /// a low bit of the offset, as long as the difference between the direct /// referent's address and the reference is a multiple of 2. This works well /// for "indirectable" references because most objects are known to be /// well-aligned, and the cases that aren't (chiefly functions and strings) /// rarely need the flexibility of this kind of reference. It does not /// work quite as well for "possibly symbolic" references because C strings /// are not naturally aligned, and making them aligned generally requires /// moving them out of the linker's ordinary string section; however, it's /// still workable. /// /// Finally, a relative reference can be near or far. A near reference /// is potentially smaller, but it requires the direct referent to lie /// within a certain distance of the reference, even if dynamically /// initialized. 
/// /// In Swift, we always prefer to use a near direct relative reference /// when it is possible to do so: that is, when the relationship is always /// between two global objects emitted in the same linkage unit, and there /// is no compatibility constraint requiring the use of an absolute reference. /// /// When more flexibility is required, there are several options: /// /// 1. Use an absolute reference. Size penalty on 64-bit. Requires /// load-time work. /// /// 2. Use a far direct relative reference. Size penalty on 64-bit. /// Requires load-time work when object is outside linkage unit. /// Generally not directly supported by loaders. /// /// 3. Use an always-indirect relative reference. Size penalty of one /// pointer (shared). Requires load-time work even when object is /// within linkage unit. /// /// 4. Use a near indirectable relative reference. Size penalty of one /// pointer (shared) when reference exceeds range. Runtime / code-size /// penalty on access. Requires load-time work (shared) only when /// object is outside linkage unit. /// /// 5. Use a far indirectable relative reference. Size penalty on 64-bit. /// Size penalty of one pointer (shared) when reference exceeds range /// and is initialized statically. Runtime / code-size penalty on access. /// Requires load-time work (shared) only when object is outside linkage /// unit. /// /// 6. Use a near or far symbolic relative reference. No load-time work. /// Severe runtime penalty on access. Requires custom logic to statically /// optimize. Requires emission of symbol for target even if private /// to linkage unit. /// /// 7. Use a near or far direct-or-symbolic relative reference. No /// load-time work. Severe runtime penalty on access if object is /// outside of linkage unit. Requires custom logic to statically optimize. /// /// In general, it's our preference in Swift to use option #4 when there /// is no possibility of initializing the reference dynamically and option #5 /// when there is. 
This is because it is infeasible to actually share the
/// memory for the intermediate absolute reference when it must be allocated
/// dynamically.
///
/// Symbolic references are an interesting idea that we have not yet made
/// use of. They may be acceptable in reflective metadata cases where it
/// is desirable to heavily bias towards never using the metadata. However,
/// they're only profitable if there wasn't any other indirect reference
/// to the target, and it is likely that their optimal use requires a more
/// intelligent toolchain from top to bottom.
///
/// Note that the cost of load-time work also includes a binary-size penalty
/// to store the loader metadata necessary to perform that work. Therefore
/// it is better to avoid it even when there are dynamic optimizations in
/// place to skip the work itself.
///
//===----------------------------------------------------------------------===//

#ifndef SWIFT_BASIC_RELATIVEPOINTER_H
#define SWIFT_BASIC_RELATIVEPOINTER_H

#include <cassert>
#include <cstdint>
#include <type_traits>
#include <utility>

namespace swift {

namespace detail {

/// Apply a relative offset to a base pointer. The offset is applied to the base
/// pointer using sign-extended, wrapping arithmetic.
template<typename BasePtrTy, typename Offset>
static inline uintptr_t applyRelativeOffset(BasePtrTy *basePtr, Offset offset) {
  static_assert(std::is_integral<Offset>::value &&
                std::is_signed<Offset>::value,
                "offset type should be signed integer");

  auto base = reinterpret_cast<uintptr_t>(basePtr);
  // We want to do wrapping arithmetic, but with a sign-extended
  // offset. To do this in C, we need to do signed promotion to get
  // the sign extension, but we need to perform arithmetic on unsigned values,
  // since signed overflow is undefined behavior.
  auto extendOffset = (uintptr_t)(intptr_t)offset;
  return base + extendOffset;
}

/// Measure the relative offset between two pointers. This measures
/// (referent - base) using wrapping arithmetic. The result is truncated if
/// Offset is smaller than a pointer, with an assertion that the
/// pre-truncation result is a sign extension of the truncated result.
template<typename Offset, typename A, typename B>
static inline Offset measureRelativeOffset(A *referent, B *base) {
  static_assert(std::is_integral<Offset>::value &&
                std::is_signed<Offset>::value,
                "offset type should be signed integer");

  auto distance = (uintptr_t)referent - (uintptr_t)base;
  // Truncate as unsigned, then wrap around to signed.
  auto truncatedDistance =
    (Offset)(typename std::make_unsigned<Offset>::type)distance;
  // Assert that the truncation didn't discard any non-sign-extended bits.
  assert((intptr_t)truncatedDistance == (intptr_t)distance &&
         "pointers are too far apart to fit in offset type");
  return truncatedDistance;
}

} // namespace detail

/// A relative reference to an object stored in memory. The reference is
/// always indirect: the offset, applied to `this`, gives the address of an
/// absolute pointer variable, which in turn points at the object.
/// (Unlike RelativeIndirectablePointer below, no low-bit tagging is used.)
template<typename ValueTy, bool Nullable = false, typename Offset = int32_t>
class RelativeIndirectPointer {
private:
  static_assert(std::is_integral<Offset>::value &&
                std::is_signed<Offset>::value,
                "offset type should be signed integer");

  /// The relative offset of the pointer's memory from the `this` pointer.
  /// This is an indirect reference.
  Offset RelativeOffset;

  /// RelativePointers should appear in statically-generated metadata. They
  /// shouldn't be constructed or copied.
  RelativeIndirectPointer() = delete;
  RelativeIndirectPointer(RelativeIndirectPointer &&) = delete;
  RelativeIndirectPointer(const RelativeIndirectPointer &) = delete;
  RelativeIndirectPointer &operator=(RelativeIndirectPointer &&) = delete;
  RelativeIndirectPointer &operator=(const RelativeIndirectPointer &) = delete;

public:
  const ValueTy *get() const & {
    // Check for null.
    if (Nullable && RelativeOffset == 0)
      return nullptr;

    // Resolve the offset to the intermediate absolute pointer, then load it.
    uintptr_t address = detail::applyRelativeOffset(this, RelativeOffset);
    return *reinterpret_cast<const ValueTy * const *>(address);
  }

  /// A zero relative offset encodes a null reference.
  bool isNull() const & {
    return RelativeOffset == 0;
  }

  operator const ValueTy* () const & {
    return get();
  }

  const ValueTy *operator->() const & {
    return get();
  }
};

/// A relative reference to an object stored in memory. The reference may be
/// direct or indirect, and uses the low bit of the (assumed at least
/// 2-byte-aligned) pointer to differentiate.
template<typename ValueTy, bool Nullable = false, typename Offset = int32_t,
         typename IndirectType = const ValueTy *>
class RelativeIndirectablePointer {
private:
  static_assert(std::is_integral<Offset>::value &&
                std::is_signed<Offset>::value,
                "offset type should be signed integer");

  /// The relative offset of the pointer's memory from the `this` pointer.
  /// If the low bit is clear, this is a direct reference; otherwise, it is
  /// an indirect reference.
  Offset RelativeOffsetPlusIndirect;

  /// RelativePointers should appear in statically-generated metadata. They
  /// shouldn't be constructed or copied.
  RelativeIndirectablePointer() = delete;
  RelativeIndirectablePointer(RelativeIndirectablePointer &&) = delete;
  RelativeIndirectablePointer(const RelativeIndirectablePointer &) = delete;
  RelativeIndirectablePointer &operator=(RelativeIndirectablePointer &&)
    = delete;
  RelativeIndirectablePointer &operator=(const RelativeIndirectablePointer &)
    = delete;

public:
  /// Allow construction and reassignment from an absolute pointer.
  /// These always produce a direct relative offset.
  RelativeIndirectablePointer(ValueTy *absolute)
    : RelativeOffsetPlusIndirect(
        Nullable && absolute == nullptr
          ? 0
          : detail::measureRelativeOffset<Offset>(absolute, this)) {
    if (!Nullable)
      assert(absolute != nullptr &&
             "constructing non-nullable relative pointer from null");
  }

  RelativeIndirectablePointer &operator=(ValueTy *absolute) & {
    if (!Nullable)
      assert(absolute != nullptr &&
             "constructing non-nullable relative pointer from null");

    RelativeOffsetPlusIndirect = Nullable && absolute == nullptr
      ? 0
      : detail::measureRelativeOffset<Offset>(absolute, this);
    return *this;
  }

  const ValueTy *get() const & {
    static_assert(alignof(ValueTy) >= 2 && alignof(Offset) >= 2,
                  "alignment of value and offset must be at least 2 to "
                  "make room for indirectable flag");

    // Check for null.
    if (Nullable && RelativeOffsetPlusIndirect == 0)
      return nullptr;

    Offset offsetPlusIndirect = RelativeOffsetPlusIndirect;
    // Mask off the indirect flag (low bit) before applying the offset.
    uintptr_t address = detail::applyRelativeOffset(this,
                                                    offsetPlusIndirect & ~1);

    // If the low bit is set, then this is an indirect address. Otherwise,
    // it's direct.
    if (offsetPlusIndirect & 1) {
      return *reinterpret_cast<IndirectType const *>(address);
    } else {
      return reinterpret_cast<const ValueTy *>(address);
    }
  }

  /// A zero relative offset encodes a null reference.
  bool isNull() const & {
    return RelativeOffsetPlusIndirect == 0;
  }

  operator const ValueTy* () const & {
    return get();
  }

  const ValueTy *operator->() const & {
    return get();
  }
};

/// A relative reference to an aligned object stored in memory. The reference
/// may be direct or indirect, and uses the low bit of the (assumed at least
/// 2-byte-aligned) pointer to differentiate. The remaining low bits store
/// an additional tiny integer value.
template<typename ValueTy, typename IntTy, bool Nullable = false,
         typename Offset = int32_t, typename IndirectType = const ValueTy *>
class RelativeIndirectablePointerIntPair {
private:
  static_assert(std::is_integral<Offset>::value &&
                std::is_signed<Offset>::value,
                "offset type should be signed integer");

  /// The relative offset of the pointer's memory from the `this` pointer.
  /// If the low bit is clear, this is a direct reference; otherwise, it is
  /// an indirect reference.
  Offset RelativeOffsetPlusIndirectAndInt;

  /// RelativePointers should appear in statically-generated metadata. They
  /// shouldn't be constructed or copied.
  RelativeIndirectablePointerIntPair() = delete;
  RelativeIndirectablePointerIntPair(
      RelativeIndirectablePointerIntPair &&) = delete;
  RelativeIndirectablePointerIntPair(
      const RelativeIndirectablePointerIntPair &) = delete;
  RelativeIndirectablePointerIntPair& operator=(
      RelativeIndirectablePointerIntPair &&) = delete;
  RelativeIndirectablePointerIntPair &operator=(
      const RelativeIndirectablePointerIntPair &) = delete;

  // Retrieve the mask for the stored integer value: the alignment bits of
  // Offset above the low (indirect-flag) bit.
  static Offset getIntMask() {
    return (alignof(Offset) - 1) & ~(Offset)0x01;
  }

public:
  const ValueTy *getPointer() const & {
    Offset offset = getUnresolvedOffset();

    // Check for null.
    if (Nullable && offset == 0)
      return nullptr;

    Offset offsetPlusIndirect = offset;
    uintptr_t address = detail::applyRelativeOffset(this,
                                                    offsetPlusIndirect & ~1);

    // If the low bit is set, then this is an indirect address. Otherwise,
    // it's direct.
    if (offsetPlusIndirect & 1) {
      return *reinterpret_cast<const IndirectType *>(address);
    } else {
      return reinterpret_cast<const ValueTy *>(address);
    }
  }

  /// Return the offset with the integer-payload bits cleared (the indirect
  /// flag bit is kept, so the result is "offset plus indirect").
  Offset getUnresolvedOffset() const & {
    static_assert(alignof(ValueTy) >= 2 && alignof(Offset) >= 2,
                  "alignment of value and offset must be at least 2 to "
                  "make room for indirectable flag");
    Offset offset = (RelativeOffsetPlusIndirectAndInt & ~getIntMask());
    return offset;
  }

  /// A zero relative offset encodes a null reference.
  bool isNull() const & {
    return getUnresolvedOffset() == 0;
  }

  IntTy getInt() const & {
    return IntTy((RelativeOffsetPlusIndirectAndInt & getIntMask()) >> 1);
  }
};

/// A relative reference to a function, intended to reference private metadata
/// functions for the current executable or dynamic library image from
/// position-independent constant data.
template<typename T, bool Nullable, typename Offset>
class RelativeDirectPointerImpl {
#if SWIFT_COMPACT_ABSOLUTE_FUNCTION_POINTER
  static_assert(!std::is_function<T>::value, "relative direct function pointer should not be used under absolute function pointer mode");
#endif
private:
  /// The relative offset of the function's entry point from *this.
  Offset RelativeOffset;

  /// RelativePointers should appear in statically-generated metadata. They
  /// shouldn't be constructed or copied.
  RelativeDirectPointerImpl() = delete;
  RelativeDirectPointerImpl(RelativeDirectPointerImpl &&) = delete;
  RelativeDirectPointerImpl(const RelativeDirectPointerImpl &) = delete;
  RelativeDirectPointerImpl &operator=(RelativeDirectPointerImpl &&)
    = delete;
  RelativeDirectPointerImpl &operator=(const RelativeDirectPointerImpl &)
    = delete;

public:
  using ValueTy = T;
  using PointerTy = T*;

  // Allow construction and reassignment from an absolute pointer.
  RelativeDirectPointerImpl(PointerTy absolute)
    : RelativeOffset(Nullable && absolute == nullptr
                       ? 0
                       : detail::measureRelativeOffset<Offset>(absolute,
                                                               this)) {
    if (!Nullable)
      assert(absolute != nullptr &&
             "constructing non-nullable relative pointer from null");
  }

  explicit constexpr RelativeDirectPointerImpl(std::nullptr_t)
    : RelativeOffset (0) {
    static_assert(Nullable, "can't construct non-nullable pointer from null");
  }

  RelativeDirectPointerImpl &operator=(PointerTy absolute) & {
    if (!Nullable)
      assert(absolute != nullptr &&
             "constructing non-nullable relative pointer from null");
    RelativeOffset = Nullable && absolute == nullptr
      ? 0
      : detail::measureRelativeOffset<Offset>(absolute, this);
    return *this;
  }

  PointerTy get() const & {
    // Check for null.
    if (Nullable && RelativeOffset == 0)
      return nullptr;

    // The value is addressed relative to `this`.
    uintptr_t absolute = detail::applyRelativeOffset(this, RelativeOffset);
    return reinterpret_cast<PointerTy>(absolute);
  }

  /// Resolve like get(), but return the raw address as void* without
  /// casting to PointerTy (used by the function-pointer specialization,
  /// which applies its own cast/signing).
  void *getWithoutCast() const & {
    // Check for null.
    if (Nullable && RelativeOffset == 0)
      return nullptr;

    // The value is addressed relative to `this`.
    uintptr_t absolute = detail::applyRelativeOffset(this, RelativeOffset);
    return reinterpret_cast<void *>(absolute);
  }

  /// Apply the offset to a parameter, instead of `this`.
  PointerTy getRelative(void *base) const & {
    return resolve(base, RelativeOffset);
  }

  /// A zero relative offset encodes a null reference.
  bool isNull() const & {
    return RelativeOffset == 0;
  }

  /// Resolve a pointer from a `base` pointer and a value loaded from `base`.
  template<typename BasePtrTy>
  static PointerTy resolve(BasePtrTy *base, Offset value) {
    // Check for null.
    if (Nullable && value == 0)
      return nullptr;

    // The value is addressed relative to `base`.
    uintptr_t absolute = detail::applyRelativeOffset(base, value);
    return reinterpret_cast<PointerTy>(absolute);
  }
};

// Primary template; specialized below for function and non-function T.
// Note that Nullable defaults to true here, unlike the indirect(able)
// pointer templates above.
template <typename T, bool Nullable = true, typename Offset = int32_t,
          typename = void>
class RelativeDirectPointer;

/// A direct relative reference to an object that is not a function pointer.
template <typename T, bool Nullable, typename Offset>
class RelativeDirectPointer<T, Nullable, Offset,
    typename std::enable_if<!std::is_function<T>::value>::type>
  : private RelativeDirectPointerImpl<T, Nullable, Offset> {
  using super = RelativeDirectPointerImpl<T, Nullable, Offset>;
public:
  using super::get;
  using super::super;

  RelativeDirectPointer &operator=(T *absolute) & {
    super::operator=(absolute);
    return *this;
  }

  operator typename super::PointerTy() const & {
    return this->get();
  }

  const typename super::ValueTy *operator->() const & {
    return this->get();
  }

  const typename super::ValueTy* getRelative(void *base) const & {
    return this->super::getRelative(base);
  }

  using super::isNull;
  using super::resolve;
};

/// A specialization of RelativeDirectPointer for function pointers,
/// allowing for calls.
template<typename T, bool Nullable, typename Offset>
class RelativeDirectPointer<T, Nullable, Offset,
    typename std::enable_if<std::is_function<T>::value>::type>
  : private RelativeDirectPointerImpl<T, Nullable, Offset> {
  using super = RelativeDirectPointerImpl<T, Nullable, Offset>;
public:
  using super::super;

  RelativeDirectPointer &operator=(T absolute) & {
    super::operator=(absolute);
    return *this;
  }

  typename super::PointerTy get() const & {
    void *ptr = this->super::getWithoutCast();
#if SWIFT_PTRAUTH
    // Under pointer authentication, sign the resolved entry point as a
    // function pointer before handing it out.
    if (Nullable && !ptr)
      return nullptr;
    return reinterpret_cast<T *>(
        ptrauth_sign_unauthenticated(ptr, ptrauth_key_function_pointer, 0));
#else
    return reinterpret_cast<T *>(ptr);
#endif
  }

  operator typename super::PointerTy() const & {
    return this->get();
  }

  /// Invoke the referenced function directly.
  template <typename... ArgTy>
  typename std::invoke_result<T*, ArgTy...>::type
  operator()(ArgTy... arg) const {
#if SWIFT_PTRAUTH
    void *ptr = this->super::getWithoutCast();
    return reinterpret_cast<T *>(ptrauth_sign_unauthenticated(
        ptr, ptrauth_key_function_pointer, 0))(std::forward<ArgTy>(arg)...);
#else
    return this->super::get()(std::forward<ArgTy>(arg)...);
#endif
  }

  using super::isNull;
  using super::resolve;
};

/// A direct relative reference to an aligned object, with an additional
/// tiny integer value crammed into its low bits.
template<typename PointeeTy, typename IntTy, bool Nullable = false,
         typename Offset = int32_t>
class RelativeDirectPointerIntPairImpl {
  Offset RelativeOffsetPlusInt;

  /// RelativePointers should appear in statically-generated metadata. They
  /// shouldn't be constructed or copied.
  RelativeDirectPointerIntPairImpl() = delete;
  RelativeDirectPointerIntPairImpl(RelativeDirectPointerIntPairImpl &&)
    = delete;
  RelativeDirectPointerIntPairImpl(const RelativeDirectPointerIntPairImpl &)
    = delete;
  RelativeDirectPointerIntPairImpl &operator=(
      RelativeDirectPointerIntPairImpl &&) = delete;
  RelativeDirectPointerIntPairImpl &operator=(
      const RelativeDirectPointerIntPairImpl&) = delete;

  // Mask covering the alignment bits of Offset, which hold the integer
  // payload (no indirect flag in this direct-only variant).
  static Offset getMask() {
    return alignof(Offset) - 1;
  }

public:
  using ValueTy = PointeeTy;
  using PointerTy = PointeeTy*;

  Offset getOffset() const & {
    return RelativeOffsetPlusInt & ~getMask();
  }

  PointerTy getPointer() const & {
    Offset offset = getOffset();

    // Check for null.
    if (Nullable && offset == 0)
      return nullptr;

    // The value is addressed relative to `this`.
    uintptr_t absolute = detail::applyRelativeOffset(this, offset);
    return reinterpret_cast<PointerTy>(absolute);
  }

  IntTy getInt() const & {
    return IntTy(RelativeOffsetPlusInt & getMask());
  }

  Offset getOpaqueValue() const & {
    return RelativeOffsetPlusInt;
  }
};

/// A direct relative reference to an aligned object, with an additional
/// tiny integer value crammed into its low bits.
// Primary template; specialized below for non-function pointee types.
template<typename PointeeTy, typename IntTy, bool Nullable = false,
         typename Offset = int32_t, typename = void>
class RelativeDirectPointerIntPair;

template<typename PointeeTy, typename IntTy, bool Nullable, typename Offset>
class RelativeDirectPointerIntPair<PointeeTy, IntTy, Nullable, Offset,
    typename std::enable_if<!std::is_function<PointeeTy>::value>::type>
  : private RelativeDirectPointerIntPairImpl<PointeeTy, IntTy, Nullable,
                                             Offset> {
  using super = RelativeDirectPointerIntPairImpl<PointeeTy, IntTy, Nullable,
                                                 Offset>;
public:
  using super::getOffset;
  using super::getPointer;
  using super::getInt;
  using super::getOpaqueValue;
};

// Type aliases for "far" relative pointers, which need to be able to reach
// across the full address space instead of only across a single small-code-
// model image.

template<typename T, bool Nullable = false>
using FarRelativeIndirectablePointer =
  RelativeIndirectablePointer<T, Nullable, intptr_t>;

template<typename T, bool Nullable = false>
using FarRelativeDirectPointer = RelativeDirectPointer<T, Nullable, intptr_t>;

} // end namespace swift

#endif // SWIFT_BASIC_RELATIVEPOINTER_H
c
github
https://github.com/apple/swift
include/swift/Basic/RelativePointer.h
# -*- coding: utf-8 -*- # Copyright 2010-2015, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """A utility tool to run pnacl-translate for all archtectures. Example usage: The following command generates stripped nexefile_arm.nexe and nexefile_x86_32.nexe and nexefile_x86_64.nexe. 
python pnacl_translate.py --command=/path/to/toolchain/linux_pnacl \ --input=/path/to/pexefile --output_base=/path/to/nexefile \ --configuration=Release """ import optparse import os import shutil import subprocess import sys import tempfile def Translate(toolchain_root, input_file, output_base): """Translates the input file for three architectures.""" targets = (('arm', 'arm'), ('x86-32', 'x86_32'), ('x86-64', 'x86_64')) translate_command = os.path.join(toolchain_root, 'bin/pnacl-translate') for target in targets: cmd = (translate_command, '--allow-llvm-bitcode-input', '-arch', target[0], input_file, '-o', '%s_%s.nexe' % (output_base, target[1])) print 'Running: ' + ' '.join(cmd) if subprocess.Popen(cmd).wait() != 0: print >> sys.stderr, 'ERROR: ' + ' '.join(cmd) raise RuntimeError('Translate Error') print 'Done: ' + ' '.join(cmd) def StripAndTranslate(toolchain_root, input_file, output_base): """Strips and translates the input file for three architectures.""" strip_command = os.path.join(toolchain_root, 'bin/pnacl-strip') try: temp_dir = tempfile.mkdtemp() temp_file_base = os.path.join(temp_dir, 'stripped') cmd = (strip_command, input_file, '-o', temp_file_base) print 'Running: ' + ' '.join(cmd) if subprocess.Popen(cmd).wait() != 0: print >> sys.stderr, 'ERROR: ' + ' '.join(cmd) raise RuntimeError('Strip Error') print 'Done: ' + ' '.join(cmd) Translate(toolchain_root, temp_file_base, temp_file_base) targets = ('arm', 'x86_32', 'x86_64') for target in targets: cmd = (strip_command, '%s_%s.nexe' % (temp_file_base, target), '-o', '%s_%s.nexe' % (output_base, target)) print 'Running: ' + ' '.join(cmd) if subprocess.Popen(cmd).wait() != 0: print >> sys.stderr, 'ERROR: ' + ' '.join(cmd) raise RuntimeError('Strip Error') print 'Done: ' + ' '.join(cmd) finally: shutil.rmtree(temp_dir) def main(): """Translate pexe file to x86-32 and x86-64 and arm nexe files.""" parser = optparse.OptionParser(usage='Usage: %prog') parser.add_option('--toolchain_root', 
dest='toolchain_root', help='pnacl toolchain root path') parser.add_option('--input', dest='input', help='input pexe file') parser.add_option('--output_base', dest='output_base', help='output base path') parser.add_option('--configuration', dest='configuration', help='build configuration') (options, _) = parser.parse_args() if not options.toolchain_root: print >> sys.stderr, 'Error: toolchain_root is not set.' sys.exit(1) if not options.input: print >> sys.stderr, 'Error: input is not set.' sys.exit(1) if not options.output_base: print >> sys.stderr, 'Error: output_base is not set.' sys.exit(1) if options.configuration == 'Release': return StripAndTranslate(options.toolchain_root, options.input, options.output_base) else: return Translate(options.toolchain_root, options.input, options.output_base) if __name__ == '__main__': main()
unknown
codeparrot/codeparrot-clean
# Copyright 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import os import tempfile import unittest from telemetry.core import browser_credentials SIMPLE_CREDENTIALS_STRING = """ { "google": { "username": "example", "password": "asdf" } } """ class BackendStub(object): def __init__(self, credentials_type): self.login_needed_called = None self.login_no_longer_needed_called = None self.credentials_type = credentials_type def LoginNeeded(self, config, _, tab): self.login_needed_called = (config, tab) return True def LoginNoLongerNeeded(self, tab): self.login_no_longer_needed_called = (tab, ) class TestBrowserCredentials(unittest.TestCase): def testCredentialsInfrastructure(self): google_backend = BackendStub("google") othersite_backend = BackendStub("othersite") browser_cred = browser_credentials.BrowserCredentials( [google_backend, othersite_backend]) try: with tempfile.NamedTemporaryFile(delete=False) as f: f.write(SIMPLE_CREDENTIALS_STRING) browser_cred.credentials_path = f.name # Should true because it has a password and a backend. self.assertTrue(browser_cred.CanLogin('google')) # Should be false succeed because it has no password. self.assertFalse(browser_cred.CanLogin('othersite')) # Should fail because it has no backend. self.assertRaises( Exception, lambda: browser_cred.CanLogin('foobar')) tab = {} ret = browser_cred.LoginNeeded(tab, 'google') self.assertTrue(ret) self.assertTrue(google_backend.login_needed_called is not None) self.assertEqual(tab, google_backend.login_needed_called[0]) self.assertEqual("example", google_backend.login_needed_called[1]["username"]) self.assertEqual("asdf", google_backend.login_needed_called[1]["password"]) browser_cred.LoginNoLongerNeeded(tab, 'google') self.assertTrue(google_backend.login_no_longer_needed_called is not None) self.assertEqual(tab, google_backend.login_no_longer_needed_called[0]) finally: os.remove(f.name)
unknown
codeparrot/codeparrot-clean
<?php /* * This file is part of the Symfony package. * * (c) Fabien Potencier <fabien@symfony.com> * * For the full copyright and license information, please view the LICENSE * file that was distributed with this source code. */ namespace Symfony\Component\HttpClient; use Symfony\Component\HttpClient\Exception\TransportException; use Symfony\Component\HttpClient\Response\AsyncContext; use Symfony\Component\HttpClient\Response\AsyncResponse; use Symfony\Component\HttpFoundation\IpUtils; use Symfony\Contracts\HttpClient\ChunkInterface; use Symfony\Contracts\HttpClient\HttpClientInterface; use Symfony\Contracts\HttpClient\ResponseInterface; use Symfony\Contracts\Service\ResetInterface; /** * Decorator that blocks requests to private networks by default. * * @author Hallison Boaventura <hallisonboaventura@gmail.com> * @author Nicolas Grekas <p@tchwork.com> */ final class NoPrivateNetworkHttpClient implements HttpClientInterface, ResetInterface { use AsyncDecoratorTrait; use HttpClientTrait; private array $defaultOptions = self::OPTIONS_DEFAULTS; private HttpClientInterface $client; private ?array $subnets; private int $ipFlags; private \ArrayObject $dnsCache; /** * @param string|array|null $subnets String or array of subnets using CIDR notation that should be considered private. * If null is passed, the standard private subnets will be used. */ public function __construct(HttpClientInterface $client, string|array|null $subnets = null) { if (!class_exists(IpUtils::class)) { throw new \LogicException(\sprintf('You cannot use "%s" if the HttpFoundation component is not installed. Try running "composer require symfony/http-foundation".', __CLASS__)); } if (null === $subnets) { $ipFlags = \FILTER_FLAG_IPV4 | \FILTER_FLAG_IPV6; } else { $ipFlags = 0; foreach ((array) $subnets as $subnet) { $ipFlags |= str_contains($subnet, ':') ? 
\FILTER_FLAG_IPV6 : \FILTER_FLAG_IPV4; } } if (!\defined('STREAM_PF_INET6')) { $ipFlags &= ~\FILTER_FLAG_IPV6; } $this->client = $client; $this->subnets = null !== $subnets ? (array) $subnets : null; $this->ipFlags = $ipFlags; $this->dnsCache = new \ArrayObject(); } public function request(string $method, string $url, array $options = []): ResponseInterface { [$url, $options] = self::prepareRequest($method, $url, $options, $this->defaultOptions, true); $redirectHeaders = parse_url($url['authority']); $host = $redirectHeaders['host']; $url = implode('', $url); $dnsCache = $this->dnsCache; $ip = self::dnsResolve($dnsCache, $host, $this->ipFlags, $options); self::ipCheck($ip, $this->subnets, $this->ipFlags, $host, $url); $onProgress = $options['on_progress'] ?? null; $subnets = $this->subnets; $ipFlags = $this->ipFlags; $options['on_progress'] = static function (int $dlNow, int $dlSize, array $info) use ($onProgress, $subnets, $ipFlags): void { static $lastPrimaryIp = ''; if (!\in_array($info['primary_ip'] ?? 
'', ['', $lastPrimaryIp], true)) { self::ipCheck($info['primary_ip'], $subnets, $ipFlags, null, $info['url']); $lastPrimaryIp = $info['primary_ip']; } null !== $onProgress && $onProgress($dlNow, $dlSize, $info); }; if (0 >= $maxRedirects = $options['max_redirects']) { return new AsyncResponse($this->client, $method, $url, $options); } $options['max_redirects'] = 0; $redirectHeaders['with_auth'] = $redirectHeaders['no_auth'] = $options['headers']; if (isset($options['normalized_headers']['host']) || isset($options['normalized_headers']['authorization']) || isset($options['normalized_headers']['cookie'])) { $redirectHeaders['no_auth'] = array_filter($redirectHeaders['no_auth'], static fn ($h) => 0 !== stripos($h, 'Host:') && 0 !== stripos($h, 'Authorization:') && 0 !== stripos($h, 'Cookie:')); } return new AsyncResponse($this->client, $method, $url, $options, static function (ChunkInterface $chunk, AsyncContext $context) use (&$method, &$options, $maxRedirects, &$redirectHeaders, $subnets, $ipFlags, $dnsCache): \Generator { if (null !== $chunk->getError() || $chunk->isTimeout() || !$chunk->isFirst()) { yield $chunk; return; } $statusCode = $context->getStatusCode(); if ($statusCode < 300 || 400 <= $statusCode || null === $url = $context->getInfo('redirect_url')) { $context->passthru(); yield $chunk; return; } $host = parse_url($url, \PHP_URL_HOST); $ip = self::dnsResolve($dnsCache, $host, $ipFlags, $options); self::ipCheck($ip, $subnets, $ipFlags, $host, $url); // Do like curl and browsers: turn POST to GET on 301, 302 and 303 if (303 === $statusCode || 'POST' === $method && \in_array($statusCode, [301, 302], true)) { $method = 'HEAD' === $method ? 
'HEAD' : 'GET'; unset($options['body'], $options['json']); if (isset($options['normalized_headers']['content-length']) || isset($options['normalized_headers']['content-type']) || isset($options['normalized_headers']['transfer-encoding'])) { $filterContentHeaders = static fn ($h) => 0 !== stripos($h, 'Content-Length:') && 0 !== stripos($h, 'Content-Type:') && 0 !== stripos($h, 'Transfer-Encoding:'); $options['headers'] = array_filter($options['headers'], $filterContentHeaders); $redirectHeaders['no_auth'] = array_filter($redirectHeaders['no_auth'], $filterContentHeaders); $redirectHeaders['with_auth'] = array_filter($redirectHeaders['with_auth'], $filterContentHeaders); } } // Authorization and Cookie headers MUST NOT follow except for the initial host name $port = parse_url($url, \PHP_URL_PORT); $options['headers'] = $redirectHeaders['host'] === $host && ($redirectHeaders['port'] ?? null) === $port ? $redirectHeaders['with_auth'] : $redirectHeaders['no_auth']; static $redirectCount = 0; $context->setInfo('redirect_count', ++$redirectCount); $context->replaceRequest($method, $url, $options); if ($redirectCount >= $maxRedirects) { $context->passthru(); } }); } public function withOptions(array $options): static { $clone = clone $this; $clone->client = $this->client->withOptions($options); $clone->defaultOptions = self::mergeDefaultOptions($options, $this->defaultOptions); return $clone; } public function reset(): void { $this->dnsCache->exchangeArray([]); if ($this->client instanceof ResetInterface) { $this->client->reset(); } } private static function dnsResolve(\ArrayObject $dnsCache, string $host, int $ipFlags, array &$options): string { if ($ip = filter_var(trim($host, '[]'), \FILTER_VALIDATE_IP) ?: $options['resolve'][$host] ?? 
false) { return $ip; } if ($dnsCache->offsetExists($host)) { return $dnsCache[$host]; } if ((\FILTER_FLAG_IPV4 & $ipFlags) && $ip = gethostbynamel($host)) { return $options['resolve'][$host] = $dnsCache[$host] = $ip[0]; } if (!(\FILTER_FLAG_IPV6 & $ipFlags)) { return $host; } if ($ip = dns_get_record($host, \DNS_AAAA)) { $ip = $ip[0]['ipv6']; } elseif (\extension_loaded('sockets')) { if (!$info = socket_addrinfo_lookup($host, 0, ['ai_socktype' => \SOCK_STREAM, 'ai_family' => \AF_INET6])) { return $host; } $ip = socket_addrinfo_explain($info[0])['ai_addr']['sin6_addr']; } elseif ('localhost' === $host || 'localhost.' === $host) { $ip = '::1'; } else { return $host; } return $options['resolve'][$host] = $dnsCache[$host] = $ip; } private static function ipCheck(string $ip, ?array $subnets, int $ipFlags, ?string $host, string $url): void { if (null === $subnets) { // Quick check, but not reliable enough, see https://github.com/php/php-src/issues/16944 $ipFlags |= \FILTER_FLAG_NO_PRIV_RANGE | \FILTER_FLAG_NO_RES_RANGE; } if (false !== filter_var($ip, \FILTER_VALIDATE_IP, $ipFlags) && !IpUtils::checkIp($ip, $subnets ?? IpUtils::PRIVATE_SUBNETS)) { return; } if (null !== $host) { $type = 'Host'; } else { $host = $ip; $type = 'IP'; } throw new TransportException($type.\sprintf(' "%s" is blocked for "%s".', $host, $url)); } }
php
github
https://github.com/symfony/symfony
src/Symfony/Component/HttpClient/NoPrivateNetworkHttpClient.php
/* Copyright 2016 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package testing import ( "math/rand" "reflect" "sort" "testing" "github.com/google/go-cmp/cmp" "sigs.k8s.io/randfill" apiv1 "k8s.io/api/core/v1" extensionsv1beta1 "k8s.io/api/extensions/v1beta1" "k8s.io/apimachinery/pkg/api/apitesting/roundtrip" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/kubernetes/pkg/api/legacyscheme" ) type orderedGroupVersionKinds []schema.GroupVersionKind func (o orderedGroupVersionKinds) Len() int { return len(o) } func (o orderedGroupVersionKinds) Swap(i, j int) { o[i], o[j] = o[j], o[i] } func (o orderedGroupVersionKinds) Less(i, j int) bool { return o[i].String() < o[j].String() } // TODO: add a reflexive test that verifies that all SetDefaults functions are registered func TestDefaulting(t *testing.T) { // these are the known types with defaulters - you must add to this list if you add a top level defaulter typesWithDefaulting := map[schema.GroupVersionKind]struct{}{ {Group: "", Version: "v1", Kind: "ConfigMap"}: {}, {Group: "", Version: "v1", Kind: "ConfigMapList"}: {}, {Group: "", Version: "v1", Kind: "Endpoints"}: {}, {Group: "", Version: "v1", Kind: "EndpointsList"}: {}, {Group: "", Version: "v1", Kind: "EphemeralContainers"}: {}, {Group: "", Version: "v1", Kind: "Namespace"}: {}, {Group: "", Version: "v1", Kind: "NamespaceList"}: {}, {Group: "", Version: "v1", Kind: "Node"}: {}, {Group: "", 
Version: "v1", Kind: "NodeList"}: {}, {Group: "", Version: "v1", Kind: "PersistentVolume"}: {}, {Group: "", Version: "v1", Kind: "PersistentVolumeList"}: {}, {Group: "", Version: "v1", Kind: "PersistentVolumeClaim"}: {}, {Group: "", Version: "v1", Kind: "PersistentVolumeClaimList"}: {}, {Group: "", Version: "v1", Kind: "Pod"}: {}, {Group: "", Version: "v1", Kind: "PodList"}: {}, {Group: "", Version: "v1", Kind: "PodTemplate"}: {}, {Group: "", Version: "v1", Kind: "PodTemplateList"}: {}, {Group: "", Version: "v1", Kind: "ReplicationController"}: {}, {Group: "", Version: "v1", Kind: "ReplicationControllerList"}: {}, {Group: "", Version: "v1", Kind: "Secret"}: {}, {Group: "", Version: "v1", Kind: "SecretList"}: {}, {Group: "", Version: "v1", Kind: "Service"}: {}, {Group: "", Version: "v1", Kind: "ServiceList"}: {}, {Group: "apps", Version: "v1beta1", Kind: "StatefulSet"}: {}, {Group: "apps", Version: "v1beta1", Kind: "StatefulSetList"}: {}, {Group: "apps", Version: "v1beta2", Kind: "StatefulSet"}: {}, {Group: "apps", Version: "v1beta2", Kind: "StatefulSetList"}: {}, {Group: "apps", Version: "v1", Kind: "StatefulSet"}: {}, {Group: "apps", Version: "v1", Kind: "StatefulSetList"}: {}, {Group: "autoscaling", Version: "v1", Kind: "HorizontalPodAutoscaler"}: {}, {Group: "autoscaling", Version: "v1", Kind: "HorizontalPodAutoscalerList"}: {}, {Group: "autoscaling", Version: "v2", Kind: "HorizontalPodAutoscaler"}: {}, {Group: "autoscaling", Version: "v2", Kind: "HorizontalPodAutoscalerList"}: {}, {Group: "autoscaling", Version: "v2beta1", Kind: "HorizontalPodAutoscaler"}: {}, {Group: "autoscaling", Version: "v2beta1", Kind: "HorizontalPodAutoscalerList"}: {}, {Group: "autoscaling", Version: "v2beta2", Kind: "HorizontalPodAutoscaler"}: {}, {Group: "autoscaling", Version: "v2beta2", Kind: "HorizontalPodAutoscalerList"}: {}, {Group: "batch", Version: "v1", Kind: "CronJob"}: {}, {Group: "batch", Version: "v1", Kind: "CronJobList"}: {}, {Group: "batch", Version: "v1", Kind: "Job"}: 
{}, {Group: "batch", Version: "v1", Kind: "JobList"}: {}, {Group: "batch", Version: "v1beta1", Kind: "CronJob"}: {}, {Group: "batch", Version: "v1beta1", Kind: "CronJobList"}: {}, {Group: "batch", Version: "v1beta1", Kind: "JobTemplate"}: {}, {Group: "batch", Version: "v2alpha1", Kind: "CronJob"}: {}, {Group: "batch", Version: "v2alpha1", Kind: "CronJobList"}: {}, {Group: "batch", Version: "v2alpha1", Kind: "JobTemplate"}: {}, {Group: "certificates.k8s.io", Version: "v1beta1", Kind: "PodCertificateRequest"}: {}, {Group: "certificates.k8s.io", Version: "v1beta1", Kind: "PodCertificateRequestList"}: {}, {Group: "certificates.k8s.io", Version: "v1beta1", Kind: "CertificateSigningRequest"}: {}, {Group: "certificates.k8s.io", Version: "v1beta1", Kind: "CertificateSigningRequestList"}: {}, {Group: "discovery.k8s.io", Version: "v1", Kind: "EndpointSlice"}: {}, {Group: "discovery.k8s.io", Version: "v1", Kind: "EndpointSliceList"}: {}, {Group: "discovery.k8s.io", Version: "v1beta1", Kind: "EndpointSlice"}: {}, {Group: "discovery.k8s.io", Version: "v1beta1", Kind: "EndpointSliceList"}: {}, {Group: "extensions", Version: "v1beta1", Kind: "DaemonSet"}: {}, {Group: "extensions", Version: "v1beta1", Kind: "DaemonSetList"}: {}, {Group: "apps", Version: "v1beta2", Kind: "DaemonSet"}: {}, {Group: "apps", Version: "v1beta2", Kind: "DaemonSetList"}: {}, {Group: "apps", Version: "v1", Kind: "DaemonSet"}: {}, {Group: "apps", Version: "v1", Kind: "DaemonSetList"}: {}, {Group: "extensions", Version: "v1beta1", Kind: "Deployment"}: {}, {Group: "extensions", Version: "v1beta1", Kind: "DeploymentList"}: {}, {Group: "apps", Version: "v1beta1", Kind: "Deployment"}: {}, {Group: "apps", Version: "v1beta1", Kind: "DeploymentList"}: {}, {Group: "apps", Version: "v1beta2", Kind: "Deployment"}: {}, {Group: "apps", Version: "v1beta2", Kind: "DeploymentList"}: {}, {Group: "apps", Version: "v1", Kind: "Deployment"}: {}, {Group: "apps", Version: "v1", Kind: "DeploymentList"}: {}, {Group: "extensions", 
Version: "v1beta1", Kind: "Ingress"}: {}, {Group: "extensions", Version: "v1beta1", Kind: "IngressList"}: {}, {Group: "apps", Version: "v1beta2", Kind: "ReplicaSet"}: {}, {Group: "apps", Version: "v1beta2", Kind: "ReplicaSetList"}: {}, {Group: "apps", Version: "v1", Kind: "ReplicaSet"}: {}, {Group: "apps", Version: "v1", Kind: "ReplicaSetList"}: {}, {Group: "extensions", Version: "v1beta1", Kind: "ReplicaSet"}: {}, {Group: "extensions", Version: "v1beta1", Kind: "ReplicaSetList"}: {}, {Group: "extensions", Version: "v1beta1", Kind: "NetworkPolicy"}: {}, {Group: "extensions", Version: "v1beta1", Kind: "NetworkPolicyList"}: {}, {Group: "rbac.authorization.k8s.io", Version: "v1alpha1", Kind: "ClusterRoleBinding"}: {}, {Group: "rbac.authorization.k8s.io", Version: "v1alpha1", Kind: "ClusterRoleBindingList"}: {}, {Group: "rbac.authorization.k8s.io", Version: "v1alpha1", Kind: "RoleBinding"}: {}, {Group: "rbac.authorization.k8s.io", Version: "v1alpha1", Kind: "RoleBindingList"}: {}, {Group: "rbac.authorization.k8s.io", Version: "v1beta1", Kind: "ClusterRoleBinding"}: {}, {Group: "rbac.authorization.k8s.io", Version: "v1beta1", Kind: "ClusterRoleBindingList"}: {}, {Group: "rbac.authorization.k8s.io", Version: "v1beta1", Kind: "RoleBinding"}: {}, {Group: "rbac.authorization.k8s.io", Version: "v1beta1", Kind: "RoleBindingList"}: {}, {Group: "rbac.authorization.k8s.io", Version: "v1", Kind: "ClusterRoleBinding"}: {}, {Group: "rbac.authorization.k8s.io", Version: "v1", Kind: "ClusterRoleBindingList"}: {}, {Group: "rbac.authorization.k8s.io", Version: "v1", Kind: "RoleBinding"}: {}, {Group: "rbac.authorization.k8s.io", Version: "v1", Kind: "RoleBindingList"}: {}, {Group: "resource.k8s.io", Version: "v1alpha3", Kind: "DeviceTaintRule"}: {}, {Group: "resource.k8s.io", Version: "v1alpha3", Kind: "DeviceTaintRuleList"}: {}, {Group: "resource.k8s.io", Version: "v1alpha3", Kind: "ResourceClaim"}: {}, {Group: "resource.k8s.io", Version: "v1alpha3", Kind: "ResourceClaimList"}: {}, 
{Group: "resource.k8s.io", Version: "v1alpha3", Kind: "ResourceClaimTemplate"}: {}, {Group: "resource.k8s.io", Version: "v1alpha3", Kind: "ResourceClaimTemplateList"}: {}, {Group: "resource.k8s.io", Version: "v1alpha3", Kind: "ResourceSlice"}: {}, {Group: "resource.k8s.io", Version: "v1alpha3", Kind: "ResourceSliceList"}: {}, {Group: "resource.k8s.io", Version: "v1beta1", Kind: "ResourceClaim"}: {}, {Group: "resource.k8s.io", Version: "v1beta1", Kind: "ResourceClaimList"}: {}, {Group: "resource.k8s.io", Version: "v1beta1", Kind: "ResourceClaimTemplate"}: {}, {Group: "resource.k8s.io", Version: "v1beta1", Kind: "ResourceClaimTemplateList"}: {}, {Group: "resource.k8s.io", Version: "v1beta1", Kind: "ResourceSlice"}: {}, {Group: "resource.k8s.io", Version: "v1beta1", Kind: "ResourceSliceList"}: {}, {Group: "resource.k8s.io", Version: "v1beta2", Kind: "ResourceClaim"}: {}, {Group: "resource.k8s.io", Version: "v1beta2", Kind: "ResourceClaimList"}: {}, {Group: "resource.k8s.io", Version: "v1beta2", Kind: "ResourceClaimTemplate"}: {}, {Group: "resource.k8s.io", Version: "v1beta2", Kind: "ResourceClaimTemplateList"}: {}, {Group: "resource.k8s.io", Version: "v1beta2", Kind: "ResourceSlice"}: {}, {Group: "resource.k8s.io", Version: "v1beta2", Kind: "ResourceSliceList"}: {}, {Group: "resource.k8s.io", Version: "v1", Kind: "ResourceClaim"}: {}, {Group: "resource.k8s.io", Version: "v1", Kind: "ResourceClaimList"}: {}, {Group: "resource.k8s.io", Version: "v1", Kind: "ResourceClaimTemplate"}: {}, {Group: "resource.k8s.io", Version: "v1", Kind: "ResourceClaimTemplateList"}: {}, {Group: "resource.k8s.io", Version: "v1", Kind: "ResourceSlice"}: {}, {Group: "resource.k8s.io", Version: "v1", Kind: "ResourceSliceList"}: {}, {Group: "admissionregistration.k8s.io", Version: "v1alpha1", Kind: "ValidatingAdmissionPolicy"}: {}, {Group: "admissionregistration.k8s.io", Version: "v1alpha1", Kind: "ValidatingAdmissionPolicyList"}: {}, {Group: "admissionregistration.k8s.io", Version: "v1alpha1", 
Kind: "ValidatingAdmissionPolicyBinding"}: {}, {Group: "admissionregistration.k8s.io", Version: "v1alpha1", Kind: "ValidatingAdmissionPolicyBindingList"}: {}, {Group: "admissionregistration.k8s.io", Version: "v1alpha1", Kind: "MutatingAdmissionPolicy"}: {}, {Group: "admissionregistration.k8s.io", Version: "v1alpha1", Kind: "MutatingAdmissionPolicyList"}: {}, {Group: "admissionregistration.k8s.io", Version: "v1alpha1", Kind: "MutatingAdmissionPolicyBinding"}: {}, {Group: "admissionregistration.k8s.io", Version: "v1alpha1", Kind: "MutatingAdmissionPolicyBindingList"}: {}, {Group: "admissionregistration.k8s.io", Version: "v1beta1", Kind: "ValidatingWebhookConfiguration"}: {}, {Group: "admissionregistration.k8s.io", Version: "v1beta1", Kind: "ValidatingWebhookConfigurationList"}: {}, {Group: "admissionregistration.k8s.io", Version: "v1beta1", Kind: "MutatingWebhookConfiguration"}: {}, {Group: "admissionregistration.k8s.io", Version: "v1beta1", Kind: "MutatingWebhookConfigurationList"}: {}, {Group: "admissionregistration.k8s.io", Version: "v1beta1", Kind: "ValidatingAdmissionPolicy"}: {}, {Group: "admissionregistration.k8s.io", Version: "v1beta1", Kind: "ValidatingAdmissionPolicyList"}: {}, {Group: "admissionregistration.k8s.io", Version: "v1beta1", Kind: "ValidatingAdmissionPolicyBinding"}: {}, {Group: "admissionregistration.k8s.io", Version: "v1beta1", Kind: "ValidatingAdmissionPolicyBindingList"}: {}, {Group: "admissionregistration.k8s.io", Version: "v1beta1", Kind: "MutatingAdmissionPolicy"}: {}, {Group: "admissionregistration.k8s.io", Version: "v1beta1", Kind: "MutatingAdmissionPolicyList"}: {}, {Group: "admissionregistration.k8s.io", Version: "v1beta1", Kind: "MutatingAdmissionPolicyBinding"}: {}, {Group: "admissionregistration.k8s.io", Version: "v1beta1", Kind: "MutatingAdmissionPolicyBindingList"}: {}, {Group: "admissionregistration.k8s.io", Version: "v1", Kind: "ValidatingAdmissionPolicy"}: {}, {Group: "admissionregistration.k8s.io", Version: "v1", Kind: 
"ValidatingAdmissionPolicyList"}: {}, {Group: "admissionregistration.k8s.io", Version: "v1", Kind: "ValidatingAdmissionPolicyBinding"}: {}, {Group: "admissionregistration.k8s.io", Version: "v1", Kind: "ValidatingAdmissionPolicyBindingList"}: {}, {Group: "admissionregistration.k8s.io", Version: "v1", Kind: "ValidatingWebhookConfiguration"}: {}, {Group: "admissionregistration.k8s.io", Version: "v1", Kind: "ValidatingWebhookConfigurationList"}: {}, {Group: "admissionregistration.k8s.io", Version: "v1", Kind: "MutatingWebhookConfiguration"}: {}, {Group: "admissionregistration.k8s.io", Version: "v1", Kind: "MutatingWebhookConfigurationList"}: {}, {Group: "networking.k8s.io", Version: "v1", Kind: "NetworkPolicy"}: {}, {Group: "networking.k8s.io", Version: "v1", Kind: "NetworkPolicyList"}: {}, {Group: "networking.k8s.io", Version: "v1beta1", Kind: "Ingress"}: {}, {Group: "networking.k8s.io", Version: "v1beta1", Kind: "IngressList"}: {}, {Group: "networking.k8s.io", Version: "v1", Kind: "IngressClass"}: {}, {Group: "networking.k8s.io", Version: "v1", Kind: "IngressClassList"}: {}, {Group: "storage.k8s.io", Version: "v1beta1", Kind: "StorageClass"}: {}, {Group: "storage.k8s.io", Version: "v1beta1", Kind: "StorageClassList"}: {}, {Group: "storage.k8s.io", Version: "v1beta1", Kind: "CSIDriver"}: {}, {Group: "storage.k8s.io", Version: "v1beta1", Kind: "CSIDriverList"}: {}, {Group: "storage.k8s.io", Version: "v1", Kind: "StorageClass"}: {}, {Group: "storage.k8s.io", Version: "v1", Kind: "StorageClassList"}: {}, {Group: "storage.k8s.io", Version: "v1", Kind: "VolumeAttachment"}: {}, {Group: "storage.k8s.io", Version: "v1", Kind: "VolumeAttachmentList"}: {}, {Group: "storage.k8s.io", Version: "v1", Kind: "CSIDriver"}: {}, {Group: "storage.k8s.io", Version: "v1", Kind: "CSIDriverList"}: {}, {Group: "storage.k8s.io", Version: "v1beta1", Kind: "VolumeAttachment"}: {}, {Group: "storage.k8s.io", Version: "v1beta1", Kind: "VolumeAttachmentList"}: {}, {Group: "authentication.k8s.io", 
Version: "v1", Kind: "TokenRequest"}: {}, {Group: "scheduling.k8s.io", Version: "v1alpha1", Kind: "PriorityClass"}: {}, {Group: "scheduling.k8s.io", Version: "v1beta1", Kind: "PriorityClass"}: {}, {Group: "scheduling.k8s.io", Version: "v1", Kind: "PriorityClass"}: {}, {Group: "scheduling.k8s.io", Version: "v1alpha1", Kind: "PriorityClassList"}: {}, {Group: "scheduling.k8s.io", Version: "v1beta1", Kind: "PriorityClassList"}: {}, {Group: "scheduling.k8s.io", Version: "v1", Kind: "PriorityClassList"}: {}, {Group: "flowcontrol.apiserver.k8s.io", Version: "v1alpha1", Kind: "PriorityLevelConfiguration"}: {}, {Group: "flowcontrol.apiserver.k8s.io", Version: "v1alpha1", Kind: "PriorityLevelConfigurationList"}: {}, {Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta1", Kind: "PriorityLevelConfiguration"}: {}, {Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta1", Kind: "PriorityLevelConfigurationList"}: {}, {Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta2", Kind: "PriorityLevelConfiguration"}: {}, {Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta2", Kind: "PriorityLevelConfigurationList"}: {}, {Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta3", Kind: "PriorityLevelConfiguration"}: {}, {Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta3", Kind: "PriorityLevelConfigurationList"}: {}, {Group: "flowcontrol.apiserver.k8s.io", Version: "v1", Kind: "PriorityLevelConfiguration"}: {}, {Group: "flowcontrol.apiserver.k8s.io", Version: "v1", Kind: "PriorityLevelConfigurationList"}: {}, } scheme := legacyscheme.Scheme var testTypes orderedGroupVersionKinds for gvk := range scheme.AllKnownTypes() { if gvk.Version == runtime.APIVersionInternal { continue } testTypes = append(testTypes, gvk) } sort.Sort(testTypes) for _, gvk := range testTypes { gvk := gvk t.Run(gvk.String(), func(t *testing.T) { // Each sub-tests gets its own fuzzer instance to make running it independent // from what other tests ran before. 
f := randfill.New().NilChance(.5).NumElements(1, 1).RandSource(rand.NewSource(1)) f.Funcs( func(s *runtime.RawExtension, c randfill.Continue) {}, func(s *metav1.LabelSelector, c randfill.Continue) { c.FillNoCustom(s) s.MatchExpressions = nil // need to fuzz this specially }, func(s *metav1.ListOptions, c randfill.Continue) { c.FillNoCustom(s) s.LabelSelector = "" // need to fuzz requirement strings specially s.FieldSelector = "" // need to fuzz requirement strings specially }, func(s *extensionsv1beta1.ScaleStatus, c randfill.Continue) { c.FillNoCustom(s) s.TargetSelector = "" // need to fuzz requirement strings specially }, ) _, expectedChanged := typesWithDefaulting[gvk] iter := 0 changedOnce := false for { if iter > *roundtrip.FuzzIters { if !expectedChanged || changedOnce { break } // This uses to be 300, but for ResourceClaimList that was not high enough // because depending on the starting conditions, the fuzzer never created the // one combination where defaulting kicked in (empty string in non-empty slice // in another non-empty slice). 
if iter > 3000 { t.Errorf("expected %s to trigger defaulting due to fuzzing", gvk) break } // if we expected defaulting, continue looping until the fuzzer gives us one // at worst, we will timeout } iter++ src, err := scheme.New(gvk) if err != nil { t.Fatal(err) } f.Fill(src) src.GetObjectKind().SetGroupVersionKind(schema.GroupVersionKind{}) original := src.DeepCopyObject() // get internal withDefaults := src.DeepCopyObject() scheme.Default(withDefaults) if !reflect.DeepEqual(original, withDefaults) { diff := cmp.Diff(original, withDefaults) if !changedOnce { t.Logf("got diff (-fuzzed, +with defaults):\n%s", diff) changedOnce = true } if !expectedChanged { t.Errorf("{Group: \"%s\", Version: \"%s\", Kind: \"%s\"} did not expect defaults to be set - update expected or check defaulter registering: %s", gvk.Group, gvk.Version, gvk.Kind, diff) } } } }) } } func BenchmarkPodDefaulting(b *testing.B) { f := randfill.New().NilChance(.5).NumElements(1, 1).RandSource(rand.NewSource(1)) items := make([]apiv1.Pod, 100) for i := range items { f.Fill(&items[i]) } scheme := legacyscheme.Scheme b.ResetTimer() for i := 0; i < b.N; i++ { pod := &items[i%len(items)] scheme.Default(pod) } b.StopTimer() }
go
github
https://github.com/kubernetes/kubernetes
pkg/api/testing/defaulting_test.go
//===--- ASTBridging.h - header for the swift SILBridging module ----------===// // // This source file is part of the Swift.org open source project // // Copyright (c) 2022 - 2025 Apple Inc. and the Swift project authors // Licensed under Apache License v2.0 with Runtime Library Exception // // See https://swift.org/LICENSE.txt for license information // See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors // //===----------------------------------------------------------------------===// #ifndef SWIFT_AST_ASTBRIDGING_H #define SWIFT_AST_ASTBRIDGING_H /// `ASTBridging.h` is imported into Swift. Be *very* careful with what you /// include here and keep these includes minimal! /// /// See include guidelines and caveats in `BasicBridging.h`. #include "swift/AST/AccessorKind.h" #include "swift/AST/AttrKind.h" #include "swift/AST/DiagnosticKind.h" #include "swift/AST/DiagnosticList.h" #include "swift/AST/ExportKind.h" #include "swift/AST/GenericTypeParamKind.h" #include "swift/AST/Identifier.h" #include "swift/AST/LayoutConstraintKind.h" #include "swift/AST/PlatformKind.h" #include "swift/Basic/BasicBridging.h" #include "swift/Basic/WarningGroupBehavior.h" #ifdef NOT_COMPILED_WITH_SWIFT_PURE_BRIDGING_MODE #include "swift/AST/Attr.h" #include "swift/AST/Decl.h" #endif #include <utility> SWIFT_BEGIN_NULLABILITY_ANNOTATIONS namespace llvm { template<typename T> class ArrayRef; } namespace swift { enum class AccessorKind; class AvailabilityDomainOrIdentifier; class Argument; class ASTContext; struct ASTNode; class CanGenericSignature; struct CaptureListEntry; class DeclAttributes; class DeclBaseName; class DeclNameLoc; class DeclNameRef; class DiagnosticArgument; class DiagnosticEngine; enum class DifferentiabilityKind : uint8_t; class Fingerprint; class Identifier; class IfConfigClauseRangeInfo; class GenericSignature; class GenericSignatureImpl; struct LabeledStmtInfo; class LangOptions; class LayoutConstraint; class LayoutConstraintInfo; struct 
LifetimeDescriptor; enum class MacroRole : uint32_t; class MacroIntroducedDeclName; enum class MacroIntroducedDeclNameKind; enum class ParamSpecifier : uint8_t; class ParsedAutoDiffParameter; class ProtocolConformanceRef; class RegexLiteralPatternFeature; class RegexLiteralPatternFeatureKind; enum class ReferenceOwnership : uint8_t; class RequirementRepr; class Type; class CanType; class TypeBase; class StmtConditionElement; class SubstitutionMap; enum class RequirementReprKind : unsigned; } struct BridgedASTType; struct BridgedASTTypeArray; class BridgedCanType; class BridgedASTContext; class BridgedLangOptions; struct BridgedSubstitutionMap; struct BridgedGenericSignature; struct BridgedCanGenericSignature; struct BridgedConformance; class BridgedParameterList; // Forward declare the underlying AST node type for each wrapper. namespace swift { #define AST_BRIDGING_WRAPPER(Name) class Name; #include "swift/AST/ASTBridgingWrappers.def" } // end namespace swift // Define the bridging wrappers for each AST node. #define AST_BRIDGING_WRAPPER(Name) BRIDGING_WRAPPER_NONNULL(swift::Name, Name) #define AST_BRIDGING_WRAPPER_CONST(Name) \ BRIDGING_WRAPPER_CONST_NONNULL(swift::Name, Name) #include "swift/AST/ASTBridgingWrappers.def" // For nullable nodes, also define a nullable variant. 
#define AST_BRIDGING_WRAPPER_NULLABLE(Name) \ BRIDGING_WRAPPER_NULLABLE(swift::Name, Name) #define AST_BRIDGING_WRAPPER_CONST_NULLABLE(Name) \ BRIDGING_WRAPPER_CONST_NULLABLE(swift::Name, Name) #define AST_BRIDGING_WRAPPER_NONNULL(Name) #define AST_BRIDGING_WRAPPER_CONST_NONNULL(Name) #include "swift/AST/ASTBridgingWrappers.def" //===----------------------------------------------------------------------===// // MARK: Identifier //===----------------------------------------------------------------------===// struct BridgedLocatedIdentifier { SWIFT_NAME("name") swift::Identifier Name; SWIFT_NAME("nameLoc") swift::SourceLoc NameLoc; }; struct BridgedConsumedLookupResult { SWIFT_NAME("name") swift::Identifier Name; SWIFT_NAME("nameLoc") swift::SourceLoc NameLoc; SWIFT_NAME("flag") SwiftInt Flag; BRIDGED_INLINE BridgedConsumedLookupResult(swift::Identifier name, swift::SourceLoc sourceLoc, SwiftInt flag); }; class BridgedDeclNameRef { void *_Nullable opaque; public: BRIDGED_INLINE BridgedDeclNameRef(); BRIDGED_INLINE BridgedDeclNameRef(swift::DeclNameRef name); BRIDGED_INLINE swift::DeclNameRef unbridged() const; }; SWIFT_NAME("BridgedDeclNameRef.createParsed(_:moduleSelector:baseName:" "argumentLabels:)") BridgedDeclNameRef BridgedDeclNameRef_createParsed(BridgedASTContext cContext, swift::Identifier cModuleSelector, swift::DeclBaseName cBaseName, BridgedArrayRef cLabels); SWIFT_NAME("BridgedDeclNameRef.createParsed(_:moduleSelector:baseName:)") BridgedDeclNameRef BridgedDeclNameRef_createParsed(BridgedASTContext cContext, swift::Identifier cModuleSelector, swift::DeclBaseName cBaseName); class BridgedDeclNameLoc { const void *_Nullable LocationInfo; uint32_t NumArgumentLabels; bool HasModuleSelectorLoc; public: BridgedDeclNameLoc() : LocationInfo(nullptr), NumArgumentLabels(0), HasModuleSelectorLoc(false) {} BRIDGED_INLINE BridgedDeclNameLoc(swift::DeclNameLoc loc); BRIDGED_INLINE swift::DeclNameLoc unbridged() const; }; 
SWIFT_NAME("BridgedDeclNameLoc.createParsed(_:baseNameLoc:lParenLoc:" "argumentLabelLocs:rParenLoc:)") BridgedDeclNameLoc BridgedDeclNameLoc_createParsed(BridgedASTContext cContext, swift::SourceLoc baseNameLoc, swift::SourceLoc lParenLoc, BridgedArrayRef cLabelLocs, swift::SourceLoc rParenLoc); SWIFT_NAME("BridgedDeclNameLoc.createParsed(_:moduleSelectorLoc:baseNameLoc:" "lParenLoc:argumentLabelLocs:rParenLoc:)") BridgedDeclNameLoc BridgedDeclNameLoc_createParsed( BridgedASTContext cContext, swift::SourceLoc cModuleSelectorLoc, swift::SourceLoc baseNameLoc, swift::SourceLoc lParenLoc, BridgedArrayRef cLabelLocs, swift::SourceLoc rParenLoc); SWIFT_NAME("BridgedDeclNameLoc.createParsed(_:)") BridgedDeclNameLoc BridgedDeclNameLoc_createParsed(swift::SourceLoc baseNameLoc); SWIFT_NAME("BridgedDeclNameLoc.createParsed(_:moduleSelectorLoc:baseNameLoc:)") BridgedDeclNameLoc BridgedDeclNameLoc_createParsed(BridgedASTContext cContext, swift::SourceLoc moduleSelectorLoc, swift::SourceLoc baseNameLoc); //===----------------------------------------------------------------------===// // MARK: ASTContext //===----------------------------------------------------------------------===// class BridgedASTContext { swift::ASTContext * _Nonnull Ctx; public: SWIFT_UNAVAILABLE("Use init(raw:) instead") BRIDGED_INLINE BridgedASTContext(swift::ASTContext &ctx); SWIFT_UNAVAILABLE("Use '.raw' instead") BRIDGED_INLINE swift::ASTContext &unbridged() const; SWIFT_COMPUTED_PROPERTY void *_Nonnull getRaw() const { return Ctx; } SWIFT_COMPUTED_PROPERTY unsigned getMajorLanguageVersion() const; SWIFT_COMPUTED_PROPERTY BridgedAvailabilityMacroMap getAvailabilityMacroMap() const; SWIFT_COMPUTED_PROPERTY BridgedDiagnosticEngine getDiags() const; }; #define IDENTIFIER_WITH_NAME(Name, _) \ SWIFT_NAME("getter:BridgedASTContext.id_" #Name "(self:)") \ BRIDGED_INLINE swift::Identifier BridgedASTContext_id_##Name( \ BridgedASTContext bridged); #include "swift/AST/KnownIdentifiers.def" 
SWIFT_NAME("BridgedASTContext.init(raw:)") BRIDGED_INLINE BridgedASTContext BridgedASTContext_fromRaw(void * _Nonnull ptr); SWIFT_NAME("BridgedASTContext.allocate(self:size:alignment:)") BRIDGED_INLINE void *_Nullable BridgedASTContext_allocate(BridgedASTContext bridged, size_t size, size_t alignment); SWIFT_NAME("BridgedASTContext.allocateCopy(self:string:)") BRIDGED_INLINE BridgedStringRef BridgedASTContext_allocateCopyString(BridgedASTContext cContext, BridgedStringRef cStr); SWIFT_NAME("BridgedASTContext.getIdentifier(self:_:)") swift::Identifier BridgedASTContext_getIdentifier(BridgedASTContext cContext, BridgedStringRef cStr); SWIFT_NAME("BridgedASTContext.getDollarIdentifier(self:_:)") swift::Identifier BridgedASTContext_getDollarIdentifier(BridgedASTContext cContext, size_t idx); SWIFT_NAME("getter:BridgedASTContext.langOpts(self:)") BridgedLangOptions BridgedASTContext_langOpts(BridgedASTContext cContext); SWIFT_NAME("BridgedLangOptions.hasAttributeNamed(self:_:)") bool BridgedLangOptions_hasAttributeNamed(BridgedLangOptions cLangOpts, BridgedStringRef cName); enum ENUM_EXTENSIBILITY_ATTR(closed) BridgedCanImportVersion : size_t { CanImportUnversioned, CanImportVersion, CanImportUnderlyingVersion, }; SWIFT_NAME("BridgedASTContext.canImport(self:importPath:location:versionKind:versionComponents:numVersionComponents:)") bool BridgedASTContext_canImport(BridgedASTContext cContext, BridgedStringRef importPath, swift::SourceLoc canImportLoc, BridgedCanImportVersion versionKind, const SwiftInt *_Nullable versionComponents, SwiftInt numVersionComponents); SWIFT_NAME("getter:BridgedASTContext.staticBuildConfigurationPtr(self:)") void * _Nonnull BridgedASTContext_staticBuildConfiguration(BridgedASTContext cContext); //===----------------------------------------------------------------------===// // MARK: AST nodes //===----------------------------------------------------------------------===// void registerBridgedDecl(BridgedStringRef bridgedClassName, 
SwiftMetatype metatype); struct OptionalBridgedDeclObj { OptionalSwiftObject obj; OptionalBridgedDeclObj(OptionalSwiftObject obj) : obj(obj) {} #ifdef NOT_COMPILED_WITH_SWIFT_PURE_BRIDGING_MODE template <class D> D *_Nullable getAs() const { if (obj) return llvm::cast<D>(static_cast<swift::Decl *>(obj)); return nullptr; } #endif }; struct BridgedDeclObj { SwiftObject obj; #ifdef NOT_COMPILED_WITH_SWIFT_PURE_BRIDGING_MODE template <class D> D *_Nonnull getAs() const { return llvm::cast<D>(static_cast<swift::Decl *>(obj)); } swift::Decl * _Nonnull unbridged() const { return getAs<swift::Decl>(); } #endif BridgedDeclObj(SwiftObject obj) : obj(obj) {} BRIDGED_INLINE BridgedDeclObj(BridgedDecl decl); BridgedOwnedString getDebugDescription() const; BRIDGED_INLINE swift::SourceLoc getLoc() const; SWIFT_IMPORT_UNSAFE BRIDGED_INLINE BridgedDeclObj getModuleContext() const; SWIFT_IMPORT_UNSAFE BRIDGED_INLINE OptionalBridgedDeclObj getParent() const; SWIFT_IMPORT_UNSAFE BRIDGED_INLINE BridgedNullableDeclContext getDeclContext() const; SWIFT_IMPORT_UNSAFE BRIDGED_INLINE BridgedDeclContext asGenericContext() const; SWIFT_IMPORT_UNSAFE BRIDGED_INLINE BridgedDeclContext asTopLevelCodeDecl() const; SWIFT_IMPORT_UNSAFE BRIDGED_INLINE BridgedDeclContext asModuleDecl() const; BRIDGED_INLINE void setImplicit() const; SWIFT_IMPORT_UNSAFE BRIDGED_INLINE BridgedStringRef Type_getName() const; SWIFT_IMPORT_UNSAFE BRIDGED_INLINE BridgedStringRef Value_getUserFacingName() const; SWIFT_IMPORT_UNSAFE BRIDGED_INLINE swift::SourceLoc Value_getNameLoc() const; SWIFT_IMPORT_UNSAFE BRIDGED_INLINE swift::Identifier Value_getBaseIdentifier() const; BRIDGED_INLINE bool hasClangNode() const; BRIDGED_INLINE bool Value_isObjC() const; BRIDGED_INLINE bool AbstractStorage_isConst() const; BRIDGED_INLINE bool GenericType_isGenericAtAnyLevel() const; BRIDGED_INLINE bool NominalType_isGlobalActor() const; SWIFT_IMPORT_UNSAFE BRIDGED_INLINE BridgedASTType NominalType_getDeclaredInterfaceType() const; 
SWIFT_IMPORT_UNSAFE BRIDGED_INLINE OptionalBridgedDeclObj NominalType_getValueTypeDestructor() const; BRIDGED_INLINE bool Enum_hasRawType() const; BRIDGED_INLINE bool Struct_hasUnreferenceableStorage() const; SWIFT_IMPORT_UNSAFE BRIDGED_INLINE BridgedASTType Class_getSuperclass() const; SWIFT_IMPORT_UNSAFE BRIDGED_INLINE BridgedDeclObj Class_getDestructor() const; BRIDGED_INLINE bool Class_isForeign() const; BRIDGED_INLINE bool ProtocolDecl_requiresClass() const; BRIDGED_INLINE bool ProtocolDecl_isMarkerProtocol() const; BRIDGED_INLINE bool AbstractFunction_isOverridden() const; BRIDGED_INLINE bool Constructor_isInheritable() const; BRIDGED_INLINE bool Destructor_isIsolated() const; BRIDGED_INLINE bool EnumElementDecl_hasAssociatedValues() const; SWIFT_IMPORT_UNSAFE BRIDGED_INLINE BridgedParameterList EnumElementDecl_getParameterList() const; SWIFT_IMPORT_UNSAFE BRIDGED_INLINE BridgedStringRef EnumElementDecl_getNameStr() const; SWIFT_IMPORT_UNSAFE BRIDGED_INLINE BridgedStringRef AccessorDecl_getKindName() const; BRIDGED_INLINE void GenericContext_setGenericSignature(BridgedGenericSignature genericSignature) const; BRIDGED_INLINE void ValueDecl_setAccess(swift::AccessLevel accessLevel) const; BRIDGED_INLINE void NominalTypeDecl_addMember(BridgedDeclObj member) const; }; enum ENUM_EXTENSIBILITY_ATTR(closed) BridgedASTNodeKind : uint8_t { BridgedASTNodeKindExpr, BridgedASTNodeKindStmt, BridgedASTNodeKindDecl }; class BridgedASTNode { intptr_t opaque; BRIDGED_INLINE BridgedASTNode(void *_Nonnull pointer, BridgedASTNodeKind kind); void *_Nonnull getPointer() const { return reinterpret_cast<void *>(opaque & ~0x7); } public: SWIFT_NAME("decl(_:)") static BridgedASTNode createDecl(BridgedDecl d) { return BridgedASTNode(d.unbridged(), BridgedASTNodeKindDecl); } SWIFT_NAME("stmt(_:)") static BridgedASTNode createStmt(BridgedStmt s) { return BridgedASTNode(s.unbridged(), BridgedASTNodeKindStmt); } SWIFT_NAME("expr(_:)") static BridgedASTNode createExor(BridgedExpr e) { 
return BridgedASTNode(e.unbridged(), BridgedASTNodeKindExpr); } SWIFT_COMPUTED_PROPERTY BridgedASTNodeKind getKind() const { return static_cast<BridgedASTNodeKind>(opaque & 0x7); } SWIFT_IMPORT_UNSAFE BRIDGED_INLINE BridgedExpr castToExpr() const; SWIFT_IMPORT_UNSAFE BRIDGED_INLINE BridgedStmt castToStmt() const; SWIFT_IMPORT_UNSAFE BRIDGED_INLINE BridgedDecl castToDecl() const; BRIDGED_INLINE swift::ASTNode unbridged() const; }; // Declare `.asDecl` on each BridgedXXXDecl type, which upcasts a wrapper for // a Decl subclass to a BridgedDecl. #define DECL(Id, Parent) \ SWIFT_NAME("getter:Bridged" #Id "Decl.asDecl(self:)") \ BridgedDecl Bridged##Id##Decl_asDecl(Bridged##Id##Decl decl); #define ABSTRACT_DECL(Id, Parent) DECL(Id, Parent) #include "swift/AST/DeclNodes.def" // Declare `.asNominalTypeDecl` on each BridgedXXXDecl type that's also a // NominalTypeDecl. #define DECL(Id, Parent) #define NOMINAL_TYPE_DECL(Id, Parent) \ SWIFT_NAME("getter:Bridged" #Id "Decl.asNominalTypeDecl(self:)") \ BridgedNominalTypeDecl Bridged##Id##Decl_asNominalTypeDecl( \ Bridged##Id##Decl decl); #include "swift/AST/DeclNodes.def" // Declare `.asDeclContext` on each BridgedXXXDecl type that's also a // DeclContext. #define DECL(Id, Parent) #define CONTEXT_DECL(Id, Parent) \ SWIFT_NAME("getter:Bridged" #Id "Decl.asDeclContext(self:)") \ BridgedDeclContext Bridged##Id##Decl_asDeclContext(Bridged##Id##Decl decl); #define ABSTRACT_CONTEXT_DECL(Id, Parent) CONTEXT_DECL(Id, Parent) #include "swift/AST/DeclNodes.def" // Declare `.asStmt` on each BridgedXXXStmt type, which upcasts a wrapper for // a Stmt subclass to a BridgedStmt. #define STMT(Id, Parent) \ SWIFT_NAME("getter:Bridged" #Id "Stmt.asStmt(self:)") \ BridgedStmt Bridged##Id##Stmt_asStmt(Bridged##Id##Stmt stmt); #define ABSTRACT_STMT(Id, Parent) STMT(Id, Parent) #include "swift/AST/StmtNodes.def" // Declare `.asExpr` on each BridgedXXXExpr type, which upcasts a wrapper for // a Expr subclass to a BridgedExpr. 
#define EXPR(Id, Parent) \ SWIFT_NAME("getter:Bridged" #Id "Expr.asExpr(self:)") \ BridgedExpr Bridged##Id##Expr_asExpr(Bridged##Id##Expr expr); #define ABSTRACT_EXPR(Id, Parent) EXPR(Id, Parent) #include "swift/AST/ExprNodes.def" // Declare `.asTypeRepr` on each BridgedXXXTypeRepr type, which upcasts a // wrapper for a TypeRepr subclass to a BridgedTypeRepr. #define TYPEREPR(Id, Parent) \ SWIFT_NAME("getter:Bridged" #Id "TypeRepr.asTypeRepr(self:)") \ BridgedTypeRepr Bridged##Id##TypeRepr_asTypeRepr( \ Bridged##Id##TypeRepr typeRepr); #define ABSTRACT_TYPEREPR(Id, Parent) TYPEREPR(Id, Parent) #include "swift/AST/TypeReprNodes.def" // Declare `.asPattern` on each BridgedXXXPattern type, which upcasts a wrapper // for a Pattern subclass to a BridgedPattern. #define PATTERN(Id, Parent) \ SWIFT_NAME("getter:Bridged" #Id "Pattern.asPattern(self:)") \ BridgedPattern Bridged##Id##Pattern_asPattern(Bridged##Id##Pattern pattern); #include "swift/AST/PatternNodes.def" // Declare `.asDeclAttribute` on each BridgedXXXAttr type, which upcasts a // wrapper for a DeclAttribute subclass to a BridgedDeclAttribute. #define SIMPLE_DECL_ATTR(...) #define DECL_ATTR(_, CLASS, ...) \ SWIFT_NAME("getter:Bridged" #CLASS "Attr.asDeclAttribute(self:)") \ BridgedDeclAttribute Bridged##CLASS##Attr_asDeclAttribute( \ Bridged##CLASS##Attr attr); #include "swift/AST/DeclAttr.def" // Declare `.asTypeAttr` on each BridgedXXXTypeAttr type, which upcasts a // wrapper for a TypeAttr subclass to a BridgedTypeAttr. #define SIMPLE_TYPE_ATTR(...) 
#define TYPE_ATTR(SPELLING, CLASS) \ SWIFT_NAME("getter:Bridged" #CLASS "TypeAttr.asTypeAttribute(self:)") \ BridgedTypeAttribute Bridged##CLASS##TypeAttr_asTypeAttribute( \ Bridged##CLASS##TypeAttr attr); #include "swift/AST/TypeAttr.def" struct BridgedPatternBindingEntry { BridgedPattern pattern; swift::SourceLoc equalLoc; BridgedNullableExpr init; BridgedNullablePatternBindingInitializer initContext; }; //===----------------------------------------------------------------------===// // MARK: Diagnostic Engine //===----------------------------------------------------------------------===// class BridgedDiagnosticArgument { int64_t storage[3]; public: BRIDGED_INLINE BridgedDiagnosticArgument(const swift::DiagnosticArgument &arg); BRIDGED_INLINE const swift::DiagnosticArgument &unbridged() const; BridgedDiagnosticArgument(SwiftInt i); BridgedDiagnosticArgument(BridgedStringRef s); }; class BridgedFixIt { public: swift::CharSourceRange replacementRange; BridgedStringRef replacementText; }; class BridgedDiagnosticFixIt { public: int64_t storage[7]; BridgedDiagnosticFixIt(swift::SourceLoc start, uint32_t length, BridgedStringRef text); }; class BridgedDiagnostic { public: struct Impl; SWIFT_UNAVAILABLE("Unavailable in Swift") Impl *_Nonnull Raw; SWIFT_UNAVAILABLE("Unavailable in Swift") BridgedDiagnostic(Impl *_Nonnull raw) : Raw(raw) {} SWIFT_UNAVAILABLE("Unavailable in Swift") Impl *_Nonnull unbridged() const { return Raw; } }; // FIXME: Can we bridge InFlightDiagnostic? 
SWIFT_NAME("BridgedDiagnosticEngine.diagnose(self:at:_:_:highlightAt:" "highlightLength:fixIts:)") void BridgedDiagnosticEngine_diagnose( BridgedDiagnosticEngine, swift::SourceLoc loc, swift::DiagID diagID, BridgedArrayRef arguments, swift::SourceLoc highlightStart, uint32_t hightlightLength, BridgedArrayRef fixIts); SWIFT_NAME("BridgedDiagnosticEngine.getLocationFromExternalSource(self:path:line:column:)") swift::SourceLoc BridgedDiagnostic_getLocationFromExternalSource( BridgedDiagnosticEngine bridgedEngine, BridgedStringRef path, SwiftInt line, SwiftInt column); SWIFT_NAME("getter:BridgedDiagnosticEngine.hadAnyError(self:)") bool BridgedDiagnosticEngine_hadAnyError(BridgedDiagnosticEngine); /// Create a new diagnostic with the given severity, location, and diagnostic /// text. /// /// \returns a diagnostic instance that can be extended with additional /// information and then must be finished via \c SwiftDiagnostic_finish. SWIFT_NAME("BridgedDiagnostic.init(at:message:severity:engine:)") BridgedDiagnostic BridgedDiagnostic_create(swift::SourceLoc loc, BridgedStringRef cText, swift::DiagnosticKind severity, BridgedDiagnosticEngine cDiags); /// Highlight a source range as part of the diagnostic. SWIFT_NAME("BridgedDiagnostic.highlight(self:start:end:)") void BridgedDiagnostic_highlight(BridgedDiagnostic cDiag, swift::SourceLoc LStartLoc, swift::SourceLoc endLoc); /// Add a Fix-It to replace a source range as part of the diagnostic. SWIFT_NAME("BridgedDiagnostic.fixItReplace(self:start:end:replacement:)") void BridgedDiagnostic_fixItReplace(BridgedDiagnostic cDiag, swift::SourceLoc startLoc, swift::SourceLoc endLoc, BridgedStringRef cReplaceText); /// Finish the given diagnostic and emit it. 
SWIFT_NAME("BridgedDiagnostic.finish(self:)") void BridgedDiagnostic_finish(BridgedDiagnostic cDiag); //===----------------------------------------------------------------------===// // MARK: DeclContexts //===----------------------------------------------------------------------===// SWIFT_NAME("getter:BridgedDeclContext.isLocalContext(self:)") BRIDGED_INLINE bool BridgedDeclContext_isLocalContext(BridgedDeclContext cDeclContext); SWIFT_NAME("getter:BridgedDeclContext.isTypeContext(self:)") BRIDGED_INLINE bool BridgedDeclContext_isTypeContext(BridgedDeclContext dc); SWIFT_NAME("getter:BridgedDeclContext.isModuleScopeContext(self:)") BRIDGED_INLINE bool BridgedDeclContext_isModuleScopeContext(BridgedDeclContext dc); SWIFT_NAME("getter:BridgedDeclContext.isClosureExpr(self:)") BRIDGED_INLINE bool BridgedDeclContext_isClosureExpr(BridgedDeclContext dc); SWIFT_NAME("BridgedDeclContext.castToClosureExpr(self:)") BRIDGED_INLINE BridgedClosureExpr BridgedDeclContext_castToClosureExpr(BridgedDeclContext dc); SWIFT_NAME("getter:BridgedDeclContext.astContext(self:)") BRIDGED_INLINE BridgedASTContext BridgedDeclContext_getASTContext(BridgedDeclContext dc); SWIFT_NAME("getter:BridgedDeclContext.parentSourceFile(self:)") BRIDGED_INLINE BridgedSourceFile BridgedDeclContext_getParentSourceFile(BridgedDeclContext dc); SWIFT_NAME("getter:BridgedSourceFile.isScriptMode(self:)") BRIDGED_INLINE bool BridgedSourceFile_isScriptMode(BridgedSourceFile sf); SWIFT_NAME("BridgedSourceFile.addTopLevelDecl(self:_:)") BRIDGED_INLINE void BridgedSourceFile_addTopLevelDecl(BridgedSourceFile sf, BridgedDecl decl); SWIFT_NAME("BridgedFileUnit.castToSourceFile(self:)") BRIDGED_INLINE BridgedNullableSourceFile BridgedFileUnit_castToSourceFile(BridgedFileUnit fileUnit); SWIFT_NAME("BridgedPatternBindingInitializer.create(declContext:)") BridgedPatternBindingInitializer BridgedPatternBindingInitializer_create(BridgedDeclContext cDeclContext); 
SWIFT_NAME("getter:BridgedPatternBindingInitializer.asDeclContext(self:)") BridgedDeclContext BridgedPatternBindingInitializer_asDeclContext( BridgedPatternBindingInitializer cInit); SWIFT_NAME("BridgedDefaultArgumentInitializer.create(declContext:index:)") BridgedDefaultArgumentInitializer BridgedDefaultArgumentInitializer_create(BridgedDeclContext cDeclContext, size_t index); SWIFT_NAME("getter:BridgedDefaultArgumentInitializer.asDeclContext(self:)") BridgedDeclContext DefaultArgumentInitializer_asDeclContext( BridgedDefaultArgumentInitializer cInit); SWIFT_NAME("BridgedCustomAttributeInitializer.create(declContext:)") BridgedCustomAttributeInitializer BridgedCustomAttributeInitializer_create(BridgedDeclContext cDeclContext); SWIFT_NAME("getter:BridgedCustomAttributeInitializer.asDeclContext(self:)") BridgedDeclContext BridgedCustomAttributeInitializer_asDeclContext( BridgedCustomAttributeInitializer cInit); SWIFT_NAME("getter:BridgedClosureExpr.asDeclContext(self:)") BridgedDeclContext BridgedClosureExpr_asDeclContext(BridgedClosureExpr cClosure); //===----------------------------------------------------------------------===// // MARK: Availability //===----------------------------------------------------------------------===// BRIDGED_OPTIONAL(swift::PlatformKind, PlatformKind) SWIFT_NAME("BridgedOptionalPlatformKind.init(from:)") BridgedOptionalPlatformKind PlatformKind_fromString(BridgedStringRef cStr); SWIFT_NAME("BridgedOptionalPlatformKind.init(from:)") BridgedOptionalPlatformKind PlatformKind_fromIdentifier(swift::Identifier ident); SWIFT_NAME("BridgedAvailabilityMacroMap.has(self:name:)") bool BridgedAvailabilityMacroMap_hasName(BridgedAvailabilityMacroMap map, BridgedStringRef name); SWIFT_NAME("BridgedAvailabilityMacroMap.has(self:name:version:)") bool BridgedAvailabilityMacroMap_hasNameAndVersion( BridgedAvailabilityMacroMap map, BridgedStringRef name, BridgedVersionTuple version); SWIFT_NAME("BridgedAvailabilityMacroMap.get(self:name:version:)") 
BridgedArrayRef BridgedAvailabilityMacroMap_getSpecs(BridgedAvailabilityMacroMap map, BridgedStringRef name, BridgedVersionTuple version); struct BridgedAvailabilityMacroDefinition { BridgedStringRef name; BridgedVersionTuple version; BridgedArrayRef specs; }; struct BridgedAvailabilityDomainOrIdentifier { void *_Nullable opaque; BridgedAvailabilityDomainOrIdentifier() : opaque(nullptr) {}; BRIDGED_INLINE BridgedAvailabilityDomainOrIdentifier( swift::AvailabilityDomainOrIdentifier domain); BRIDGED_INLINE swift::AvailabilityDomainOrIdentifier unbridged() const; }; SWIFT_NAME("getter:BridgedAvailabilityDomainOrIdentifier.isDomain(self:)") BRIDGED_INLINE bool BridgedAvailabilityDomainOrIdentifier_isDomain( BridgedAvailabilityDomainOrIdentifier cVal); SWIFT_NAME("getter:BridgedAvailabilityDomainOrIdentifier.asIdentifier(self:)") BRIDGED_INLINE swift::Identifier BridgedAvailabilityDomainOrIdentifier_getAsIdentifier( BridgedAvailabilityDomainOrIdentifier cVal); SWIFT_NAME("BridgedAvailabilitySpec.createWildcard(_:loc:)") BridgedAvailabilitySpec BridgedAvailabilitySpec_createWildcard(BridgedASTContext cContext, swift::SourceLoc loc); SWIFT_NAME( "BridgedAvailabilitySpec.createForDomainIdentifier(_:name:nameLoc:version:" "versionRange:)") BridgedAvailabilitySpec BridgedAvailabilitySpec_createForDomainIdentifier( BridgedASTContext cContext, swift::Identifier name, swift::SourceLoc loc, BridgedVersionTuple cVersion, swift::SourceRange versionRange); SWIFT_NAME("BridgedAvailabilitySpec.clone(self:_:)") BridgedAvailabilitySpec BridgedAvailabilitySpec_clone(BridgedAvailabilitySpec spec, BridgedASTContext cContext); SWIFT_NAME("BridgedAvailabilitySpec.setMacroLoc(self:_:)") void BridgedAvailabilitySpec_setMacroLoc(BridgedAvailabilitySpec spec, swift::SourceLoc loc); SWIFT_NAME("getter:BridgedAvailabilitySpec.domainOrIdentifier(self:)") BridgedAvailabilityDomainOrIdentifier BridgedAvailabilitySpec_getDomainOrIdentifier(BridgedAvailabilitySpec spec); 
SWIFT_NAME("getter:BridgedAvailabilitySpec.sourceRange(self:)") swift::SourceRange BridgedAvailabilitySpec_getSourceRange(BridgedAvailabilitySpec spec); SWIFT_NAME("getter:BridgedAvailabilitySpec.isWildcard(self:)") bool BridgedAvailabilitySpec_isWildcard(BridgedAvailabilitySpec spec); SWIFT_NAME("getter:BridgedAvailabilitySpec.rawVersion(self:)") BridgedVersionTuple BridgedAvailabilitySpec_getRawVersion(BridgedAvailabilitySpec spec); SWIFT_NAME("getter:BridgedAvailabilitySpec.versionRange(self:)") swift::SourceRange BridgedAvailabilitySpec_getVersionRange(BridgedAvailabilitySpec spec); //===----------------------------------------------------------------------===// // MARK: AutoDiff //===----------------------------------------------------------------------===// enum ENUM_EXTENSIBILITY_ATTR(closed) BridgedDifferentiabilityKind { BridgedDifferentiabilityKindNonDifferentiable = 0, BridgedDifferentiabilityKindForward = 1, BridgedDifferentiabilityKindReverse = 2, BridgedDifferentiabilityKindNormal = 3, BridgedDifferentiabilityKindLinear = 4, }; swift::DifferentiabilityKind unbridged(BridgedDifferentiabilityKind cKind); class BridgedParsedAutoDiffParameter { private: swift::SourceLoc loc; enum Kind { Named, Ordered, Self, } kind; union Value { swift::Identifier name; unsigned index; Value(swift::Identifier name) : name(name) {} Value(unsigned index) : index(index) {} Value() : name() {} } value; BridgedParsedAutoDiffParameter(swift::SourceLoc loc, Kind kind, Value value) : loc(loc), kind(kind), value(value) {} public: SWIFT_NAME("forNamed(_:loc:)") static BridgedParsedAutoDiffParameter forNamed(swift::Identifier name, swift::SourceLoc loc) { return BridgedParsedAutoDiffParameter(loc, Kind::Named, name); } SWIFT_NAME("forOrdered(_:loc:)") static BridgedParsedAutoDiffParameter forOrdered(size_t index, swift::SourceLoc loc) { return BridgedParsedAutoDiffParameter(loc, Kind::Ordered, index); } SWIFT_NAME("forSelf(loc:)") static BridgedParsedAutoDiffParameter 
forSelf(swift::SourceLoc loc) { return BridgedParsedAutoDiffParameter(loc, Kind::Self, {}); } swift::ParsedAutoDiffParameter unbridged() const; }; //===----------------------------------------------------------------------===// // MARK: DeclAttributes //===----------------------------------------------------------------------===// BRIDGED_OPTIONAL(swift::DeclAttrKind, DeclAttrKind) SWIFT_NAME("BridgedOptionalDeclAttrKind.init(from:)") BridgedOptionalDeclAttrKind BridgedOptionalDeclAttrKind_fromString(BridgedStringRef cStr); struct BridgedDeclAttributes { BridgedNullableDeclAttribute chain; BridgedDeclAttributes() : chain(nullptr) {}; BRIDGED_INLINE BridgedDeclAttributes(swift::DeclAttributes attrs); BRIDGED_INLINE swift::DeclAttributes unbridged() const; }; SWIFT_NAME("BridgedDeclAttribute.shouldBeRejectedByParser(_:)") bool BridgedDeclAttribute_shouldBeRejectedByParser(swift::DeclAttrKind kind); SWIFT_NAME("BridgedDeclAttribute.isDeclModifier(_:)") bool BridgedDeclAttribute_isDeclModifier(swift::DeclAttrKind kind); SWIFT_NAME("BridgedDeclAttributes.add(self:_:)") void BridgedDeclAttributes_add(BridgedDeclAttributes *_Nonnull attrs, BridgedDeclAttribute add); SWIFT_NAME("BridgedDeclAttributes.hasAttribute(self:_:)") bool BridgedDeclAttributes_hasAttribute( const BridgedDeclAttributes *_Nonnull attrs, swift::DeclAttrKind kind); SWIFT_NAME("BridgedDeclAttribute.createSimple(_:kind:atLoc:nameLoc:)") BridgedDeclAttribute BridgedDeclAttribute_createSimple( BridgedASTContext cContext, swift::DeclAttrKind kind, swift::SourceLoc atLoc, swift::SourceLoc nameLoc); SWIFT_NAME("BridgedABIAttr.createParsed(_:atLoc:range:abiDecl:)") BridgedABIAttr BridgedABIAttr_createParsed(BridgedASTContext cContext, swift::SourceLoc atLoc, swift::SourceRange range, BridgedNullableDecl abiDecl); enum ENUM_EXTENSIBILITY_ATTR(closed) BridgedAvailableAttrKind { BridgedAvailableAttrKindDefault, BridgedAvailableAttrKindDeprecated, BridgedAvailableAttrKindUnavailable, 
BridgedAvailableAttrKindNoAsync, }; SWIFT_NAME("BridgedAvailableAttr.createParsed(_:atLoc:range:domainIdentifier:" "domainLoc:kind:message:renamed:introduced:introducedRange:" "deprecated:deprecatedRange:obsoleted:obsoletedRange:isSPI:)") BridgedAvailableAttr BridgedAvailableAttr_createParsed( BridgedASTContext cContext, swift::SourceLoc atLoc, swift::SourceRange range, swift::Identifier domainIdentifier, swift::SourceLoc domainLoc, BridgedAvailableAttrKind cKind, BridgedStringRef cMessage, BridgedStringRef cRenamed, BridgedVersionTuple cIntroduced, swift::SourceRange introducedRange, BridgedVersionTuple cDeprecated, swift::SourceRange deprecatedRange, BridgedVersionTuple cObsoleted, swift::SourceRange obsoletedRange, bool isSPI); SWIFT_NAME("BridgedAvailableAttr.createUnavailableInEmbedded(_:atLoc:range:)") BridgedAvailableAttr BridgedAvailableAttr_createUnavailableInEmbedded(BridgedASTContext cContext, swift::SourceLoc atLoc, swift::SourceRange range); SWIFT_NAME("BridgedAvailableAttr.setIsGroupMember(self:)") void BridgedAvailableAttr_setIsGroupMember(BridgedAvailableAttr cAttr); SWIFT_NAME("BridgedAvailableAttr.setIsGroupedWithWildcard(self:)") void BridgedAvailableAttr_setIsGroupedWithWildcard(BridgedAvailableAttr cAttr); SWIFT_NAME("BridgedAvailableAttr.setIsGroupTerminator(self:)") void BridgedAvailableAttr_setIsGroupTerminator(BridgedAvailableAttr cAttr); BRIDGED_OPTIONAL(swift::AccessLevel, AccessLevel) SWIFT_NAME("BridgedAccessControlAttr.createParsed(_:range:accessLevel:)") BridgedAccessControlAttr BridgedAccessControlAttr_createParsed(BridgedASTContext cContext, swift::SourceRange range, swift::AccessLevel accessLevel); SWIFT_NAME("BridgedAlignmentAttr.createParsed(_:atLoc:range:value:)") BridgedAlignmentAttr BridgedAlignmentAttr_createParsed(BridgedASTContext cContext, swift::SourceLoc atLoc, swift::SourceRange range, size_t cValue); SWIFT_NAME("BridgedAllowFeatureSuppressionAttr.createParsed(_:atLoc:range:inverted:features:)") 
BridgedAllowFeatureSuppressionAttr BridgedAllowFeatureSuppressionAttr_createParsed(BridgedASTContext cContext, swift::SourceLoc atLoc, swift::SourceRange range, bool inverted, BridgedArrayRef cFeatures); SWIFT_NAME( "BridgedBackDeployedAttr.createParsed(_:atLoc:range:platform:version:)") BridgedBackDeployedAttr BridgedBackDeployedAttr_createParsed( BridgedASTContext cContext, swift::SourceLoc atLoc, swift::SourceRange range, swift::PlatformKind platform, BridgedVersionTuple cVersion); SWIFT_NAME("BridgedCDeclAttr.createParsed(_:atLoc:range:name:underscored:)") BridgedCDeclAttr BridgedCDeclAttr_createParsed(BridgedASTContext cContext, swift::SourceLoc atLoc, swift::SourceRange range, BridgedStringRef cName, bool underscored); SWIFT_NAME("BridgedCustomAttr.createParsed(atLoc:type:declContext:initContext:" "argumentList:)") BridgedCustomAttr BridgedCustomAttr_createParsed( swift::SourceLoc atLoc, BridgedTypeRepr cType, BridgedDeclContext cDeclContext, BridgedNullableCustomAttributeInitializer cInitContext, BridgedNullableArgumentList cArgumentList); SWIFT_NAME("BridgedDerivativeAttr.createParsed(_:atLoc:range:baseType:" "originalName:originalNameLoc:accessorKind:params:)") BridgedDerivativeAttr BridgedDerivativeAttr_createParsed( BridgedASTContext cContext, swift::SourceLoc atLoc, swift::SourceRange range, BridgedNullableTypeRepr cBaseType, BridgedDeclNameRef cOriginalName, BridgedDeclNameLoc cOriginalNameLoc, swift::AccessorKind AccessorKind, BridgedArrayRef cParams); SWIFT_NAME("BridgedDerivativeAttr.createParsed(_:atLoc:range:baseType:" "originalName:originalNameLoc:params:)") BridgedDerivativeAttr BridgedDerivativeAttr_createParsed( BridgedASTContext cContext, swift::SourceLoc atLoc, swift::SourceRange range, BridgedNullableTypeRepr cBaseType, BridgedDeclNameRef cOriginalName, BridgedDeclNameLoc cOriginalNameLoc, BridgedArrayRef cParams); SWIFT_NAME("BridgedDifferentiableAttr.createParsed(_:atLoc:range:kind:params:" "genericWhereClause:)") 
BridgedDifferentiableAttr BridgedDifferentiableAttr_createParsed( BridgedASTContext cContext, swift::SourceLoc atLoc, swift::SourceRange range, BridgedDifferentiabilityKind cKind, BridgedArrayRef cParams, BridgedNullableTrailingWhereClause cGenericWhereClause); SWIFT_NAME("BridgedDocumentationAttr.createParsed(_:atLoc:range:metadata:" "accessLevel:)") BridgedDocumentationAttr BridgedDocumentationAttr_createParsed( BridgedASTContext cContext, swift::SourceLoc atLoc, swift::SourceRange range, BridgedStringRef cMetadata, BridgedOptionalAccessLevel accessLevel); SWIFT_NAME( "BridgedDynamicReplacementAttr.createParsed(_:atLoc:attrNameLoc:lParenLoc:" "replacedFunction:replacedFunctionLoc:rParenLoc:)") BridgedDynamicReplacementAttr BridgedDynamicReplacementAttr_createParsed( BridgedASTContext cContext, swift::SourceLoc cAtLoc, swift::SourceLoc cAttrNameLoc, swift::SourceLoc cLParenLoc, BridgedDeclNameRef cReplacedFunction, BridgedDeclNameLoc cReplacedFunctionLoc, swift::SourceLoc cRParenLoc); SWIFT_NAME("BridgedEffectsAttr.createParsed(_:atLoc:range:effectKind:)") BridgedEffectsAttr BridgedEffectsAttr_createParsed( BridgedASTContext cContext, swift::SourceLoc atLoc, swift::SourceRange range, swift::EffectsKind effectKind); SWIFT_NAME("BridgedEffectsAttr.createParsed(_:atLoc:range:customString:" "customStringLoc:)") BridgedEffectsAttr BridgedEffectsAttr_createParsed( BridgedASTContext cContext, swift::SourceLoc atLoc, swift::SourceRange range, BridgedStringRef cCustomString, swift::SourceLoc customStringLoc); enum ENUM_EXTENSIBILITY_ATTR(closed) BridgedExclusivityAttrMode { BridgedExclusivityAttrModeChecked, BridgedExclusivityAttrModeUnchecked, }; SWIFT_NAME("BridgedExclusivityAttr.createParsed(_:atLoc:range:mode:)") BridgedExclusivityAttr BridgedExclusivityAttr_createParsed( BridgedASTContext cContext, swift::SourceLoc atLoc, swift::SourceRange range, BridgedExclusivityAttrMode cMode); SWIFT_NAME("BridgedExposeAttr.createParsed(_:atLoc:range:name:kind:)") 
// @_expose(<kind>, "<name>").
BridgedExposeAttr BridgedExposeAttr_createParsed(BridgedASTContext cContext,
                                                 swift::SourceLoc atLoc,
                                                 swift::SourceRange range,
                                                 BridgedStringRef cName,
                                                 swift::ExposureKind kind);

// @_extern(<kind>, ...).
SWIFT_NAME("BridgedExternAttr.createParsed(_:atLoc:range:lParenLoc:rParenLoc:"
           "kind:moduleName:name:)")
BridgedExternAttr BridgedExternAttr_createParsed(
    BridgedASTContext cContext, swift::SourceLoc atLoc, swift::SourceRange range,
    swift::SourceLoc lParenLoc, swift::SourceLoc rParenLoc,
    swift::ExternKind kind, BridgedStringRef cModuleName,
    BridgedStringRef cName);

// @_implements(Protocol, memberName).
SWIFT_NAME("BridgedImplementsAttr.createParsed(_:atLoc:range:protocolType:"
           "memberName:memberNameLoc:)")
BridgedImplementsAttr BridgedImplementsAttr_createParsed(
    BridgedASTContext cContext, swift::SourceLoc atLoc, swift::SourceRange range,
    BridgedTypeRepr cProtocolType, BridgedDeclNameRef cMemberName,
    BridgedDeclNameLoc cMemberNameLoc);

SWIFT_NAME("BridgedExportAttr.createParsed(_:atLoc:range:kind:)")
BridgedExportAttr BridgedExportAttr_createParsed(BridgedASTContext cContext,
                                                 swift::SourceLoc atLoc,
                                                 swift::SourceRange range,
                                                 swift::ExportKind kind);

// @inline(never) / @inline(__always).
SWIFT_NAME("BridgedInlineAttr.createParsed(_:atLoc:range:kind:)")
BridgedInlineAttr BridgedInlineAttr_createParsed(BridgedASTContext cContext,
                                                 swift::SourceLoc atLoc,
                                                 swift::SourceRange range,
                                                 swift::InlineKind kind);

// Parsed spelling of a lifetime-dependence kind in @lifetime(...).
enum ENUM_EXTENSIBILITY_ATTR(closed) BridgedParsedLifetimeDependenceKind {
  BridgedParsedLifetimeDependenceKindDefault,
  BridgedParsedLifetimeDependenceKindBorrow,
  BridgedParsedLifetimeDependenceKindInherit,
  BridgedParsedLifetimeDependenceKindInout
};

// A single source/target operand of a @lifetime entry: either a named
// parameter, an ordered (positional) parameter, or `self`. Built only through
// the three static factories below.
class BridgedLifetimeDescriptor {
  // Tagged payload; `kind` below selects which member is active.
  union Value {
    swift::Identifier name;
    unsigned index;
    Value(swift::Identifier name) : name(name) {}
    Value(unsigned index) : index(index) {}
    Value() : name() {}
  } value;

  // Discriminator for `value`.
  enum DescriptorKind {
    Named,
    Ordered,
    Self,
  } kind;

  BridgedParsedLifetimeDependenceKind dependenceKind;
  swift::SourceLoc loc;

  BridgedLifetimeDescriptor(Value value, DescriptorKind kind,
                            BridgedParsedLifetimeDependenceKind dependenceKind,
                            swift::SourceLoc loc)
      : value(value), kind(kind), dependenceKind(dependenceKind), loc(loc) {}

public:
  // Descriptor referring to a parameter by name.
  SWIFT_NAME("forNamed(_:dependenceKind:loc:)")
  static BridgedLifetimeDescriptor
  forNamed(swift::Identifier name,
           BridgedParsedLifetimeDependenceKind dependenceKind,
           swift::SourceLoc loc) {
    return BridgedLifetimeDescriptor(name, DescriptorKind::Named,
                                     dependenceKind, loc);
  }

  // Descriptor referring to a parameter by position.
  SWIFT_NAME("forOrdered(_:dependenceKind:loc:)")
  static BridgedLifetimeDescriptor
  forOrdered(size_t index,
             BridgedParsedLifetimeDependenceKind dependenceKind,
             swift::SourceLoc loc) {
    return BridgedLifetimeDescriptor(index, DescriptorKind::Ordered,
                                     dependenceKind, loc);
  }

  // Descriptor referring to `self`.
  SWIFT_NAME("forSelf(dependenceKind:loc:)")
  static BridgedLifetimeDescriptor
  forSelf(BridgedParsedLifetimeDependenceKind dependenceKind,
          swift::SourceLoc loc) {
    return BridgedLifetimeDescriptor({}, DescriptorKind::Self, dependenceKind,
                                     loc);
  }

  swift::LifetimeDescriptor unbridged();
};

// @lifetime entry without an explicit target.
SWIFT_NAME("BridgedLifetimeEntry.createParsed(_:range:sources:)")
BridgedLifetimeEntry
BridgedLifetimeEntry_createParsed(BridgedASTContext cContext,
                                  swift::SourceRange range,
                                  BridgedArrayRef cSources);

// @lifetime entry with an explicit target descriptor.
SWIFT_NAME("BridgedLifetimeEntry.createParsed(_:range:sources:target:)")
BridgedLifetimeEntry BridgedLifetimeEntry_createParsed(
    BridgedASTContext cContext, swift::SourceRange range,
    BridgedArrayRef cSources, BridgedLifetimeDescriptor cTarget);

// @lifetime / @_lifetime (isUnderscored selects the spelling).
SWIFT_NAME(
    "BridgedLifetimeAttr.createParsed(_:atLoc:range:entry:isUnderscored:)")
BridgedLifetimeAttr BridgedLifetimeAttr_createParsed(
    BridgedASTContext cContext, swift::SourceLoc atLoc, swift::SourceRange range,
    BridgedLifetimeEntry cEntry, bool isUnderscored);

// Whether a macro is freestanding (#foo) or attached (@foo).
enum ENUM_EXTENSIBILITY_ATTR(closed) BridgedMacroSyntax {
  BridgedMacroSyntaxFreestanding,
  BridgedMacroSyntaxAttached,
};

// NOTE: this enum's remaining cases continue on the next line of the
// original file.
enum ENUM_EXTENSIBILITY_ATTR(closed) BridgedMacroIntroducedDeclNameKind {
  BridgedMacroIntroducedDeclNameKindNamed,
  BridgedMacroIntroducedDeclNameKindOverloaded,
  BridgedMacroIntroducedDeclNameKindPrefixed,
  // Continuation of BridgedMacroIntroducedDeclNameKind (opened on the previous
  // line of the original file).
  BridgedMacroIntroducedDeclNameKindSuffixed,
  BridgedMacroIntroducedDeclNameKindArbitrary,
};

BRIDGED_INLINE swift::MacroIntroducedDeclNameKind
unbridge(BridgedMacroIntroducedDeclNameKind kind);

// A name introduced by an attached macro: a kind plus the (possibly empty)
// associated declaration name.
struct BridgedMacroIntroducedDeclName {
  BridgedMacroIntroducedDeclNameKind kind;
  BridgedDeclNameRef name;

  BRIDGED_INLINE swift::MacroIntroducedDeclName unbridged() const;
};

// Macro roles, generated from the x-macro list in MacroRoles.def, plus a
// trailing "none" sentinel.
enum ENUM_EXTENSIBILITY_ATTR(closed) BridgedMacroRole {
#define MACRO_ROLE(Name, Description) BridgedMacroRole##Name,
#include "swift/Basic/MacroRoles.def"
  BridgedMacroRoleNone,
};

BRIDGED_INLINE swift::MacroRole unbridge(BridgedMacroRole cRole);

// Parses a role spelling; presumably yields BridgedMacroRoleNone when the
// string is not a known role — confirm against the implementation.
SWIFT_NAME("BridgedMacroRole.init(from:)")
BridgedMacroRole BridgedMacroRole_fromString(BridgedStringRef str);

SWIFT_NAME("getter:BridgedMacroRole.isAttached(self:)")
BRIDGED_INLINE bool BridgedMacroRole_isAttached(BridgedMacroRole role);

// @attached(...) / @freestanding(...) role attribute.
SWIFT_NAME("BridgedMacroRoleAttr.createParsed(_:atLoc:range:syntax:lParenLoc:"
           "role:names:conformances:rParenLoc:)")
BridgedMacroRoleAttr BridgedMacroRoleAttr_createParsed(
    BridgedASTContext cContext, swift::SourceLoc atLoc, swift::SourceRange range,
    BridgedMacroSyntax cSyntax, swift::SourceLoc lParenLoc,
    BridgedMacroRole cRole, BridgedArrayRef cNames,
    BridgedArrayRef cConformances, swift::SourceLoc rParenLoc);

// @_originallyDefinedIn(module:platform:version:).
SWIFT_NAME("BridgedOriginallyDefinedInAttr.createParsed(_:atLoc:range:"
           "moduleName:platform:version:)")
BridgedOriginallyDefinedInAttr BridgedOriginallyDefinedInAttr_createParsed(
    BridgedASTContext cContext, swift::SourceLoc atLoc, swift::SourceRange range,
    BridgedStringRef cModuleName, swift::PlatformKind platform,
    BridgedVersionTuple cVersion);

// @storageRestrictions(initializes:accesses:).
SWIFT_NAME("BridgedStorageRestrictionsAttr.createParsed(_:atLoc:range:"
           "initializes:accesses:)")
BridgedStorageRestrictionsAttr BridgedStorageRestrictionsAttr_createParsed(
    BridgedASTContext cContext, swift::SourceLoc atLoc, swift::SourceRange range,
    BridgedArrayRef cInitializes, BridgedArrayRef cAccesses);

SWIFT_NAME(
    "BridgedSwiftNativeObjCRuntimeBaseAttr.createParsed(_:atLoc:range:name:)")
BridgedSwiftNativeObjCRuntimeBaseAttr
BridgedSwiftNativeObjCRuntimeBaseAttr_createParsed(BridgedASTContext cContext,
                                                   swift::SourceLoc atLoc,
                                                   swift::SourceRange range,
                                                   swift::Identifier name);

// Warning-group control attribute.
SWIFT_NAME(
    "BridgedWarnAttr.createParsed(_:atLoc:range:diagGroupName:behavior:reason:)")
BridgedWarnAttr BridgedWarnAttr_createParsed(BridgedASTContext cContext,
                                             swift::SourceLoc atLoc,
                                             swift::SourceRange range,
                                             swift::Identifier diagGroupName,
                                             swift::WarningGroupBehavior behavior,
                                             BridgedStringRef reason);

// Modes for @_nonSendable.
enum ENUM_EXTENSIBILITY_ATTR(closed) BridgedNonSendableKind {
  BridgedNonSendableKindSpecific,
  BridgedNonSendableKindAssumed,
};

SWIFT_NAME("BridgedWarningGroupBehaviorRule.getGroupName(self:)")
BridgedStringRef BridgedWarningGroupBehaviorRule_getGroupName(
    BridgedWarningGroupBehaviorRule rule);

SWIFT_NAME("BridgedWarningGroupBehaviorRule.getBehavior(self:)")
swift::WarningGroupBehavior BridgedWarningGroupBehaviorRule_getBehavior(
    BridgedWarningGroupBehaviorRule rule);

SWIFT_NAME("getDiagnosticGroupLinksCount()")
SwiftInt BridgedDiagnosticGroupLinks_getCount();

// Returns the (group name, documentation link) pair at `index`.
SWIFT_NAME("getDiagnosticGroupLink(at:)")
std::pair<BridgedStringRef, BridgedStringRef>
BridgedDiagnosticGroupLinks_getLink(SwiftInt index);

SWIFT_NAME("BridgedNonSendableAttr.createParsed(_:atLoc:range:kind:)")
BridgedNonSendableAttr BridgedNonSendableAttr_createParsed(
    BridgedASTContext cContext, swift::SourceLoc atLoc, swift::SourceRange range,
    BridgedNonSendableKind cKind);

// nonisolated(...) modifier attribute.
SWIFT_NAME("BridgedNonisolatedAttr.createParsed(_:atLoc:range:modifier:)")
BridgedNonisolatedAttr BridgedNonisolatedAttr_createParsed(
    BridgedASTContext cContext, swift::SourceLoc atLoc, swift::SourceRange range,
    swift::NonIsolatedModifier modifier);

// @_inheritActorContext(...).
SWIFT_NAME(
    "BridgedInheritActorContextAttr.createParsed(_:atLoc:range:modifier:)")
BridgedInheritActorContextAttr BridgedInheritActorContextAttr_createParsed(
    BridgedASTContext cContext, swift::SourceLoc atLoc,
    swift::SourceRange range, swift::InheritActorContextModifier modifier);

// @objc with no name.
SWIFT_NAME("BridgedObjCAttr.createParsedUnnamed(_:atLoc:attrNameLoc:)")
BridgedObjCAttr BridgedObjCAttr_createParsedUnnamed(BridgedASTContext cContext,
                                                    swift::SourceLoc atLoc,
                                                    swift::SourceLoc attrNameLoc);

// @objc(name) with a single (nullary) name.
SWIFT_NAME("BridgedObjCAttr.createParsedNullary(_:atLoc:attrNameLoc:lParenLoc:"
           "nameLoc:name:rParenLoc:)")
BridgedObjCAttr BridgedObjCAttr_createParsedNullary(
    BridgedASTContext cContext, swift::SourceLoc atLoc,
    swift::SourceLoc attrNameLoc, swift::SourceLoc lParenLoc,
    swift::SourceLoc nameLoc, swift::Identifier name,
    swift::SourceLoc rParenLoc);

// @objc(name:with:pieces:) with a multi-piece selector.
SWIFT_NAME("BridgedObjCAttr.createParsedSelector(_:atLoc:attrNameLoc:lParenLoc:"
           "nameLocs:names:rParenLoc:)")
BridgedObjCAttr BridgedObjCAttr_createParsedSelector(
    BridgedASTContext cContext, swift::SourceLoc atLoc,
    swift::SourceLoc attrNameLoc, swift::SourceLoc lParenLoc,
    BridgedArrayRef cNameLocs, BridgedArrayRef cNames,
    swift::SourceLoc rParenLoc);

// @implementation / @_objcImplementation (isEarlyAdopter selects the
// pre-official spelling).
SWIFT_NAME("BridgedObjCImplementationAttr.createParsed(_:atLoc:range:name:"
           "isEarlyAdopter:)")
BridgedObjCImplementationAttr BridgedObjCImplementationAttr_createParsed(
    BridgedASTContext cContext, swift::SourceLoc atLoc, swift::SourceRange range,
    swift::Identifier name, bool isEarlyAdopter);

SWIFT_NAME("BridgedObjCRuntimeNameAttr.createParsed(_:atLoc:range:name:)")
BridgedObjCRuntimeNameAttr BridgedObjCRuntimeNameAttr_createParsed(
    BridgedASTContext cContext, swift::SourceLoc atLoc, swift::SourceRange range,
    swift::Identifier name);

// Modes for @_optimize(...).
enum ENUM_EXTENSIBILITY_ATTR(closed) BridgedOptimizationMode {
  BridgedOptimizationModeForSpeed,
  BridgedOptimizationModeForSize,
  BridgedOptimizationModeNoOptimization,
};

SWIFT_NAME("BridgedOptimizeAttr.createParsed(_:atLoc:range:mode:)")
BridgedOptimizeAttr BridgedOptimizeAttr_createParsed(
    BridgedASTContext cContext, swift::SourceLoc atLoc, swift::SourceRange range,
    BridgedOptimizationMode cMode);

// NOTE: this SWIFT_NAME string continues on the next line of the original
// file.
SWIFT_NAME("BridgedPrivateImportAttr.createParsed(_:atLoc:attrNameLoc:"
"lParenLoc:fileName:rParenLoc:)") BridgedPrivateImportAttr BridgedPrivateImportAttr_createParsed( BridgedASTContext cContext, swift::SourceLoc atLoc, swift::SourceLoc attrNameLoc, swift::SourceLoc lParenLoc, BridgedStringRef cFileName, swift::SourceLoc rParenLoc); SWIFT_NAME( "BridgedProjectedValuePropertyAttr.createParsed(_:atLoc:range:name:)") BridgedProjectedValuePropertyAttr BridgedProjectedValuePropertyAttr_createParsed(BridgedASTContext cContext, swift::SourceLoc atLoc, swift::SourceRange range, swift::Identifier name); SWIFT_NAME("BridgedRawDocCommentAttr.createParsed(_:range:)") BridgedRawDocCommentAttr BridgedRawDocCommentAttr_createParsed(BridgedASTContext cContext, swift::CharSourceRange range); SWIFT_NAME("BridgedRawLayoutAttr.createParsed(_:atLoc:range:size:alignment:)") BridgedRawLayoutAttr BridgedStorageRestrictionsAttr_createParsed( BridgedASTContext cContext, swift::SourceLoc atLoc, swift::SourceRange range, size_t size, size_t alignment); SWIFT_NAME("BridgedRawLayoutAttr.createParsed(_:atLoc:range:like:moveAsLike:)") BridgedRawLayoutAttr BridgedStorageRestrictionsAttr_createParsed( BridgedASTContext cContext, swift::SourceLoc atLoc, swift::SourceRange range, BridgedTypeRepr cLikeType, bool moveAsLike); SWIFT_NAME("BridgedRawLayoutAttr.createParsed(_:atLoc:range:likeArrayOf:count:" "moveAsLike:)") BridgedRawLayoutAttr BridgedStorageRestrictionsAttr_createParsed( BridgedASTContext cContext, swift::SourceLoc atLoc, swift::SourceRange range, BridgedTypeRepr cLikeType, BridgedTypeRepr cCountType, bool moveAsLike); enum ENUM_EXTENSIBILITY_ATTR(closed) BridgedReferenceOwnership { BridgedReferenceOwnershipStrong, BridgedReferenceOwnershipWeak, BridgedReferenceOwnershipUnowned, BridgedReferenceOwnershipUnmanaged, }; swift::ReferenceOwnership unbridged(BridgedReferenceOwnership kind); SWIFT_NAME("BridgedReferenceOwnershipAttr.createParsed(_:atLoc:range:kind:)") BridgedReferenceOwnershipAttr BridgedReferenceOwnershipAttr_createParsed( BridgedASTContext 
    // Continuation of BridgedReferenceOwnershipAttr_createParsed (opened on
    // the previous line of the original file).
    cContext, swift::SourceLoc atLoc, swift::SourceRange range,
    BridgedReferenceOwnership cKind);

// @_section("name").
SWIFT_NAME("BridgedSectionAttr.createParsed(_:atLoc:range:name:)")
BridgedSectionAttr BridgedSectionAttr_createParsed(BridgedASTContext cContext,
                                                   swift::SourceLoc atLoc,
                                                   swift::SourceRange range,
                                                   BridgedStringRef cName);

// @_semantics("value").
SWIFT_NAME("BridgedSemanticsAttr.createParsed(_:atLoc:range:value:)")
BridgedSemanticsAttr BridgedSemanticsAttr_createParsed(
    BridgedASTContext cContext, swift::SourceLoc atLoc, swift::SourceRange range,
    BridgedStringRef cValue);

// e.g. private(set).
SWIFT_NAME("BridgedSetterAccessAttr.createParsed(_:range:accessLevel:)")
BridgedSetterAccessAttr
BridgedSetterAccessAttr_createParsed(BridgedASTContext cContext,
                                     swift::SourceRange range,
                                     swift::AccessLevel accessLevel);

// Full vs. partial specialization for @_specialize.
enum ENUM_EXTENSIBILITY_ATTR(closed) BridgedSpecializationKind : uint8_t {
  BridgedSpecializationKindFull,
  BridgedSpecializationKindPartial,
};

// @_specialize(...).
SWIFT_NAME("BridgedSpecializeAttr.createParsed(_:atLoc:range:whereClause:"
           "exported:kind:targetFunction:targetFunctionLoc:spiGroups:"
           "availableAttrs:)")
BridgedSpecializeAttr BridgedSpecializeAttr_createParsed(
    BridgedASTContext cContext, swift::SourceLoc atLoc, swift::SourceRange range,
    BridgedNullableTrailingWhereClause cWhereClause, bool exported,
    BridgedSpecializationKind cKind, BridgedDeclNameRef cTargetFunction,
    BridgedDeclNameLoc cTargetFunctionLoc, BridgedArrayRef cSPIGroups,
    BridgedArrayRef cAvailableAttrs);

// @specialized(...) — same shape as @_specialize above.
SWIFT_NAME("BridgedSpecializedAttr.createParsed(_:atLoc:range:whereClause:"
           "exported:kind:targetFunction:targetFunctionLoc:spiGroups:"
           "availableAttrs:)")
BridgedSpecializedAttr BridgedSpecializedAttr_createParsed(
    BridgedASTContext cContext, swift::SourceLoc atLoc, swift::SourceRange range,
    BridgedNullableTrailingWhereClause cWhereClause, bool exported,
    BridgedSpecializationKind cKind, BridgedDeclNameRef cTargetFunction,
    BridgedDeclNameLoc cTargetFunctionLoc, BridgedArrayRef cSPIGroups,
    BridgedArrayRef cAvailableAttrs);

// @_spi(groupName).
SWIFT_NAME(
    "BridgedSPIAccessControlAttr.createParsed(_:atLoc:range:spiGroupName:)")
BridgedSPIAccessControlAttr BridgedSPIAccessControlAttr_createParsed(
    BridgedASTContext cContext, swift::SourceLoc atLoc, swift::SourceRange range,
    swift::Identifier SPIGroupName);

// @_silgen_name("name").
SWIFT_NAME("BridgedSILGenNameAttr.createParsed(_:atLoc:range:name:isRaw:)")
BridgedSILGenNameAttr BridgedSILGenNameAttr_createParsed(
    BridgedASTContext cContext, swift::SourceLoc atLoc, swift::SourceRange range,
    BridgedStringRef cName, bool isRaw);

// @transpose(of:).
SWIFT_NAME(
    "BridgedTransposeAttr.createParsed(_:atLoc:range:baseType:originalName:"
    "originalNameLoc:params:)")
BridgedTransposeAttr BridgedTransposeAttr_createParsed(
    BridgedASTContext cContext, swift::SourceLoc atLoc, swift::SourceRange range,
    BridgedNullableTypeRepr cBaseType, BridgedDeclNameRef cOriginalName,
    BridgedDeclNameLoc cOriginalNameLoc, BridgedArrayRef cParams);

// @_typeEraser(Type).
SWIFT_NAME("BridgedTypeEraserAttr.createParsed(_:atLoc:range:typeExpr:)")
BridgedTypeEraserAttr BridgedTypeEraserAttr_createParsed(
    BridgedASTContext cContext, swift::SourceLoc atLoc, swift::SourceRange range,
    BridgedTypeExpr cTypeExpr);

// @_unavailableFromAsync(message:).
SWIFT_NAME(
    "BridgedUnavailableFromAsyncAttr.createParsed(_:atLoc:range:message:)")
BridgedUnavailableFromAsyncAttr BridgedUnavailableFromAsyncAttr_createParsed(
    BridgedASTContext cContext, swift::SourceLoc atLoc, swift::SourceRange range,
    BridgedStringRef cMessage);

//===----------------------------------------------------------------------===//
// MARK: Decls
//===----------------------------------------------------------------------===//

struct BridgedFingerprint;

// Attaches the given parsed attribute list to `decl`.
SWIFT_NAME("BridgedDecl.attachParsedAttrs(self:_:)")
void BridgedDecl_attachParsedAttrs(BridgedDecl decl,
                                   BridgedDeclAttributes attrs);

// Invokes `closure` for each declaration that must be hoisted out of `decl`.
SWIFT_NAME("BridgedDecl.forEachDeclToHoist(self:_:)")
void BridgedDecl_forEachDeclToHoist(BridgedDecl decl,
                                    BridgedSwiftClosure closure);

// How `static`/`class` was spelled, if at all. NOTE: the final case continues
// on the next line of the original file.
enum ENUM_EXTENSIBILITY_ATTR(closed) BridgedStaticSpelling {
  BridgedStaticSpellingNone,
  BridgedStaticSpellingStatic,
BridgedStaticSpellingClass }; struct BridgedAccessorRecord { swift::SourceLoc lBraceLoc; BridgedArrayRef accessors; swift::SourceLoc rBraceLoc; }; SWIFT_NAME("BridgedAccessorDecl.createParsed(_:declContext:kind:storage:" "declLoc:accessorKeywordLoc:parameterList:asyncSpecifierLoc:" "throwsSpecifierLoc:thrownType:)") BridgedAccessorDecl BridgedAccessorDecl_createParsed( BridgedASTContext cContext, BridgedDeclContext cDeclContext, swift::AccessorKind Kind, BridgedAbstractStorageDecl cStorage, swift::SourceLoc declLoc, swift::SourceLoc accessorKeywordLoc, BridgedNullableParameterList cParamList, swift::SourceLoc asyncLoc, swift::SourceLoc throwsLoc, BridgedNullableTypeRepr cThrownType); enum ENUM_EXTENSIBILITY_ATTR(closed) BridgedVarDeclIntroducer { BridgedVarDeclIntroducerLet = 0, BridgedVarDeclIntroducerVar = 1, BridgedVarDeclIntroducerInOut = 2, BridgedVarDeclIntroducerBorrowing = 3, }; SWIFT_NAME("BridgedPatternBindingDecl.createParsed(_:declContext:attributes:" "staticLoc:staticSpelling:introducerLoc:introducer:entries:)") BridgedPatternBindingDecl BridgedPatternBindingDecl_createParsed( BridgedASTContext cContext, BridgedDeclContext cDeclContext, BridgedDeclAttributes cAttrs, swift::SourceLoc staticLoc, BridgedStaticSpelling cStaticSpelling, swift::SourceLoc introducerLoc, BridgedVarDeclIntroducer cIntorducer, BridgedArrayRef cBindingEntries); SWIFT_NAME("BridgedParamDecl.createParsed(_:declContext:specifierLoc:argName:" "argNameLoc:paramName:paramNameLoc:defaultValue:" "defaultValueInitContext:)") BridgedParamDecl BridgedParamDecl_createParsed( BridgedASTContext cContext, BridgedDeclContext cDeclContext, swift::SourceLoc specifierLoc, swift::Identifier argName, swift::SourceLoc argNameLoc, swift::Identifier paramName, swift::SourceLoc paramNameLoc, BridgedNullableExpr defaultValue, BridgedNullableDefaultArgumentInitializer cDefaultArgumentInitContext); SWIFT_NAME("BridgedParamDecl.cloneWithoutType(self:)") BRIDGED_INLINE BridgedParamDecl 
// Continuation of BridgedParamDecl_cloneWithoutType (SWIFT_NAME and return
// type are on the previous line of the original file).
BridgedParamDecl_cloneWithoutType(BridgedParamDecl cDecl);

SWIFT_NAME("BridgedParamDecl.setTypeRepr(self:_:)")
BRIDGED_INLINE void BridgedParamDecl_setTypeRepr(BridgedParamDecl cDecl,
                                                 BridgedTypeRepr cType);

SWIFT_NAME("BridgedParamDecl.setInterfaceType(self:_:)")
BRIDGED_INLINE void BridgedParamDecl_setInterfaceType(BridgedParamDecl cDecl,
                                                      BridgedASTType cType);

/// The various spellings of ownership modifier that can be used in source.
enum ENUM_EXTENSIBILITY_ATTR(closed) BridgedParamSpecifier {
  BridgedParamSpecifierDefault,
  BridgedParamSpecifierInOut,
  BridgedParamSpecifierBorrowing,
  BridgedParamSpecifierConsuming,
  BridgedParamSpecifierLegacyShared,
  BridgedParamSpecifierLegacyOwned,
  BridgedParamSpecifierImplicitlyCopyableConsuming,
};

BRIDGED_INLINE swift::ParamSpecifier unbridge(BridgedParamSpecifier kind);

SWIFT_NAME("BridgedParamDecl.setSpecifier(self:_:)")
BRIDGED_INLINE void BridgedParamDecl_setSpecifier(
    BridgedParamDecl cDecl, BridgedParamSpecifier cSpecifier);

SWIFT_NAME("BridgedParamDecl.setImplicit(self:)")
BRIDGED_INLINE void BridgedParamDecl_setImplicit(BridgedParamDecl cDecl);

// Installs a parsed body on the respective declaration kinds.
SWIFT_NAME("BridgedConstructorDecl.setParsedBody(self:_:)")
void BridgedConstructorDecl_setParsedBody(BridgedConstructorDecl decl,
                                          BridgedBraceStmt body);

SWIFT_NAME("BridgedFuncDecl.setParsedBody(self:_:)")
void BridgedFuncDecl_setParsedBody(BridgedFuncDecl decl, BridgedBraceStmt body);

SWIFT_NAME("BridgedDestructorDecl.setParsedBody(self:_:)")
void BridgedDestructorDecl_setParsedBody(BridgedDestructorDecl decl,
                                         BridgedBraceStmt body);

// Builds a parsed `func` declaration.
SWIFT_NAME("BridgedFuncDecl.createParsed(_:declContext:staticLoc:"
           "staticSpelling:funcKeywordLoc:"
           "name:nameLoc:genericParamList:parameterList:asyncSpecifierLoc:"
           "throwsSpecifierLoc:thrownType:returnType:genericWhereClause:)")
BridgedFuncDecl BridgedFuncDecl_createParsed(
    BridgedASTContext cContext, BridgedDeclContext cDeclContext,
    swift::SourceLoc staticLoc, BridgedStaticSpelling cStaticSpelling,
    swift::SourceLoc funcKeywordLoc,
    swift::Identifier name, swift::SourceLoc nameLoc,
    BridgedNullableGenericParamList genericParamList,
    BridgedParameterList parameterList, swift::SourceLoc asyncLoc,
    swift::SourceLoc throwsLoc, BridgedNullableTypeRepr thrownType,
    BridgedNullableTypeRepr returnType,
    BridgedNullableTrailingWhereClause opaqueGenericWhereClause);

// Builds a parsed `init` declaration (isIUO: `init!` vs `init?`).
SWIFT_NAME(
    "BridgedConstructorDecl.createParsed(_:declContext:initKeywordLoc:"
    "failabilityMarkLoc:isIUO:genericParamList:parameterList:"
    "asyncSpecifierLoc:throwsSpecifierLoc:thrownType:genericWhereClause:)")
BridgedConstructorDecl BridgedConstructorDecl_createParsed(
    BridgedASTContext cContext, BridgedDeclContext cDeclContext,
    swift::SourceLoc initKeywordLoc, swift::SourceLoc failabilityMarkLoc,
    bool isIUO, BridgedNullableGenericParamList genericParams,
    BridgedParameterList parameterList, swift::SourceLoc asyncLoc,
    swift::SourceLoc throwsLoc, BridgedNullableTypeRepr thrownType,
    BridgedNullableTrailingWhereClause genericWhereClause);

// Builds a parsed `deinit` declaration.
SWIFT_NAME(
    "BridgedDestructorDecl.createParsed(_:declContext:deinitKeywordLoc:)")
BridgedDestructorDecl
BridgedDestructorDecl_createParsed(BridgedASTContext cContext,
                                   BridgedDeclContext cDeclContext,
                                   swift::SourceLoc deinitKeywordLoc);

// Builds a parsed `typealias` declaration.
SWIFT_NAME(
    "BridgedTypeAliasDecl.createParsed(_:declContext:typealiasKeywordLoc:name:"
    "nameLoc:genericParamList:equalLoc:underlyingType:genericWhereClause:)")
BridgedTypeAliasDecl BridgedTypeAliasDecl_createParsed(
    BridgedASTContext cContext, BridgedDeclContext cDeclContext,
    swift::SourceLoc aliasKeywordLoc, swift::Identifier name,
    swift::SourceLoc nameLoc, BridgedNullableGenericParamList genericParamList,
    swift::SourceLoc equalLoc, BridgedTypeRepr underlyingType,
    BridgedNullableTrailingWhereClause genericWhereClause);

// Installs parsed members on an extension, with their fingerprint.
SWIFT_NAME("BridgedExtensionDecl.setParsedMembers(self:_:fingerprint:)")
void BridgedExtensionDecl_setParsedMembers(BridgedExtensionDecl decl,
                                           BridgedArrayRef members,
                                           BridgedFingerprint fingerprint);

// NOTE: this SWIFT_NAME's string argument continues on the next line of the
// original file.
SWIFT_NAME(
    // Continuation of the SWIFT_NAME( opened on the previous line of the
    // original file.
    "BridgedEnumDecl.createParsed(_:declContext:enumKeywordLoc:name:nameLoc:"
    "genericParamList:inheritedTypes:genericWhereClause:braceRange:)")
BridgedEnumDecl BridgedEnumDecl_createParsed(
    BridgedASTContext cContext, BridgedDeclContext cDeclContext,
    swift::SourceLoc enumKeywordLoc, swift::Identifier name,
    swift::SourceLoc nameLoc, BridgedNullableGenericParamList genericParamList,
    BridgedArrayRef cInheritedTypes,
    BridgedNullableTrailingWhereClause genericWhereClause,
    swift::SourceRange braceRange);

// Builds a parsed `case` declaration grouping one or more elements.
SWIFT_NAME(
    "BridgedEnumCaseDecl.createParsed(declContext:caseKeywordLoc:elements:)")
BridgedEnumCaseDecl
BridgedEnumCaseDecl_createParsed(BridgedDeclContext cDeclContext,
                                 swift::SourceLoc caseKeywordLoc,
                                 BridgedArrayRef cElements);

// Builds a single parsed enum element (payload and/or raw value optional).
SWIFT_NAME("BridgedEnumElementDecl.createParsed(_:declContext:name:nameLoc:"
           "parameterList:equalsLoc:rawValue:)")
BridgedEnumElementDecl BridgedEnumElementDecl_createParsed(
    BridgedASTContext cContext, BridgedDeclContext cDeclContext,
    swift::Identifier name, swift::SourceLoc nameLoc,
    BridgedNullableParameterList parameterList, swift::SourceLoc equalsLoc,
    BridgedNullableExpr opaqueRawValue);

// Builds a parsed `struct` declaration.
SWIFT_NAME("BridgedStructDecl.createParsed(_:declContext:structKeywordLoc:name:"
           "nameLoc:genericParamList:inheritedTypes:genericWhereClause:"
           "braceRange:)")
BridgedNominalTypeDecl BridgedStructDecl_createParsed(
    BridgedASTContext cContext, BridgedDeclContext cDeclContext,
    swift::SourceLoc structKeywordLoc, swift::Identifier name,
    swift::SourceLoc nameLoc, BridgedNullableGenericParamList genericParamList,
    BridgedArrayRef cInheritedTypes,
    BridgedNullableTrailingWhereClause genericWhereClause,
    swift::SourceRange braceRange);

// Builds a parsed `class` (or `actor`, when isActor) declaration.
SWIFT_NAME(
    "BridgedClassDecl.createParsed(_:declContext:classKeywordLoc:name:nameLoc:"
    "genericParamList:inheritedTypes:genericWhereClause:braceRange:isActor:)")
BridgedNominalTypeDecl BridgedClassDecl_createParsed(
    BridgedASTContext cContext, BridgedDeclContext cDeclContext,
    swift::SourceLoc classKeywordLoc, swift::Identifier name,
    swift::SourceLoc nameLoc, BridgedNullableGenericParamList genericParamList,
    BridgedArrayRef cInheritedTypes,
    BridgedNullableTrailingWhereClause genericWhereClause,
    swift::SourceRange braceRange, bool isActor);

// Builds a parsed `protocol` declaration.
SWIFT_NAME(
    "BridgedProtocolDecl.createParsed(_:declContext:protocolKeywordLoc:name:"
    "nameLoc:primaryAssociatedTypeNames:inheritedTypes:"
    "genericWhereClause:braceRange:)")
BridgedNominalTypeDecl BridgedProtocolDecl_createParsed(
    BridgedASTContext cContext, BridgedDeclContext cDeclContext,
    swift::SourceLoc protocolKeywordLoc, swift::Identifier name,
    swift::SourceLoc nameLoc, BridgedArrayRef cPrimaryAssociatedTypeNames,
    BridgedArrayRef cInheritedTypes,
    BridgedNullableTrailingWhereClause genericWhereClause,
    swift::SourceRange braceRange);

// Builds a parsed `associatedtype` declaration.
SWIFT_NAME("BridgedAssociatedTypeDecl.createParsed(_:declContext:"
           "associatedtypeKeywordLoc:name:nameLoc:inheritedTypes:defaultType:"
           "genericWhereClause:)")
BridgedAssociatedTypeDecl BridgedAssociatedTypeDecl_createParsed(
    BridgedASTContext cContext, BridgedDeclContext cDeclContext,
    swift::SourceLoc associatedtypeKeywordLoc, swift::Identifier name,
    swift::SourceLoc nameLoc, BridgedArrayRef cInheritedTypes,
    BridgedNullableTypeRepr opaqueDefaultType,
    BridgedNullableTrailingWhereClause genericWhereClause);

// Builds a parsed `macro` declaration.
SWIFT_NAME(
    "BridgedMacroDecl.createParsed(_:declContext:macroKeywordLoc:name:nameLoc:"
    "genericParamList:paramList:arrowLoc:resultType:definition:"
    "genericWhereClause:)")
BridgedMacroDecl BridgedMacroDecl_createParsed(
    BridgedASTContext cContext, BridgedDeclContext cDeclContext,
    swift::SourceLoc macroLoc, swift::Identifier name, swift::SourceLoc nameLoc,
    BridgedNullableGenericParamList cGenericParams, BridgedParameterList cParams,
    swift::SourceLoc arrowLoc, BridgedNullableTypeRepr cResultType,
    BridgedNullableExpr cDefinition,
    BridgedNullableTrailingWhereClause genericWhereClause);

// NOTE: the declaration this SWIFT_NAME annotates starts on the next line of
// the original file.
SWIFT_NAME("BridgedMacroExpansionDecl.createParsed(_:poundLoc:macroNameRef:"
           "macroNameLoc:leftAngleLoc:genericArgs:rightAngleLoc:args:)")
// Builds a parsed `#macroName(...)` expansion declaration; its SWIFT_NAME is
// on the previous line of the original file.
BridgedMacroExpansionDecl BridgedMacroExpansionDecl_createParsed(
    BridgedDeclContext cDeclContext, swift::SourceLoc poundLoc,
    BridgedDeclNameRef cMacroNameRef, BridgedDeclNameLoc cMacroNameLoc,
    swift::SourceLoc leftAngleLoc, BridgedArrayRef cGenericArgs,
    swift::SourceLoc rightAngleLoc, BridgedNullableArgumentList cArgList);

// Builds a parsed `extension` declaration.
SWIFT_NAME(
    "BridgedExtensionDecl.createParsed(_:declContext:extensionKeywordLoc:"
    "extendedType:inheritedTypes:genericWhereClause:braceRange:)")
BridgedExtensionDecl BridgedExtensionDecl_createParsed(
    BridgedASTContext cContext, BridgedDeclContext cDeclContext,
    swift::SourceLoc extensionKeywordLoc, BridgedTypeRepr opaqueExtendedType,
    BridgedArrayRef cInheritedTypes,
    BridgedNullableTrailingWhereClause genericWhereClause,
    swift::SourceRange braceRange);

// Operator fixity for operator declarations.
enum ENUM_EXTENSIBILITY_ATTR(closed) BridgedOperatorFixity {
  BridgedOperatorFixityInfix,
  BridgedOperatorFixityPrefix,
  BridgedOperatorFixityPostfix,
};

// Placeholder declaration standing in for something that failed to parse.
SWIFT_NAME("BridgedMissingDecl.create(_:declContext:loc:)")
BridgedMissingDecl BridgedMissingDecl_create(BridgedASTContext cContext,
                                             BridgedDeclContext cDeclContext,
                                             swift::SourceLoc loc);

// Builds a parsed operator declaration (infix ones may name a precedence
// group).
SWIFT_NAME("BridgedOperatorDecl.createParsed(_:declContext:fixity:"
           "operatorKeywordLoc:name:nameLoc:colonLoc:precedenceGroupName:"
           "precedenceGroupLoc:)")
BridgedOperatorDecl BridgedOperatorDecl_createParsed(
    BridgedASTContext cContext, BridgedDeclContext cDeclContext,
    BridgedOperatorFixity cFixity, swift::SourceLoc operatorKeywordLoc,
    swift::Identifier name, swift::SourceLoc nameLoc, swift::SourceLoc colonLoc,
    swift::Identifier precedenceGroupName,
    swift::SourceLoc precedenceGroupLoc);

// Builds a parsed `precedencegroup` declaration with all its clauses.
SWIFT_NAME("BridgedPrecedenceGroupDecl.createParsed(declContext:"
           "precedencegroupKeywordLoc:name:nameLoc:leftBraceLoc:"
           "associativityLabelLoc:associativityValueLoc:associativity:"
           "assignmentLabelLoc:assignmentValueLoc:isAssignment:"
           "higherThanKeywordLoc:higherThanNames:lowerThanKeywordLoc:"
           "lowerThanNames:rightBraceLoc:)")
BridgedPrecedenceGroupDecl
BridgedPrecedenceGroupDecl_createParsed(
    BridgedDeclContext cDeclContext,
    swift::SourceLoc precedencegroupKeywordLoc, swift::Identifier name,
    swift::SourceLoc nameLoc, swift::SourceLoc leftBraceLoc,
    swift::SourceLoc associativityKeywordLoc,
    swift::SourceLoc associativityValueLoc, swift::Associativity associativity,
    swift::SourceLoc assignmentKeywordLoc, swift::SourceLoc assignmentValueLoc,
    bool isAssignment, swift::SourceLoc higherThanKeywordLoc,
    BridgedArrayRef cHigherThanNames, swift::SourceLoc lowerThanKeywordLoc,
    BridgedArrayRef cLowerThanNames, swift::SourceLoc rightBraceLoc);

// Kind restriction in `import <kind> Module.Member` (open: future kinds
// possible).
enum ENUM_EXTENSIBILITY_ATTR(open) BridgedImportKind {
  BridgedImportKindModule,
  BridgedImportKindType,
  BridgedImportKindStruct,
  BridgedImportKindClass,
  BridgedImportKindEnum,
  BridgedImportKindProtocol,
  BridgedImportKindVar,
  BridgedImportKindFunc,
};

// Builds a parsed `import` declaration from its path elements.
SWIFT_NAME("BridgedImportDecl.createParsed(_:declContext:importKeywordLoc:"
           "importKind:importKindLoc:path:)")
BridgedImportDecl BridgedImportDecl_createParsed(
    BridgedASTContext cContext, BridgedDeclContext cDeclContext,
    swift::SourceLoc importKeywordLoc, BridgedImportKind cImportKind,
    swift::SourceLoc importKindLoc, BridgedArrayRef cImportPathElements);

// Specifier of a `using` declaration.
enum ENUM_EXTENSIBILITY_ATTR(open) BridgedUsingSpecifier {
  BridgedUsingSpecifierMainActor,
  BridgedUsingSpecifierNonisolated,
};

// Builds a parsed `using` declaration.
SWIFT_NAME("BridgedUsingDecl.createParsed(_:declContext:usingKeywordLoc:"
           "specifierLoc:specifier:)")
BridgedUsingDecl BridgedUsingDecl_createParsed(BridgedASTContext cContext,
                                               BridgedDeclContext cDeclContext,
                                               swift::SourceLoc usingKeywordLoc,
                                               swift::SourceLoc specifierLoc,
                                               BridgedUsingSpecifier specifier);

// Builds a parsed `subscript` declaration.
SWIFT_NAME("BridgedSubscriptDecl.createParsed(_:declContext:staticLoc:"
           "staticSpelling:subscriptKeywordLoc:genericParamList:parameterList:"
           "arrowLoc:returnType:genericWhereClause:)")
BridgedSubscriptDecl BridgedSubscriptDecl_createParsed(
    BridgedASTContext cContext, BridgedDeclContext cDeclContext,
    swift::SourceLoc staticLoc, BridgedStaticSpelling
    cStaticSpelling, swift::SourceLoc subscriptKeywordLoc,
    BridgedNullableGenericParamList cGenericParamList,
    BridgedParameterList cParamList, swift::SourceLoc arrowLoc,
    BridgedTypeRepr returnType,
    BridgedNullableTrailingWhereClause genericWhereClause);

// Creates an empty top-level-code container; body is attached separately via
// setBody below.
SWIFT_NAME("BridgedTopLevelCodeDecl.create(_:declContext:)")
BridgedTopLevelCodeDecl
BridgedTopLevelCodeDecl_create(BridgedASTContext cContext,
                               BridgedDeclContext cDeclContext);

SWIFT_NAME("BridgedTopLevelCodeDecl.setBody(self:body:)")
void BridgedTopLevelCodeDecl_setBody(BridgedTopLevelCodeDecl cDecl,
                                     BridgedBraceStmt cBody);

// Debug dumps.
SWIFT_NAME("BridgedTopLevelCodeDecl.dump(self:)")
void BridgedTopLevelCodeDecl_dump(BridgedTopLevelCodeDecl decl);

SWIFT_NAME("BridgedDecl.dump(self:)")
void BridgedDecl_dump(BridgedDecl decl);

//===----------------------------------------------------------------------===//
// MARK: AbstractStorageDecl
//===----------------------------------------------------------------------===//

SWIFT_NAME("BridgedAbstractStorageDecl.setAccessors(self:_:)")
void BridgedAbstractStorageDecl_setAccessors(
    BridgedAbstractStorageDecl cStorage, BridgedAccessorRecord accessors);

//===----------------------------------------------------------------------===//
// MARK: AccessorDecl
//===----------------------------------------------------------------------===//

SWIFT_NAME("BridgedAccessorDecl.setParsedBody(self:_:)")
void BridgedAccessorDecl_setParsedBody(BridgedAccessorDecl decl,
                                       BridgedBraceStmt body);

//===----------------------------------------------------------------------===//
// MARK: NominalTypeDecl
//===----------------------------------------------------------------------===//

SWIFT_NAME("BridgedNominalTypeDecl.getName(self:)")
BRIDGED_INLINE
BridgedStringRef BridgedNominalTypeDecl_getName(BridgedNominalTypeDecl decl);

SWIFT_NAME("BridgedNominalTypeDecl.isStructWithUnreferenceableStorage(self:)")
bool BridgedNominalTypeDecl_isStructWithUnreferenceableStorage(
    BridgedNominalTypeDecl decl);
// Simple predicates/accessors on nominal type declarations.
SWIFT_NAME("BridgedNominalTypeDecl.isGlobalActor(self:)")
BRIDGED_INLINE
bool BridgedNominalTypeDecl_isGlobalActor(BridgedNominalTypeDecl decl);

SWIFT_NAME("BridgedNominalTypeDecl.hasValueDeinit(self:)")
BRIDGED_INLINE
bool BridgedNominalTypeDecl_hasValueDeinit(BridgedNominalTypeDecl decl);

SWIFT_NAME("BridgedNominalTypeDecl.isClass(self:)")
BRIDGED_INLINE
bool BridgedNominalTypeDecl_isClass(BridgedNominalTypeDecl decl);

// Installs parsed members on a nominal type, with their fingerprint.
SWIFT_NAME("BridgedNominalTypeDecl.setParsedMembers(self:_:fingerprint:)")
void BridgedNominalTypeDecl_setParsedMembers(BridgedNominalTypeDecl decl,
                                             BridgedArrayRef members,
                                             BridgedFingerprint fingerprint);

SWIFT_NAME("BridgedNominalTypeDecl.getSourceLocation(self:)")
BRIDGED_INLINE swift::SourceLoc
BridgedNominalTypeDecl_getSourceLocation(BridgedNominalTypeDecl decl);

//===----------------------------------------------------------------------===//
// MARK: SubscriptDecl
//===----------------------------------------------------------------------===//

SWIFT_NAME("getter:BridgedSubscriptDecl.asAbstractStorageDecl(self:)")
BRIDGED_INLINE BridgedAbstractStorageDecl
BridgedSubscriptDecl_asAbstractStorageDecl(BridgedSubscriptDecl decl);

//===----------------------------------------------------------------------===//
// MARK: VarDecl
//===----------------------------------------------------------------------===//

// NOTE(review): the C++ name drops the final "l" of "Decl"
// ("BridgedVarDec_") — looks like a long-standing misspelling; renaming it
// here would break the matching out-of-line definition, so it is left as-is.
SWIFT_NAME("BridgedVarDecl.createImplicitStringInterpolationVar(_:)")
BridgedVarDecl BridgedVarDec_createImplicitStringInterpolationVar(
    BridgedDeclContext cDeclContext);

SWIFT_NAME("getter:BridgedVarDecl.asAbstractStorageDecl(self:)")
BRIDGED_INLINE BridgedAbstractStorageDecl
BridgedVarDecl_asAbstractStorageDecl(BridgedVarDecl decl);

//===----------------------------------------------------------------------===//
// MARK: Exprs
//===----------------------------------------------------------------------===//

// One labeled argument of a call.
struct BridgedCallArgument {
  swift::SourceLoc labelLoc;
  swift::Identifier label;
  BridgedExpr argExpr;

  BRIDGED_INLINE swift::Argument unbridged() const;
};

// Implicit argument list with no labels or parens.
SWIFT_NAME("BridgedArgumentList.createImplicitUnlabeled(_:exprs:)")
BridgedArgumentList
BridgedArgumentList_createImplicitUnlabeled(BridgedASTContext cContext,
                                            BridgedArrayRef cExprs);

// Parsed argument list, optionally with trailing closures starting at
// cFirstTrailingClosureIndex.
SWIFT_NAME("BridgedArgumentList.createParsed(_:lParenLoc:args:rParenLoc:"
           "firstTrailingClosureIndex:)")
BridgedArgumentList BridgedArgumentList_createParsed(
    BridgedASTContext cContext, swift::SourceLoc lParenLoc,
    BridgedArrayRef cArgs, swift::SourceLoc rParenLoc,
    size_t cFirstTrailingClosureIndex);

// [e1, e2, ...] literal.
SWIFT_NAME("BridgedArrayExpr.createParsed(_:lSquareLoc:elements:commaLocs:"
           "rSquareLoc:)")
BridgedArrayExpr BridgedArrayExpr_createParsed(BridgedASTContext cContext,
                                               swift::SourceLoc lLoc,
                                               BridgedArrayRef elements,
                                               BridgedArrayRef commas,
                                               swift::SourceLoc rLoc);

// `async throws(E) ->` fragment of a function type expression.
SWIFT_NAME(
    "BridgedArrowExpr.createParsed(_:asyncLoc:throwsLoc:thrownType:arrowLoc:)")
BridgedArrowExpr BridgedArrowExpr_createParsed(BridgedASTContext cContext,
                                               swift::SourceLoc asyncLoc,
                                               swift::SourceLoc throwsLoc,
                                               BridgedNullableExpr cThrownType,
                                               swift::SourceLoc arrowLoc);

// Bare `=` (operands are applied later during expression folding).
SWIFT_NAME("BridgedAssignExpr.createParsed(_:equalsLoc:)")
BridgedAssignExpr BridgedAssignExpr_createParsed(BridgedASTContext cContext,
                                                 swift::SourceLoc equalsLoc);

// `await subExpr`.
SWIFT_NAME("BridgedAwaitExpr.createParsed(_:awaitLoc:subExpr:)")
BridgedAwaitExpr BridgedAwaitExpr_createParsed(BridgedASTContext cContext,
                                               swift::SourceLoc awaitLoc,
                                               BridgedExpr cSubExpr);

// `subExpr?`.
SWIFT_NAME("BridgedBindOptionalExpr.createParsed(_:subExpr:questionLoc:)")
BridgedBindOptionalExpr
BridgedBindOptionalExpr_createParsed(BridgedASTContext cContext,
                                     BridgedExpr cSubExpr,
                                     swift::SourceLoc questionLoc);

// `true` / `false`.
SWIFT_NAME("BridgedBooleanLiteralExpr.createParsed(_:value:loc:)")
BridgedBooleanLiteralExpr
BridgedBooleanLiteralExpr_createParsed(BridgedASTContext cContext, bool value,
                                       swift::SourceLoc tokenLoc);

// `borrow subExpr`.
SWIFT_NAME("BridgedBorrowExpr.createParsed(_:borrowLoc:subExpr:)")
BridgedBorrowExpr BridgedBorrowExpr_createParsed(BridgedASTContext cContext,
                                                 swift::SourceLoc borrowLoc,
                                                 BridgedExpr cSubExpr);

// `fn(args)`.
SWIFT_NAME("BridgedCallExpr.createParsed(_:fn:args:)")
BridgedCallExpr BridgedCallExpr_createParsed(BridgedASTContext cContext,
                                             BridgedExpr fn,
                                             BridgedArgumentList args);

// One entry of a closure capture list, wrapping its pattern binding.
class BridgedCaptureListEntry {
  swift::PatternBindingDecl *_Nonnull PBD;

public:
  BRIDGED_INLINE BridgedCaptureListEntry(swift::CaptureListEntry CLE);
  BRIDGED_INLINE swift::CaptureListEntry unbridged() const;
  BRIDGED_INLINE SWIFT_COMPUTED_PROPERTY BridgedVarDecl getVarDecl() const;
};

// NOTE(review): "Brideged" (sic) — misspelled C++ name; renaming it here
// would break the matching out-of-line definition, so it is left as-is.
SWIFT_NAME("BridgedCaptureListEntry.createParsed(_:declContext:ownership:"
           "ownershipRange:name:nameLoc:equalLoc:initializer:)")
BridgedCaptureListEntry BridegedCaptureListEntry_createParsed(
    BridgedASTContext cContext, BridgedDeclContext cDeclContext,
    BridgedReferenceOwnership cOwnershipKind, swift::SourceRange ownershipRange,
    swift::Identifier name, swift::SourceLoc nameLoc, swift::SourceLoc equalLoc,
    BridgedExpr cInitializer);

// `{ [captures] in ... }` wrapper pairing a capture list with its closure.
SWIFT_NAME("BridgedCaptureListExpr.createParsed(_:captureList:closure:)")
BridgedCaptureListExpr
BridgedCaptureListExpr_createParsed(BridgedASTContext cContext,
                                    BridgedArrayRef cCaptureList,
                                    BridgedClosureExpr cClosure);

// Builds a parsed closure expression.
SWIFT_NAME("BridgedClosureExpr.createParsed(_:declContext:attributes:"
           "bracketRange:capturedSelfDecl:parameterList:asyncLoc:throwsLoc:"
           "thrownType:arrowLoc:explicitResultType:inLoc:)")
BridgedClosureExpr BridgedClosureExpr_createParsed(
    BridgedASTContext cContext, BridgedDeclContext cDeclContext,
    BridgedDeclAttributes cAttributes, swift::SourceRange bracketRange,
    BridgedNullableVarDecl cCapturedSelfDecl,
    BridgedNullableParameterList cParameterList, swift::SourceLoc asyncLoc,
    swift::SourceLoc throwsLoc, BridgedNullableTypeRepr cThrownType,
    swift::SourceLoc arrowLoc, BridgedNullableTypeRepr cExplicitResultType,
    swift::SourceLoc inLoc);

SWIFT_NAME("BridgedClosureExpr.getParameterList(self:)")
BridgedParameterList
BridgedClosureExpr_getParameterList(BridgedClosureExpr cClosure);

// NOTE: this declaration continues past the end of this chunk of the
// original file.
SWIFT_NAME("BridgedClosureExpr.setParameterList(self:_:)")
void
BridgedClosureExpr_setParameterList(BridgedClosureExpr cClosure, BridgedParameterList cParams); SWIFT_NAME("getter:BridgedClosureExpr.hasAnonymousClosureVars(self:)") bool BridgedClosureExpr_hasAnonymousClosureVars(BridgedClosureExpr cClosure); SWIFT_NAME("BridgedClosureExpr.setHasAnonymousClosureVars(self:)") void BridgedClosureExpr_setHasAnonymousClosureVars(BridgedClosureExpr cClosure); SWIFT_NAME("BridgedClosureExpr.setBody(self:_:)") void BridgedClosureExpr_setBody(BridgedClosureExpr cClosure, BridgedBraceStmt cBody); SWIFT_NAME("BridgedCoerceExpr.createParsed(_:asLoc:type:)") BridgedCoerceExpr BridgedCoerceExpr_createParsed(BridgedASTContext cContext, swift::SourceLoc asLoc, BridgedTypeRepr cType); SWIFT_NAME( "BridgedConditionalCheckedCastExpr.createParsed(_:asLoc:questionLoc:type:)") BridgedConditionalCheckedCastExpr BridgedConditionalCheckedCastExpr_createParsed(BridgedASTContext cContext, swift::SourceLoc asLoc, swift::SourceLoc questionLoc, BridgedTypeRepr cType); SWIFT_NAME("BridgedConsumeExpr.createParsed(_:consumeLoc:subExpr:)") BridgedConsumeExpr BridgedConsumeExpr_createParsed(BridgedASTContext cContext, swift::SourceLoc consumeLoc, BridgedExpr cSubExpr); SWIFT_NAME("BridgedCopyExpr.createParsed(_:copyLoc:subExpr:)") BridgedCopyExpr BridgedCopyExpr_createParsed(BridgedASTContext cContext, swift::SourceLoc copyLoc, BridgedExpr cSubExpr); SWIFT_NAME("BridgedDeclRefExpr.create(_:decl:loc:isImplicit:)") BridgedDeclRefExpr BridgedDeclRefExpr_create(BridgedASTContext cContext, BridgedDecl cDecl, BridgedDeclNameLoc cLoc, bool IsImplicit); SWIFT_NAME("BridgedDictionaryExpr.createParsed(_:lBracketLoc:elements:" "colonLocs:rBracketLoc:)") BridgedDictionaryExpr BridgedDictionaryExpr_createParsed( BridgedASTContext cContext, swift::SourceLoc lBracketLoc, BridgedArrayRef cElements, BridgedArrayRef cCommaLocs, swift::SourceLoc rBracketLoc); SWIFT_NAME("BridgedDiscardAssignmentExpr.createParsed(_:loc:)") BridgedDiscardAssignmentExpr 
BridgedDiscardAssignmentExpr_createParsed(BridgedASTContext cContext, swift::SourceLoc loc); SWIFT_NAME("BridgedDotSelfExpr.createParsed(_:subExpr:dotLoc:selfLoc:)") BridgedDotSelfExpr BridgedDotSelfExpr_createParsed(BridgedASTContext cContext, BridgedExpr cSubExpr, swift::SourceLoc dotLoc, swift::SourceLoc selfLoc); SWIFT_NAME("BridgedEditorPlaceholderExpr.createParsed(_:placeholder:loc:" "placeholderType:expansionType:)") BridgedEditorPlaceholderExpr BridgedEditorPlaceholderExpr_createParsed( BridgedASTContext cContext, swift::Identifier placeholderId, swift::SourceLoc loc, BridgedNullableTypeRepr cPlaceholderTyR, BridgedNullableTypeRepr cExpansionTyR); SWIFT_NAME("BridgedErrorExpr.create(_:loc:)") BridgedErrorExpr BridgedErrorExpr_create(BridgedASTContext cContext, swift::SourceRange range); SWIFT_NAME("BridgedFloatLiteralExpr.createParsed(_:value:loc:)") BridgedFloatLiteralExpr BridgedFloatLiteralExpr_createParsed(BridgedASTContext cContext, BridgedStringRef cStr, swift::SourceLoc tokenLoc); SWIFT_NAME("BridgedFloatLiteralExpr.setNegative(self:loc:)") BRIDGED_INLINE void BridgedFloatLiteralExpr_setNegative(BridgedFloatLiteralExpr cExpr, swift::SourceLoc loc); SWIFT_NAME("BridgedForceTryExpr.createParsed(_:tryLoc:subExpr:exclaimLoc:)") BridgedForceTryExpr BridgedForceTryExpr_createParsed(BridgedASTContext cContext, swift::SourceLoc tryLoc, BridgedExpr cSubExpr, swift::SourceLoc exclaimLoc); SWIFT_NAME("BridgedForceValueExpr.createParsed(_:subExpr:exclaimLoc:)") BridgedForceValueExpr BridgedForceValueExpr_createParsed(BridgedASTContext cContext, BridgedExpr cSubExpr, swift::SourceLoc exclaimLoc); SWIFT_NAME( "BridgedForcedCheckedCastExpr.createParsed(_:asLoc:exclaimLoc:type:)") BridgedForcedCheckedCastExpr BridgedForcedCheckedCastExpr_createParsed( BridgedASTContext cContext, swift::SourceLoc asLoc, swift::SourceLoc exclaimLoc, BridgedTypeRepr cType); SWIFT_NAME("BridgedUnresolvedSpecializeExpr.createParsed(_:subExpr:lAngleLoc:" "arguments:rAngleLoc:)") 
BridgedUnresolvedSpecializeExpr BridgedUnresolvedSpecializeExpr_createParsed( BridgedASTContext cContext, BridgedExpr cSubExpr, swift::SourceLoc lAngleLoc, BridgedArrayRef cArguments, swift::SourceLoc rAngleLoc); SWIFT_NAME("BridgedUnsafeExpr.createParsed(_:unsafeLoc:subExpr:)") BridgedUnsafeExpr BridgedUnsafeExpr_createParsed(BridgedASTContext cContext, swift::SourceLoc unsafeLoc, BridgedExpr cSubExpr); SWIFT_NAME("BridgedInOutExpr.createParsed(_:loc:subExpr:)") BridgedInOutExpr BridgedInOutExpr_createParsed(BridgedASTContext cContext, swift::SourceLoc loc, BridgedExpr cSubExpr); SWIFT_NAME("BridgedIntegerLiteralExpr.createParsed(_:value:loc:)") BridgedIntegerLiteralExpr BridgedIntegerLiteralExpr_createParsed(BridgedASTContext cContext, BridgedStringRef cStr, swift::SourceLoc tokenLoc); SWIFT_NAME("BridgedIntegerLiteralExpr.setNegative(self:loc:)") BRIDGED_INLINE void BridgedIntegerLiteralExpr_setNegative(BridgedIntegerLiteralExpr cExpr, swift::SourceLoc loc); SWIFT_NAME("BridgedInterpolatedStringLiteralExpr.createParsed(_:loc:" "literalCapacity:interpolationCount:appendingExpr:)") BridgedInterpolatedStringLiteralExpr BridgedInterpolatedStringLiteralExpr_createParsed( BridgedASTContext cContext, swift::SourceLoc loc, size_t literalCapacity, size_t interpolationCount, BridgedTapExpr cAppendingExpr); SWIFT_NAME("BridgedIsExpr.createParsed(_:isLoc:type:)") BridgedIsExpr BridgedIsExpr_createParsed(BridgedASTContext cContext, swift::SourceLoc isLoc, BridgedTypeRepr cType); SWIFT_NAME("BridgedKeyPathDotExpr.createParsed(_:loc:)") BridgedKeyPathDotExpr BridgedKeyPathDotExpr_createParsed(BridgedASTContext cContext, swift::SourceLoc loc); SWIFT_NAME("BridgedKeyPathExpr.createParsed(_:backslashLoc:parsedRoot:" "parsedPath:hasLeadingDot:)") BridgedKeyPathExpr BridgedKeyPathExpr_createParsed( BridgedASTContext cContext, swift::SourceLoc backslashLoc, BridgedNullableExpr cParsedRoot, BridgedNullableExpr cParsedPath, bool hasLeadingDot); 
SWIFT_NAME("BridgedKeyPathExpr.createParsedPoundKeyPath(_:poundLoc:lParenLoc:" "names:nameLocs:rParenLoc:)") BridgedKeyPathExpr BridgedKeyPathExpr_createParsedPoundKeyPath( BridgedASTContext cContext, swift::SourceLoc poundLoc, swift::SourceLoc lParenLoc, BridgedArrayRef cNames, BridgedArrayRef cNameLocs, swift::SourceLoc rParenLoc); SWIFT_NAME("BridgedMacroExpansionExpr.createParsed(_:poundLoc:macroNameRef:" "macroNameLoc:leftAngleLoc:genericArgs:rightAngleLoc:args:)") BridgedMacroExpansionExpr BridgedMacroExpansionExpr_createParsed( BridgedDeclContext cDeclContext, swift::SourceLoc poundLoc, BridgedDeclNameRef cMacroNameRef, BridgedDeclNameLoc cMacroNameLoc, swift::SourceLoc leftAngleLoc, BridgedArrayRef cGenericArgs, swift::SourceLoc rightAngleLoc, BridgedNullableArgumentList cArgList); enum ENUM_EXTENSIBILITY_ATTR(open) BridgedMagicIdentifierLiteralKind : uint8_t { #define MAGIC_IDENTIFIER(NAME, STRING) \ BridgedMagicIdentifierLiteralKind##NAME, #include "swift/AST/MagicIdentifierKinds.def" BridgedMagicIdentifierLiteralKindNone, }; SWIFT_NAME("BridgedMagicIdentifierLiteralKind.init(from:)") BridgedMagicIdentifierLiteralKind BridgedMagicIdentifierLiteralKind_fromString(BridgedStringRef cStr); SWIFT_NAME("BridgedMagicIdentifierLiteralExpr.createParsed(_:kind:loc:)") BridgedMagicIdentifierLiteralExpr BridgedMagicIdentifierLiteralExpr_createParsed( BridgedASTContext cContext, BridgedMagicIdentifierLiteralKind cKind, swift::SourceLoc loc); SWIFT_NAME("BridgedNilLiteralExpr.createParsed(_:nilKeywordLoc:)") BridgedNilLiteralExpr BridgedNilLiteralExpr_createParsed(BridgedASTContext cContext, swift::SourceLoc nilKeywordLoc); enum ENUM_EXTENSIBILITY_ATTR(open) BridgedObjCSelectorKind { BridgedObjCSelectorKindMethod, BridgedObjCSelectorKindGetter, BridgedObjCSelectorKindSetter, }; SWIFT_NAME("BridgedObjCSelectorExpr.createParsed(_:kind:keywordLoc:lParenLoc:" "modifierLoc:subExpr:rParenLoc:)") BridgedObjCSelectorExpr BridgedObjCSelectorExpr_createParsed( BridgedASTContext 
cContext, BridgedObjCSelectorKind cKind, swift::SourceLoc keywordLoc, swift::SourceLoc lParenLoc, swift::SourceLoc modifierLoc, BridgedExpr cSubExpr, swift::SourceLoc rParenLoc); enum ENUM_EXTENSIBILITY_ATTR(open) BridgedObjectLiteralKind : size_t { #define POUND_OBJECT_LITERAL(Name, Desc, Proto) BridgedObjectLiteralKind_##Name, #include "swift/AST/TokenKinds.def" BridgedObjectLiteralKind_none, }; SWIFT_NAME("BridgedObjectLiteralKind.init(from:)") BridgedObjectLiteralKind BridgedObjectLiteralKind_fromString(BridgedStringRef cStr); SWIFT_NAME("BridgedObjectLiteralExpr.createParsed(_:poundLoc:kind:args:)") BridgedObjectLiteralExpr BridgedObjectLiteralExpr_createParsed( BridgedASTContext cContext, swift::SourceLoc poundLoc, BridgedObjectLiteralKind cKind, BridgedArgumentList args); SWIFT_NAME("BridgedOptionalTryExpr.createParsed(_:tryLoc:subExpr:questionLoc:)") BridgedOptionalTryExpr BridgedOptionalTryExpr_createParsed( BridgedASTContext cContext, swift::SourceLoc tryLoc, BridgedExpr cSubExpr, swift::SourceLoc questionLoc); SWIFT_NAME("BridgedPackElementExpr.createParsed(_:eachLoc:packRefExpr:)") BridgedPackElementExpr BridgedPackElementExpr_createParsed(BridgedASTContext cContext, swift::SourceLoc eachLoc, BridgedExpr cPackRefExpr); SWIFT_NAME("BridgedPackExpansionExpr.createParsed(_:repeatLoc:patternExpr:)") BridgedPackExpansionExpr BridgedPackExpansionExpr_createParsed(BridgedASTContext cContext, swift::SourceLoc repeatLoc, BridgedExpr cPatternExpr); SWIFT_NAME("BridgedParenExpr.createParsed(_:leftParenLoc:expr:rightParenLoc:)") BridgedParenExpr BridgedParenExpr_createParsed(BridgedASTContext cContext, swift::SourceLoc lParen, BridgedExpr cExpr, swift::SourceLoc rParen); SWIFT_NAME("BridgedPostfixUnaryExpr.createParsed(_:operator:operand:)") BridgedPostfixUnaryExpr BridgedPostfixUnaryExpr_createParsed(BridgedASTContext cContext, BridgedExpr oper, BridgedExpr operand); SWIFT_NAME("BridgedPrefixUnaryExpr.createParsed(_:operator:operand:)") BridgedPrefixUnaryExpr 
BridgedPrefixUnaryExpr_createParsed(BridgedASTContext cContext,
                                    BridgedExpr oper, BridgedExpr operand);

// Bridged wrapper for a regex-literal pattern-feature kind. Stores the raw
// value and round-trips to swift::RegexLiteralPatternFeatureKind via the
// inline constructors / unbridged().
class BridgedRegexLiteralPatternFeatureKind final {
  unsigned RawValue;

public:
  BRIDGED_INLINE
  SWIFT_NAME("init(rawValue:)")
  BridgedRegexLiteralPatternFeatureKind(SwiftInt rawValue);

  using UnbridgedTy = swift::RegexLiteralPatternFeatureKind;

  BRIDGED_INLINE
  BridgedRegexLiteralPatternFeatureKind(UnbridgedTy kind);

  BRIDGED_INLINE
  UnbridgedTy unbridged() const;
};

// Bridged (kind, source range) pair describing one regex-literal pattern
// feature; converts to/from swift::RegexLiteralPatternFeature.
class BridgedRegexLiteralPatternFeature final {
  swift::CharSourceRange Range;
  BridgedRegexLiteralPatternFeatureKind Kind;

public:
  SWIFT_NAME("init(kind:at:)")
  BridgedRegexLiteralPatternFeature(BridgedRegexLiteralPatternFeatureKind kind,
                                    swift::CharSourceRange range)
      : Range(range), Kind(kind) {}

  using UnbridgedTy = swift::RegexLiteralPatternFeature;

  BRIDGED_INLINE
  BridgedRegexLiteralPatternFeature(UnbridgedTy feature);

  BRIDGED_INLINE
  UnbridgedTy unbridged() const;
};

// Non-owning (pointer, count) view over a buffer of pattern features;
// bridges to llvm::ArrayRef. The caller keeps the buffer alive.
class BridgedRegexLiteralPatternFeatures final {
  BridgedRegexLiteralPatternFeature *_Nullable Data;
  SwiftInt Count;

public:
  // Empty view.
  BridgedRegexLiteralPatternFeatures() : Data(nullptr), Count(0) {}

  SWIFT_NAME("init(baseAddress:count:)")
  BridgedRegexLiteralPatternFeatures(
      BridgedRegexLiteralPatternFeature *_Nullable data, SwiftInt count)
      : Data(data), Count(count) {}

  using UnbridgedTy = llvm::ArrayRef<BridgedRegexLiteralPatternFeature>;

  BRIDGED_INLINE
  UnbridgedTy unbridged() const;

  SWIFT_IMPORT_UNSAFE
  BridgedRegexLiteralPatternFeature *_Nullable getData() const {
    return Data;
  }
  SwiftInt getCount() const {
    return Count;
  }
};

SWIFT_NAME("BridgedRegexLiteralExpr.createParsed(_:loc:regexText:)")
BridgedRegexLiteralExpr
BridgedRegexLiteralExpr_createParsed(BridgedASTContext cContext,
                                     swift::SourceLoc loc,
                                     BridgedStringRef cRegexText);

SWIFT_NAME("BridgedSequenceExpr.createParsed(_:exprs:)")
BridgedSequenceExpr BridgedSequenceExpr_createParsed(BridgedASTContext cContext,
                                                     BridgedArrayRef exprs);
SWIFT_NAME("BridgedSingleValueStmtExpr.createWithWrappedBranches(_:stmt:" "declContext:mustBeExpr:)") BridgedSingleValueStmtExpr BridgedSingleValueStmtExpr_createWithWrappedBranches( BridgedASTContext cContext, BridgedStmt S, BridgedDeclContext cDeclContext, bool mustBeExpr); SWIFT_NAME("BridgedStringLiteralExpr.createParsed(_:value:loc:)") BridgedStringLiteralExpr BridgedStringLiteralExpr_createParsed(BridgedASTContext cContext, BridgedStringRef cStr, swift::SourceLoc tokenLoc); SWIFT_NAME("BridgedSuperRefExpr.createParsed(_:superLoc:)") BridgedSuperRefExpr BridgedSuperRefExpr_createParsed(BridgedASTContext cContext, swift::SourceLoc superLoc); SWIFT_NAME("BridgedSubscriptExpr.createParsed(_:baseExpr:args:)") BridgedSubscriptExpr BridgedSubscriptExpr_createParsed(BridgedASTContext cContext, BridgedExpr cBaseExpr, BridgedArgumentList cArgs); SWIFT_NAME("BridgedTapExpr.create(_:body:)") BridgedTapExpr BridgedTapExpr_create(BridgedASTContext cContext, BridgedBraceStmt cBody); SWIFT_NAME("BridgedTernaryExpr.createParsed(_:questionLoc:thenExpr:colonLoc:)") BridgedTernaryExpr BridgedTernaryExpr_createParsed(BridgedASTContext cContext, swift::SourceLoc questionLoc, BridgedExpr cThenExpr, swift::SourceLoc colonLoc); SWIFT_NAME("BridgedTryExpr.createParsed(_:tryLoc:subExpr:)") BridgedTryExpr BridgedTryExpr_createParsed(BridgedASTContext cContext, swift::SourceLoc tryLoc, BridgedExpr cSubExpr); SWIFT_NAME("BridgedTupleExpr.createParsed(_:leftParenLoc:exprs:labels:" "labelLocs:rightParenLoc:)") BridgedTupleExpr BridgedTupleExpr_createParsed( BridgedASTContext cContext, swift::SourceLoc lParen, BridgedArrayRef subs, BridgedArrayRef names, BridgedArrayRef cNameLocs, swift::SourceLoc rParen); SWIFT_NAME("BridgedTupleExpr.createParsedDictionaryElement(_:key:value:)") BridgedTupleExpr BridgedTupleExpr_createParsedDictionaryElement( BridgedASTContext cContext, BridgedExpr cKeyExpr, BridgedExpr cValueExpr); SWIFT_NAME("BridgedTypeExpr.createParsed(_:type:)") BridgedTypeExpr 
BridgedTypeExpr_createParsed(BridgedASTContext cContext, BridgedTypeRepr cType); enum ENUM_EXTENSIBILITY_ATTR(open) BridgedDeclRefKind : size_t { BridgedDeclRefKindOrdinary, BridgedDeclRefKindBinaryOperator, BridgedDeclRefKindPostfixOperator, BridgedDeclRefKindPrefixOperator, }; SWIFT_NAME("BridgedUnresolvedDeclRefExpr.createParsed(_:name:kind:loc:)") BridgedUnresolvedDeclRefExpr BridgedUnresolvedDeclRefExpr_createParsed( BridgedASTContext cContext, BridgedDeclNameRef cName, BridgedDeclRefKind cKind, BridgedDeclNameLoc cLoc); SWIFT_NAME("BridgedUnresolvedDotExpr.createParsed(_:base:dotLoc:name:nameLoc:)") BridgedUnresolvedDotExpr BridgedUnresolvedDotExpr_createParsed( BridgedASTContext cContext, BridgedExpr base, swift::SourceLoc dotLoc, BridgedDeclNameRef cName, BridgedDeclNameLoc cNameLoc); SWIFT_NAME("BridgedUnresolvedMemberExpr.createParsed(_:dotLoc:name:nameLoc:)") BridgedUnresolvedMemberExpr BridgedUnresolvedMemberExpr_createParsed( BridgedASTContext cContext, swift::SourceLoc dotLoc, BridgedDeclNameRef cName, BridgedDeclNameLoc cNameLoc); SWIFT_NAME("BridgedUnresolvedPatternExpr.createParsed(_:pattern:)") BridgedUnresolvedPatternExpr BridgedUnresolvedPatternExpr_createParsed(BridgedASTContext cContext, BridgedPattern cPattern); SWIFT_NAME("BridgedExpr.setImplicit(self:)") void BridgedExpr_setImplicit(BridgedExpr cExpr); SWIFT_NAME("BridgedExpr.dump(self:)") void BridgedExpr_dump(BridgedExpr expr); //===----------------------------------------------------------------------===// // MARK: Stmts //===----------------------------------------------------------------------===// struct BridgedLabeledStmtInfo { SWIFT_NAME("name") swift::Identifier Name; SWIFT_NAME("loc") swift::SourceLoc Loc; BRIDGED_INLINE swift::LabeledStmtInfo unbridged() const; }; class BridgedStmtConditionElement { void *_Nonnull Raw; public: BRIDGED_INLINE BridgedStmtConditionElement(swift::StmtConditionElement elem); BRIDGED_INLINE swift::StmtConditionElement unbridged() const; }; 
SWIFT_NAME("BridgedStmtConditionElement.createBoolean(expr:)") BridgedStmtConditionElement BridgedStmtConditionElement_createBoolean(BridgedExpr expr); SWIFT_NAME("BridgedStmtConditionElement.createPatternBinding(_:introducerLoc:" "pattern:initializer:)") BridgedStmtConditionElement BridgedStmtConditionElement_createPatternBinding( BridgedASTContext cContext, swift::SourceLoc introducerLoc, BridgedPattern cPattern, BridgedExpr cInitializer); SWIFT_NAME("BridgedStmtConditionElement.createPoundAvailable(info:)") BridgedStmtConditionElement BridgedStmtConditionElement_createPoundAvailable( BridgedPoundAvailableInfo info); SWIFT_NAME("BridgedPoundAvailableInfo.createParsed(_:poundLoc:lParenLoc:specs:" "rParenLoc:isUnavailable:)") BridgedPoundAvailableInfo BridgedPoundAvailableInfo_createParsed( BridgedASTContext cContext, swift::SourceLoc poundLoc, swift::SourceLoc lParenLoc, BridgedArrayRef cSpecs, swift::SourceLoc rParenLoc, bool isUnavailability); SWIFT_NAME("BridgedStmtConditionElement.createHasSymbol(_:poundLoc:lParenLoc:" "symbol:rParenLoc:)") BridgedStmtConditionElement BridgedStmtConditionElement_createHasSymbol( BridgedASTContext cContext, swift::SourceLoc poundLoc, swift::SourceLoc lParenLoc, BridgedNullableExpr cSymbolExpr, swift::SourceLoc rParenLoc); struct BridgedCaseLabelItemInfo { SWIFT_NAME("isDefault") bool IsDefault; SWIFT_NAME("pattern") BridgedPattern ThePattern; SWIFT_NAME("whereLoc") swift::SourceLoc WhereLoc; SWIFT_NAME("guardExpr") BridgedNullableExpr GuardExpr; }; SWIFT_NAME("BridgedBraceStmt.createParsed(_:lBraceLoc:elements:rBraceLoc:)") BridgedBraceStmt BridgedBraceStmt_createParsed(BridgedASTContext cContext, swift::SourceLoc lBLoc, BridgedArrayRef elements, swift::SourceLoc rBLoc); SWIFT_NAME("BridgedBraceStmt.createImplicit(_:lBraceLoc:element:rBraceLoc:)") BridgedBraceStmt BridgedBraceStmt_createImplicit(BridgedASTContext cContext, swift::SourceLoc lBLoc, BridgedASTNode element, swift::SourceLoc rBLoc); 
SWIFT_NAME("BridgedBreakStmt.createParsed(_:loc:targetName:targetLoc:)") BridgedBreakStmt BridgedBreakStmt_createParsed(BridgedDeclContext cDeclContext, swift::SourceLoc loc, swift::Identifier targetName, swift::SourceLoc targetLoc); SWIFT_NAME("BridgedCaseStmt.createParsedSwitchCase(_:introducerLoc:" "caseLabelItems:unknownAttrLoc:terminatorLoc:body:)") BridgedCaseStmt BridgedCaseStmt_createParsedSwitchCase( BridgedASTContext cContext, swift::SourceLoc introducerLoc, BridgedArrayRef cCaseLabelItems, swift::SourceLoc unknownAttrLoc, swift::SourceLoc terminatorLoc, BridgedBraceStmt cBody); SWIFT_NAME( "BridgedCaseStmt.createParsedDoCatch(_:catchLoc:caseLabelItems:body:)") BridgedCaseStmt BridgedCaseStmt_createParsedDoCatch( BridgedASTContext cContext, swift::SourceLoc catchLoc, BridgedArrayRef cCaseLabelItems, BridgedBraceStmt cBody); SWIFT_NAME("BridgedContinueStmt.createParsed(_:loc:targetName:targetLoc:)") BridgedContinueStmt BridgedContinueStmt_createParsed( BridgedDeclContext cDeclContext, swift::SourceLoc loc, swift::Identifier targetName, swift::SourceLoc targetLoc); SWIFT_NAME("BridgedDeferStmt.createParsed(_:deferLoc:)") BridgedDeferStmt BridgedDeferStmt_createParsed(BridgedDeclContext cDeclContext, swift::SourceLoc deferLoc); SWIFT_NAME("getter:BridgedDeferStmt.tempDecl(self:)") BridgedFuncDecl BridgedDeferStmt_getTempDecl(BridgedDeferStmt bridged); SWIFT_NAME("BridgedDiscardStmt.createParsed(_:discardLoc:subExpr:)") BridgedDiscardStmt BridgedDiscardStmt_createParsed(BridgedASTContext cContext, swift::SourceLoc discardLoc, BridgedExpr cSubExpr); SWIFT_NAME("BridgedDoStmt.createParsed(_:labelInfo:doLoc:body:)") BridgedDoStmt BridgedDoStmt_createParsed(BridgedASTContext cContext, BridgedLabeledStmtInfo cLabelInfo, swift::SourceLoc doLoc, BridgedBraceStmt cBody); SWIFT_NAME( "BridgedDoCatchStmt.createParsed(_:labelInfo:doLoc:throwsLoc:thrownType:" "body:catches:)") BridgedDoCatchStmt BridgedDoCatchStmt_createParsed( BridgedDeclContext cDeclContext, 
BridgedLabeledStmtInfo cLabelInfo, swift::SourceLoc doLoc, swift::SourceLoc throwsLoc, BridgedNullableTypeRepr cThrownType, BridgedStmt cBody, BridgedArrayRef cCatches); SWIFT_NAME("BridgedFallthroughStmt.createParsed(loc:declContext:)") BridgedFallthroughStmt BridgedFallthroughStmt_createParsed(swift::SourceLoc loc, BridgedDeclContext cDC); SWIFT_NAME( "BridgedForEachStmt.createParsed(_:labelInfo:forLoc:tryLoc:awaitLoc:" "unsafeLoc:pattern:inLoc:sequence:whereLoc:whereExpr:body:declContext:)") BridgedForEachStmt BridgedForEachStmt_createParsed( BridgedASTContext cContext, BridgedLabeledStmtInfo cLabelInfo, swift::SourceLoc forLoc, swift::SourceLoc tryLoc, swift::SourceLoc awaitLoc, swift::SourceLoc unsafeLoc, BridgedPattern cPat, swift::SourceLoc inLoc, BridgedExpr cSequence, swift::SourceLoc whereLoc, BridgedNullableExpr cWhereExpr, BridgedBraceStmt cBody, BridgedDeclContext cDeclContext); SWIFT_NAME("BridgedGuardStmt.createParsed(_:guardLoc:conds:body:)") BridgedGuardStmt BridgedGuardStmt_createParsed(BridgedASTContext cContext, swift::SourceLoc guardLoc, BridgedArrayRef cConds, BridgedBraceStmt cBody); SWIFT_NAME("BridgedIfStmt.createParsed(_:labelInfo:ifLoc:conditions:then:" "elseLoc:else:)") BridgedIfStmt BridgedIfStmt_createParsed( BridgedASTContext cContext, BridgedLabeledStmtInfo cLabelInfo, swift::SourceLoc ifLoc, BridgedArrayRef cConds, BridgedBraceStmt cThen, swift::SourceLoc elseLoc, BridgedNullableStmt cElse); SWIFT_NAME("BridgedPoundAssertStmt.createParsed(_:range:condition:message:)") BridgedPoundAssertStmt BridgedPoundAssertStmt_createParsed( BridgedASTContext cContext, swift::SourceRange range, BridgedExpr cConditionExpr, BridgedStringRef cMessage); SWIFT_NAME("BridgedRepeatWhileStmt.createParsed(_:labelInfo:repeatLoc:cond:" "whileLoc:body:)") BridgedRepeatWhileStmt BridgedRepeatWhileStmt_createParsed( BridgedASTContext cContext, BridgedLabeledStmtInfo cLabelInfo, swift::SourceLoc repeatLoc, BridgedExpr cCond, swift::SourceLoc whileLoc, 
BridgedStmt cBody); SWIFT_NAME("BridgedReturnStmt.createParsed(_:loc:expr:)") BridgedReturnStmt BridgedReturnStmt_createParsed(BridgedASTContext cContext, swift::SourceLoc loc, BridgedNullableExpr expr); SWIFT_NAME("BridgedSwitchStmt.createParsed(_:labelInfo:switchLoc:subjectExpr:" "lBraceLoc:cases:rBraceLoc:)") BridgedSwitchStmt BridgedSwitchStmt_createParsed( BridgedASTContext cContext, BridgedLabeledStmtInfo cLabelInfo, swift::SourceLoc switchLoc, BridgedExpr cSubjectExpr, swift::SourceLoc lBraceLoc, BridgedArrayRef cCases, swift::SourceLoc rBraceLoc); SWIFT_NAME("BridgedThenStmt.createParsed(_:thenLoc:result:)") BridgedThenStmt BridgedThenStmt_createParsed(BridgedASTContext cContext, swift::SourceLoc thenLoc, BridgedExpr cResult); SWIFT_NAME("BridgedThrowStmt.createParsed(_:throwLoc:subExpr:)") BridgedThrowStmt BridgedThrowStmt_createParsed(BridgedASTContext cContext, swift::SourceLoc throwLoc, BridgedExpr cSubExpr); SWIFT_NAME("BridgedWhileStmt.createParsed(_:labelInfo:whileLoc:cond:body:)") BridgedWhileStmt BridgedWhileStmt_createParsed( BridgedASTContext cContext, BridgedLabeledStmtInfo cLabelInfo, swift::SourceLoc whileLoc, BridgedArrayRef cCond, BridgedStmt cBody); SWIFT_NAME( "BridgedYieldStmt.createParsed(_:yieldLoc:lParenLoc:yields:rParenLoc:)") BridgedYieldStmt BridgedYieldStmt_createParsed(BridgedASTContext cContext, swift::SourceLoc yieldLoc, swift::SourceLoc lParenLoc, BridgedArrayRef cYields, swift::SourceLoc rParenLoc); SWIFT_NAME("BridgedStmt.dump(self:)") void BridgedStmt_dump(BridgedStmt statement); //===----------------------------------------------------------------------===// // MARK: TypeAttributes //===----------------------------------------------------------------------===// class BridgedTypeOrCustomAttr { public: enum Kind : uint8_t { TypeAttr, CustomAttr, } kind; private: intptr_t opaque; void *_Nonnull getPointer() const { return reinterpret_cast<void *>(opaque & ~0x7); } BRIDGED_INLINE BridgedTypeOrCustomAttr(void *_Nonnull pointer, 
Kind kind); public: SWIFT_NAME("typeAttr(_:)") static BridgedTypeOrCustomAttr createTypeAttr(BridgedTypeAttribute typeAttr) { return BridgedTypeOrCustomAttr(typeAttr.unbridged(), Kind::TypeAttr); } SWIFT_NAME("customAttr(_:)") static BridgedTypeOrCustomAttr createCust0kAttr(BridgedCustomAttr customAttr) { return BridgedTypeOrCustomAttr(customAttr.unbridged(), Kind::CustomAttr); } Kind getKind() const { return static_cast<Kind>(opaque & 0x7); } SWIFT_IMPORT_UNSAFE BRIDGED_INLINE BridgedTypeAttribute castToTypeAttr() const; SWIFT_IMPORT_UNSAFE BRIDGED_INLINE BridgedCustomAttr castToCustomAttr() const; }; BRIDGED_OPTIONAL(swift::TypeAttrKind, TypeAttrKind) SWIFT_NAME("BridgedOptionalTypeAttrKind.init(from:)") BridgedOptionalTypeAttrKind BridgedOptionalTypeAttrKind_fromString(BridgedStringRef cStr); SWIFT_NAME("BridgedTypeAttribute.createSimple(_:kind:atLoc:nameLoc:)") BridgedTypeAttribute BridgedTypeAttribute_createSimple( BridgedASTContext cContext, swift::TypeAttrKind kind, swift::SourceLoc atLoc, swift::SourceLoc nameLoc); enum ENUM_EXTENSIBILITY_ATTR(closed) BridgedIsolatedTypeAttrIsolationKind { BridgedIsolatedTypeAttrIsolationKind_DynamicIsolation, }; SWIFT_NAME("BridgedConventionTypeAttr.createParsed(_:atLoc:nameLoc:parensRange:" "name:nameLoc:witnessMethodProtocol:clangType:clangTypeLoc:)") BridgedConventionTypeAttr BridgedConventionTypeAttr_createParsed( BridgedASTContext cContext, swift::SourceLoc atLoc, swift::SourceLoc kwLoc, swift::SourceRange parens, BridgedStringRef cName, swift::SourceLoc nameLoc, BridgedDeclNameRef cWitnessMethodProtocol, BridgedStringRef cClangType, swift::SourceLoc clangTypeLoc); SWIFT_NAME("BridgedDifferentiableTypeAttr.createParsed(_:atLoc:nameLoc:" "parensRange:kind:kindLoc:)") BridgedDifferentiableTypeAttr BridgedDifferentiableTypeAttr_createParsed( BridgedASTContext cContext, swift::SourceLoc atLoc, swift::SourceLoc nameLoc, swift::SourceRange parensRange, BridgedDifferentiabilityKind cKind, swift::SourceLoc kindLoc); 
SWIFT_NAME("BridgedLifetimeTypeAttr.createParsed(_:atLoc:nameLoc:"
           "parensRange:entry:)")
BridgedLifetimeTypeAttr BridgedLifetimeTypeAttr_createParsed(
    BridgedASTContext cContext, swift::SourceLoc atLoc,
    swift::SourceLoc nameLoc, swift::SourceRange parensRange,
    BridgedLifetimeEntry entry);

SWIFT_NAME("BridgedIsolatedTypeAttr.createParsed(_:atLoc:nameLoc:parensRange:"
           "isolationKind:isolationKindLoc:)")
BridgedIsolatedTypeAttr BridgedIsolatedTypeAttr_createParsed(
    BridgedASTContext cContext, swift::SourceLoc atLoc,
    swift::SourceLoc nameLoc, swift::SourceRange parensRange,
    BridgedIsolatedTypeAttrIsolationKind cIsolation,
    swift::SourceLoc isolationLoc);

// Parameter renamed from the garbled `mangledDoc` to `mangledLoc`: the
// SWIFT_NAME selector piece `mangledLoc:` and the sibling `indexLoc`
// parameter establish the intended name. Declaration-only parameter name;
// no ABI or caller impact.
SWIFT_NAME("BridgedOpaqueReturnTypeOfTypeAttr.createParsed(_:atLoc:nameLoc:"
           "parensRange:"
           "mangled:mangledLoc:index:indexLoc:)")
BridgedOpaqueReturnTypeOfTypeAttr
BridgedOpaqueReturnTypeOfTypeAttr_createParsed(
    BridgedASTContext cContext, swift::SourceLoc atLoc, swift::SourceLoc kwLoc,
    swift::SourceRange parens, BridgedStringRef cMangled,
    swift::SourceLoc mangledLoc, size_t index, swift::SourceLoc indexLoc);

//===----------------------------------------------------------------------===//
// MARK: TypeReprs
//===----------------------------------------------------------------------===//

SWIFT_NAME("BridgedUnqualifiedIdentTypeRepr.createParsed(_:name:loc:)")
BridgedUnqualifiedIdentTypeRepr BridgedUnqualifiedIdentTypeRepr_createParsed(
    BridgedASTContext cContext, BridgedDeclNameRef cName,
    BridgedDeclNameLoc cLoc);

SWIFT_NAME(
    "BridgedArrayTypeRepr.createParsed(_:base:leftSquareLoc:rightSquareLoc:)")
BridgedArrayTypeRepr BridgedArrayTypeRepr_createParsed(
    BridgedASTContext cContext, BridgedTypeRepr base,
    swift::SourceLoc lSquareLoc, swift::SourceLoc rSquareLoc);

SWIFT_NAME("BridgedAttributedTypeRepr.createParsed(_:base:attributes:)")
BridgedAttributedTypeRepr
BridgedAttributedTypeRepr_createParsed(BridgedASTContext cContext,
                                       BridgedTypeRepr base,
                                       BridgedArrayRef cAttributes);
SWIFT_NAME("BridgedCompositionTypeRepr.createEmpty(_:anyKeywordLoc:)") BridgedCompositionTypeRepr BridgedCompositionTypeRepr_createEmpty(BridgedASTContext cContext, swift::SourceLoc anyLoc); SWIFT_NAME("BridgedCompositionTypeRepr.createParsed(_:types:ampersandLoc:)") BridgedCompositionTypeRepr BridgedCompositionTypeRepr_createParsed(BridgedASTContext cContext, BridgedArrayRef types, swift::SourceLoc firstAmpLoc); SWIFT_NAME("BridgedCompileTimeLiteralTypeRepr.createParsed(_:base:specifierLoc:)") BridgedCompileTimeLiteralTypeRepr BridgedCompileTimeLiteralTypeRepr_createParsed(BridgedASTContext cContext, BridgedTypeRepr base, swift::SourceLoc specifierLoc); SWIFT_NAME("BridgedDeclRefTypeRepr.createParsed(_:base:name:nameLoc:" "genericArguments:angleRange:)") BridgedDeclRefTypeRepr BridgedDeclRefTypeRepr_createParsed( BridgedASTContext cContext, BridgedTypeRepr cBase, BridgedDeclNameRef cName, BridgedDeclNameLoc cLoc, BridgedArrayRef cGenericArguments, swift::SourceRange angleRange); SWIFT_NAME("BridgedDictionaryTypeRepr.createParsed(_:leftSquareLoc:keyType:" "colonLoc:valueType:rightSquareLoc:)") BridgedDictionaryTypeRepr BridgedDictionaryTypeRepr_createParsed( BridgedASTContext cContext, swift::SourceLoc lSquareLoc, BridgedTypeRepr keyType, swift::SourceLoc colonloc, BridgedTypeRepr valueType, swift::SourceLoc rSquareLoc); SWIFT_NAME("BridgedErrorTypeRepr.create(_:range:)") BridgedErrorTypeRepr BridgedErrorTypeRepr_create(BridgedASTContext cContext, swift::SourceRange range); SWIFT_NAME("BridgedFunctionTypeRepr.createParsed(_:argsType:asyncLoc:throwsLoc:" "thrownType:arrowLoc:resultType:)") BridgedFunctionTypeRepr BridgedFunctionTypeRepr_createParsed( BridgedASTContext cContext, BridgedTypeRepr argsTy, swift::SourceLoc asyncLoc, swift::SourceLoc throwsLoc, BridgedNullableTypeRepr thrownType, swift::SourceLoc arrowLoc, BridgedTypeRepr resultType); SWIFT_NAME("BridgedUnqualifiedIdentTypeRepr.createParsed(_:name:nameLoc:" "genericArgs:leftAngleLoc:rightAngleLoc:)") 
BridgedUnqualifiedIdentTypeRepr BridgedUnqualifiedIdentTypeRepr_createParsed( BridgedASTContext cContext, BridgedDeclNameRef cName, BridgedDeclNameLoc cNameLoc, BridgedArrayRef genericArgs, swift::SourceLoc lAngleLoc, swift::SourceLoc rAngleLoc); SWIFT_NAME("BridgedOptionalTypeRepr.createParsed(_:base:questionLoc:)") BridgedOptionalTypeRepr BridgedOptionalTypeRepr_createParsed(BridgedASTContext cContext, BridgedTypeRepr base, swift::SourceLoc questionLoc); SWIFT_NAME("BridgedImplicitlyUnwrappedOptionalTypeRepr.createParsed(_:base:" "exclaimLoc:)") BridgedImplicitlyUnwrappedOptionalTypeRepr BridgedImplicitlyUnwrappedOptionalTypeRepr_createParsed( BridgedASTContext cContext, BridgedTypeRepr base, swift::SourceLoc exclamationLoc); SWIFT_NAME("BridgedInlineArrayTypeRepr.createParsed(_:count:element:brackets:)") BridgedInlineArrayTypeRepr BridgedInlineArrayTypeRepr_createParsed( BridgedASTContext cContext, BridgedTypeRepr cCountType, BridgedTypeRepr cElementType, swift::SourceRange bracketsRange); SWIFT_NAME("BridgedInverseTypeRepr.createParsed(_:tildeLoc:constraint:)") BridgedInverseTypeRepr BridgedInverseTypeRepr_createParsed(BridgedASTContext cContext, swift::SourceLoc tildeLoc, BridgedTypeRepr cConstraint); SWIFT_NAME("BridgedIsolatedTypeRepr.createParsed(_:base:specifierLoc:)") BridgedIsolatedTypeRepr BridgedIsolatedTypeRepr_createParsed(BridgedASTContext cContext, BridgedTypeRepr base, swift::SourceLoc specifierLoc); SWIFT_NAME("BridgedLifetimeDependentTypeRepr.createParsed(_:base:entry:)") BridgedLifetimeDependentTypeRepr BridgedLifetimeDependentTypeRepr_createParsed(BridgedASTContext cContext, BridgedTypeRepr base, BridgedLifetimeEntry cEntry); SWIFT_NAME("BridgedMetatypeTypeRepr.createParsed(_:base:typeKeywordLoc:)") BridgedMetatypeTypeRepr BridgedMetatypeTypeRepr_createParsed(BridgedASTContext cContext, BridgedTypeRepr baseType, swift::SourceLoc typeLoc); SWIFT_NAME( "BridgedOwnershipTypeRepr.createParsed(_:base:specifier:specifierLoc:)") 
BridgedOwnershipTypeRepr BridgedOwnershipTypeRepr_createParsed( BridgedASTContext cContext, BridgedTypeRepr base, BridgedParamSpecifier cSpecifier, swift::SourceLoc specifierLoc); SWIFT_NAME("BridgedPlaceholderTypeRepr.createParsed(_:loc:)") BridgedPlaceholderTypeRepr BridgedPlaceholderTypeRepr_createParsed(BridgedASTContext cContext, swift::SourceLoc loc); SWIFT_NAME("BridgedProtocolTypeRepr.createParsed(_:base:protocolKeywordLoc:)") BridgedProtocolTypeRepr BridgedProtocolTypeRepr_createParsed(BridgedASTContext cContext, BridgedTypeRepr baseType, swift::SourceLoc protoLoc); SWIFT_NAME("BridgedPackElementTypeRepr.createParsed(_:base:eachKeywordLoc:)") BridgedPackElementTypeRepr BridgedPackElementTypeRepr_createParsed( BridgedASTContext cContext, BridgedTypeRepr base, swift::SourceLoc eachLoc); SWIFT_NAME( "BridgedPackExpansionTypeRepr.createParsed(_:base:repeatKeywordLoc:)") BridgedPackExpansionTypeRepr BridgedPackExpansionTypeRepr_createParsed(BridgedASTContext cContext, BridgedTypeRepr base, swift::SourceLoc repeatLoc); SWIFT_NAME("BridgedSendingTypeRepr.createParsed(_:base:specifierLoc:)") BridgedSendingTypeRepr BridgedSendingTypeRepr_createParsed(BridgedASTContext cContext, BridgedTypeRepr base, swift::SourceLoc specifierLoc); SWIFT_NAME("BridgedCallerIsolatedTypeRepr.createParsed(_:base:specifierLoc:)") BridgedCallerIsolatedTypeRepr BridgedCallerIsolatedTypeRepr_createParsed(BridgedASTContext cContext, BridgedTypeRepr base, swift::SourceLoc specifierLoc); SWIFT_NAME( "BridgedTupleTypeRepr.createParsed(_:elements:leftParenLoc:rightParenLoc:)") BridgedTupleTypeRepr BridgedTupleTypeRepr_createParsed( BridgedASTContext cContext, BridgedArrayRef elements, swift::SourceLoc lParenLoc, swift::SourceLoc rParenLoc); SWIFT_NAME( "BridgedNamedOpaqueReturnTypeRepr.createParsed(_:base:genericParamList:)") BridgedNamedOpaqueReturnTypeRepr BridgedNamedOpaqueReturnTypeRepr_createParsed( BridgedASTContext cContext, BridgedTypeRepr baseTy, BridgedGenericParamList genericParams); 
SWIFT_NAME("BridgedOpaqueReturnTypeRepr.createParsed(_:someKeywordLoc:base:)") BridgedOpaqueReturnTypeRepr BridgedOpaqueReturnTypeRepr_createParsed(BridgedASTContext cContext, swift::SourceLoc opaqueLoc, BridgedTypeRepr baseTy); SWIFT_NAME("BridgedExistentialTypeRepr.createParsed(_:anyKeywordLoc:base:)") BridgedExistentialTypeRepr BridgedExistentialTypeRepr_createParsed(BridgedASTContext cContext, swift::SourceLoc anyLoc, BridgedTypeRepr baseTy); SWIFT_NAME("BridgedVarargTypeRepr.createParsed(_:base:ellipsisLoc:)") BridgedVarargTypeRepr BridgedVarargTypeRepr_createParsed(BridgedASTContext cContext, BridgedTypeRepr base, swift::SourceLoc ellipsisLoc); SWIFT_NAME( "BridgedIntegerTypeRepr.createParsed(_:string:loc:minusLoc:)") BridgedIntegerTypeRepr BridgedIntegerTypeRepr_createParsed( BridgedASTContext cContext, BridgedStringRef cString, swift::SourceLoc loc, swift::SourceLoc minusLoc); SWIFT_NAME("BridgedTypeRepr.dump(self:)") void BridgedTypeRepr_dump(BridgedTypeRepr type); //===----------------------------------------------------------------------===// // MARK: Patterns //===----------------------------------------------------------------------===// SWIFT_NAME("getter:BridgedPattern.singleVar(self:)") BridgedNullableVarDecl BridgedPattern_getSingleVar(BridgedPattern cPattern); SWIFT_NAME("BridgedAnyPattern.createParsed(_:loc:)") BridgedAnyPattern BridgedAnyPattern_createParsed(BridgedASTContext cContext, swift::SourceLoc loc); SWIFT_NAME("BridgedAnyPattern.createImplicit(_:)") BridgedAnyPattern BridgedAnyPattern_createImplicit(BridgedASTContext cContext); SWIFT_NAME("BridgedBindingPattern.createParsed(_:keywordLoc:isLet:subPattern:)") BridgedBindingPattern BridgedBindingPattern_createParsed(BridgedASTContext cContext, swift::SourceLoc keywordLoc, bool isLet, BridgedPattern cSubPattern); SWIFT_NAME("BridgedBindingPattern.createImplicitCatch(_:loc:)") BridgedBindingPattern BridgedBindingPattern_createImplicitCatch(BridgedDeclContext cDeclContext, swift::SourceLoc 
loc); SWIFT_NAME("BridgedExprPattern.createParsed(_:expr:)") BridgedExprPattern BridgedExprPattern_createParsed(BridgedDeclContext cDeclContext, BridgedExpr cExpr); SWIFT_NAME("BridgedIsPattern.createParsed(_:isLoc:typeExpr:)") BridgedIsPattern BridgedIsPattern_createParsed(BridgedASTContext cContext, swift::SourceLoc isLoc, BridgedTypeExpr cTypeExpr); SWIFT_NAME("BridgedNamedPattern.createParsed(_:declContext:name:loc:)") BridgedNamedPattern BridgedNamedPattern_createParsed(BridgedASTContext astContext, BridgedDeclContext declContext, swift::Identifier name, swift::SourceLoc loc); SWIFT_NAME( "BridgedParenPattern.createParsed(_:lParenLoc:subPattern:rParenLoc:)") BridgedParenPattern BridgedParenPattern_createParsed( BridgedASTContext cContext, swift::SourceLoc lParenLoc, BridgedPattern cSubPattern, swift::SourceLoc rParenLoc); struct BridgedTuplePatternElt { swift::Identifier Label; swift::SourceLoc LabelLoc; BridgedPattern ThePattern; }; SWIFT_NAME("BridgedTuplePattern.createParsed(_:lParenLoc:elements:rParenLoc:)") BridgedTuplePattern BridgedTuplePattern_createParsed( BridgedASTContext cContext, swift::SourceLoc lParenLoc, BridgedArrayRef cElements, swift::SourceLoc rParenLoc); SWIFT_NAME("BridgedTypedPattern.createParsed(_:pattern:type:)") BridgedTypedPattern BridgedTypedPattern_createParsed(BridgedASTContext cContext, BridgedPattern cPattern, BridgedTypeRepr cType); SWIFT_NAME("BridgedTypedPattern.createPropagated(_:pattern:type:)") BridgedTypedPattern BridgedTypedPattern_createPropagated( BridgedASTContext cContext, BridgedPattern cPattern, BridgedTypeRepr cType); SWIFT_NAME("BridgedPattern.setImplicit(self:)") void BridgedPattern_setImplicit(BridgedPattern cPattern); SWIFT_NAME("getter:BridgedPattern.boundName(self:)") swift::Identifier BridgedPattern_getBoundName(BridgedPattern cPattern); //===----------------------------------------------------------------------===// // MARK: Generics 
//===----------------------------------------------------------------------===// class BridgedLayoutConstraint { swift::LayoutConstraintInfo *_Nullable raw; public: SWIFT_UNAVAILABLE("Use the factory methods") BRIDGED_INLINE BridgedLayoutConstraint(); SWIFT_UNAVAILABLE("Use the factory methods") BRIDGED_INLINE BridgedLayoutConstraint(swift::LayoutConstraint constraint); BRIDGED_INLINE SWIFT_COMPUTED_PROPERTY bool getIsNull() const; SWIFT_COMPUTED_PROPERTY swift::LayoutConstraintKind getKind() const; BRIDGED_INLINE SWIFT_COMPUTED_PROPERTY bool getIsKnownLayout() const; BRIDGED_INLINE SWIFT_COMPUTED_PROPERTY bool getIsTrivial() const; SWIFT_UNAVAILABLE("Unavailable in Swift") BRIDGED_INLINE swift::LayoutConstraint unbridged() const; }; SWIFT_NAME("BridgedLayoutConstraint.getLayoutConstraint(_:id:)") BridgedLayoutConstraint BridgedLayoutConstraint_getLayoutConstraint(BridgedASTContext cContext, swift::Identifier ID); SWIFT_NAME("BridgedLayoutConstraint.getLayoutConstraint(_:kind:)") BridgedLayoutConstraint BridgedLayoutConstraint_getLayoutConstraint(BridgedASTContext cContext, swift::LayoutConstraintKind kind); SWIFT_NAME( "BridgedLayoutConstraint.getLayoutConstraint(_:kind:size:alignment:)") BridgedLayoutConstraint BridgedLayoutConstraint_getLayoutConstraint(BridgedASTContext cContext, swift::LayoutConstraintKind kind, size_t size, size_t alignment); struct BridgedRequirementRepr { swift::SourceLoc SeparatorLoc; swift::RequirementReprKind Kind; BridgedTypeRepr FirstType; BridgedNullableTypeRepr SecondType; BridgedLayoutConstraint LayoutConstraint; swift::SourceLoc LayoutConstraintLoc; bool IsExpansionPattern; swift::RequirementRepr unbridged() const; }; SWIFT_NAME("BridgedRequirementRepr.createTypeConstraint(subject:colonLoc:" "constraint:isExpansionPattern:)") BridgedRequirementRepr BridgedRequirementRepr_createTypeConstraint( BridgedTypeRepr cSubject, swift::SourceLoc colonLoc, BridgedTypeRepr cConstraint, bool isExpansionPattern); 
SWIFT_NAME("BridgedRequirementRepr.createSameType(firstType:equalLoc:" "secondType:isExpansionPattern:)") BridgedRequirementRepr BridgedRequirementRepr_createSameType( BridgedTypeRepr cFirstType, swift::SourceLoc equalLoc, BridgedTypeRepr cSecondType, bool isExpansionPattern); SWIFT_NAME("BridgedRequirementRepr.createLayoutConstraint(subject:colonLoc:" "layout:layoutLoc:isExpansionPattern:)") BridgedRequirementRepr BridgedRequirementRepr_createLayoutConstraint( BridgedTypeRepr cSubject, swift::SourceLoc colonLoc, BridgedLayoutConstraint cLayout, swift::SourceLoc layoutLoc, bool isExpansionPattern); SWIFT_NAME("BridgedGenericParamList.createParsed(_:leftAngleLoc:parameters:" "genericWhereClause:rightAngleLoc:)") BridgedGenericParamList BridgedGenericParamList_createParsed( BridgedASTContext cContext, swift::SourceLoc leftAngleLoc, BridgedArrayRef cParameters, BridgedNullableTrailingWhereClause genericWhereClause, swift::SourceLoc rightAngleLoc); SWIFT_NAME( "BridgedGenericTypeParamDecl.createParsed(_:declContext:specifierLoc:" "name:nameLoc:inheritedType:index:paramKind:)") BridgedGenericTypeParamDecl BridgedGenericTypeParamDecl_createParsed( BridgedASTContext cContext, BridgedDeclContext cDeclContext, swift::SourceLoc specifierLoc, swift::Identifier name, swift::SourceLoc nameLoc, BridgedNullableTypeRepr opaqueInheritedType, size_t index, swift::GenericTypeParamKind paramKind); SWIFT_NAME("BridgedGenericTypeParamDecl.createImplicit(declContext:" "name:depth:index:paramKind:)") BridgedGenericTypeParamDecl BridgedGenericTypeParamDecl_createImplicit( BridgedDeclContext cDeclContext, swift::Identifier name, SwiftInt depth, SwiftInt index, swift::GenericTypeParamKind paramKind); SWIFT_NAME( "BridgedTrailingWhereClause.createParsed(_:whereKeywordLoc:requirements:)") BridgedTrailingWhereClause BridgedTrailingWhereClause_createParsed(BridgedASTContext cContext, swift::SourceLoc whereKeywordLoc, BridgedArrayRef cRequirements); 
SWIFT_NAME("BridgedParameterList.createParsed(_:leftParenLoc:parameters:" "rightParenLoc:)") BridgedParameterList BridgedParameterList_createParsed( BridgedASTContext cContext, swift::SourceLoc leftParenLoc, BridgedArrayRef cParameters, swift::SourceLoc rightParenLoc); SWIFT_NAME("getter:BridgedParameterList.size(self:)") size_t BridgedParameterList_size(BridgedParameterList cParameterList); SWIFT_NAME("BridgedParameterList.get(self:_:)") BridgedParamDecl BridgedParameterList_get(BridgedParameterList cParameterList, size_t i); //===----------------------------------------------------------------------===// // MARK: Misc //===----------------------------------------------------------------------===// struct BridgedTupleTypeElement { swift::Identifier Name; swift::SourceLoc NameLoc; swift::Identifier SecondName; swift::SourceLoc SecondNameLoc; swift::SourceLoc UnderscoreLoc; swift::SourceLoc ColonLoc; BridgedTypeRepr Type; swift::SourceLoc TrailingCommaLoc; }; enum ENUM_EXTENSIBILITY_ATTR(open) BridgedMacroDefinitionKind : size_t { /// An expanded macro. BridgedExpandedMacro = 0, /// An external macro, spelled with either the old spelling (Module.Type) /// or the new spelling `#externalMacro(module: "Module", type: "Type")`. BridgedExternalMacro, /// The builtin definition for "externalMacro". BridgedBuiltinExternalMacro, /// The builtin definition for the "isolation" macro. 
BridgedBuiltinIsolationMacro, }; struct BridgedASTType { enum class TraitResult { IsNot, CanBe, Is }; enum class MetatypeRepresentation { Thin, Thick, ObjC }; enum class FunctionTypeRepresentation { Thick = 0, Block, Thin, CFunctionPointer, Method = 8, ObjCMethod, WitnessMethod, Closure, CXXMethod, KeyPathAccessorGetter, KeyPathAccessorSetter, KeyPathAccessorEquals, KeyPathAccessorHash }; swift::TypeBase * _Nullable type; BRIDGED_INLINE swift::Type unbridged() const; BridgedOwnedString getDebugDescription() const; SWIFT_IMPORT_UNSAFE BRIDGED_INLINE BridgedCanType getCanonicalType() const; SWIFT_IMPORT_UNSAFE BRIDGED_INLINE BridgedDiagnosticArgument asDiagnosticArgument() const; BRIDGED_INLINE bool hasArchetype() const; BRIDGED_INLINE bool isLegalFormalType() const; BRIDGED_INLINE bool isGenericAtAnyLevel() const; BRIDGED_INLINE bool hasTypeParameter() const; BRIDGED_INLINE bool hasLocalArchetype() const; BRIDGED_INLINE bool hasDynamicSelf() const; BRIDGED_INLINE bool isArchetype() const; BRIDGED_INLINE bool archetypeRequiresClass() const; BRIDGED_INLINE bool isExistentialArchetype() const; BRIDGED_INLINE bool isExistentialArchetypeWithError() const; BRIDGED_INLINE bool isExistential() const; BRIDGED_INLINE bool isDynamicSelf() const; BRIDGED_INLINE bool isClassExistential() const; BRIDGED_INLINE bool isGenericTypeParam() const; BRIDGED_INLINE bool isEscapable() const; BRIDGED_INLINE bool isNoEscape() const; BRIDGED_INLINE bool isInteger() const; BRIDGED_INLINE bool isUnownedStorageType() const; BRIDGED_INLINE bool isMetatypeType() const; BRIDGED_INLINE bool isExistentialMetatypeType() const; BRIDGED_INLINE bool isTuple() const; BRIDGED_INLINE bool isFunction() const; BRIDGED_INLINE bool isLoweredFunction() const; BRIDGED_INLINE bool isNoEscapeFunction() const; BRIDGED_INLINE bool isThickFunction() const; BRIDGED_INLINE bool isAsyncFunction() const; BRIDGED_INLINE bool isCalleeConsumedFunction() const; BRIDGED_INLINE bool isBuiltinInteger() const; BRIDGED_INLINE 
bool isBuiltinFloat() const; BRIDGED_INLINE bool isBuiltinVector() const; BRIDGED_INLINE bool isBuiltinFixedArray() const; BRIDGED_INLINE bool isBox() const; BRIDGED_INLINE bool isPack() const; BRIDGED_INLINE bool isSILPack() const; SWIFT_IMPORT_UNSAFE BRIDGED_INLINE BridgedASTType getBuiltinVectorElementType() const; SWIFT_IMPORT_UNSAFE BRIDGED_INLINE BridgedCanType getBuiltinFixedArrayElementType() const; SWIFT_IMPORT_UNSAFE BRIDGED_INLINE BridgedCanType getBuiltinFixedArraySizeType() const; SWIFT_IMPORT_UNSAFE BRIDGED_INLINE BridgedASTType getOptionalType() const; BRIDGED_INLINE bool isBuiltinFixedWidthInteger(SwiftInt width) const; BRIDGED_INLINE bool isOptional() const; BRIDGED_INLINE bool isBuiltinType() const; SWIFT_IMPORT_UNSAFE BRIDGED_INLINE BridgedASTType getAnyPointerElementType() const; BRIDGED_INLINE bool isUnsafeBufferPointerType() const; BRIDGED_INLINE bool isUnsafeMutableBufferPointerType() const; BRIDGED_INLINE bool isUnsafeRawBufferPointerType() const; BRIDGED_INLINE bool isUnsafeMutableRawBufferPointerType() const; SWIFT_IMPORT_UNSAFE BRIDGED_INLINE OptionalBridgedDeclObj getNominalOrBoundGenericNominal() const; BRIDGED_INLINE TraitResult canBeClass() const; SWIFT_IMPORT_UNSAFE BRIDGED_INLINE OptionalBridgedDeclObj getAnyNominal() const; SWIFT_IMPORT_UNSAFE BRIDGED_INLINE BridgedASTType getInstanceTypeOfMetatype() const; SWIFT_IMPORT_UNSAFE BRIDGED_INLINE BridgedASTType getStaticTypeOfDynamicSelf() const; SWIFT_IMPORT_UNSAFE BRIDGED_INLINE BridgedASTType getInterfaceTypeOfArchetype() const; SWIFT_IMPORT_UNSAFE BRIDGED_INLINE BridgedASTType getSuperClassType() const; BRIDGED_INLINE MetatypeRepresentation getRepresentationOfMetatype() const; BRIDGED_INLINE BridgedOptionalInt getValueOfIntegerType() const; SWIFT_IMPORT_UNSAFE BRIDGED_INLINE BridgedSubstitutionMap getContextSubstitutionMap() const; SWIFT_IMPORT_UNSAFE BRIDGED_INLINE BridgedGenericSignature getInvocationGenericSignatureOfFunctionType() const; BRIDGED_INLINE FunctionTypeRepresentation 
getFunctionTypeRepresentation() const; SWIFT_IMPORT_UNSAFE BRIDGED_INLINE BridgedASTType subst(BridgedSubstitutionMap substMap) const; SWIFT_IMPORT_UNSAFE BRIDGED_INLINE BridgedASTType mapOutOfEnvironment() const; SWIFT_IMPORT_UNSAFE BRIDGED_INLINE BridgedCanType getReducedType(BridgedGenericSignature sig) const; SWIFT_IMPORT_UNSAFE BRIDGED_INLINE swift::Identifier GenericTypeParam_getName() const; BRIDGED_INLINE SwiftInt GenericTypeParam_getDepth() const; BRIDGED_INLINE SwiftInt GenericTypeParam_getIndex() const; BRIDGED_INLINE swift::GenericTypeParamKind GenericTypeParam_getParamKind() const; SWIFT_IMPORT_UNSAFE BRIDGED_INLINE BridgedConformance checkConformance(BridgedDeclObj proto) const; BRIDGED_INLINE bool containsSILPackExpansionType() const; BRIDGED_INLINE bool isSILPackElementAddress() const; SWIFT_IMPORT_UNSAFE BRIDGED_INLINE BridgedASTTypeArray BoundGenericType_getGenericArgs() const; }; class BridgedCanType { swift::TypeBase * _Nullable type; public: BRIDGED_INLINE BridgedCanType(); BRIDGED_INLINE BridgedCanType(swift::CanType ty); BRIDGED_INLINE swift::CanType unbridged() const; SWIFT_IMPORT_UNSAFE BRIDGED_INLINE BridgedASTType getRawType() const; SWIFT_IMPORT_UNSAFE BRIDGED_INLINE BridgedCanGenericSignature SILFunctionType_getSubstGenericSignature() const; }; struct BridgedASTTypeArray { BridgedArrayRef typeArray; SwiftInt getCount() const { return SwiftInt(typeArray.Length); } SWIFT_IMPORT_UNSAFE BRIDGED_INLINE BridgedASTType getAt(SwiftInt index) const; }; struct BridgedConformance { void * _Nullable opaqueValue; BRIDGED_INLINE BridgedConformance(swift::ProtocolConformanceRef conformance); BRIDGED_INLINE swift::ProtocolConformanceRef unbridged() const; BridgedOwnedString getDebugDescription() const; BRIDGED_INLINE bool isConcrete() const; BRIDGED_INLINE bool isValid() const; BRIDGED_INLINE bool isSpecializedConformance() const; BRIDGED_INLINE bool isInheritedConformance() const; SWIFT_IMPORT_UNSAFE BRIDGED_INLINE BridgedASTType getType() const; 
SWIFT_IMPORT_UNSAFE BRIDGED_INLINE BridgedDeclObj getRequirement() const; SWIFT_IMPORT_UNSAFE BRIDGED_INLINE BridgedConformance getGenericConformance() const; SWIFT_IMPORT_UNSAFE BRIDGED_INLINE BridgedConformance getInheritedConformance() const; SWIFT_IMPORT_UNSAFE BRIDGED_INLINE BridgedSubstitutionMap getSpecializedSubstitutions() const; SWIFT_IMPORT_UNSAFE BRIDGED_INLINE BridgedConformance getAssociatedConformance(BridgedASTType assocType, BridgedDeclObj proto) const; }; struct BridgedConformanceArray { BridgedArrayRef pcArray; SwiftInt getCount() const { return SwiftInt(pcArray.Length); } SWIFT_IMPORT_UNSAFE BRIDGED_INLINE BridgedConformance getAt(SwiftInt index) const; }; struct BridgedSubstitutionMap { uint64_t storage[1]; static SWIFT_IMPORT_UNSAFE BridgedSubstitutionMap get(BridgedGenericSignature genSig, BridgedArrayRef replacementTypes); BRIDGED_INLINE BridgedSubstitutionMap(swift::SubstitutionMap map); BRIDGED_INLINE swift::SubstitutionMap unbridged() const; BRIDGED_INLINE BridgedSubstitutionMap(); BridgedOwnedString getDebugDescription() const; BRIDGED_INLINE bool isEmpty() const; BRIDGED_INLINE bool isEqualTo(BridgedSubstitutionMap rhs) const; BRIDGED_INLINE bool hasAnySubstitutableParams() const; BRIDGED_INLINE SwiftInt getNumConformances() const; SWIFT_IMPORT_UNSAFE BRIDGED_INLINE BridgedConformance getConformance(SwiftInt index) const; SWIFT_IMPORT_UNSAFE BRIDGED_INLINE BridgedASTTypeArray getReplacementTypes() const; }; struct BridgedGenericSignature { const swift::GenericSignatureImpl * _Nullable impl; BRIDGED_INLINE swift::GenericSignature unbridged() const; BridgedOwnedString getDebugDescription() const; SWIFT_IMPORT_UNSAFE BRIDGED_INLINE BridgedASTTypeArray getGenericParams() const; SWIFT_IMPORT_UNSAFE BRIDGED_INLINE BridgedASTType mapTypeIntoEnvironment(BridgedASTType type) const; BRIDGED_INLINE BridgedCanGenericSignature getCanonicalSignature() const; }; struct BridgedCanGenericSignature { const swift::GenericSignatureImpl *_Nullable impl; 
BRIDGED_INLINE swift::CanGenericSignature unbridged() const; BRIDGED_INLINE BridgedGenericSignature getGenericSignature() const; SWIFT_IMPORT_UNSAFE BRIDGED_INLINE BridgedASTType mapTypeIntoEnvironment(BridgedASTType type) const; }; struct BridgedFingerprint { uint64_t v1; uint64_t v2; BRIDGED_INLINE swift::Fingerprint unbridged() const; }; enum ENUM_EXTENSIBILITY_ATTR(closed) BridgedPoundKeyword : uint8_t { #define POUND_KEYWORD(NAME) BridgedPoundKeyword_##NAME, #include "swift/AST/TokenKinds.def" BridgedPoundKeyword_None, }; SWIFT_NAME("BridgedPoundKeyword.init(from:)") BridgedPoundKeyword BridgedPoundKeyword_fromString(BridgedStringRef cStr); //===----------------------------------------------------------------------===// // MARK: #if handling //===----------------------------------------------------------------------===// /// Bridged version of IfConfigClauseRangeInfo::ClauseKind. enum ENUM_EXTENSIBILITY_ATTR(closed) BridgedIfConfigClauseKind : size_t { IfConfigActive, IfConfigInactive, IfConfigEnd }; /// Bridged version of IfConfigClauseRangeInfo. struct BridgedIfConfigClauseRangeInfo { swift::SourceLoc directiveLoc; swift::SourceLoc bodyLoc; swift::SourceLoc endLoc; BridgedIfConfigClauseKind kind; BRIDGED_INLINE swift::IfConfigClauseRangeInfo unbridged() const; }; //===----------------------------------------------------------------------===// // MARK: Plugins //===----------------------------------------------------------------------===// SWIFT_BEGIN_ASSUME_NONNULL typedef void *PluginHandle; typedef const void *PluginCapabilityPtr; /// Set a capability data to the plugin object. Since the data is just a opaque /// pointer, it's not used in AST at all. void Plugin_setCapability(PluginHandle handle, PluginCapabilityPtr _Nullable data); /// Get a capability data set by \c Plugin_setCapability . PluginCapabilityPtr _Nullable Plugin_getCapability(PluginHandle handle); /// Lock the plugin. Clients should lock it during sending and recving the /// response. 
void Plugin_lock(PluginHandle handle); /// Unlock the plugin. void Plugin_unlock(PluginHandle handle); /// Launch the plugin if it's not running. bool Plugin_spawnIfNeeded(PluginHandle handle); /// Sends the message to the plugin, returns true if there was an error. /// Clients should receive the response by \c Plugin_waitForNextMessage . bool Plugin_sendMessage(PluginHandle handle, const BridgedData data); /// Receive a message from the plugin. bool Plugin_waitForNextMessage(PluginHandle handle, BridgedData *data); SWIFT_END_ASSUME_NONNULL SWIFT_END_NULLABILITY_ANNOTATIONS #ifndef PURE_BRIDGING_MODE // In _not_ PURE_BRIDGING_MODE, bridging functions are inlined and therefore // included in the header file. This is because they rely on C++ headers that // we don't want to pull in when using "pure bridging mode". #include "ASTBridgingImpl.h" #endif #endif // SWIFT_AST_ASTBRIDGING_H
c
github
https://github.com/apple/swift
include/swift/AST/ASTBridging.h
// Copyright IBM Corp. 2016, 2025 // SPDX-License-Identifier: MPL-2.0 package plugin import ( "crypto/tls" "math" "os" log "github.com/hashicorp/go-hclog" plugin "github.com/hashicorp/go-plugin" "github.com/hashicorp/vault/sdk/helper/pluginutil" "github.com/hashicorp/vault/sdk/logical" "google.golang.org/grpc" ) // BackendPluginName is the name of the plugin that can be // dispensed from the plugin server. const BackendPluginName = "backend" type TLSProviderFunc func() (*tls.Config, error) type ServeOpts struct { BackendFactoryFunc logical.Factory TLSProviderFunc TLSProviderFunc Logger log.Logger } // Serve is a helper function used to serve a backend plugin. This // should be ran on the plugin's main process. func Serve(opts *ServeOpts) error { logger := opts.Logger if logger == nil { logger = log.New(&log.LoggerOptions{ Level: log.Trace, Output: os.Stderr, JSONFormat: true, }) } // pluginMap is the map of plugins we can dispense. pluginSets := map[int]plugin.PluginSet{ // Version 3 used to supports both protocols. We want to keep it around // since it's possible old plugins built against this version will still // work with gRPC. There is currently no difference between version 3 // and version 4. 3: { "backend": &GRPCBackendPlugin{ Factory: opts.BackendFactoryFunc, Logger: logger, }, }, 4: { "backend": &GRPCBackendPlugin{ Factory: opts.BackendFactoryFunc, Logger: logger, }, }, 5: { "backend": &GRPCBackendPlugin{ Factory: opts.BackendFactoryFunc, MultiplexingSupport: false, Logger: logger, }, }, } err := pluginutil.OptionallyEnableMlock() if err != nil { return err } serveOpts := &plugin.ServeConfig{ HandshakeConfig: HandshakeConfig, VersionedPlugins: pluginSets, TLSProvider: opts.TLSProviderFunc, Logger: logger, // A non-nil value here enables gRPC serving for this plugin... 
GRPCServer: func(opts []grpc.ServerOption) *grpc.Server { opts = append(opts, grpc.MaxRecvMsgSize(math.MaxInt32)) opts = append(opts, grpc.MaxSendMsgSize(math.MaxInt32)) return plugin.DefaultGRPCServer(opts) }, } plugin.Serve(serveOpts) return nil } // ServeMultiplex is a helper function used to serve a backend plugin. This // should be ran on the plugin's main process. func ServeMultiplex(opts *ServeOpts) error { logger := opts.Logger if logger == nil { logger = log.New(&log.LoggerOptions{ Level: log.Trace, Output: os.Stderr, JSONFormat: true, }) } // pluginMap is the map of plugins we can dispense. pluginSets := map[int]plugin.PluginSet{ // Version 3 used to supports both protocols. We want to keep it around // since it's possible old plugins built against this version will still // work with gRPC. There is currently no difference between version 3 // and version 4. 3: { "backend": &GRPCBackendPlugin{ Factory: opts.BackendFactoryFunc, Logger: logger, }, }, 4: { "backend": &GRPCBackendPlugin{ Factory: opts.BackendFactoryFunc, Logger: logger, }, }, 5: { "backend": &GRPCBackendPlugin{ Factory: opts.BackendFactoryFunc, MultiplexingSupport: true, Logger: logger, }, }, } err := pluginutil.OptionallyEnableMlock() if err != nil { return err } serveOpts := &plugin.ServeConfig{ HandshakeConfig: HandshakeConfig, VersionedPlugins: pluginSets, Logger: logger, // A non-nil value here enables gRPC serving for this plugin... GRPCServer: func(opts []grpc.ServerOption) *grpc.Server { opts = append(opts, grpc.MaxRecvMsgSize(math.MaxInt32)) opts = append(opts, grpc.MaxSendMsgSize(math.MaxInt32)) return plugin.DefaultGRPCServer(opts) }, // TLSProvider is required to support v3 and v4 plugins. // It will be ignored for v5 which uses AutoMTLS TLSProvider: opts.TLSProviderFunc, } plugin.Serve(serveOpts) return nil } // handshakeConfigs are used to just do a basic handshake between // a plugin and host. If the handshake fails, a user friendly error is shown. 
// This prevents users from executing bad plugins or executing a plugin // directory. It is a UX feature, not a security feature. var HandshakeConfig = plugin.HandshakeConfig{ MagicCookieKey: "VAULT_BACKEND_PLUGIN", MagicCookieValue: "6669da05-b1c8-4f49-97d9-c8e5bed98e20", }
go
github
https://github.com/hashicorp/vault
sdk/plugin/serve.go
# -*- coding: utf-8 -*- # # This file is part of Bika LIMS # # Copyright 2011-2016 by it's authors. # Some rights reserved. See LICENSE.txt, AUTHORS.txt. from plone import api from bika.lims import logger def SetDepartmentCookies(event): """Login event handler. When user logs in for the first time, we are setting department filtering cookie values. """ # Fix for https://jira.bikalabs.com/browse/LIMS-2597 if not is_bika_installed(): logger.warn("Package 'bika.lims' is not installed, skipping event handler for IUserLoggedInEvent.") return # get the bika_setup object portal = api.portal.get() bika_setup = portal.get("bika_setup") # just to be sure... # This should go into the api.py module once it is in place if bika_setup is None: raise RuntimeError("bika_setup not found in this Bika LIMS installation") # Getting request, response and username request = api.env.getRequest() response = request.RESPONSE user = api.user.get_current() username = user and user.getUserName() or None portal_catalog = api.portal.get_tool("portal_catalog") if bika_setup.getAllowDepartmentFiltering(): dep_for_cookie = '' if username == 'admin': departments = portal_catalog(portal_type='Department', sort_on='sortable_title', sort_order='ascending', inactive_state='active') for department in departments: dep_for_cookie += department.UID + ',' response.setCookie('dep_filter_disabled', 'true', path='/', max_age=24 * 3600) else: labcontact = portal_catalog(portal_type='LabContact', getUsername=username) if labcontact: departments = labcontact[0].getObject().getSortedDepartments() dep_for_cookie = departments[0].UID() if len(departments) > 0 else '' response.setCookie('filter_by_department_info', dep_for_cookie, path='/', max_age=24 * 3600) else: response.setCookie('filter_by_department_info', None, path='/', max_age=0) response.setCookie('dep_filter_disabled', None, path='/', max_age=0) def ClearDepartmentCookies(event): """Logout event handler. 
When user explicitly logs out from the Logout menu, clean department filtering related cookies. """ if not is_bika_installed(): logger.warn("Package 'bika.lims' is not installed, skipping event handler for IUserLoggedOutEvent.") return request = api.env.getRequest() response = request.RESPONSE # Voiding our special cookie on logout response.setCookie('filter_by_department_info', None, path='/', max_age=0) response.setCookie('dep_filter_disabled', None, path='/', max_age=0) def is_bika_installed(): """Check if Bika LIMS is installed in the Portal """ qi = api.portal.get_tool("portal_quickinstaller") return qi.isProductInstalled("bika.lims")
unknown
codeparrot/codeparrot-clean
# UrbanFootprint v1.5
# Copyright (C) 2017 Calthorpe Analytics
#
# This file is part of UrbanFootprint version 1.5
#
# UrbanFootprint is distributed under the terms of the GNU General
# Public License version 3, as published by the Free Software Foundation. This
# code is distributed WITHOUT ANY WARRANTY, without implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License v3 for more details; see <http://www.gnu.org/licenses/>.

from tastypie.resources import ModelResource
from footprint.main.lib.functions import one_or_none
from footprint.main.models import DbEntity, ConfigEntity
from footprint.main.models.presentation.layer.layer import Layer
from footprint.main.models.presentation.layer_selection import get_or_create_layer_selection_class_for_layer
from footprint.main.publishing.layer_initialization import LayerLibraryKey
from footprint.main.publishing.layer_publishing import update_or_create_layer_selections_for_layer

__author__ = 'calthorpe_analytics'


class FeatureResourceMixin(ModelResource):
    """
    Mixin that resolves Feature-related objects (Layer, DbEntity, ConfigEntity,
    LayerSelection) from request query parameters for tastypie Feature resources.

    NOTE(review): this mixin calls self.resolve_user(params), which is not
    defined here -- presumably supplied by a sibling mixin/resource; confirm.
    """

    def search_params(self, params):
        """
        The user may optionally specify a layer_selection__id instead of feature
        ids when querying for features. This prevents huge feature id lists in
        the URL.

        Hook point for subclasses; the base implementation passes the params
        through unchanged.

        :param params: dict of request query parameters
        :return: the params, unmodified
        """
        return params

    def resolve_layer_selection(self, params):
        """
        Used to get the actual selected features, which is a shortcut to
        querying, so we don't have to query for potentially thousands of ids.

        If no Layer exists then there is also no LayerSelection, in which case
        we return None.

        :param params: dict of request query parameters
        :return: the requesting user's LayerSelection instance, or None when
            no Layer can be resolved from the params
        """
        layer = self.resolve_layer(params)
        config_entity = self.resolve_config_entity(params)
        if not layer:
            return None
        # Ensure a LayerSelection row exists for this layer/user before the
        # .get() below, which would otherwise raise DoesNotExist.
        update_or_create_layer_selections_for_layer(layer, users=[self.resolve_user(params)])
        layer_selection_class = get_or_create_layer_selection_class_for_layer(layer, config_entity, False)
        return layer_selection_class.objects.get(user=self.resolve_user(params))

    def remove_params(self, params):
        """
        Remove params that are used to identify the Feature subclass, but not
        for filtering instances.

        :param params: dict of request query parameters (unused here; the
            removable keys are fixed)
        :return: list of param names the caller should strip before filtering
        """
        return ['layer_selection__id', 'config_entity__id', 'layer__id', 'db_entity__id', 'id', 'file_dataset__id']

    def resolve_config_entity(self, params):
        """
        Resolve the ConfigEntity subclass instance, either directly from
        config_entity__id or indirectly via the resolved DbEntity.

        :param params: dict of request query parameters
        :return: a ConfigEntity subclass instance
        """
        if params.get('config_entity__id'):
            return ConfigEntity.objects.get_subclass(id=params['config_entity__id'])
        else:
            return self.resolve_db_entity(params).config_entity

    def resolve_db_entity(self, params):
        """
        Resolve the DbEntity, either directly from db_entity__id or indirectly
        via the resolved Layer.

        :param params: dict of request query parameters
        :return: a DbEntity instance
        """
        if params.get('db_entity__id'):
            return DbEntity.objects.get(id=params['db_entity__id'])
        else:
            return self.resolve_layer(params).db_entity

    def resolve_layer(self, params):
        """
        Resolve the Layer if one exists. We don't resolve a Layer from a
        DbEntity, only from a layer__id. It's assumed that if a Layer id
        doesn't come from the request params then the user doesn't need
        Features related to LayerSelection, thus no Layer is needed.

        :param params: dict of request query parameters
        :return: the Layer instance, or None when layer__id is absent
        """
        if params.get('layer__id'):
            return Layer.objects.get(id=params['layer__id'])
        return None

    def resolve_instance(self, params, dynamic_model_class):
        """
        Fetch a single Feature instance of the given dynamic model class by the
        id request param.

        :param params: dict of request query parameters; must contain 'id'
        :param dynamic_model_class: the dynamically created Feature subclass
        :return: the matching model instance
        """
        return dynamic_model_class.objects.get(id=params['id'])

    def resolve_model_class(self, config_entity=None, db_entity=None):
        """
        Resolves the model class of the dynamic resource class. In this case
        it's a Feature subclass.

        Prefers the DbEntity's feature_class when a DbEntity is given;
        otherwise derives it from the ConfigEntity and this resource's base
        queryset model.

        :param config_entity: optional ConfigEntity used when db_entity is None
        :param db_entity: optional DbEntity whose feature_class takes priority
        :return: the Feature subclass
        """
        return db_entity.feature_class if \
            db_entity else \
            config_entity.feature_class_of_base_class(self._meta.queryset.model)
unknown
codeparrot/codeparrot-clean
#!/usr/bin/env python2.7 """ File : update_db.py Author : Bjorn Barrefors <bjorn dot peter dot barrefors AT cern dot ch> Description: Update database and keep learning """ # system modules import logging import sys import getopt import datetime from logging.handlers import TimedRotatingFileHandler # package modules from cuadrnt.utils.config import get_config from cuadrnt.data_management.tools.sites import SiteManager from cuadrnt.data_management.tools.datasets import DatasetManager from cuadrnt.data_management.tools.popularity import PopularityManager from cuadrnt.data_management.core.storage import StorageManager class UpdateDB(object): """ Update DB with new dataset and site data """ def __init__(self, config=dict()): self.logger = logging.getLogger(__name__) self.config = config self.storage = StorageManager(self.config) self.sites = SiteManager(self.config) self.datasets = DatasetManager(self.config) self.popularity = PopularityManager(self.config) def start(self): """ Begin Database Update """ t1 = datetime.datetime.utcnow() self.sites.update_db() self.datasets.update_db() self.popularity.update_db() t2 = datetime.datetime.utcnow() td = t2 - t1 self.logger.info('Update DB took %s', str(td)) def main(argv): """ Main driver for Update DB """ log_level = logging.WARNING config = get_config(path='/var/opt/cuadrnt', file_name='cuadrnt.cfg') try: opts, args = getopt.getopt(argv, 'h', ['help', 'log=']) except getopt.GetoptError: print "usage: update_db.py [--log=notset|debug|info|warning|error|critical]" print " or: update_db.py --help" sys.exit() for opt, arg in opts: if opt in ('-h', '--help'): print "usage: update_db.py [--log=notset|debug|info|warning|error|critical]" print " or: update_db.py --help" sys.exit() elif opt in ('--log'): log_level = getattr(logging, arg.upper()) if not isinstance(log_level, int): print "%s is not a valid log level" % (str(arg)) print "usage: update_db.py [--log=notset|debug|info|warning|error|critical]" print " or: update_db.py 
--help" sys.exit() else: print "usage: update_db.py [--log=notset|debug|info|warning|error|critical]" print " or: update_db.py --help" print "error: option %s not recognized" % (str(opt)) sys.exit() log_path = config['paths']['log'] log_file = 'update_db.log' file_name = '%s/%s' % (log_path, log_file) logger = logging.getLogger() logger.setLevel(log_level) handler = TimedRotatingFileHandler(file_name, when='midnight', interval=1, backupCount=6) formatter = logging.Formatter('%(asctime)s [%(levelname)s] %(name)s:%(funcName)s:%(lineno)d: %(message)s', datefmt='%H:%M') handler.setFormatter(formatter) logger.addHandler(handler) update_db = UpdateDB(config) update_db.start() if __name__ == "__main__": main(sys.argv[1:]) sys.exit()
unknown
codeparrot/codeparrot-clean
#!/usr/bin/env python # -*- coding: utf-8 -*- MISC_HTML_SOURCE = """ <font size="2" style="color: rgb(31, 31, 31); font-family: monospace; font-variant: normal; line-height: normal; ">test1</font> <div style="color: rgb(31, 31, 31); font-family: monospace; font-variant: normal; line-height: normal; font-size: 12px; font-style: normal; "> <b>test2</b></div><div style="color: rgb(31, 31, 31); font-family: monospace; font-variant: normal; line-height: normal; font-size: 12px; "> <i>test3</i></div><div style="color: rgb(31, 31, 31); font-family: monospace; font-variant: normal; line-height: normal; font-size: 12px; "> <u>test4</u></div><div style="color: rgb(31, 31, 31); font-family: monospace; font-variant: normal; line-height: normal; font-size: 12px; "> <strike>test5</strike></div><div style="color: rgb(31, 31, 31); font-family: monospace; font-variant: normal; line-height: normal; "> <font size="5">test6</font></div><div><ul><li><font color="#1f1f1f" face="monospace" size="2">test7</font></li><li> <font color="#1f1f1f" face="monospace" size="2">test8</font></li></ul><div><ol><li><font color="#1f1f1f" face="monospace" size="2">test9</font> </li><li><font color="#1f1f1f" face="monospace" size="2">test10</font></li></ol></div></div> <blockquote style="margin: 0 0 0 40px; border: none; padding: 0px;"><div><div><div><font color="#1f1f1f" face="monospace" size="2"> test11</font></div></div></div></blockquote><blockquote style="margin: 0 0 0 40px; border: none; padding: 0px;"> <blockquote style="margin: 0 0 0 40px; border: none; padding: 0px;"><div><font color="#1f1f1f" face="monospace" size="2"> test12</font></div><div><font color="#1f1f1f" face="monospace" size="2"><br></font></div></blockquote></blockquote> <font color="#1f1f1f" face="monospace" size="2"><a href="http://google.com">google</a></font> <a href="javascript:alert('malicious code')">test link</a> """ EDI_LIKE_HTML_SOURCE = """<div style="font-family: 'Lucica Grande', Ubuntu, Arial, Verdana, sans-serif; 
font-size: 12px; color: rgb(34, 34, 34); background-color: #FFF; "> <p>Hello ${object.partner_id.name},</p> <p>A new invoice is available for you: </p> <p style="border-left: 1px solid #8e0000; margin-left: 30px;"> &nbsp;&nbsp;<strong>REFERENCES</strong><br /> &nbsp;&nbsp;Invoice number: <strong>${object.number}</strong><br /> &nbsp;&nbsp;Invoice total: <strong>${object.amount_total} ${object.currency_id.name}</strong><br /> &nbsp;&nbsp;Invoice date: ${object.date_invoice}<br /> &nbsp;&nbsp;Order reference: ${object.origin}<br /> &nbsp;&nbsp;Your contact: <a href="mailto:${object.user_id.email or ''}?subject=Invoice%20${object.number}">${object.user_id.name}</a> </p> <br/> <p>It is also possible to directly pay with Paypal:</p> <a style="margin-left: 120px;" href="${object.paypal_url}"> <img class="oe_edi_paypal_button" src="https://www.paypal.com/en_US/i/btn/btn_paynowCC_LG.gif"/> </a> <br/> <p>If you have any question, do not hesitate to contact us.</p> <p>Thank you for choosing ${object.company_id.name or 'us'}!</p> <br/> <br/> <div style="width: 375px; margin: 0px; padding: 0px; background-color: #8E0000; border-top-left-radius: 5px 5px; border-top-right-radius: 5px 5px; background-repeat: repeat no-repeat;"> <h3 style="margin: 0px; padding: 2px 14px; font-size: 12px; color: #DDD;"> <strong style="text-transform:uppercase;">${object.company_id.name}</strong></h3> </div> <div style="width: 347px; margin: 0px; padding: 5px 14px; line-height: 16px; background-color: #F2F2F2;"> <span style="color: #222; margin-bottom: 5px; display: block; "> ${object.company_id.street}<br/> ${object.company_id.street2}<br/> ${object.company_id.zip} ${object.company_id.city}<br/> ${object.company_id.state_id and ('%s, ' % object.company_id.state_id.name) or ''} ${object.company_id.country_id.name or ''}<br/> </span> <div style="margin-top: 0px; margin-right: 0px; margin-bottom: 0px; margin-left: 0px; padding-top: 0px; padding-right: 0px; padding-bottom: 0px; padding-left: 0px; "> 
Phone:&nbsp; ${object.company_id.phone} </div> <div> Web :&nbsp;<a href="${object.company_id.website}">${object.company_id.website}</a> </div> </div> </div></body></html>""" OERP_WEBSITE_HTML_1 = """ <div> <div class="container"> <div class="row"> <div class="col-md-12 text-center mt16 mb16" data-snippet-id="colmd"> <h2>OpenERP HR Features</h2> <h3 class="text-muted">Manage your company most important asset: People</h3> </div> <div class="col-md-4" data-snippet-id="colmd"> <img class="img-rounded img-responsive" src="/website/static/src/img/china_thumb.jpg"> <h4 class="mt16">Streamline Recruitments</h4> <p>Post job offers and keep track of each application received. Follow applicants in your recruitment process with the smart kanban view.</p> <p>Save time by automating some communications with email templates. Resumes are indexed automatically, allowing you to easily find for specific profiles.</p> </div> <div class="col-md-4" data-snippet-id="colmd"> <img class="img-rounded img-responsive" src="/website/static/src/img/desert_thumb.jpg"> <h4 class="mt16">Enterprise Social Network</h4> <p>Break down information silos. Share knowledge and best practices amongst all employees. Follow specific people or documents and join groups of interests to share expertise and documents.</p> <p>Interact with your collegues in real time with live chat.</p> </div> <div class="col-md-4" data-snippet-id="colmd"> <img class="img-rounded img-responsive" src="/website/static/src/img/deers_thumb.jpg"> <h4 class="mt16">Leaves Management</h4> <p>Keep track of the vacation days accrued by each employee. Employees enter their requests (paid holidays, sick leave, etc), for managers to approve and validate. It's all done in just a few clicks. 
The agenda of each employee is updated accordingly.</p> </div> </div> </div> </div>""" OERP_WEBSITE_HTML_1_IN = [ 'Manage your company most important asset: People', 'img class="img-rounded img-responsive" src="/website/static/src/img/china_thumb.jpg"', ] OERP_WEBSITE_HTML_1_OUT = [ 'Break down information silos.', 'Keep track of the vacation days accrued by each employee', 'img class="img-rounded img-responsive" src="/website/static/src/img/deers_thumb.jpg', ] OERP_WEBSITE_HTML_2 = """ <div class="mt16 cke_widget_editable cke_widget_element oe_editable oe_dirty" data-oe-model="blog.post" data-oe-id="6" data-oe-field="content" data-oe-type="html" data-oe-translate="0" data-oe-expression="blog_post.content" data-cke-widget-data="{}" data-cke-widget-keep-attr="0" data-widget="oeref" contenteditable="true" data-cke-widget-editable="text"> <section class="mt16 mb16" data-snippet-id="text-block"> <div class="container"> <div class="row"> <div class="col-md-12 text-center mt16 mb32" data-snippet-id="colmd"> <h2> OpenERP Project Management </h2> <h3 class="text-muted">Infinitely flexible. Incredibly easy to use.</h3> </div> <div class="col-md-12 mb16 mt16" data-snippet-id="colmd"> <p> OpenERP's <b>collaborative and realtime</b> project management helps your team get work done. Keep track of everything, from the big picture to the minute details, from the customer contract to the billing. </p><p> Organize projects around <b>your own processes</b>. Work on tasks and issues using the kanban view, schedule tasks using the gantt chart and control deadlines in the calendar view. Every project may have it's own stages allowing teams to optimize their job. 
</p> </div> </div> </div> </section> <section class="" data-snippet-id="image-text"> <div class="container"> <div class="row"> <div class="col-md-6 mt16 mb16" data-snippet-id="colmd"> <img class="img-responsive shadow" src="/website/static/src/img/image_text.jpg"> </div> <div class="col-md-6 mt32" data-snippet-id="colmd"> <h3>Manage Your Shops</h3> <p> OpenERP's Point of Sale introduces a super clean interface with no installation required that runs online and offline on modern hardwares. </p><p> It's full integration with the company inventory and accounting, gives you real time statistics and consolidations amongst all shops without the hassle of integrating several applications. </p> </div> </div> </div> </section> <section class="" data-snippet-id="text-image"> <div class="container"> <div class="row"> <div class="col-md-6 mt32" data-snippet-id="colmd"> <h3>Enterprise Social Network</h3> <p> Make every employee feel more connected and engaged with twitter-like features for your own company. Follow people, share best practices, 'like' top ideas, etc. </p><p> Connect with experts, follow what interests you, share documents and promote best practices with OpenERP Social application. Get work done with effective collaboration across departments, geographies and business applications. 
</p> </div> <div class="col-md-6 mt16 mb16" data-snippet-id="colmd"> <img class="img-responsive shadow" src="/website/static/src/img/text_image.png"> </div> </div> </div> </section><section class="" data-snippet-id="portfolio"> <div class="container"> <div class="row"> <div class="col-md-12 text-center mt16 mb32" data-snippet-id="colmd"> <h2>Our Porfolio</h2> <h4 class="text-muted">More than 500 successful projects</h4> </div> <div class="col-md-4" data-snippet-id="colmd"> <img class="img-thumbnail img-responsive" src="/website/static/src/img/deers.jpg"> <img class="img-thumbnail img-responsive" src="/website/static/src/img/desert.jpg"> <img class="img-thumbnail img-responsive" src="/website/static/src/img/china.jpg"> </div> <div class="col-md-4" data-snippet-id="colmd"> <img class="img-thumbnail img-responsive" src="/website/static/src/img/desert.jpg"> <img class="img-thumbnail img-responsive" src="/website/static/src/img/china.jpg"> <img class="img-thumbnail img-responsive" src="/website/static/src/img/deers.jpg"> </div> <div class="col-md-4" data-snippet-id="colmd"> <img class="img-thumbnail img-responsive" src="/website/static/src/img/landscape.jpg"> <img class="img-thumbnail img-responsive" src="/website/static/src/img/china.jpg"> <img class="img-thumbnail img-responsive" src="/website/static/src/img/desert.jpg"> </div> </div> </div> </section> </div> """ OERP_WEBSITE_HTML_2_IN = [ 'management helps your team get work done', ] OERP_WEBSITE_HTML_2_OUT = [ 'Make every employee feel more connected', 'img class="img-responsive shadow" src="/website/static/src/img/text_image.png', ] TEXT_1 = """I contact you about our meeting tomorrow. Here is the schedule I propose: 9 AM: brainstorming about our new amazing business app 9.45 AM: summary 10 AM: meeting with Ignasse to present our app Is everything ok for you ? -- MySignature""" TEXT_1_IN = ["""I contact you about our meeting tomorrow. 
Here is the schedule I propose: 9 AM: brainstorming about our new amazing business app 9.45 AM: summary 10 AM: meeting with Ignasse to present our app Is everything ok for you ?"""] TEXT_1_OUT = ["""-- MySignature"""] TEXT_2 = """Salut Raoul! Le 28 oct. 2012 à 00:02, Raoul Grosbedon a écrit : > I contact you about our meeting tomorrow. Here is the schedule I propose: (quote) Of course. This seems viable. > 2012/10/27 Bert Tartopoils : >> blahblahblah (quote)? >> >> blahblahblah (quote) >> >> Bert TARTOPOILS >> bert.tartopoils@miam.miam >> > > > -- > RaoulSignature Bert TARTOPOILS bert.tartopoils@miam.miam """ TEXT_2_IN = ["Salut Raoul!", "Of course. This seems viable."] TEXT_2_OUT = ["I contact you about our meeting tomorrow. Here is the schedule I propose: (quote)", """> 2012/10/27 Bert Tartopoils : >> blahblahblah (quote)? >> >> blahblahblah (quote) >> >> Bert TARTOPOILS >> bert.tartopoils@miam.miam >> > > > -- > RaoulSignature"""] HTML_1 = """<p>I contact you about our meeting for tomorrow. Here is the schedule I propose: (keep) 9 AM: brainstorming about our new amazing business app 9.45 AM: summary 10 AM: meeting with Ignasse to present our app Is everything ok for you ? -- MySignature</p>""" HTML_1_IN = ["""I contact you about our meeting for tomorrow. Here is the schedule I propose: (keep) 9 AM: brainstorming about our new amazing business app 9.45 AM: summary 10 AM: meeting with Ignasse to present our app Is everything ok for you ?"""] HTML_1_OUT = ["""-- MySignature"""] HTML_2 = """<div> <font><span>I contact you about our meeting for tomorrow. Here is the schedule I propose:</span></font> </div> <div> <ul> <li><span>9 AM: brainstorming about our new amazing business app</span></li> <li><span>9.45 AM: summary</span></li> <li><span>10 AM: meeting with Fabien to present our app</span></li> </ul> </div> <div> <font><span>Is everything ok for you ?</span></font> </div>""" HTML_2_IN = ["<font><span>I contact you about our meeting for tomorrow. 
Here is the schedule I propose:</span></font>", "<li><span>9 AM: brainstorming about our new amazing business app</span></li>", "<li><span>9.45 AM: summary</span></li>", "<li><span>10 AM: meeting with Fabien to present our app</span></li>", "<font><span>Is everything ok for you ?</span></font>"] HTML_2_OUT = [] HTML_3 = """<div><pre>This is an answer. Regards, XXXXXX ----- Mail original -----</pre> <pre>Hi, My CRM-related question. Regards, XXXX</pre></div>""" HTML_3_IN = ["""<div><pre>This is an answer. Regards, XXXXXX ----- Mail original -----</pre>"""] HTML_3_OUT = ["Hi,", "My CRM-related question.", "Regards,"] HTML_4 = """ <div> <div>Hi Nicholas,</div> <br> <div>I'm free now. 00447710085916.</div> <br> <div>Regards,</div> <div>Nicholas</div> <br> <span id="OLK_SRC_BODY_SECTION"> <div style="font-family:Calibri; font-size:11pt; text-align:left; color:black; BORDER-BOTTOM: medium none; BORDER-LEFT: medium none; PADDING-BOTTOM: 0in; PADDING-LEFT: 0in; PADDING-RIGHT: 0in; BORDER-TOP: #b5c4df 1pt solid; BORDER-RIGHT: medium none; PADDING-TOP: 3pt"> <span style="font-weight:bold">From: </span>OpenERP Enterprise &lt;<a href="mailto:sales@openerp.com">sales@openerp.com</a>&gt;<br><span style="font-weight:bold">Reply-To: </span>&lt;<a href="mailto:sales@openerp.com">sales@openerp.com</a>&gt;<br><span style="font-weight:bold">Date: </span>Wed, 17 Apr 2013 13:30:47 +0000<br><span style="font-weight:bold">To: </span>Microsoft Office User &lt;<a href="mailto:n.saxlund@babydino.com">n.saxlund@babydino.com</a>&gt;<br><span style="font-weight:bold">Subject: </span>Re: your OpenERP.com registration<br> </div> <br> <div> <p>Hello Nicholas Saxlund, </p> <p>I noticed you recently registered to our OpenERP Online solution. </p> <p>You indicated that you wish to use OpenERP in your own company. We would like to know more about your your business needs and requirements, and see how we can help you. When would you be available to discuss your project ? 
</p> <p>Best regards, </p> <pre><a href="http://openerp.com">http://openerp.com</a> Belgium: +32.81.81.37.00 U.S.: +1 (650) 307-6736 India: +91 (79) 40 500 100 </pre> </div> </span> </div>""" HTML_5 = """<div><pre>Hi, I have downloaded OpenERP installer 7.0 and successfully installed the postgresql server and the OpenERP. I created a database and started to install module by log in as administrator. However, I was not able to install any module due to "OpenERP Server Error" as shown in the attachement. Could you please let me know how could I fix this problem? &nbsp;Regards, Goh Sin Yih ________________________________ From: OpenERP Enterprise &lt;sales@openerp.com&gt; To: sinyih_goh@yahoo.com Sent: Friday, February 8, 2013 12:46 AM Subject: Feedback From Your OpenERP Trial Hello Goh Sin Yih, Thank you for having tested OpenERP Online. I noticed you started a trial of OpenERP Online (gsy) but you did not decide to keep using it. So, I just wanted to get in touch with you to get your feedback. Can you tell me what kind of application you were you looking for and why you didn't decide to continue with OpenERP? Thanks in advance for providing your feedback, Do not hesitate to contact me if you have any questions, Thanks, </pre>""" GMAIL_1 = """Hello,<div><br></div><div>Ok for me. I am replying directly in gmail, without signature.</div><div><br></div><div>Kind regards,</div><div><br></div><div>Demo.<br><br><div>On Thu, Nov 8, 2012 at 5:29 PM, <span>&lt;<a href="mailto:dummy@example.com">dummy@example.com</a>&gt;</span> wrote:<br><blockquote><div>I contact you about our meeting for tomorrow. 
Here is the schedule I propose:</div><div><ul><li>9 AM: brainstorming about our new amazing business app&lt;/span&gt;&lt;/li&gt;</li> <li>9.45 AM: summary</li><li>10 AM: meeting with Fabien to present our app</li></ul></div><div>Is everything ok for you ?</div> <div><p>--<br>Administrator</p></div> <div><p>Log in our portal at: <a href="http://localhost:8069#action=login&amp;db=mail_1&amp;login=demo">http://localhost:8069#action=login&amp;db=mail_1&amp;login=demo</a></p></div> </blockquote></div><br></div>""" GMAIL_1_IN = ['Ok for me. I am replying directly in gmail, without signature.'] GMAIL_1_OUT = ['Administrator', 'Log in our portal at:'] THUNDERBIRD_1 = """<div>On 11/08/2012 05:29 PM, <a href="mailto:dummy@example.com">dummy@example.com</a> wrote:<br></div> <blockquote> <div>I contact you about our meeting for tomorrow. Here is the schedule I propose:</div> <div> <ul><li>9 AM: brainstorming about our new amazing business app&lt;/span&gt;&lt;/li&gt;</li> <li>9.45 AM: summary</li> <li>10 AM: meeting with Fabien to present our app</li> </ul></div> <div>Is everything ok for you ?</div> <div> <p>--<br> Administrator</p> </div> <div> <p>Log in our portal at: <a href="http://localhost:8069#action=login&amp;db=mail_1&amp;token=rHdWcUART5PhEnJRaXjH">http://localhost:8069#action=login&amp;db=mail_1&amp;token=rHdWcUART5PhEnJRaXjH</a></p> </div> </blockquote> Ok for me. I am replying directly below your mail, using Thunderbird, with a signature.<br><br> Did you receive my email about my new laptop, by the way ?<br><br> Raoul.<br><pre>-- Raoul Grosbedonn&#233;e </pre>""" THUNDERBIRD_1_IN = ['Ok for me. I am replying directly below your mail, using Thunderbird, with a signature.'] THUNDERBIRD_1_OUT = ['I contact you about our meeting for tomorrow.', 'Raoul Grosbedon'] HOTMAIL_1 = """<div> <div dir="ltr"><br>&nbsp; I have an amazing company, i'm learning OpenERP, it is a small company yet, but plannig to grow up quickly. 
<br>&nbsp;<br>Kindest regards,<br>xxx<br> <div> <div id="SkyDrivePlaceholder"> </div> <hr id="stopSpelling"> Subject: Re: your OpenERP.com registration<br>From: xxx@xxx.xxx<br>To: xxx@xxx.xxx<br>Date: Wed, 27 Mar 2013 17:12:12 +0000 <br><br> Hello xxx, <br> I noticed you recently created an OpenERP.com account to access OpenERP Apps. <br> You indicated that you wish to use OpenERP in your own company. We would like to know more about your your business needs and requirements, and see how we can help you. When would you be available to discuss your project ?<br> Best regards,<br> <pre> <a href="http://openerp.com" target="_blank">http://openerp.com</a> Belgium: +32.81.81.37.00 U.S.: +1 (650) 307-6736 India: +91 (79) 40 500 100 </pre> </div> </div> </div>""" HOTMAIL_1_IN = ["I have an amazing company, i'm learning OpenERP, it is a small company yet, but plannig to grow up quickly."] HOTMAIL_1_OUT = ["Subject: Re: your OpenERP.com registration", " I noticed you recently created an OpenERP.com account to access OpenERP Apps.", "We would like to know more about your your business needs and requirements", "Belgium: +32.81.81.37.00"] MSOFFICE_1 = """ <div> <div class="WordSection1"> <p class="MsoNormal"> <span style="font-size:11.0pt;font-family:&quot;Calibri&quot;,&quot;sans-serif&quot;;color:#1F497D"> Our requirements are simple. Just looking to replace some spreadsheets for tracking quotes and possibly using the timecard module. We are a company of 25 engineers providing product design services to clients. </span> </p> <p></p> <p></p> <p class="MsoNormal"> <span style="font-size:11.0pt;font-family:&quot;Calibri&quot;,&quot;sans-serif&quot;;color:#1F497D"> I’ll install on a windows server and run a very limited trial to see how it works. If we adopt OpenERP we will probably move to Linux or look for a hosted SaaS option. 
</span> </p> <p></p> <p></p> <p class="MsoNormal"> <span style="font-size:11.0pt;font-family:&quot;Calibri&quot;,&quot;sans-serif&quot;;color:#1F497D"> <br> I am also evaluating Adempiere and maybe others. </span> </p> <p></p> <p></p> <p class="MsoNormal"> <span style="font-size:11.0pt;font-family:&quot;Calibri&quot;,&quot;sans-serif&quot;;color:#1F497D"> </span> </p> <p>&nbsp;</p> <p></p> <p class="MsoNormal"> <span style="font-size:11.0pt;font-family:&quot;Calibri&quot;,&quot;sans-serif&quot;;color:#1F497D"> I expect the trial will take 2-3 months as this is not a high priority for us. </span> </p> <p></p> <p></p> <p class="MsoNormal"> <span style="font-size:11.0pt;font-family:&quot;Calibri&quot;,&quot;sans-serif&quot;;color:#1F497D"> </span> </p> <p>&nbsp;</p> <p></p> <p class="MsoNormal"> <span style="font-size:11.0pt;font-family:&quot;Calibri&quot;,&quot;sans-serif&quot;;color:#1F497D"> Alan </span> </p> <p></p> <p></p> <p class="MsoNormal"> <span style="font-size:11.0pt;font-family:&quot;Calibri&quot;,&quot;sans-serif&quot;;color:#1F497D"> </span> </p> <p>&nbsp;</p> <p></p> <div> <div style="border:none;border-top:solid #B5C4DF 1.0pt;padding:3.0pt 0in 0in 0in"> <p class="MsoNormal"> <b><span style="font-size:10.0pt;font-family:&quot;Tahoma&quot;,&quot;sans-serif&quot;"> From: </span></b> <span style="font-size:10.0pt;font-family:&quot;Tahoma&quot;,&quot;sans-serif&quot;"> OpenERP Enterprise [mailto:sales@openerp.com] <br><b>Sent:</b> Monday, 11 March, 2013 14:47<br><b>To:</b> Alan Widmer<br><b>Subject:</b> Re: your OpenERP.com registration </span> </p> <p></p> <p></p> </div> </div> <p class="MsoNormal"></p> <p>&nbsp;</p> <p>Hello Alan Widmer, </p> <p></p> <p>I noticed you recently downloaded OpenERP. </p> <p></p> <p> Uou mentioned you wish to use OpenERP in your own company. Please let me more about your business needs and requirements? When will you be available to discuss about your project? 
</p> <p></p> <p>Thanks for your interest in OpenERP, </p> <p></p> <p>Feel free to contact me if you have any questions, </p> <p></p> <p>Looking forward to hear from you soon. </p> <p></p> <pre><p>&nbsp;</p></pre> <pre>--<p></p></pre> <pre>Nicolas<p></p></pre> <pre><a href="http://openerp.com">http://openerp.com</a><p></p></pre> <pre>Belgium: +32.81.81.37.00<p></p></pre> <pre>U.S.: +1 (650) 307-6736<p></p></pre> <pre>India: +91 (79) 40 500 100<p></p></pre> <pre>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<p></p></pre> </div> </div>""" MSOFFICE_1_IN = ['Our requirements are simple. Just looking to replace some spreadsheets for tracking quotes and possibly using the timecard module.'] MSOFFICE_1_OUT = ['I noticed you recently downloaded OpenERP.', 'Uou mentioned you wish to use OpenERP in your own company.', 'Belgium: +32.81.81.37.00'] MSOFFICE_2 = """ <div> <div class="WordSection1"> <p class="MsoNormal"> <span style="font-size:11.0pt;font-family:&quot;Calibri&quot;,&quot;sans-serif&quot;;color:#1F497D">Nicolas,</span></p><p></p> <p></p> <p class="MsoNormal" style="text-indent:.5in"> <span style="font-size:11.0pt;font-family:&quot;Calibri&quot;,&quot;sans-serif&quot;;color:#1F497D">We are currently investigating the possibility of moving away from our current ERP </span></p><p></p> <p></p> <p class="MsoNormal"> <span style="font-size:11.0pt;font-family:&quot;Calibri&quot;,&quot;sans-serif&quot;;color:#1F497D"> </span></p><p>&nbsp;</p> <p></p> <p class="MsoNormal"> <span style="font-size:11.0pt;font-family:&quot;Calibri&quot;,&quot;sans-serif&quot;;color:#1F497D">Thank You</span></p><p></p> <p></p> <p class="MsoNormal"> <span style="font-size:11.0pt;font-family:&quot;Calibri&quot;,&quot;sans-serif&quot;;color:#1F497D">Matt</span></p><p></p> <p></p> <p class="MsoNormal"> <span style="font-size:11.0pt;font-family:&quot;Calibri&quot;,&quot;sans-serif&quot;;color:#1F497D"> 
</span></p><p>&nbsp;</p> <p></p> <div> <p class="MsoNormal"> <span style="font-size:11.0pt;font-family:&quot;Calibri&quot;,&quot;sans-serif&quot;;color:#1F497D">Raoul Petitpoil</span></p><p></p> <p></p> <p class="MsoNormal"> <span style="font-size:11.0pt;font-family:&quot;Calibri&quot;,&quot;sans-serif&quot;;color:#1F497D">Poil Industries</span></p><p></p> <p></p> <p class="MsoNormal"> <span style="font-size:11.0pt;font-family:&quot;Calibri&quot;,&quot;sans-serif&quot;;color:#1F497D">Information Technology</span></p><p></p> <p></p> <p class="MsoNormal"> <span style="font-size:11.0pt;font-family:&quot;Calibri&quot;,&quot;sans-serif&quot;;color:#1F497D">920 Super Street</span></p><p></p> <p></p> <p class="MsoNormal"> <span style="font-size:11.0pt;font-family:&quot;Calibri&quot;,&quot;sans-serif&quot;;color:#1F497D">Sanchez, Pa 17046 USA</span></p><p></p> <p></p> <p class="MsoNormal"> <span style="font-size:11.0pt;font-family:&quot;Calibri&quot;,&quot;sans-serif&quot;;color:#1F497D">Tel: xxx.xxx</span></p><p></p> <p></p> <p class="MsoNormal"> <span style="font-size:11.0pt;font-family:&quot;Calibri&quot;,&quot;sans-serif&quot;;color:#1F497D">Fax: xxx.xxx</span></p><p></p> <p></p> <p class="MsoNormal"> <span style="font-size:11.0pt;font-family:&quot;Calibri&quot;,&quot;sans-serif&quot;;color:#1F497D">Email: </span> <a href="mailto:raoul@petitpoil.com"> <span style="font-size:11.0pt;font-family:&quot;Calibri&quot;,&quot;sans-serif&quot;;color:blue">raoul@petitpoil.com</span> </a> <span style="font-size:11.0pt;font-family:&quot;Calibri&quot;,&quot;sans-serif&quot;;color:#1F497D"> </span></p><p></p> <p></p> <p class="MsoNormal"> <span style="font-size:11.0pt;font-family:&quot;Calibri&quot;,&quot;sans-serif&quot;;color:#1F497D">www.poilindustries.com</span></p><p></p> <p></p> <p class="MsoNormal"> <span style="font-size:11.0pt;font-family:&quot;Calibri&quot;,&quot;sans-serif&quot;;color:#1F497D">www.superproducts.com</span></p><p></p> <p></p> </div> <p class="MsoNormal"> 
<span style="font-size:11.0pt;font-family:&quot;Calibri&quot;,&quot;sans-serif&quot;;color:#1F497D"> </span></p><p>&nbsp;</p> <p></p> <div> <div style="border:none;border-top:solid #B5C4DF 1.0pt;padding:3.0pt 0in 0in 0in"> <p class="MsoNormal"> <b> <span style="font-size:10.0pt;font-family:&quot;Tahoma&quot;,&quot;sans-serif&quot;">From:</span> </b> <span style="font-size:10.0pt;font-family:&quot;Tahoma&quot;,&quot;sans-serif&quot;"> OpenERP Enterprise [mailto:sales@openerp.com] <br><b>Sent:</b> Wednesday, April 17, 2013 1:31 PM<br><b>To:</b> Matt Witters<br><b>Subject:</b> Re: your OpenERP.com registration</span></p><p></p> <p></p> </div> </div> <p class="MsoNormal"></p> <p>&nbsp;</p> <p>Hello Raoul Petitpoil, </p> <p></p> <p>I noticed you recently downloaded OpenERP. </p> <p></p> <p>You indicated that you wish to use OpenERP in your own company. We would like to know more about your your business needs and requirements, and see how we can help you. When would you be available to discuss your project ? 
</p> <p></p> <p>Best regards, </p> <p></p> <pre> <p>&nbsp;</p> </pre> <pre>--<p></p></pre> <pre>Nicolas<p></p></pre> <pre> <a href="http://openerp.com">http://openerp.com</a> <p></p> </pre> <pre>Belgium: +32.81.81.37.00<p></p></pre> <pre>U.S.: +1 (650) 307-6736<p></p></pre> <pre>India: +91 (79) 40 500 100<p></p></pre> <pre>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; <p></p></pre> </div> </div>""" MSOFFICE_2_IN = ['We are currently investigating the possibility'] MSOFFICE_2_OUT = ['I noticed you recently downloaded OpenERP.', 'You indicated that you wish', 'Belgium: +32.81.81.37.00'] MSOFFICE_3 = """<div> <div class="WordSection1"> <p class="MsoNormal"> <span style="font-size:11.0pt;font-family:&quot;Calibri&quot;,&quot;sans-serif&quot;;color:#1F497D">Hi Nicolas&nbsp;!</span></p><p></p> <p></p> <p class="MsoNormal"> <span style="font-size:11.0pt;font-family:&quot;Calibri&quot;,&quot;sans-serif&quot;;color:#1F497D"> </span></p><p>&nbsp;</p> <p></p> <p class="MsoNormal"> <span lang="EN-US" style="font-size:11.0pt;font-family:&quot;Calibri&quot;,&quot;sans-serif&quot;;color:#1F497D">Yes I’d be glad to hear about your offers as we struggle every year with the planning/approving of LOA. </span></p><p></p> <p></p> <p class="MsoNormal"> <span lang="EN-US" style="font-size:11.0pt;font-family:&quot;Calibri&quot;,&quot;sans-serif&quot;;color:#1F497D">I saw your boss yesterday on tv and immediately wanted to test the interface. 
</span></p><p></p> <p></p> <p class="MsoNormal"> <span lang="EN-US" style="font-size:11.0pt;font-family:&quot;Calibri&quot;,&quot;sans-serif&quot;;color:#1F497D"> </span></p><p>&nbsp;</p> <p></p> <div> <p class="MsoNormal"> <b> <span lang="NL-BE" style="font-size:10.0pt;font-family:&quot;Trebuchet MS&quot;,&quot;sans-serif&quot;;color:gray">Bien à vous, </span></b></p><p></p><b> </b> <p></p> <p class="MsoNormal"> <b> <span lang="NL-BE" style="font-size:10.0pt;font-family:&quot;Trebuchet MS&quot;,&quot;sans-serif&quot;;color:gray">Met vriendelijke groeten, </span></b></p><p></p><b> </b> <p></p> <p class="MsoNormal"> <b> <span lang="EN-GB" style="font-size:10.0pt;font-family:&quot;Trebuchet MS&quot;,&quot;sans-serif&quot;;color:gray">Best regards,</span></b></p><p></p><b> </b> <p></p> <p class="MsoNormal"> <b> <span lang="EN-GB" style="font-size:10.0pt;font-family:&quot;Trebuchet MS&quot;,&quot;sans-serif&quot;;color:gray"> </span></b></p><p><b>&nbsp;</b></p><b> </b> <p></p> <p class="MsoNormal"> <b> <span lang="EN-GB" style="font-size:10.0pt;font-family:&quot;Trebuchet MS&quot;,&quot;sans-serif&quot;;color:gray">R. 
Petitpoil&nbsp;&nbsp;&nbsp; <br></span> </b> <span lang="EN-GB" style="font-size:10.0pt;font-family:&quot;Trebuchet MS&quot;,&quot;sans-serif&quot;;color:gray">Human Resource Manager<b><br><br>Field Resource s.a n.v.&nbsp;&nbsp;<i> <br></i></b>Hermesstraat 6A <br>1930 Zaventem</span> <span lang="EN-GB" style="font-size:8.0pt;font-family:&quot;Tahoma&quot;,&quot;sans-serif&quot;;color:gray"><br></span> <b> <span lang="FR" style="font-size:10.0pt;font-family:Wingdings;color:#1F497D">(</span> </b> <b> <span lang="FR" style="font-size:9.0pt;font-family:Wingdings;color:#1F497D"> </span> </b> <b> <span lang="EN-GB" style="font-size:8.0pt;font-family:&quot;Trebuchet MS&quot;,&quot;sans-serif&quot;;color:gray">xxx.xxx &nbsp;</span> </b> <b> <span lang="EN-GB" style="font-size:9.0pt;font-family:&quot;Trebuchet MS&quot;,&quot;sans-serif&quot;;color:gray"><br></span> </b> <b> <span lang="FR" style="font-size:10.0pt;font-family:&quot;Wingdings 2&quot;;color:#1F497D">7</span> </b> <b> <span lang="FR" style="font-size:9.0pt;font-family:&quot;Wingdings 2&quot;;color:#1F497D"> </span> </b> <b> <span lang="EN-GB" style="font-size:8.0pt;font-family:&quot;Trebuchet MS&quot;,&quot;sans-serif&quot;;color:gray">+32 2 727.05.91<br></span> </b> <span lang="EN-GB" style="font-size:24.0pt;font-family:Webdings;color:green">P</span> <span lang="EN-GB" style="font-size:8.0pt;font-family:&quot;Tahoma&quot;,&quot;sans-serif&quot;;color:green"> <b>&nbsp;&nbsp; </b></span> <b> <span lang="EN-GB" style="font-size:9.0pt;font-family:&quot;Trebuchet MS&quot;,&quot;sans-serif&quot;;color:green">Please consider the environment before printing this email.</span> </b> <span lang="EN-GB" style="font-size:11.0pt;font-family:&quot;Calibri&quot;,&quot;sans-serif&quot;;color:navy"> </span> <span lang="EN-GB" style="font-family:&quot;Calibri&quot;,&quot;sans-serif&quot;;color:navy"> </span></p><p></p> <p></p> </div> <p class="MsoNormal"> <span lang="EN-US" 
style="font-size:11.0pt;font-family:&quot;Calibri&quot;,&quot;sans-serif&quot;;color:#1F497D"> </span></p><p>&nbsp;</p> <p></p> <div> <div style="border:none;border-top:solid #B5C4DF 1.0pt;padding:3.0pt 0cm 0cm 0cm"> <p class="MsoNormal"> <b> <span lang="FR" style="font-size:10.0pt;font-family:&quot;Tahoma&quot;,&quot;sans-serif&quot;">De&nbsp;:</span> </b> <span lang="FR" style="font-size:10.0pt;font-family:&quot;Tahoma&quot;,&quot;sans-serif&quot;"> OpenERP Enterprise [mailto:sales@openerp.com] <br><b>Envoyé&nbsp;:</b> jeudi 18 avril 2013 11:31<br><b>À&nbsp;:</b> Paul Richard<br><b>Objet&nbsp;:</b> Re: your OpenERP.com registration</span></p><p></p> <p></p> </div> </div> <p class="MsoNormal"></p> <p>&nbsp;</p> <p>Hello Raoul PETITPOIL, </p> <p></p> <p>I noticed you recently registered to our OpenERP Online solution. </p> <p></p> <p>You indicated that you wish to use OpenERP in your own company. We would like to know more about your your business needs and requirements, and see how we can help you. When would you be available to discuss your project ? 
</p> <p></p> <p>Best regards, </p> <p></p> <pre> <p>&nbsp;</p> </pre> <pre>--<p></p></pre> <pre>Nicolas<p></p></pre> <pre> <a href="http://openerp.com">http://openerp.com</a> <p></p> </pre> <pre>Belgium: +32.81.81.37.00<p></p></pre> <pre>U.S.: +1 (650) 307-6736<p></p></pre> <pre>India: +91 (79) 40 500 100<p></p></pre> <pre>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; <p></p></pre> </div> </div>""" MSOFFICE_3_IN = ['I saw your boss yesterday'] MSOFFICE_3_OUT = ['I noticed you recently downloaded OpenERP.', 'You indicated that you wish', 'Belgium: +32.81.81.37.00'] # ------------------------------------------------------------ # Test cases coming from bugs # ------------------------------------------------------------ # bug: read more not apparent, strange message in read more span BUG1 = """<pre>Hi Migration Team, Paragraph 1, blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah. Paragraph 2, blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah. 
Paragraph 3, blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah. Thanks. Regards, -- Olivier Laurent Migration Manager OpenERP SA Chaussée de Namur, 40 B-1367 Gérompont Tel: +32.81.81.37.00 Web: http://www.openerp.com</pre>""" BUG_1_IN = [ 'Hi Migration Team', 'Paragraph 1' ] BUG_1_OUT = [ 'Olivier Laurent', 'Chaussée de Namur', '81.81.37.00', 'openerp.com', ] BUG2 = """ <div> <br> <div class="moz-forward-container"><br> <br> -------- Original Message -------- <table class="moz-email-headers-table" border="0" cellpadding="0" cellspacing="0"> <tbody> <tr> <th nowrap="" valign="BASELINE" align="RIGHT">Subject: </th> <td>Fwd: TR: OpenERP S.A. Payment Reminder</td> </tr> <tr> <th nowrap="" valign="BASELINE" align="RIGHT">Date: </th> <td>Wed, 16 Oct 2013 14:11:13 +0200</td> </tr> <tr> <th nowrap="" valign="BASELINE" align="RIGHT">From: </th> <td>Christine Herrmann <a class="moz-txt-link-rfc2396E" href="mailto:che@openerp.com">&lt;che@openerp.com&gt;</a></td> </tr> <tr> <th nowrap="" valign="BASELINE" align="RIGHT">To: </th> <td><a class="moz-txt-link-abbreviated" href="mailto:online@openerp.com">online@openerp.com</a></td> </tr> </tbody> </table> <br> <br> <br> <div class="moz-forward-container"><br> <br> -------- Message original -------- <table class="moz-email-headers-table" border="0" cellpadding="0" cellspacing="0"> <tbody> <tr> <th nowrap="" valign="BASELINE" align="RIGHT">Sujet: </th> <td>TR: OpenERP S.A. 
Payment Reminder</td> </tr> <tr> <th nowrap="" valign="BASELINE" align="RIGHT">Date&nbsp;: </th> <td>Wed, 16 Oct 2013 10:34:45 -0000</td> </tr> <tr> <th nowrap="" valign="BASELINE" align="RIGHT">De&nbsp;: </th> <td>Ida Siwatala <a class="moz-txt-link-rfc2396E" href="mailto:infos@inzoservices.com">&lt;infos@inzoservices.com&gt;</a></td> </tr> <tr> <th nowrap="" valign="BASELINE" align="RIGHT">Répondre à&nbsp;: </th> <td><a class="moz-txt-link-abbreviated" href="mailto:catchall@mail.odoo.com">catchall@mail.odoo.com</a></td> </tr> <tr> <th nowrap="" valign="BASELINE" align="RIGHT">Pour&nbsp;: </th> <td>Christine Herrmann (che) <a class="moz-txt-link-rfc2396E" href="mailto:che@openerp.com">&lt;che@openerp.com&gt;</a></td> </tr> </tbody> </table> <br> <br> <div> <div class="WordSection1"> <p class="MsoNormal"><span style="font-size:11.0pt;font-family:&quot;Calibri&quot;,&quot;sans-serif&quot;;color:#1F497D">Bonjour,</span></p> <p class="MsoNormal"><span style="font-size:11.0pt;font-family:&quot;Calibri&quot;,&quot;sans-serif&quot;;color:#1F497D"></span></p> <p>&nbsp;</p> <p class="MsoNormal"><span style="font-size:11.0pt;font-family:&quot;Calibri&quot;,&quot;sans-serif&quot;;color:#1F497D">Pourriez-vous me faire un retour sur ce point.</span></p> <p class="MsoNormal"><span style="font-size:11.0pt;font-family:&quot;Calibri&quot;,&quot;sans-serif&quot;;color:#1F497D"></span></p> <p>&nbsp;</p> <p class="MsoNormal"><span style="font-size:11.0pt;font-family:&quot;Calibri&quot;,&quot;sans-serif&quot;;color:#1F497D">Cordialement</span></p> <p class="MsoNormal"><span style="font-size:11.0pt;font-family:&quot;Calibri&quot;,&quot;sans-serif&quot;;color:#1F497D"></span></p> <p>&nbsp;</p> <div> <div style="border:none;border-top:solid #B5C4DF 1.0pt;padding:3.0pt 0cm 0cm 0cm"> <p class="MsoNormal"><b><span style="font-size:10.0pt;font-family:&quot;Tahoma&quot;,&quot;sans-serif&quot;">De&nbsp;:</span></b><span 
style="font-size:10.0pt;font-family:&quot;Tahoma&quot;,&quot;sans-serif&quot;"> Ida Siwatala [<a class="moz-txt-link-freetext" href="mailto:infos@inzoservices.com">mailto:infos@inzoservices.com</a>] <br> <b>Envoyé&nbsp;:</b> vendredi 4 octobre 2013 20:03<br> <b>À&nbsp;:</b> 'Followers of INZO-services-8-all-e-Maxime-Lisbonne-77176-Savigny-le-temple-France'<br> <b>Objet&nbsp;:</b> RE: OpenERP S.A. Payment Reminder</span></p> </div> </div> <p>&nbsp;</p> <p class="MsoNormal"><span style="font-size:11.0pt;font-family:&quot;Calibri&quot;,&quot;sans-serif&quot;;color:#1F497D">Bonsoir,</span></p> <p class="MsoNormal"><span style="font-size:11.0pt;font-family:&quot;Calibri&quot;,&quot;sans-serif&quot;;color:#1F497D"></span></p> <p>&nbsp;</p> <p class="MsoNormal"><span style="font-size:11.0pt;font-family:&quot;Calibri&quot;,&quot;sans-serif&quot;;color:#1F497D">Je me permets de revenir vers vous par écrit , car j’ai fait 2 appels vers votre service en exposant mon problème, mais je n’ai pas eu de retour.</span></p> <p class="MsoNormal"><span style="font-size:11.0pt;font-family:&quot;Calibri&quot;,&quot;sans-serif&quot;;color:#1F497D">Cela fait un mois que j’ai fait la souscription de votre produit, mais je me rends compte qu’il est pas adapté à ma situation ( fonctionnalité manquante et surtout je n’ai pas beaucoup de temps à passer à résoudre des bugs). 
</span></p> <p class="MsoNormal"><span style="font-size:11.0pt;font-family:&quot;Calibri&quot;,&quot;sans-serif&quot;;color:#1F497D">C’est pourquoi , j’ai demandé qu’un accord soit trouvé avec vous pour annuler le contrat (tout en vous payant le mois d’utilisation de septembre).</span></p> <p class="MsoNormal"><span style="font-size:11.0pt;font-family:&quot;Calibri&quot;,&quot;sans-serif&quot;;color:#1F497D"></span></p> <p>&nbsp;</p> <p class="MsoNormal"><span style="font-size:11.0pt;font-family:&quot;Calibri&quot;,&quot;sans-serif&quot;;color:#1F497D">Pourriez-vous me faire un retour sur ce point.</span></p> <p class="MsoNormal"><span style="font-size:11.0pt;font-family:&quot;Calibri&quot;,&quot;sans-serif&quot;;color:#1F497D"></span></p> <p>&nbsp;</p> <p class="MsoNormal"><span style="font-size:11.0pt;font-family:&quot;Calibri&quot;,&quot;sans-serif&quot;;color:#1F497D">Cordialement,</span></p> <p class="MsoNormal"><span style="font-size:11.0pt;font-family:&quot;Calibri&quot;,&quot;sans-serif&quot;;color:#1F497D"></span></p> <p>&nbsp;</p> <p class="MsoNormal"><span style="font-size:11.0pt;font-family:&quot;Calibri&quot;,&quot;sans-serif&quot;;color:#1F497D">Ida Siwatala</span></p> <p class="MsoNormal"><span style="font-size:11.0pt;font-family:&quot;Calibri&quot;,&quot;sans-serif&quot;;color:#1F497D"></span></p> <p>&nbsp;</p> <p class="MsoNormal"><b><span style="font-size:10.0pt;font-family:&quot;Tahoma&quot;,&quot;sans-serif&quot;">De&nbsp;:</span></b><span style="font-size:10.0pt;font-family:&quot;Tahoma&quot;,&quot;sans-serif&quot;"> <a href="mailto:che@openerp.com">che@openerp.com</a> [<a href="mailto:che@openerp.com">mailto:che@openerp.com</a>] <br> <b>Envoyé&nbsp;:</b> vendredi 4 octobre 2013 17:41<br> <b>À&nbsp;:</b> <a href="mailto:infos@inzoservices.com">infos@inzoservices.com</a><br> <b>Objet&nbsp;:</b> OpenERP S.A. 
Payment Reminder</span></p> <p>&nbsp;</p> <div> <p style="background:white"><span style="font-size:9.0pt;font-family:&quot;Arial&quot;,&quot;sans-serif&quot;;color:#222222">Dear INZO services,</span></p> <p style="background:white"><span style="font-size:9.0pt;font-family:&quot;Arial&quot;,&quot;sans-serif&quot;;color:#222222">Exception made if there was a mistake of ours, it seems that the following amount stays unpaid. Please, take appropriate measures in order to carry out this payment in the next 8 days. </span></p> <p class="MsoNormal" style="background:white"><span style="font-size:9.0pt;font-family:&quot;Arial&quot;,&quot;sans-serif&quot;;color:#222222"></span></p> <p>&nbsp;</p> <table class="MsoNormalTable" style="width:100.0%;border:outset 1.5pt" width="100%" border="1" cellpadding="0"> <tbody> <tr> <td style="padding:.75pt .75pt .75pt .75pt"> <p class="MsoNormal">Date de facturation</p> </td> <td style="padding:.75pt .75pt .75pt .75pt"> <p class="MsoNormal">Description</p> </td> <td style="padding:.75pt .75pt .75pt .75pt"> <p class="MsoNormal">Reference</p> </td> <td style="padding:.75pt .75pt .75pt .75pt"> <p class="MsoNormal">Due Date</p> </td> <td style="padding:.75pt .75pt .75pt .75pt"> <p class="MsoNormal">Amount (€)</p> </td> <td style="padding:.75pt .75pt .75pt .75pt"> <p class="MsoNormal">Lit.</p> </td> </tr> <tr> <td style="padding:.75pt .75pt .75pt .75pt"> <p class="MsoNormal"><b>2013-09-24</b></p> </td> <td style="padding:.75pt .75pt .75pt .75pt"> <p class="MsoNormal"><b>2013/1121</b></p> </td> <td style="padding:.75pt .75pt .75pt .75pt"> <p class="MsoNormal"><b>Enterprise - Inzo Services - Juillet 2013</b></p> </td> <td style="padding:.75pt .75pt .75pt .75pt"> <p class="MsoNormal"><b>2013-09-24</b></p> </td> <td style="padding:.75pt .75pt .75pt .75pt"> <p class="MsoNormal"><b>420.0</b></p> </td> <td style="padding:.75pt .75pt .75pt .75pt"><br> </td> </tr> <tr> <td style="padding:.75pt .75pt .75pt .75pt"><br> </td> <td 
style="border:none;padding:.75pt .75pt .75pt .75pt"><br> </td> <td style="border:none;padding:.75pt .75pt .75pt .75pt"><br> </td> <td style="border:none;padding:.75pt .75pt .75pt .75pt"><br> </td> <td style="border:none;padding:.75pt .75pt .75pt .75pt"><br> </td> <td style="border:none;padding:.75pt .75pt .75pt .75pt"><br> </td> </tr> </tbody> </table> <p class="MsoNormal" style="text-align:center;background:white" align="center"><span style="font-size:9.0pt;font-family:&quot;Arial&quot;,&quot;sans-serif&quot;;color:#222222">Amount due : 420.00 € </span></p> <p style="background:white"><span style="font-size:9.0pt;font-family:&quot;Arial&quot;,&quot;sans-serif&quot;;color:#222222">Would your payment have been carried out after this mail was sent, please ignore this message. Do not hesitate to contact our accounting department. </span></p> <p class="MsoNormal" style="background:white"><span style="font-size:9.0pt;font-family:&quot;Arial&quot;,&quot;sans-serif&quot;;color:#222222"><br> Best Regards, <br> Aurore Lesage <br> OpenERP<br> Chaussée de Namur, 40 <br> B-1367 Grand Rosières <br> Tel: +32.81.81.37.00 - Fax: +32.81.73.35.01 <br> E-mail : <a href="mailto:ale@openerp.com">ale@openerp.com</a> <br> Web: <a href="http://www.openerp.com">http://www.openerp.com</a></span></p> </div> </div> </div> --<br> INZO services <small>Sent by <a style="color:inherit" href="http://www.openerp.com">OpenERP S.A.</a> using <a style="color:inherit" href="https://www.openerp.com/">OpenERP</a>.</small> <small>Access your messages and documents <a style="color:inherit" href="https://accounts.openerp.com?db=openerp#action=mail.action_mail_redirect&amp;login=che&amp;message_id=5750830">in OpenERP</a></small> <br> <pre class="moz-signature" cols="72">-- Christine Herrmann OpenERP Chaussée de Namur, 40 B-1367 Grand Rosières Tel: +32.81.81.37.00 - Fax: +32.81.73.35.01 Web: <a class="moz-txt-link-freetext" href="http://www.openerp.com">http://www.openerp.com</a> </pre> <br> </div> <br> <br> 
</div> <br> </div>""" BUG_2_IN = [ 'read more', '...', ] BUG_2_OUT = [ 'Fwd: TR: OpenERP S.A' 'fait un mois' ]
unknown
codeparrot/codeparrot-clean
import os
import sys
import codecs
from os import path

from docutils import nodes
from docutils.parsers.rst import Directive, directives


class IncludeCode(Directive):
    """
    Include a code example from a file with sections delimited with
    special comments.

    The target may be given as ``path/to/file#section``; the ``#`` fragment
    selects a single marked section.

    Options:

    ``section``
        Section name to include (same effect as the ``#`` fragment).
    ``comment``
        Comment leader used in the source file (default ``//``).
    ``marker``
        Section marker string (default: comment leader + ``#``).
    ``include`` / ``exclude``
        Comma-separated lists of section names to keep / drop.
    ``hideexcludes``
        Drop excluded sections silently instead of leaving a placeholder
        comment.
    ``linenos``
        Render line numbers.
    ``language``
        Highlight language for the literal block.
    ``encoding``
        Encoding of the included file (defaults to the project's
        ``source_encoding``).
    ``prepend`` / ``append``
        Extra lines placed around the snippet.
    """

    has_content = False
    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = False
    option_spec = {
        'section': directives.unchanged_required,
        'comment': directives.unchanged_required,
        'marker': directives.unchanged_required,
        'include': directives.unchanged_required,
        'exclude': directives.unchanged_required,
        'hideexcludes': directives.flag,
        'linenos': directives.flag,
        'language': directives.unchanged_required,
        'encoding': directives.encoding,
        'prepend': directives.unchanged_required,
        'append': directives.unchanged_required,
    }

    def run(self):
        """Read the referenced file and return a ``literal_block`` node."""
        document = self.state.document
        arg0 = self.arguments[0]
        # "file#section" -> the fragment selects a single section.
        (filename, sep, section) = arg0.partition('#')
        if not document.settings.file_insertion_enabled:
            return [document.reporter.warning('File insertion disabled',
                                              line=self.lineno)]
        env = document.settings.env
        if filename.startswith('/') or filename.startswith(os.sep):
            rel_fn = filename[1:]
        else:
            docdir = path.dirname(env.doc2path(env.docname, base=None))
            rel_fn = path.join(docdir, filename)
        try:
            fn = path.join(env.srcdir, rel_fn)
        except UnicodeDecodeError:
            # the source directory is a bytestring with non-ASCII characters;
            # let's try to encode the rel_fn in the file system encoding
            # (bug fix: ``sys`` was used here without ever being imported)
            rel_fn = rel_fn.encode(sys.getfilesystemencoding())
            fn = path.join(env.srcdir, rel_fn)

        encoding = self.options.get('encoding', env.config.source_encoding)
        codec_info = codecs.lookup(encoding)
        try:
            f = codecs.StreamReaderWriter(open(fn, 'U'), codec_info[2],
                                          codec_info[3], 'strict')
            lines = f.readlines()
            f.close()
        except (IOError, OSError):
            return [document.reporter.warning(
                'Include file %r not found or reading it failed' % filename,
                line=self.lineno)]
        except UnicodeError:
            return [document.reporter.warning(
                'Encoding %r used for reading included file %r seems to '
                'be wrong, try giving an :encoding: option'
                % (encoding, filename))]

        lines = self._filter_sections(lines, section)
        lines = self._dedent(lines)

        prepend = self.options.get('prepend')
        append = self.options.get('append')
        if prepend:
            lines.insert(0, prepend + '\n')
        if append:
            lines.append(append + '\n')

        text = ''.join(lines)
        retnode = nodes.literal_block(text, text, source=fn)
        retnode.line = 1
        retnode.attributes['line_number'] = self.lineno
        language = self.options.get('language')
        if language:
            retnode['language'] = language
        if 'linenos' in self.options:
            retnode['linenos'] = True
        document.settings.env.note_dependency(rel_fn)
        return [retnode]

    def _filter_sections(self, lines, section):
        """Apply section/include/exclude markers; return the kept lines."""
        comment = self.options.get('comment', '//')
        marker = self.options.get('marker', comment + '#')
        lenm = len(marker)
        if not section:
            section = self.options.get('section')
        include_sections = self.options.get('include', '')
        exclude_sections = self.options.get('exclude', '')
        include = set(include_sections.split(',')) if include_sections else set()
        exclude = set(exclude_sections.split(',')) if exclude_sections else set()
        hideexcludes = 'hideexcludes' in self.options
        if section:
            include |= set([section])
        within = set()      # section names currently open
        res = []
        excluding = False
        for line in lines:
            index = line.find(marker)
            if index >= 0:
                section_name = line[index + lenm:].strip()
                if section_name in within:
                    # Second occurrence of a marker closes the section.
                    within ^= set([section_name])
                    if excluding and not (exclude & within):
                        excluding = False
                else:
                    within |= set([section_name])
                    if not excluding and (exclude & within):
                        excluding = True
                        if not hideexcludes:
                            # Leave a visible "... " placeholder comment.
                            res.append(' ' * index + comment + ' '
                                       + section_name.replace('-', ' ') + ' ...\n')
            elif not (exclude & within) and (not include or (include & within)):
                res.append(line)
        return res

    def _dedent(self, lines):
        """Shift the snippet left by the common leading-space count."""
        def countwhile(predicate, iterable):
            # Count leading elements satisfying ``predicate``.
            count = 0
            for x in iterable:
                if predicate(x):
                    count += 1
                else:
                    break
            # bug fix: the original implicitly returned None when every
            # element matched the predicate
            return count

        # bug fix: the original used filter()/map(), whose lazy Python 3
        # results break the truthiness test, min(), and list mutation below.
        nonempty = [l for l in lines if l.strip()]
        tabcounts = [countwhile(lambda c: c == ' ', l) for l in nonempty]
        tabshift = min(tabcounts) if tabcounts else 0
        if tabshift > 0:
            lines = [l[tabshift:] if len(l) > tabshift else l for l in lines]
        return lines


def setup(app):
    """Register the ``includecode`` directive (requires Sphinx >= 1.0)."""
    app.require_sphinx('1.0')
    app.add_directive('includecode', IncludeCode)
unknown
codeparrot/codeparrot-clean
package client

import (
	"context"
	"fmt"
	"io"
	"net/url"
	"time"

	"github.com/moby/moby/client/internal/timestamp"
)

// ContainerLogsOptions holds parameters to filter logs with.
type ContainerLogsOptions struct {
	ShowStdout bool
	ShowStderr bool
	Since      string
	Until      string
	Timestamps bool
	Follow     bool
	Tail       string
	Details    bool
}

// ContainerLogsResult is the result of a container logs operation.
type ContainerLogsResult interface {
	io.ReadCloser
}

// ContainerLogs returns the logs generated by a container as an
// [io.ReadCloser]. Closing the stream is the caller's responsibility; the
// underlying reader is also closed automatically when ctx is canceled.
//
// The response stream comes in one of two shapes:
//
//   - TTY containers produce a single raw stream (stdout only), copied
//     straight from the container with no framing.
//   - Non-TTY containers multiplex stdout and stderr into one stream using
//     the format defined by the [stdcopy] package:
//
//     [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}[]byte{OUTPUT}
//
//     STREAM_TYPE is 1 for [Stdout] and 2 for [Stderr] (see
//     [stdcopy.StdType]); SIZE1..SIZE4 form a big-endian uint32 giving the
//     length of OUTPUT. Use [stdcopy.StdCopy] to demultiplex.
//
// [stdcopy]: https://pkg.go.dev/github.com/moby/moby/api/pkg/stdcopy
// [stdcopy.StdCopy]: https://pkg.go.dev/github.com/moby/moby/api/pkg/stdcopy#StdCopy
// [stdcopy.StdType]: https://pkg.go.dev/github.com/moby/moby/api/pkg/stdcopy#StdType
// [Stdout]: https://pkg.go.dev/github.com/moby/moby/api/pkg/stdcopy#Stdout
// [Stderr]: https://pkg.go.dev/github.com/moby/moby/api/pkg/stdcopy#Stderr
func (cli *Client) ContainerLogs(ctx context.Context, containerID string, options ContainerLogsOptions) (ContainerLogsResult, error) {
	containerID, err := trimID("container", containerID)
	if err != nil {
		return nil, err
	}

	q := url.Values{}

	// Every boolean option maps to "1" when enabled; url.Values.Encode
	// sorts keys, so the order of these calls is irrelevant.
	setFlag := func(name string, enabled bool) {
		if enabled {
			q.Set(name, "1")
		}
	}
	setFlag("stdout", options.ShowStdout)
	setFlag("stderr", options.ShowStderr)
	setFlag("timestamps", options.Timestamps)
	setFlag("details", options.Details)
	setFlag("follow", options.Follow)

	if options.Since != "" {
		ts, tsErr := timestamp.GetTimestamp(options.Since, time.Now())
		if tsErr != nil {
			return nil, fmt.Errorf(`invalid value for "since": %w`, tsErr)
		}
		q.Set("since", ts)
	}
	if options.Until != "" {
		ts, tsErr := timestamp.GetTimestamp(options.Until, time.Now())
		if tsErr != nil {
			return nil, fmt.Errorf(`invalid value for "until": %w`, tsErr)
		}
		q.Set("until", ts)
	}
	q.Set("tail", options.Tail)

	resp, err := cli.get(ctx, "/containers/"+containerID+"/logs", q, nil)
	if err != nil {
		return nil, err
	}
	return &containerLogsResult{
		ReadCloser: newCancelReadCloser(ctx, resp.Body),
	}, nil
}

type containerLogsResult struct {
	io.ReadCloser
}

var (
	_ io.ReadCloser       = (*containerLogsResult)(nil)
	_ ContainerLogsResult = (*containerLogsResult)(nil)
)
go
github
https://github.com/moby/moby
client/container_logs.go
# -*- coding: utf-8 -*- import datetime from django.conf import settings from django.contrib.auth.models import User from django.db import models from picklefield.fields import PickledObjectField import hashlib, random, sys, os, time VERIFIER_EXPIRE_DAYS = getattr(settings, 'VERIFIER_EXPIRE_DAYS', 3) __all__ = ['Nonce', 'Association', 'UserAssociation', 'UserPasswordQueueManager', 'UserPasswordQueue', 'UserEmailVerifier'] class Nonce(models.Model): """ openid nonce """ server_url = models.CharField(max_length=255) timestamp = models.IntegerField() salt = models.CharField(max_length=40) def __unicode__(self): return u"Nonce: %s" % self.id class Association(models.Model): """ association openid url and lifetime """ server_url = models.TextField(max_length=2047) handle = models.CharField(max_length=255) secret = models.TextField(max_length=255) # Stored base64 encoded issued = models.IntegerField() lifetime = models.IntegerField() assoc_type = models.TextField(max_length=64) def __unicode__(self): return u"Association: %s, %s" % (self.server_url, self.handle) class UserAssociation(models.Model): """ model to manage association between openid and user """ #todo: rename this field so that it sounds good for other methods #for exaple, for password provider this will hold password openid_url = models.CharField(blank=False, max_length=255) user = models.ForeignKey(User) #in the future this must be turned into an #association with a Provider record #to hold things like login badge, etc provider_name = models.CharField(max_length=64, default='unknown') last_used_timestamp = models.DateTimeField(null=True) class Meta(object): unique_together = ( ('user','provider_name'), ('openid_url', 'provider_name') ) def __unicode__(self): return "Openid %s with user %s" % (self.openid_url, self.user) class UserPasswordQueueManager(models.Manager): """ manager for UserPasswordQueue object """ def get_new_confirm_key(self): "Returns key that isn't being used." 
# The random module is seeded when this Apache child is created. # Use SECRET_KEY as added salt. while 1: confirm_key = hashlib.md5("%s%s%s%s" % ( random.randint(0, sys.maxint - 1), os.getpid(), time.time(), settings.SECRET_KEY)).hexdigest() try: self.get(confirm_key=confirm_key) except self.model.DoesNotExist: break return confirm_key class UserPasswordQueue(models.Model): """ model for new password queue. """ user = models.ForeignKey(User, unique=True) new_password = models.CharField(max_length=30) confirm_key = models.CharField(max_length=40) objects = UserPasswordQueueManager() def __unicode__(self): return self.user.username class UserEmailVerifier(models.Model): '''Model that stores the required values to verify an email address''' key = models.CharField(max_length=255, unique=True, primary_key=True) value = PickledObjectField() verified = models.BooleanField(default=False) expires_on = models.DateTimeField(blank=True) def save(self, *args, **kwargs): if not self.expires_on: self.expires_on = datetime.datetime.now() + \ datetime.timedelta(VERIFIER_EXPIRE_DAYS) super(UserEmailVerifier, self).save(*args, **kwargs) def has_expired(self): now = datetime.datetime.now() return now > self.expires_on def __unicode__(self): return self.key
unknown
codeparrot/codeparrot-clean
from django.contrib.auth.hashers import make_password from django.contrib.auth.templatetags.auth import render_password_as_hash from django.test import SimpleTestCase, override_settings class RenderPasswordAsHashTests(SimpleTestCase): @override_settings( PASSWORD_HASHERS=["django.contrib.auth.hashers.PBKDF2PasswordHasher"] ) def test_valid_password(self): value = ( "pbkdf2_sha256$100000$a6Pucb1qSFcD$WmCkn9Hqidj48NVe5x0FEM6A9YiOqQcl/83m2Z5u" "dm0=" ) hashed_html = ( "<p><strong>algorithm</strong>: <bdi>pbkdf2_sha256</bdi> " "<strong>iterations</strong>: <bdi>100000</bdi> " "<strong>salt</strong>: <bdi>a6Pucb******</bdi> " "<strong>hash</strong>: <bdi>WmCkn9**************************************" "</bdi></p>" ) self.assertEqual(render_password_as_hash(value), hashed_html) def test_invalid_password(self): expected = ( "<p><strong>Invalid password format or unknown hashing algorithm.</strong>" "</p>" ) for value in ["pbkdf2_sh", "md5$password", "invalid", "testhash$password"]: with self.subTest(value=value): self.assertEqual(render_password_as_hash(value), expected) def test_no_password(self): expected = "<p><strong>No password set.</strong></p>" for value in ["", None, make_password(None)]: with self.subTest(value=value): self.assertEqual(render_password_as_hash(value), expected)
python
github
https://github.com/django/django
tests/auth_tests/test_templatetags.py
//===-- Import.h - Representation of imports --------------------*- C++ -*-===// // // This source file is part of the Swift.org open source project // // Copyright (c) 2014 - 2020 Apple Inc. and the Swift project authors // Licensed under Apache License v2.0 with Runtime Library Exception // // See https://swift.org/LICENSE.txt for license information // See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors // //===----------------------------------------------------------------------===// /// /// \file /// This file contains types used to represent information about imports /// throughout the AST. /// //===----------------------------------------------------------------------===// #ifndef SWIFT_IMPORT_H #define SWIFT_IMPORT_H #include "swift/AST/AttrKind.h" #include "swift/AST/Identifier.h" #include "swift/Basic/Located.h" #include "swift/Basic/OptionSet.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/DenseMapInfo.h" #include "llvm/ADT/PointerIntPair.h" #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringRef.h" #include "llvm/Support/raw_ostream.h" #include <algorithm> #include <optional> namespace swift { class ASTContext; class ModuleDecl; class ImportDecl; // MARK: - Fundamental import enums /// Describes what kind of name is being imported. /// /// If the enumerators here are changed, make sure to update all diagnostics /// using ImportKind as a select index. enum class ImportKind : uint8_t { Module = 0, Type, Struct, Class, Enum, Protocol, Var, Func }; inline bool isScopedImportKind(ImportKind importKind) { return importKind != ImportKind::Module; } /// Possible attributes for imports in source files. enum class ImportFlags { /// The imported module is exposed to anyone who imports the parent module. Exported = 0x1, /// This source file has access to testable declarations in the imported /// module. 
Testable = 0x2, /// This source file has access to private declarations in the imported /// module. PrivateImport = 0x4, /// The imported module is an implementation detail of this file and should /// not be required to be present if the main module is ever imported /// elsewhere. /// /// Mutually exclusive with Exported. ImplementationOnly = 0x8, /// The module is imported to have access to named SPIs which is an /// implementation detail of this file. SPIAccessControl = 0x10, /// The module is imported assuming that the module itself predates /// concurrency. Preconcurrency = 0x20, /// The module's symbols are linked weakly. WeakLinked = 0x40, /// Used for DenseMap. Reserved = 0x80, /// The imported module can only be referenced from SPI decls, or /// implementation details. SPIOnly = 0x100 }; /// \see ImportFlags using ImportOptions = OptionSet<ImportFlags>; void simple_display(llvm::raw_ostream &out, ImportOptions options); ImportOptions getImportOptions(ImportDecl *ID); // MARK: - Import Paths namespace detail { using ImportPathElement = Located<Identifier>; using ImportPathRaw = llvm::ArrayRef<ImportPathElement>; template<typename Subclass> class ImportPathBase { public: using Element = ImportPathElement; using Raw = ImportPathRaw; protected: Raw raw; ImportPathBase(Raw raw) : raw(raw) { } public: const Raw &getRaw() const { return raw; } Raw::iterator begin() const { return raw.begin(); } Raw::iterator end() const { return raw.end(); } const Element &operator[](size_t i) const { return raw[i]; } bool empty() const { return raw.empty(); } size_t size() const { return raw.size(); } const Element &front() const { return raw.front(); } const Element &back() const { return raw.back(); } /// True if \c this and \c other are precisely equal, including SourceLocs. bool operator==(const Subclass &other) const { return raw == other.raw; } /// True if \c this and \c other contain the same identifiers in the same /// order, ignoring SourceLocs. 
bool isSameAs(const Subclass &other) const { return size() == other.size() && std::equal(this->begin(), this->end(), other.begin(), [](const Element &l, const Element &r) -> bool { return l.Item == r.Item; } ); } Subclass getTopLevelPath() const { assert(size() >= 1 && "nothing to take"); return Subclass(raw.take_front()); } Subclass getParentPath() const { assert(size() >= 0 && "nothing to take"); return Subclass(raw.drop_back()); } SourceRange getSourceRange() const { if (empty()) return SourceRange(); return SourceRange(raw.front().Loc, raw.back().Loc); } void print(llvm::raw_ostream &os) const { llvm::interleave(*this, [&](Element elem) { os << elem.Item.str(); }, [&]() { os << "."; }); } void getString(SmallVectorImpl<char> &modulePathStr) const { llvm::raw_svector_ostream os(modulePathStr); print(os); } }; // These shims avoid circularity between ASTContext.h and Import.h. ImportPathRaw ImportPathBuilder_copyToImpl(ASTContext &ctx, ImportPathRaw raw); Identifier ImportPathBuilder_getIdentifierImpl(ASTContext &ctx, StringRef string); template<typename Subclass> class ImportPathBuilder { using Scratch = llvm::SmallVector<ImportPathElement, 4>; Scratch scratch; public: using value_type = Scratch::value_type; using reference = Scratch::reference; using iterator = Scratch::iterator; using const_iterator = Scratch::const_iterator; using difference_type = Scratch::difference_type; using size_type = Scratch::size_type; Subclass get() const { return Subclass(scratch); } Subclass copyTo(ASTContext &ctx) const { return Subclass(ImportPathBuilder_copyToImpl(ctx, scratch)); } ImportPathBuilder() : scratch() { } ImportPathBuilder(const ImportPathElement &elem) : scratch() { scratch = { elem }; } ImportPathBuilder(Identifier name, SourceLoc loc = SourceLoc()) : ImportPathBuilder(ImportPathElement(name, loc)) { } template<typename Iterator> ImportPathBuilder(Iterator begin, Iterator end) : scratch(begin, end) { } template<typename Range> ImportPathBuilder(Range collection) : 
scratch(collection.begin(), collection.end()) { } /// Parses \p text into elements separated by \p separator, with identifiers /// from \p ctx and invalid SourceLocs. /// /// \warning This is not very robust; for instance, it doesn't check the /// validity of the identifiers. ImportPathBuilder(ASTContext &ctx, StringRef text, char separator) : scratch() { while (!text.empty()) { StringRef next; std::tie(next, text) = text.split(separator); push_back(ImportPathBuilder_getIdentifierImpl(ctx, next)); } } /// Parses \p text into elements separated by \p separator, with identifiers /// from \p ctx starting at \p loc. /// /// \warning This is not very robust; for instance, it doesn't check the /// validity of the identifiers. ImportPathBuilder(ASTContext &ctx, StringRef text, char separator, SourceLoc loc) : scratch() { while (!text.empty()) { StringRef next; std::tie(next, text) = text.split(separator); push_back({ImportPathBuilder_getIdentifierImpl(ctx, next), loc}); loc = loc.getAdvancedLocOrInvalid(next.size() + 1); } } void push_back(const ImportPathElement &elem) { scratch.push_back(elem); } void push_back(Identifier name, SourceLoc loc = SourceLoc()) { scratch.push_back({ name, loc }); } void pop_back() { scratch.pop_back(); } bool empty() const { return scratch.empty(); } size_t size() const { return scratch.size(); } llvm::SmallVector<ImportPathElement, 4>::iterator begin() { return scratch.begin(); } llvm::SmallVector<ImportPathElement, 4>::iterator end() { return scratch.end(); } const ImportPathElement &front() const { return scratch.front(); } ImportPathElement &front() { return scratch.front(); } const ImportPathElement &back() const { return scratch.back(); } ImportPathElement &back() { return scratch.back(); } template<typename Iterator> void append(Iterator begin, Iterator end) { scratch.append(begin, end); } template<typename Range> void append(Range collection) { append(collection.begin(), collection.end()); } }; } /// @name ImportPathBase Comparison 
Operators /// @{ template <typename Subclass> inline bool operator<(const detail::ImportPathBase<Subclass> &LHS, const detail::ImportPathBase<Subclass> &RHS) { using Element = typename detail::ImportPathBase<Subclass>::Element; auto Comparator = [](const Element &l, const Element &r) { return l.Item.compare(r.Item) < 0; }; return std::lexicographical_compare(LHS.begin(), LHS.end(), RHS.begin(), RHS.end(), Comparator); } /// @} /// An undifferentiated series of dotted identifiers in an \c import statement, /// like \c Foo.Bar. Each identifier is packaged with its corresponding source /// location. /// /// The first element of an \c ImportPath is always a top-level module name. The /// remaining elements could specify a scope (naming a declaration in the /// module) or a chain of submodule names. \c ImportPath does not differentiate /// between these cases; its \c getModule() and \c getAccess() methods take an /// \c ImportKind parameter to decide how to divvy up these identifiers. /// /// \c ImportPath is only used when analyzing the parsed representation of code. /// Most code should use \c ImportPath::Module or \c ImportPath::Access, which /// have semantic meaning. /// /// \c ImportPath is essentially a wrapper around \c ArrayRef and does not own /// its elements, so something else needs to manage their lifetime. /// \c ImportDecl owns the memory backing \c ImportDecl::getImportPath(). class ImportPath : public detail::ImportPathBase<ImportPath> { public: /// A single dotted name from an \c ImportPath, \c ImportPath::Module, or /// \c ImportPath::Access, with its source location. using Element = detail::ImportPathBase<ImportPath>::Element; /// The backing type for \c ImportPath, \c ImportPath::Module, and /// \c ImportPath::Access; namely, an \c ArrayRef of \c ImportPath::Elements. using Raw = detail::ImportPathBase<ImportPath>::Raw; /// A helper type which encapsulates a temporary vector and can produce an /// import path from it. 
In addition to the obvious use in a temporary /// variable, this type can be used mid-expression to produce an import path /// that is valid until the end of the expression. using Builder = detail::ImportPathBuilder<ImportPath>; /// Represents an access path--the portion of an \c ImportPath which describes /// the name of a declaration to scope the import to. /// /// \c ImportPath::Access is used in scoped imports to designate a specific /// declaration inside the module. The import will only* cover this /// declaration, and will import it with a higher "priority" than usual, so /// name lookup will prefer it over identically-named declarations visible /// through other imports. /// /// (* Not actually only--e.g. extensions will be imported too. The primary /// use case for scoped imports is actually to resolve name conflicts, not to /// reduce the set of visible declarations.) /// /// When \c ImportPath::Access is empty, this means the import covers all /// declarations in the module. /// /// Although in theory Swift could support scoped imports of nested /// declarations, in practice it currently only supports scoped imports of /// top-level declarations. Reflecting this, \c ImportPath::Access is backed /// by an \c ArrayRef, but it asserts that the access path has zero or one /// elements. /// /// \c ImportPath::Access is essentially a wrapper around \c ArrayRef and does /// not own its elements, so something else needs to manage their lifetime. /// \c ImportDecl owns the memory backing \c ImportDecl::getAccessPath(). class Access : public detail::ImportPathBase<Access> { public: /// A helper type which encapsulates a temporary vector and can produce a /// scope path from it. In addition to the obvious use in a temporary /// variable, this type can be used mid-expression to produce a scope path /// that is valid until the end of the expression. 
using Builder = detail::ImportPathBuilder<Access>; Access(ImportPath::Raw raw) : ImportPathBase(raw) { assert(size() <= 1 && "nested scoped imports are not supported"); } Access() : ImportPathBase({}) { } /// Returns \c true if the scope of this import includes \c name. An empty /// scope matches all names. bool matches(DeclName name) const { return empty() || DeclName(front().Item).matchesRef(name); } }; /// Represents a module path--the portion of an \c ImportPath which describes /// the name of the module being imported, possibly including submodules. /// /// \c ImportPath::Module contains one or more identifiers. The first /// identifier names a top-level module. The second and subsequent /// identifiers, if present, chain together to name a specific submodule to /// import. (Although Swift modules cannot currently contain submodules, Swift /// can import Clang submodules.) /// /// \c ImportPath::Module is essentially a wrapper around \c ArrayRef and /// does not own its elements, so something else needs to manage their /// lifetime. \c ImportDecl owns the memory backing /// \c ImportDecl::getModulePath(). class Module : public detail::ImportPathBase<Module> { public: /// A helper type which encapsulates a temporary vector and can produce a /// module path from it. In addition to the obvious use in a temporary /// variable, this type can be used mid-expression to produce a module path /// that is valid until the end of the expression. using Builder = detail::ImportPathBuilder<Module>; Module(ImportPath::Raw raw) : ImportPathBase(raw) { assert(size() >= 1 && "must have a top-level module"); } // Note: This type does not have a constructor which just takes an // `Identifier` because it would not be able to create a temporary // `ImportPath::Element` with a long enough lifetime to return. Use // `ImportPath::Module::Builder` to create a temporary module path. 
bool hasSubmodule() const { return size() != 1; } ImportPath::Raw getSubmodulePath() const { return getRaw().drop_front(); } }; ImportPath(Raw raw) : ImportPathBase(raw) { assert(raw.size() >= 1 && "ImportPath must contain a module name"); } /// Extracts the portion of the \c ImportPath which represents a module name, /// including submodules if appropriate. Module getModulePath(bool isScoped) const { if (isScoped) return Module(getRaw().drop_back()); return Module(getRaw()); } /// Extracts the portion of the \c ImportPath which represents a scope for the /// import. Access getAccessPath(bool isScoped) const { if (isScoped) { assert(size() >= 2 && "scoped ImportPath must contain a decl name"); return Access(getRaw().take_back()); } return Access(); } /// Extracts the portion of the \c ImportPath which represents a module name, /// including submodules, assuming the \c ImportDecl has the indicated /// \c importKind. Module getModulePath(ImportKind importKind) const { return getModulePath(isScopedImportKind(importKind)); } /// Extracts the portion of the \c ImportPath which represents a scope for the /// import, assuming the \c ImportDecl has the indicated \c importKind. Access getAccessPath(ImportKind importKind) const { return getAccessPath(isScopedImportKind(importKind)); } private: struct UnsafePrivateConstructorTag {}; // Doesn't require a module name like the public constructor. // Only used for getEmptyKey() and getTombstoneKey(). ImportPath(Raw raw, UnsafePrivateConstructorTag tag) : ImportPathBase(raw) {} public: static ImportPath getEmptyKey() { return swift::ImportPath(llvm::DenseMapInfo<Raw>::getEmptyKey(), UnsafePrivateConstructorTag{}); } static ImportPath getTombstoneKey() { return swift::ImportPath(llvm::DenseMapInfo<Raw>::getTombstoneKey(), UnsafePrivateConstructorTag{}); } }; // MARK: - Abstractions of imports /// Convenience struct to keep track of an import path and whether or not it /// is scoped. 
class UnloadedImportedModule { // This is basically an ArrayRef with a bit stolen from the pointer. // FIXME: Extract an ArrayRefIntPair type from this. llvm::PointerIntPair<ImportPath::Raw::iterator, 1, bool> dataAndIsScoped; ImportPath::Raw::size_type length; ImportPath::Raw::iterator data() const { return dataAndIsScoped.getPointer(); } bool isScoped() const { return dataAndIsScoped.getInt(); } ImportPath::Raw getRaw() const { return ImportPath::Raw(data(), length); } UnloadedImportedModule(ImportPath::Raw raw, bool isScoped) : dataAndIsScoped(raw.data(), isScoped), length(raw.size()) { } public: UnloadedImportedModule(ImportPath importPath, bool isScoped) : UnloadedImportedModule(importPath.getRaw(), isScoped) { } UnloadedImportedModule(ImportPath importPath, ImportKind importKind) : UnloadedImportedModule(importPath, isScopedImportKind(importKind)) { } ImportPath getImportPath() const { return ImportPath(getRaw()); } ImportPath::Module getModulePath() const { return getImportPath().getModulePath(isScoped()); } ImportPath::Access getAccessPath() const { return getImportPath().getAccessPath(isScoped()); } friend bool operator==(const UnloadedImportedModule &lhs, const UnloadedImportedModule &rhs) { return (lhs.getRaw() == rhs.getRaw()) && (lhs.isScoped() == rhs.isScoped()); } }; /// Convenience struct to keep track of a module along with its access path. struct alignas(uint64_t) ImportedModule { /// The access path from an import: `import Foo.Bar` -> `Foo.Bar`. ImportPath::Access accessPath; /// The actual module corresponding to the import. /// /// Invariant: The pointer is non-null. 
ModuleDecl *importedModule; ImportedModule(ImportPath::Access accessPath, ModuleDecl *importedModule) : accessPath(accessPath), importedModule(importedModule) { assert(this->importedModule); } explicit ImportedModule(ModuleDecl *importedModule) : ImportedModule(ImportPath::Access(), importedModule) { } bool operator==(const ImportedModule &other) const { return (this->importedModule == other.importedModule) && (this->accessPath == other.accessPath); } /// Uniques the items in \p imports, ignoring the source locations of the /// access paths. /// /// The order of items in \p imports is \e not preserved. static void removeDuplicates(SmallVectorImpl<ImportedModule> &imports); // Purely here to allow ImportedModule and UnloadedImportedModule to // substitute into the same templates. ImportPath::Access getAccessPath() const { return accessPath; } /// Arbitrarily orders ImportedModule records, for inclusion in sets and such. class Order { public: bool operator()(const ImportedModule &lhs, const ImportedModule &rhs) const { if (lhs.importedModule != rhs.importedModule) return std::less<const ModuleDecl *>()(lhs.importedModule, rhs.importedModule); if (lhs.accessPath.getRaw().data() != rhs.accessPath.getRaw().data()) return std::less<ImportPath::Raw::iterator>()(lhs.accessPath.begin(), rhs.accessPath.begin()); return lhs.accessPath.size() < rhs.accessPath.size(); } }; }; /// Augments a type representing an import to also include information about the /// import's attributes. This is usually used with either \c ImportedModule or /// \c UnloadedImportedModule. template<class ModuleInfo> struct AttributedImport { /// Information about the module and access path being imported. ModuleInfo module; /// The location of the 'import' keyword, for an explicit import. SourceLoc importLoc; /// Flags indicating which attributes of this import are present. ImportOptions options; /// If this is a @_private import, the value of its 'sourceFile:' argument; /// otherwise, empty string. 
StringRef sourceFileArg; /// Names of explicitly imported SPI groups. ArrayRef<Identifier> spiGroups; /// When the import declaration has a `@preconcurrency` annotation, this /// is the source range covering the annotation. SourceRange preconcurrencyRange; /// If the import declaration has a `@_documentation(visibility: <access>)` /// attribute, this is the given access level. std::optional<AccessLevel> docVisibility; /// Access level limiting how imported types can be exported. AccessLevel accessLevel; /// Location of the attribute that defined \c accessLevel. Also indicates /// if the access level was implicit or explicit. SourceRange accessLevelRange; /// Location of the `@_implementationOnly` attribute if set. SourceRange implementationOnlyRange; AttributedImport(ModuleInfo module, SourceLoc importLoc = SourceLoc(), ImportOptions options = ImportOptions(), StringRef filename = {}, ArrayRef<Identifier> spiGroups = {}, SourceRange preconcurrencyRange = {}, std::optional<AccessLevel> docVisibility = std::nullopt, AccessLevel accessLevel = AccessLevel::Public, SourceRange accessLevelRange = SourceRange(), SourceRange implementationOnlyRange = SourceRange()) : module(module), importLoc(importLoc), options(options), sourceFileArg(filename), spiGroups(spiGroups), preconcurrencyRange(preconcurrencyRange), docVisibility(docVisibility), accessLevel(accessLevel), accessLevelRange(accessLevelRange), implementationOnlyRange(implementationOnlyRange) { assert(!(options.contains(ImportFlags::Exported) && options.contains(ImportFlags::ImplementationOnly)) || options.contains(ImportFlags::Reserved)); } template<class OtherModuleInfo> AttributedImport(ModuleInfo module, AttributedImport<OtherModuleInfo> other) : AttributedImport(module, other.importLoc, other.options, other.sourceFileArg, other.spiGroups, other.preconcurrencyRange, other.docVisibility, other.accessLevel, other.accessLevelRange, other.implementationOnlyRange) { } friend bool operator==(const 
AttributedImport<ModuleInfo> &lhs, const AttributedImport<ModuleInfo> &rhs) { return lhs.module == rhs.module && lhs.options.toRaw() == rhs.options.toRaw() && lhs.sourceFileArg == rhs.sourceFileArg && lhs.spiGroups == rhs.spiGroups && lhs.docVisibility == rhs.docVisibility && lhs.accessLevel == rhs.accessLevel && lhs.accessLevelRange == rhs.accessLevelRange && lhs.implementationOnlyRange == rhs.implementationOnlyRange; } AttributedImport<ImportedModule> getLoaded(ModuleDecl *loadedModule) const { return { ImportedModule(module.getAccessPath(), loadedModule), *this }; } }; void simple_display(llvm::raw_ostream &out, const ImportedModule &import); void simple_display(llvm::raw_ostream &out, const UnloadedImportedModule &import); // This is a quasi-implementation detail of the template version below. void simple_display(llvm::raw_ostream &out, const AttributedImport<std::tuple<>> &import); template<typename ModuleInfo> void simple_display(llvm::raw_ostream &out, const AttributedImport<ModuleInfo> &import) { // Print the module. simple_display(out, import.module); // Print the other details of the import, using the std::tuple<> // specialization. AttributedImport<std::tuple<>> importWithoutModule({}, import); simple_display(out, importWithoutModule); } // MARK: - Implicit imports /// The kind of stdlib that should be imported. enum class ImplicitStdlibKind { /// No standard library should be implicitly imported. None, /// The Builtin module should be implicitly imported. Builtin, /// The regular Swift standard library should be implicitly imported. Stdlib }; /// Represents unprocessed options for implicit imports. struct ImplicitImportInfo { /// The implicit stdlib to import. ImplicitStdlibKind StdlibKind; /// Whether we should attempt to import an underlying Clang half of this /// module. bool ShouldImportUnderlyingModule; /// The bridging header path for this module, empty if there is none. 
StringRef BridgingHeaderPath; /// The names of additional modules to be loaded and implicitly imported. SmallVector<AttributedImport<UnloadedImportedModule>, 4> AdditionalUnloadedImports; /// An additional list of already-loaded modules which should be implicitly /// imported. SmallVector<AttributedImport<ImportedModule>, 4> AdditionalImports; ImplicitImportInfo() : StdlibKind(ImplicitStdlibKind::None), ShouldImportUnderlyingModule(false) {} }; /// Contains names of and pointers to modules that must be implicitly imported. struct ImplicitImportList { ArrayRef<AttributedImport<ImportedModule>> imports; ArrayRef<AttributedImport<UnloadedImportedModule>> unloadedImports; friend bool operator==(const ImplicitImportList &lhs, const ImplicitImportList &rhs) { return lhs.imports == rhs.imports && lhs.unloadedImports == rhs.unloadedImports; } }; /// A list of modules to implicitly import. void simple_display(llvm::raw_ostream &out, const ImplicitImportList &importList); } // MARK: - DenseMapInfo namespace llvm { template<> struct DenseMapInfo<swift::ImportOptions> { using ImportOptions = swift::ImportOptions; using UnsignedDMI = DenseMapInfo<uint8_t>; static inline ImportOptions getEmptyKey() { return ImportOptions(UnsignedDMI::getEmptyKey()); } static inline ImportOptions getTombstoneKey() { return ImportOptions(UnsignedDMI::getTombstoneKey()); } static inline unsigned getHashValue(ImportOptions options) { return UnsignedDMI::getHashValue(options.toRaw()); } static bool isEqual(ImportOptions a, ImportOptions b) { return UnsignedDMI::isEqual(a.toRaw(), b.toRaw()); } }; template <> class DenseMapInfo<swift::ImportedModule> { using ImportedModule = swift::ImportedModule; using ModuleDecl = swift::ModuleDecl; public: static ImportedModule getEmptyKey() { return {{}, llvm::DenseMapInfo<ModuleDecl *>::getEmptyKey()}; } static ImportedModule getTombstoneKey() { return {{}, llvm::DenseMapInfo<ModuleDecl *>::getTombstoneKey()}; } static unsigned getHashValue(const ImportedModule 
&val) { auto pair = std::make_pair(val.accessPath.size(), val.importedModule); return llvm::DenseMapInfo<decltype(pair)>::getHashValue(pair); } static bool isEqual(const ImportedModule &lhs, const ImportedModule &rhs) { return lhs.importedModule == rhs.importedModule && lhs.accessPath.isSameAs(rhs.accessPath); } }; template<typename ModuleInfo> struct DenseMapInfo<swift::AttributedImport<ModuleInfo>> { using AttributedImport = swift::AttributedImport<ModuleInfo>; using ModuleInfoDMI = DenseMapInfo<ModuleInfo>; using ImportOptionsDMI = DenseMapInfo<swift::ImportOptions>; using StringRefDMI = DenseMapInfo<StringRef>; using SourceLocDMI = DenseMapInfo<swift::SourceLoc>; // We can't include spiGroups in the hash because ArrayRef<Identifier> is not // DenseMapInfo-able, but we do check that the spiGroups match in isEqual(). static inline AttributedImport getEmptyKey() { return AttributedImport( ModuleInfoDMI::getEmptyKey(), SourceLocDMI::getEmptyKey(), ImportOptionsDMI::getEmptyKey(), StringRefDMI::getEmptyKey(), {}, {}, std::nullopt, swift::AccessLevel::Public, {}); } static inline AttributedImport getTombstoneKey() { return AttributedImport( ModuleInfoDMI::getTombstoneKey(), SourceLocDMI::getEmptyKey(), ImportOptionsDMI::getTombstoneKey(), StringRefDMI::getTombstoneKey(), {}, {}, std::nullopt, swift::AccessLevel::Public, {}); } static inline unsigned getHashValue(const AttributedImport &import) { return detail::combineHashValue( ModuleInfoDMI::getHashValue(import.module), detail::combineHashValue( ImportOptionsDMI::getHashValue(import.options), StringRefDMI::getHashValue(import.sourceFileArg))); } static bool isEqual(const AttributedImport &a, const AttributedImport &b) { return ModuleInfoDMI::isEqual(a.module, b.module) && ImportOptionsDMI::isEqual(a.options, b.options) && StringRefDMI::isEqual(a.sourceFileArg, b.sourceFileArg) && a.spiGroups == b.spiGroups && a.docVisibility == b.docVisibility && a.accessLevel == b.accessLevel && a.accessLevelRange == 
b.accessLevelRange; } }; template <> class DenseMapInfo<swift::ImportPath> { using ImportPath = swift::ImportPath; public: static ImportPath getEmptyKey() { return swift::ImportPath::getEmptyKey(); } static ImportPath getTombstoneKey() { return swift::ImportPath::getTombstoneKey(); } static unsigned getHashValue(const ImportPath &val) { return llvm::DenseMapInfo<ImportPath::Raw>::getHashValue(val.getRaw()); } static bool isEqual(const ImportPath &lhs, const ImportPath &rhs) { return lhs == rhs; } }; } #endif
c
github
https://github.com/apple/swift
include/swift/AST/Import.h
# NOTE: This is a common file that is overwritten by realm/ci-actions sync service # and should only be modified in that repository. name: No Response # Both `issue_comment` and `scheduled` event types are required for this Action # to work properly. on: issue_comment: types: [created] schedule: # Schedule at 00:00 every day - cron: '0 0 * * *' jobs: noResponse: runs-on: ubuntu-latest steps: - uses: lee-dohm/no-response@v0.5.0 with: token: ${{ github.token }} responseRequiredLabel: More-information-needed
unknown
github
https://github.com/realm/realm-swift
.github/workflows/no-response.yml
# Copyright (c) 2012 The Khronos Group Inc. # Permission is hereby granted, free of charge, to any person obtaining a copy of this software and /or associated documentation files (the "Materials "), to deal in the Materials without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Materials, and to permit persons to whom the Materials are furnished to do so, subject to # the following conditions: # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Materials. # THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS. # See Core.Logic.FJudgementContext for the information # of the 'context' parameter. # This sample judging object does the following: # # JudgeBaseline: just verifies that the standard steps did not crash. # JudgeSuperior: also verifies that the validation steps are not in error. # JudgeExemplary: same as intermediate badge. # We import an assistant script that includes the common verifications # methods. The assistant buffers its checks, so that running them again # does not incurs an unnecessary performance hint. 
from StandardDataSets.scripts import JudgeAssistant # Please feed your node list here: tagLst = [] attrName = '' attrVal = '' dataToCheck = '' class SimpleJudgingObject: def __init__(self, _tagLst, _attrName, _attrVal, _data): self.tagList = _tagLst self.attrName = _attrName self.attrVal = _attrVal self.dataToCheck = _data self.status_baseline = False self.status_superior = False self.status_exemplary = False self.__assistant = JudgeAssistant.JudgeAssistant() def JudgeBaseline(self, context): # No step should not crash self.__assistant.CheckCrashes(context) # Import/export/validate must exist and pass, while Render must only exist. self.__assistant.CheckSteps(context, ["Import", "Export", "Validate"], ["Render"]) if (self.__assistant.GetResults() == False): self.status_baseline = False return False # Compare the rendered images between import and export # Then compare images against reference test to check for non-equivalence if ( self.__assistant.CompareRenderedImages(context) ): self.__assistant.CompareImagesAgainst(context, "_reference_no_transform", None, None, 5, True, False) self.status_baseline = self.__assistant.DeferJudgement(context) return self.status_baseline # To pass intermediate you need to pass basic, this object could also include additional # tests that were specific to the intermediate badge. def JudgeSuperior(self, context): self.status_superior = self.status_baseline return self.status_superior # To pass advanced you need to pass intermediate, this object could also include additional # tests that were specific to the advanced badge def JudgeExemplary(self, context): self.status_exemplary = self.status_superior return self.status_exemplary # This is where all the work occurs: "judgingObject" is an absolutely necessary token. # The dynamic loader looks very specifically for a class instance named "judgingObject". # judgingObject = SimpleJudgingObject(tagLst, attrName, attrVal, dataToCheck);
unknown
codeparrot/codeparrot-clean
//===----------------------------------------------------------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_TOOLS_EXTRA_CLANG_TIDY_GOOGLE_GLOBALNAMESINHEADERSCHECK_H #define LLVM_CLANG_TOOLS_EXTRA_CLANG_TIDY_GOOGLE_GLOBALNAMESINHEADERSCHECK_H #include "../ClangTidyCheck.h" #include "../utils/FileExtensionsUtils.h" namespace clang::tidy::google::readability { /// Flag global namespace pollution in header files. /// Right now it only triggers on using declarations and directives. /// /// For the user-facing documentation see: /// https://clang.llvm.org/extra/clang-tidy/checks/google/global-names-in-headers.html class GlobalNamesInHeadersCheck : public ClangTidyCheck { public: GlobalNamesInHeadersCheck(StringRef Name, ClangTidyContext *Context); void registerMatchers(ast_matchers::MatchFinder *Finder) override; void check(const ast_matchers::MatchFinder::MatchResult &Result) override; private: FileExtensionsSet HeaderFileExtensions; }; } // namespace clang::tidy::google::readability #endif // LLVM_CLANG_TOOLS_EXTRA_CLANG_TIDY_GOOGLE_GLOBALNAMESINHEADERSCHECK_H
c
github
https://github.com/llvm/llvm-project
clang-tools-extra/clang-tidy/google/GlobalNamesInHeadersCheck.h
""" Evaluate expressions. """ from __future__ import absolute_import #Init has to be imported first because it has code to workaround the python bug where relative imports don't work if the module is imported as a main module. import __init__ from fabmetheus_utilities.geometry.geometry_utilities.evaluate_elements import setting from fabmetheus_utilities.vector3 import Vector3 from fabmetheus_utilities import archive from fabmetheus_utilities import euclidean from fabmetheus_utilities import gcodec from fabmetheus_utilities import settings import math import os import sys import traceback __author__ = 'Enrique Perez (perez_enrique@yahoo.com)' __credits__ = 'Art of Illusion <http://www.artofillusion.org/>' __date__ = '$Date: 2008/02/05 $' __license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html' globalModuleFunctionsDictionary = {} def addPrefixDictionary(dictionary, keys, value): 'Add prefixed key values to dictionary.' for key in keys: dictionary[key.lstrip('_')] = value def addQuoteWord(evaluatorWords, word): 'Add quote word and remainder if the word starts with a quote character or dollar sign, otherwise add the word.' if len(word) < 2: evaluatorWords.append(word) return firstCharacter = word[0] if firstCharacter == '$': dotIndex = word.find('.', 1) if dotIndex > -1: evaluatorWords.append(word[: dotIndex]) evaluatorWords.append(word[dotIndex :]) return if firstCharacter != '"' and firstCharacter != "'": evaluatorWords.append(word) return nextQuoteIndex = word.find(firstCharacter, 1) if nextQuoteIndex < 0 or nextQuoteIndex == len(word) - 1: evaluatorWords.append(word) return nextQuoteIndex += 1 evaluatorWords.append(word[: nextQuoteIndex]) evaluatorWords.append(word[nextQuoteIndex :]) def addToPathsRecursively(paths, vector3Lists): 'Add to vector3 paths recursively.' 
if vector3Lists.__class__ == Vector3 or vector3Lists.__class__ .__name__ == 'Vector3Index': paths.append([ vector3Lists ]) return path = [] for vector3List in vector3Lists: if vector3List.__class__ == list: addToPathsRecursively(paths, vector3List) elif vector3List.__class__ == Vector3: path.append(vector3List) if len(path) > 0: paths.append(path) def addValueToEvaluatedDictionary(elementNode, evaluatedDictionary, key): 'Get the evaluated dictionary.' value = getEvaluatedValueObliviously(elementNode, key) if value == None: valueString = str(elementNode.attributes[key]) print('Warning, addValueToEvaluatedDictionary in evaluate can not get a value for:') print(valueString) evaluatedDictionary[key + '__Warning__'] = 'Can not evaluate: ' + valueString.replace('"', ' ').replace( "'", ' ') else: evaluatedDictionary[key] = value def addVector3ToElementNode(elementNode, key, vector3): 'Add vector3 to xml element.' elementNode.attributes[key] = '[%s,%s,%s]' % (vector3.x, vector3.y, vector3.z) def compareExecutionOrderAscending(module, otherModule): 'Get comparison in order to sort modules in ascending execution order.' if module.globalExecutionOrder < otherModule.globalExecutionOrder: return -1 if module.globalExecutionOrder > otherModule.globalExecutionOrder: return 1 if module.__name__ < otherModule.__name__: return -1 return int(module.__name__ > otherModule.__name__) def convertToPaths(dictionary): 'Recursively convert any ElementNodes to paths.' if dictionary.__class__ == Vector3 or dictionary.__class__.__name__ == 'Vector3Index': return keys = getKeys(dictionary) if keys == None: return for key in keys: value = dictionary[key] if value.__class__.__name__ == 'ElementNode': if value.xmlObject != None: dictionary[key] = getFloatListListsByPaths(value.xmlObject.getPaths()) else: convertToPaths(dictionary[key]) def convertToTransformedPaths(dictionary): 'Recursively convert any ElementNodes to paths.' 
if dictionary.__class__ == Vector3 or dictionary.__class__.__name__ == 'Vector3Index': return keys = getKeys(dictionary) if keys == None: return for key in keys: value = dictionary[key] if value.__class__.__name__ == 'ElementNode': if value.xmlObject != None: dictionary[key] = value.xmlObject.getTransformedPaths() else: convertToTransformedPaths(dictionary[key]) def executeLeftOperations( evaluators, operationLevel ): 'Evaluate the expression value from the numeric and operation evaluators.' for negativeIndex in xrange( - len(evaluators), - 1 ): evaluatorIndex = negativeIndex + len(evaluators) evaluators[evaluatorIndex].executeLeftOperation( evaluators, evaluatorIndex, operationLevel ) def executeNextEvaluatorArguments(evaluator, evaluators, evaluatorIndex, nextEvaluator): 'Execute the nextEvaluator arguments.' if evaluator.value == None: print('Warning, executeNextEvaluatorArguments in evaluate can not get a evaluator.value for:') print(evaluatorIndex) print(evaluators) print(evaluator) return nextEvaluator.value = evaluator.value(*nextEvaluator.arguments) del evaluators[evaluatorIndex] def executePairOperations(evaluators, operationLevel): 'Evaluate the expression value from the numeric and operation evaluators.' for negativeIndex in xrange(1 - len(evaluators), - 1): evaluatorIndex = negativeIndex + len(evaluators) evaluators[evaluatorIndex].executePairOperation(evaluators, evaluatorIndex, operationLevel) def getBracketEvaluators(bracketBeginIndex, bracketEndIndex, evaluators): 'Get the bracket evaluators.' return getEvaluatedExpressionValueEvaluators(evaluators[bracketBeginIndex + 1 : bracketEndIndex]) def getBracketsExist(evaluators): 'Evaluate the expression value.' 
bracketBeginIndex = None for negativeIndex in xrange( - len(evaluators), 0 ): bracketEndIndex = negativeIndex + len(evaluators) evaluatorEnd = evaluators[ bracketEndIndex ] evaluatorWord = evaluatorEnd.word if evaluatorWord in ['(', '[', '{']: bracketBeginIndex = bracketEndIndex elif evaluatorWord in [')', ']', '}']: if bracketBeginIndex == None: print('Warning, bracketBeginIndex in evaluateBrackets in evaluate is None.') print('This may be because the brackets are not balanced.') print(evaluators) del evaluators[ bracketEndIndex ] return evaluators[ bracketBeginIndex ].executeBracket(bracketBeginIndex, bracketEndIndex, evaluators) evaluators[ bracketBeginIndex ].word = None return True return False def getBracketValuesDeleteEvaluator(bracketBeginIndex, bracketEndIndex, evaluators): 'Get the bracket values and delete the evaluator.' evaluatedExpressionValueEvaluators = getBracketEvaluators(bracketBeginIndex, bracketEndIndex, evaluators) bracketValues = [] for evaluatedExpressionValueEvaluator in evaluatedExpressionValueEvaluators: bracketValues.append( evaluatedExpressionValueEvaluator.value ) del evaluators[ bracketBeginIndex + 1: bracketEndIndex + 1 ] return bracketValues def getCapitalizedSuffixKey(prefix, suffix): 'Get key with capitalized suffix.' if prefix == '' or prefix.endswith('.'): return prefix + suffix return prefix + suffix[:1].upper()+suffix[1:] def getDictionarySplitWords(dictionary, value): 'Get split line for evaluators.' if getIsQuoted(value): return [value] for dictionaryKey in dictionary.keys(): value = value.replace(dictionaryKey, ' ' + dictionaryKey + ' ') dictionarySplitWords = [] for word in value.split(): dictionarySplitWords.append(word) return dictionarySplitWords def getElementNodeByKey(elementNode, key): 'Get the xml element by key.' 
if key not in elementNode.attributes: return None word = str(elementNode.attributes[key]).strip() evaluatedLinkValue = getEvaluatedLinkValue(elementNode, word) if evaluatedLinkValue.__class__.__name__ == 'ElementNode': return evaluatedLinkValue print('Warning, could not get ElementNode in getElementNodeByKey in evaluate for:') print(key) print(evaluatedLinkValue) print(elementNode) return None def getElementNodeObject(evaluatedLinkValue): 'Get ElementNodeObject.' if evaluatedLinkValue.__class__.__name__ != 'ElementNode': print('Warning, could not get ElementNode in getElementNodeObject in evaluate for:') print(evaluatedLinkValue.__class__.__name__) print(evaluatedLinkValue) return None if evaluatedLinkValue.xmlObject == None: print('Warning, evaluatedLinkValue.xmlObject is None in getElementNodeObject in evaluate for:') print(evaluatedLinkValue) return None return evaluatedLinkValue.xmlObject def getElementNodesByKey(elementNode, key): 'Get the xml elements by key.' if key not in elementNode.attributes: return [] word = str(elementNode.attributes[key]).strip() evaluatedLinkValue = getEvaluatedLinkValue(elementNode, word) if evaluatedLinkValue.__class__.__name__ == 'ElementNode': return [evaluatedLinkValue] if evaluatedLinkValue.__class__ == list: return evaluatedLinkValue print('Warning, could not get ElementNodes in getElementNodesByKey in evaluate for:') print(key) print(evaluatedLinkValue) print(elementNode) return [] def getEndIndexConvertEquationValue( bracketEndIndex, evaluatorIndex, evaluators ): 'Get the bracket end index and convert the equation value evaluators into a string.' 
evaluator = evaluators[evaluatorIndex] if evaluator.__class__ != EvaluatorValue: return bracketEndIndex if not evaluator.word.startswith('equation.'): return bracketEndIndex if evaluators[ evaluatorIndex + 1 ].word != ':': return bracketEndIndex valueBeginIndex = evaluatorIndex + 2 equationValueString = '' for valueEvaluatorIndex in xrange( valueBeginIndex, len(evaluators) ): valueEvaluator = evaluators[ valueEvaluatorIndex ] if valueEvaluator.word == ',' or valueEvaluator.word == '}': if equationValueString == '': return bracketEndIndex else: evaluators[ valueBeginIndex ] = EvaluatorValue( equationValueString ) valueDeleteIndex = valueBeginIndex + 1 del evaluators[ valueDeleteIndex : valueEvaluatorIndex ] return bracketEndIndex - valueEvaluatorIndex + valueDeleteIndex equationValueString += valueEvaluator.word return bracketEndIndex def getEvaluatedBoolean(defaultValue, elementNode, key): 'Get the evaluated boolean.' if elementNode == None: return defaultValue if key in elementNode.attributes: return euclidean.getBooleanFromValue(getEvaluatedValueObliviously(elementNode, key)) return defaultValue def getEvaluatedDictionaryByCopyKeys(copyKeys, elementNode): 'Get the evaluated dictionary by copyKeys.' evaluatedDictionary = {} for key in elementNode.attributes.keys(): if key in copyKeys: evaluatedDictionary[key] = elementNode.attributes[key] else: addValueToEvaluatedDictionary(elementNode, evaluatedDictionary, key) return evaluatedDictionary def getEvaluatedDictionaryByEvaluationKeys(elementNode, evaluationKeys): 'Get the evaluated dictionary.' evaluatedDictionary = {} for key in elementNode.attributes.keys(): if key in evaluationKeys: addValueToEvaluatedDictionary(elementNode, evaluatedDictionary, key) return evaluatedDictionary def getEvaluatedExpressionValue(elementNode, value): 'Evaluate the expression value.' 
try: return getEvaluatedExpressionValueBySplitLine(elementNode, getEvaluatorSplitWords(value)) except: print('Warning, in getEvaluatedExpressionValue in evaluate could not get a value for:') print(value) traceback.print_exc(file=sys.stdout) return None def getEvaluatedExpressionValueBySplitLine(elementNode, words): 'Evaluate the expression value.' evaluators = [] for wordIndex, word in enumerate(words): nextWord = '' nextWordIndex = wordIndex + 1 if nextWordIndex < len(words): nextWord = words[nextWordIndex] evaluator = getEvaluator(elementNode, evaluators, nextWord, word) if evaluator != None: evaluators.append(evaluator) while getBracketsExist(evaluators): pass evaluatedExpressionValueEvaluators = getEvaluatedExpressionValueEvaluators(evaluators) if len( evaluatedExpressionValueEvaluators ) > 0: return evaluatedExpressionValueEvaluators[0].value return None def getEvaluatedExpressionValueEvaluators(evaluators): 'Evaluate the expression value from the numeric and operation evaluators.' for evaluatorIndex, evaluator in enumerate(evaluators): evaluator.executeCenterOperation(evaluators, evaluatorIndex) for negativeIndex in xrange(1 - len(evaluators), 0): evaluatorIndex = negativeIndex + len(evaluators) evaluators[evaluatorIndex].executeRightOperation(evaluators, evaluatorIndex) executeLeftOperations(evaluators, 200) for operationLevel in [80, 60, 40, 20, 15]: executePairOperations(evaluators, operationLevel) executeLeftOperations(evaluators, 13) executePairOperations(evaluators, 12) for negativeIndex in xrange(-len(evaluators), 0): evaluatorIndex = negativeIndex + len(evaluators) evaluators[evaluatorIndex].executePairOperation(evaluators, evaluatorIndex, 10) for evaluatorIndex in xrange(len(evaluators) - 1, -1, -1): evaluators[evaluatorIndex].executePairOperation(evaluators, evaluatorIndex, 0) return evaluators def getEvaluatedFloat(defaultValue, elementNode, key): 'Get the evaluated float.' 
if elementNode == None: return defaultValue if key in elementNode.attributes: return euclidean.getFloatFromValue(getEvaluatedValueObliviously(elementNode, key)) return defaultValue def getEvaluatedInt(defaultValue, elementNode, key): 'Get the evaluated int.' if elementNode == None: return None if key in elementNode.attributes: try: return getIntFromFloatString(getEvaluatedValueObliviously(elementNode, key)) except: print('Warning, could not evaluate the int.') print(key) print(elementNode.attributes[key]) return defaultValue def getEvaluatedIntByKeys(defaultValue, elementNode, keys): 'Get the evaluated int by keys.' for key in keys: defaultValue = getEvaluatedInt(defaultValue, elementNode, key) return defaultValue def getEvaluatedLinkValue(elementNode, word): 'Get the evaluated link value.' if word == '': return '' if getStartsWithCurlyEqualRoundSquare(word): return getEvaluatedExpressionValue(elementNode, word) return word def getEvaluatedString(defaultValue, elementNode, key): 'Get the evaluated string.' if elementNode == None: return defaultValue if key in elementNode.attributes: return str(getEvaluatedValueObliviously(elementNode, key)) return defaultValue def getEvaluatedValue(defaultValue, elementNode, key): 'Get the evaluated value.' if elementNode == None: return defaultValue if key in elementNode.attributes: return getEvaluatedValueObliviously(elementNode, key) return defaultValue def getEvaluatedValueObliviously(elementNode, key): 'Get the evaluated value.' value = str(elementNode.attributes[key]).strip() if key == 'id' or key == 'name' or key == 'tags': return value return getEvaluatedLinkValue(elementNode, value) def getEvaluator(elementNode, evaluators, nextWord, word): 'Get the evaluator.' 
if word in globalSplitDictionary: return globalSplitDictionary[word](elementNode, word) firstCharacter = word[: 1] if firstCharacter == "'" or firstCharacter == '"': if len(word) > 1: if firstCharacter == word[-1]: return EvaluatorValue(word[1 : -1]) if firstCharacter == '$': return EvaluatorValue(word[1 :]) dotIndex = word.find('.') functions = elementNode.getXMLProcessor().functions if dotIndex > -1 and len(word) > 1: if dotIndex == 0 and word[1].isalpha(): return EvaluatorAttribute(elementNode, word) if dotIndex > 0: untilDot = word[: dotIndex] if untilDot in globalModuleEvaluatorDictionary: return globalModuleEvaluatorDictionary[untilDot](elementNode, word) if len(functions) > 0: if untilDot in functions[-1].localDictionary: return EvaluatorLocal(elementNode, word) if firstCharacter.isalpha() or firstCharacter == '_': if len(functions) > 0: if word in functions[-1].localDictionary: return EvaluatorLocal(elementNode, word) wordElement = elementNode.getElementNodeByID(word) if wordElement != None: if wordElement.getNodeName() == 'class': return EvaluatorClass(wordElement, word) if wordElement.getNodeName() == 'function': return EvaluatorFunction(wordElement, word) return EvaluatorValue(word) return EvaluatorNumeric(elementNode, word) def getEvaluatorSplitWords(value): 'Get split words for evaluators.' 
if value.startswith('='): value = value[len('=') :] if len(value) < 1: return [] global globalDictionaryOperatorBegin uniqueQuoteIndex = 0 word = '' quoteString = None quoteDictionary = {} for characterIndex in xrange(len(value)): character = value[characterIndex] if character == '"' or character == "'": if quoteString == None: quoteString = '' elif quoteString != None: if character == quoteString[: 1]: uniqueQuoteIndex = getUniqueQuoteIndex(uniqueQuoteIndex, value) uniqueToken = getTokenByNumber(uniqueQuoteIndex) quoteDictionary[uniqueToken] = quoteString + character character = uniqueToken quoteString = None if quoteString == None: word += character else: quoteString += character beginSplitWords = getDictionarySplitWords(globalDictionaryOperatorBegin, word) global globalSplitDictionaryOperator evaluatorSplitWords = [] for beginSplitWord in beginSplitWords: if beginSplitWord in globalDictionaryOperatorBegin: evaluatorSplitWords.append(beginSplitWord) else: evaluatorSplitWords += getDictionarySplitWords(globalSplitDictionaryOperator, beginSplitWord) for evaluatorSplitWordIndex, evaluatorSplitWord in enumerate(evaluatorSplitWords): for quoteDictionaryKey in quoteDictionary.keys(): if quoteDictionaryKey in evaluatorSplitWord: evaluatorSplitWords[evaluatorSplitWordIndex] = evaluatorSplitWord.replace(quoteDictionaryKey, quoteDictionary[quoteDictionaryKey]) evaluatorTransitionWords = [] for evaluatorSplitWord in evaluatorSplitWords: addQuoteWord(evaluatorTransitionWords, evaluatorSplitWord) return evaluatorTransitionWords def getFloatListFromBracketedString( bracketedString ): 'Get list from a bracketed string.' 
if not getIsBracketed( bracketedString ): return None bracketedString = bracketedString.strip().replace('[', '').replace(']', '').replace('(', '').replace(')', '') if len( bracketedString ) < 1: return [] splitLine = bracketedString.split(',') floatList = [] for word in splitLine: evaluatedFloat = euclidean.getFloatFromValue(word) if evaluatedFloat != None: floatList.append( evaluatedFloat ) return floatList def getFloatListListsByPaths(paths): 'Get float lists by paths.' floatListLists = [] for path in paths: floatListList = [] for point in path: floatListList.append( point.getFloatList() ) return floatListLists def getIntFromFloatString(value): 'Get the int from the string.' floatString = str(value).strip() if floatString == '': return None dotIndex = floatString.find('.') if dotIndex < 0: return int(value) return int( round( float(floatString) ) ) def getIsBracketed(word): 'Determine if the word is bracketed.' if len(word) < 2: return False firstCharacter = word[0] lastCharacter = word[-1] if firstCharacter == '(' and lastCharacter == ')': return True return firstCharacter == '[' and lastCharacter == ']' def getIsQuoted(word): 'Determine if the word is quoted.' if len(word) < 2: return False firstCharacter = word[0] lastCharacter = word[-1] if firstCharacter == '"' and lastCharacter == '"': return True return firstCharacter == "'" and lastCharacter == "'" def getKeys(repository): 'Get keys for repository.' repositoryClass = repository.__class__ if repositoryClass == list or repositoryClass == tuple: return range(len(repository)) if repositoryClass == dict: return repository.keys() return None def getLocalAttributeValueString(key, valueString): 'Get the local attribute value string with augmented assignment.' 
augmentedStatements = '+= -= *= /= %= **='.split() for augmentedStatement in augmentedStatements: if valueString.startswith(augmentedStatement): return key + augmentedStatement[: -1] + valueString[len(augmentedStatement) :] return valueString def getMatchingPlugins(elementNode, namePathDictionary): 'Get the plugins whose names are in the attribute dictionary.' matchingPlugins = [] namePathDictionaryCopy = namePathDictionary.copy() for key in elementNode.attributes: dotIndex = key.find('.') if dotIndex > - 1: keyUntilDot = key[: dotIndex] if keyUntilDot in namePathDictionaryCopy: pluginModule = archive.getModuleWithPath( namePathDictionaryCopy[ keyUntilDot ] ) del namePathDictionaryCopy[ keyUntilDot ] if pluginModule != None: matchingPlugins.append( pluginModule ) return matchingPlugins def getNextChildIndex(elementNode): 'Get the next childNode index.' for childNodeIndex, childNode in enumerate( elementNode.parentNode.childNodes ): if childNode == elementNode: return childNodeIndex + 1 return len( elementNode.parentNode.childNodes ) def getPathByKey(defaultPath, elementNode, key): 'Get path from prefix and xml element.' if key not in elementNode.attributes: return defaultPath word = str(elementNode.attributes[key]).strip() evaluatedLinkValue = getEvaluatedLinkValue(elementNode, word) if evaluatedLinkValue.__class__ == list: return getPathByList(evaluatedLinkValue) elementNodeObject = getElementNodeObject(evaluatedLinkValue) if elementNodeObject == None: return defaultPath return elementNodeObject.getPaths()[0] def getPathByList(vertexList): 'Get the paths by list.' if len(vertexList) < 1: return Vector3() if vertexList[0].__class__ != list: vertexList = [vertexList] path = [] for floatList in vertexList: vector3 = getVector3ByFloatList(floatList, Vector3()) path.append(vector3) return path def getPathByPrefix(elementNode, path, prefix): 'Get path from prefix and xml element.' 
if len(path) < 2: print('Warning, bug, path is too small in evaluate in setPathByPrefix.') return pathByKey = getPathByKey([], elementNode, getCapitalizedSuffixKey(prefix, 'path')) if len( pathByKey ) < len(path): for pointIndex in xrange( len( pathByKey ) ): path[pointIndex] = pathByKey[pointIndex] else: path = pathByKey path[0] = getVector3ByPrefix(path[0], elementNode, getCapitalizedSuffixKey(prefix, 'pathStart')) path[-1] = getVector3ByPrefix(path[-1], elementNode, getCapitalizedSuffixKey(prefix, 'pathEnd')) return path def getPathsByKey(defaultPaths, elementNode, key): 'Get paths by key.' if key not in elementNode.attributes: return defaultPaths word = str(elementNode.attributes[key]).strip() evaluatedLinkValue = getEvaluatedLinkValue(elementNode, word) if evaluatedLinkValue.__class__ == dict or evaluatedLinkValue.__class__ == list: convertToPaths(evaluatedLinkValue) return getPathsByLists(evaluatedLinkValue) elementNodeObject = getElementNodeObject(evaluatedLinkValue) if elementNodeObject == None: return defaultPaths return elementNodeObject.getPaths() def getPathsByLists(vertexLists): 'Get paths by lists.' vector3Lists = getVector3ListsRecursively(vertexLists) paths = [] addToPathsRecursively(paths, vector3Lists) return paths def getRadiusArealizedBasedOnAreaRadius(elementNode, radius, sides): 'Get the areal radius from the radius, number of sides and cascade radiusAreal.' if elementNode.getCascadeBoolean(False, 'radiusAreal'): return radius return radius * euclidean.getRadiusArealizedMultiplier(sides) def getSidesBasedOnPrecision(elementNode, radius): 'Get the number of polygon sides.' return int(math.ceil(math.sqrt(0.5 * radius / setting.getPrecision(elementNode)) * math.pi)) def getSidesMinimumThreeBasedOnPrecision(elementNode, radius): 'Get the number of polygon sides, with a minimum of three.' 
return max(getSidesBasedOnPrecision(elementNode, radius), 3) def getSidesMinimumThreeBasedOnPrecisionSides(elementNode, radius): 'Get the number of polygon sides, with a minimum of three.' sides = getSidesMinimumThreeBasedOnPrecision(elementNode, radius) return getEvaluatedFloat(sides, elementNode, 'sides') def getSplitDictionary(): 'Get split dictionary.' global globalSplitDictionaryOperator splitDictionary = globalSplitDictionaryOperator.copy() global globalDictionaryOperatorBegin splitDictionary.update( globalDictionaryOperatorBegin ) splitDictionary['and'] = EvaluatorAnd splitDictionary['false'] = EvaluatorFalse splitDictionary['False'] = EvaluatorFalse splitDictionary['or'] = EvaluatorOr splitDictionary['not'] = EvaluatorNot splitDictionary['true'] = EvaluatorTrue splitDictionary['True'] = EvaluatorTrue splitDictionary['none'] = EvaluatorNone splitDictionary['None'] = EvaluatorNone return splitDictionary def getStartsWithCurlyEqualRoundSquare(word): 'Determine if the word starts with round or square brackets.' return word.startswith('{') or word.startswith('=') or word.startswith('(') or word.startswith('[') def getTokenByNumber(number): 'Get token by number.' return '_%s_' % number def getTransformedPathByKey(defaultTransformedPath, elementNode, key): 'Get transformed path from prefix and xml element.' if key not in elementNode.attributes: return defaultTransformedPath value = elementNode.attributes[key] if value.__class__ == list: return value word = str(value).strip() evaluatedLinkValue = getEvaluatedLinkValue(elementNode, word) if evaluatedLinkValue.__class__ == list: return getPathByList(evaluatedLinkValue) elementNodeObject = getElementNodeObject(evaluatedLinkValueClass) if elementNodeObject == None: return defaultTransformedPath return elementNodeObject.getTransformedPaths()[0] def getTransformedPathByPrefix(elementNode, path, prefix): 'Get path from prefix and xml element.' 
if len(path) < 2: print('Warning, bug, path is too small in evaluate in setPathByPrefix.') return pathByKey = getTransformedPathByKey([], elementNode, getCapitalizedSuffixKey(prefix, 'path')) if len( pathByKey ) < len(path): for pointIndex in xrange( len( pathByKey ) ): path[pointIndex] = pathByKey[pointIndex] else: path = pathByKey path[0] = getVector3ByPrefix(path[0], elementNode, getCapitalizedSuffixKey(prefix, 'pathStart')) path[-1] = getVector3ByPrefix(path[-1], elementNode, getCapitalizedSuffixKey(prefix, 'pathEnd')) return path def getTransformedPathsByKey(defaultTransformedPaths, elementNode, key): 'Get transformed paths by key.' if key not in elementNode.attributes: return defaultTransformedPaths value = elementNode.attributes[key] if value.__class__ == list: return getPathsByLists(value) word = str(value).strip() evaluatedLinkValue = getEvaluatedLinkValue(elementNode, word) if evaluatedLinkValue.__class__ == dict or evaluatedLinkValue.__class__ == list: convertToTransformedPaths(evaluatedLinkValue) return getPathsByLists(evaluatedLinkValue) elementNodeObject = getElementNodeObject(evaluatedLinkValue) if elementNodeObject == None: return defaultTransformedPaths return elementNodeObject.getTransformedPaths() def getUniqueQuoteIndex( uniqueQuoteIndex, word ): 'Get uniqueQuoteIndex.' uniqueQuoteIndex += 1 while getTokenByNumber(uniqueQuoteIndex) in word: uniqueQuoteIndex += 1 return uniqueQuoteIndex def getUniqueToken(word): 'Get unique token.' uniqueString = '@#!' for character in uniqueString: if character not in word: return character uniqueNumber = 0 while True: for character in uniqueString: uniqueToken = character + str(uniqueNumber) if uniqueToken not in word: return uniqueToken uniqueNumber += 1 def getVector3ByDictionary( dictionary, vector3 ): 'Get vector3 by dictionary.' 
if 'x' in dictionary: vector3 = getVector3IfNone(vector3) vector3.x = euclidean.getFloatFromValue(dictionary['x']) if 'y' in dictionary: vector3 = getVector3IfNone(vector3) vector3.y = euclidean.getFloatFromValue(dictionary['y']) if 'z' in dictionary: vector3 = getVector3IfNone(vector3) vector3.z = euclidean.getFloatFromValue( dictionary['z'] ) return vector3 def getVector3ByDictionaryListValue(value, vector3): 'Get vector3 by dictionary, list or value.' if value.__class__ == Vector3 or value.__class__.__name__ == 'Vector3Index': return value if value.__class__ == dict: return getVector3ByDictionary(value, vector3) if value.__class__ == list: return getVector3ByFloatList(value, vector3) floatFromValue = euclidean.getFloatFromValue(value) if floatFromValue == None: return vector3 vector3.setToXYZ(floatFromValue, floatFromValue, floatFromValue) return vector3 def getVector3ByFloatList(floatList, vector3): 'Get vector3 by float list.' if len(floatList) > 0: vector3 = getVector3IfNone(vector3) vector3.x = euclidean.getFloatFromValue(floatList[0]) if len(floatList) > 1: vector3 = getVector3IfNone(vector3) vector3.y = euclidean.getFloatFromValue(floatList[1]) if len(floatList) > 2: vector3 = getVector3IfNone(vector3) vector3.z = euclidean.getFloatFromValue(floatList[2]) return vector3 def getVector3ByMultiplierPrefix( elementNode, multiplier, prefix, vector3 ): 'Get vector3 from multiplier, prefix and xml element.' if multiplier == 0.0: return vector3 oldMultipliedValueVector3 = vector3 * multiplier vector3ByPrefix = getVector3ByPrefix(oldMultipliedValueVector3.copy(), elementNode, prefix) if vector3ByPrefix == oldMultipliedValueVector3: return vector3 return vector3ByPrefix / multiplier def getVector3ByMultiplierPrefixes( elementNode, multiplier, prefixes, vector3 ): 'Get vector3 from multiplier, prefixes and xml element.' 
for prefix in prefixes: vector3 = getVector3ByMultiplierPrefix( elementNode, multiplier, prefix, vector3 ) return vector3 def getVector3ByPrefix(defaultVector3, elementNode, prefix): 'Get vector3 from prefix and xml element.' value = getEvaluatedValue(None, elementNode, prefix) if value != None: defaultVector3 = getVector3ByDictionaryListValue(value, defaultVector3) prefix = archive.getUntilDot(prefix) x = getEvaluatedFloat(None, elementNode, prefix + '.x') if x != None: defaultVector3 = getVector3IfNone(defaultVector3) defaultVector3.x = x y = getEvaluatedFloat(None, elementNode, prefix + '.y') if y != None: defaultVector3 = getVector3IfNone(defaultVector3) defaultVector3.y = y z = getEvaluatedFloat(None, elementNode, prefix + '.z') if z != None: defaultVector3 = getVector3IfNone(defaultVector3) defaultVector3.z = z return defaultVector3 def getVector3ByPrefixes( elementNode, prefixes, vector3 ): 'Get vector3 from prefixes and xml element.' for prefix in prefixes: vector3 = getVector3ByPrefix(vector3, elementNode, prefix) return vector3 def getVector3FromElementNode(elementNode): 'Get vector3 from xml element.' vector3 = Vector3( getEvaluatedFloat(0.0, elementNode, 'x'), getEvaluatedFloat(0.0, elementNode, 'y'), getEvaluatedFloat(0.0, elementNode, 'z')) return getVector3ByPrefix(vector3, elementNode, 'cartesian') def getVector3IfNone(vector3): 'Get new vector3 if the original vector3 is none.' if vector3 == None: return Vector3() return vector3 def getVector3ListsRecursively(floatLists): 'Get vector3 lists recursively.' if len(floatLists) < 1: return Vector3() firstElement = floatLists[0] if firstElement.__class__ == Vector3: return floatLists if firstElement.__class__ != list: return getVector3ByFloatList(floatLists, Vector3()) vector3ListsRecursively = [] for floatList in floatLists: vector3ListsRecursively.append(getVector3ListsRecursively(floatList)) return vector3ListsRecursively def getVisibleObjects(archivableObjects): 'Get the visible objects.' 
visibleObjects = [] for archivableObject in archivableObjects: if archivableObject.getVisible(): visibleObjects.append(archivableObject) return visibleObjects def processArchivable(archivableClass, elementNode): 'Get any new elements and process the archivable.' if elementNode == None: return elementNode.xmlObject = archivableClass() elementNode.xmlObject.setToElementNode(elementNode) elementNode.getXMLProcessor().processChildNodes(elementNode) def processCondition(elementNode): 'Process the xml element condition.' xmlProcessor = elementNode.getXMLProcessor() if elementNode.xmlObject == None: elementNode.xmlObject = ModuleElementNode(elementNode) if elementNode.xmlObject.conditionSplitWords == None: return if len(xmlProcessor.functions ) < 1: print('Warning, the (in) element is not in a function in processCondition in evaluate for:') print(elementNode) return if int(getEvaluatedExpressionValueBySplitLine(elementNode, elementNode.xmlObject.conditionSplitWords)) > 0: xmlProcessor.functions[-1].processChildNodes(elementNode) else: elementNode.xmlObject.processElse(elementNode) def removeIdentifiersFromDictionary(dictionary): 'Remove the identifier elements from a dictionary.' euclidean.removeElementsFromDictionary(dictionary, ['id', 'name', 'tags']) return dictionary def setAttributesByArguments(argumentNames, arguments, elementNode): 'Set the attribute dictionary to the arguments.' for argumentIndex, argument in enumerate(arguments): elementNode.attributes[argumentNames[argumentIndex]] = argument def setFunctionLocalDictionary(arguments, function): 'Evaluate the function statement and delete the evaluators.' 
function.localDictionary = {'_arguments' : arguments} if len(arguments) > 0: firstArgument = arguments[0] if firstArgument.__class__ == dict: function.localDictionary = firstArgument return if 'parameters' not in function.elementNode.attributes: return parameters = function.elementNode.attributes['parameters'].strip() if parameters == '': return parameterWords = parameters.split(',') for parameterWordIndex, parameterWord in enumerate(parameterWords): strippedWord = parameterWord.strip() keyValue = KeyValue().getByEqual(strippedWord) if parameterWordIndex < len(arguments): function.localDictionary[keyValue.key] = arguments[parameterWordIndex] else: strippedValue = keyValue.value if strippedValue == None: print('Warning there is no default parameter in getParameterValue for:') print(strippedWord) print(parameterWords) print(arguments) print(function.elementNode.attributes) else: strippedValue = strippedValue.strip() function.localDictionary[keyValue.key.strip()] = strippedValue if len(arguments) > len(parameterWords): print('Warning there are too many initializeFunction parameters for:') print(function.elementNode.attributes) print(parameterWords) print(arguments) def setLocalAttribute(elementNode): 'Set the local attribute if any.' if elementNode.xmlObject != None: return for key in elementNode.attributes: if key[: 1].isalpha(): value = getEvaluatorSplitWords(getLocalAttributeValueString(key, elementNode.attributes[key].strip())) elementNode.xmlObject = KeyValue(key, value) return elementNode.xmlObject = KeyValue() class BaseFunction: 'Class to get equation results.' def __init__(self, elementNode): 'Initialize.' self.elementNode = elementNode self.localDictionary = {} self.xmlProcessor = elementNode.getXMLProcessor() def __repr__(self): 'Get the string representation of this Class.' return str(self.__dict__) def getReturnValue(self): 'Get return value.' 
self.getReturnValueWithoutDeletion() del self.xmlProcessor.functions[-1] return self.returnValue def processChildNodes(self, elementNode): 'Process childNodes if shouldReturn is false.' for childNode in elementNode.childNodes: if self.shouldReturn: return self.xmlProcessor.processElementNode(childNode) class ClassFunction(BaseFunction): 'Class to get class results.' def getReturnValueByArguments(self, *arguments): 'Get return value by arguments.' setFunctionLocalDictionary(arguments, self) return self.getReturnValue() def getReturnValueWithoutDeletion(self): 'Get return value without deleting last function.' self.returnValue = None self.shouldReturn = False self.xmlProcessor.functions.append(self) self.processChildNodes(self.elementNode) return self.returnValue class ClassObject: 'Class to hold class attributes and functions.' def __init__(self, elementNode): 'Initialize.' self.functionDictionary = elementNode.xmlObject.functionDictionary self.selfDictionary = {} for variable in elementNode.xmlObject.variables: self.selfDictionary[variable] = None def __repr__(self): 'Get the string representation of this Class.' return str(self.__dict__) def _getAccessibleAttribute(self, attributeName): 'Get the accessible attribute.' if attributeName in self.selfDictionary: return self.selfDictionary[attributeName] if attributeName in self.functionDictionary: function = self.functionDictionary[attributeName] function.classObject = self return function.getReturnValueByArguments return None def _setAccessibleAttribute(self, attributeName, value): 'Set the accessible attribute.' if attributeName in self.selfDictionary: self.selfDictionary[attributeName] = value class EmptyObject: 'An empty object.' def __init__(self): 'Do nothing.' pass class Evaluator: 'Base evaluator class.' def __init__(self, elementNode, word): 'Set value to none.' self.value = None self.word = word def __repr__(self): 'Get the string representation of this Class.' 
return str(self.__dict__) def executeBracket( self, bracketBeginIndex, bracketEndIndex, evaluators ): 'Execute the bracket.' pass def executeCenterOperation(self, evaluators, evaluatorIndex): 'Execute operator which acts on the center.' pass def executeDictionary(self, dictionary, evaluators, keys, evaluatorIndex, nextEvaluator): 'Execute the dictionary.' del evaluators[evaluatorIndex] enumeratorKeys = euclidean.getEnumeratorKeys(dictionary, keys) if enumeratorKeys.__class__ == list: nextEvaluator.value = [] for enumeratorKey in enumeratorKeys: if enumeratorKey in dictionary: nextEvaluator.value.append(dictionary[enumeratorKey]) else: print('Warning, key in executeKey in Evaluator in evaluate is not in for:') print(enumeratorKey) print(dictionary) return if enumeratorKeys in dictionary: nextEvaluator.value = dictionary[enumeratorKeys] else: print('Warning, key in executeKey in Evaluator in evaluate is not in for:') print(enumeratorKeys) print(dictionary) def executeFunction(self, evaluators, evaluatorIndex, nextEvaluator): 'Execute the function.' pass def executeKey(self, evaluators, keys, evaluatorIndex, nextEvaluator): 'Execute the key index.' 
if self.value.__class__ == str: self.executeString(evaluators, keys, evaluatorIndex, nextEvaluator) return if self.value.__class__ == list: self.executeList(evaluators, keys, evaluatorIndex, nextEvaluator) return if self.value.__class__ == dict: self.executeDictionary(self.value, evaluators, keys, evaluatorIndex, nextEvaluator) return getAccessibleDictionaryFunction = getattr(self.value, '_getAccessibleDictionary', None) if getAccessibleDictionaryFunction != None: self.executeDictionary(getAccessibleDictionaryFunction(), evaluators, keys, evaluatorIndex, nextEvaluator) return if self.value.__class__.__name__ != 'ElementNode': return del evaluators[evaluatorIndex] enumeratorKeys = euclidean.getEnumeratorKeys(self.value.attributes, keys) if enumeratorKeys.__class__ == list: nextEvaluator.value = [] for enumeratorKey in enumeratorKeys: if enumeratorKey in self.value.attributes: nextEvaluator.value.append(getEvaluatedExpressionValue(self.value, self.value.attributes[enumeratorKey])) else: print('Warning, key in executeKey in Evaluator in evaluate is not in for:') print(enumeratorKey) print(self.value.attributes) return if enumeratorKeys in self.value.attributes: nextEvaluator.value = getEvaluatedExpressionValue(self.value, self.value.attributes[enumeratorKeys]) else: print('Warning, key in executeKey in Evaluator in evaluate is not in for:') print(enumeratorKeys) print(self.value.attributes) def executeLeftOperation(self, evaluators, evaluatorIndex, operationLevel): 'Execute operator which acts from the left.' pass def executeList(self, evaluators, keys, evaluatorIndex, nextEvaluator): 'Execute the key index.' 
del evaluators[evaluatorIndex] enumeratorKeys = euclidean.getEnumeratorKeys(self.value, keys) if enumeratorKeys.__class__ == list: nextEvaluator.value = [] for enumeratorKey in enumeratorKeys: intKey = euclidean.getIntFromValue(enumeratorKey) if self.getIsInRange(intKey): nextEvaluator.value.append(self.value[intKey]) else: print('Warning, key in executeList in Evaluator in evaluate is not in for:') print(enumeratorKey) print(self.value) return intKey = euclidean.getIntFromValue(enumeratorKeys) if self.getIsInRange(intKey): nextEvaluator.value = self.value[intKey] else: print('Warning, key in executeList in Evaluator in evaluate is not in for:') print(enumeratorKeys) print(self.value) def executePairOperation(self, evaluators, evaluatorIndex, operationLevel): 'Operate on two evaluators.' pass def executeRightOperation( self, evaluators, evaluatorIndex ): 'Execute operator which acts from the right.' pass def executeString(self, evaluators, keys, evaluatorIndex, nextEvaluator): 'Execute the string.' del evaluators[evaluatorIndex] enumeratorKeys = euclidean.getEnumeratorKeys(self.value, keys) if enumeratorKeys.__class__ == list: nextEvaluator.value = '' for enumeratorKey in enumeratorKeys: intKey = euclidean.getIntFromValue(enumeratorKey) if self.getIsInRange(intKey): nextEvaluator.value += self.value[intKey] else: print('Warning, key in executeString in Evaluator in evaluate is not in for:') print(enumeratorKey) print(self.value) return intKey = euclidean.getIntFromValue(enumeratorKeys) if self.getIsInRange(intKey): nextEvaluator.value = self.value[intKey] else: print('Warning, key in executeString in Evaluator in evaluate is not in for:') print(enumeratorKeys) print(self.value) def getIsInRange(self, keyIndex): 'Determine if the keyIndex is in range.' if keyIndex == None: return False return keyIndex >= -len(self.value) and keyIndex < len(self.value) class EvaluatorAddition(Evaluator): 'Class to add two evaluators.' 
def executePair( self, evaluators, evaluatorIndex ): 'Add two evaluators.' leftIndex = evaluatorIndex - 1 rightIndex = evaluatorIndex + 1 if leftIndex < 0: print('Warning, no leftKey in executePair in EvaluatorAddition for:') print(evaluators) print(evaluatorIndex) print(self) del evaluators[evaluatorIndex] return if rightIndex >= len(evaluators): print('Warning, no rightKey in executePair in EvaluatorAddition for:') print(evaluators) print(evaluatorIndex) print(self) del evaluators[evaluatorIndex] return rightValue = evaluators[rightIndex].value evaluators[leftIndex].value = self.getOperationValue(evaluators[leftIndex].value, evaluators[rightIndex].value) del evaluators[ evaluatorIndex : evaluatorIndex + 2 ] def executePairOperation(self, evaluators, evaluatorIndex, operationLevel): 'Operate on two evaluators.' if operationLevel == 20: self.executePair(evaluators, evaluatorIndex) def getEvaluatedValues(self, enumerable, keys, value): 'Get evaluatedValues.' if enumerable.__class__ == dict: evaluatedValues = {} for key in keys: evaluatedValues[key] = self.getOperationValue(value, enumerable[key]) return evaluatedValues evaluatedValues = [] for key in keys: evaluatedValues.append(self.getOperationValue(value, enumerable[key])) return evaluatedValues def getOperationValue(self, leftValue, rightValue): 'Get operation value.' 
leftKeys = getKeys(leftValue) rightKeys = getKeys(rightValue) if leftKeys == None and rightKeys == None: return self.getValueFromValuePair(leftValue, rightValue) if leftKeys == None: return self.getEvaluatedValues(rightValue, rightKeys, leftValue) if rightKeys == None: return self.getEvaluatedValues(leftValue, leftKeys, rightValue) leftKeys.sort(reverse=True) rightKeys.sort(reverse=True) if leftKeys != rightKeys: print('Warning, the leftKeys are different from the rightKeys in getOperationValue in EvaluatorAddition for:') print('leftValue') print(leftValue) print(leftKeys) print('rightValue') print(rightValue) print(rightKeys) print(self) return None if leftValue.__class__ == dict or rightValue.__class__ == dict: evaluatedValues = {} for leftKey in leftKeys: evaluatedValues[leftKey] = self.getOperationValue(leftValue[leftKey], rightValue[leftKey]) return evaluatedValues evaluatedValues = [] for leftKey in leftKeys: evaluatedValues.append(self.getOperationValue(leftValue[leftKey], rightValue[leftKey])) return evaluatedValues def getValueFromValuePair(self, leftValue, rightValue): 'Add two values.' return leftValue + rightValue class EvaluatorEqual(EvaluatorAddition): 'Class to compare two evaluators.' def executePairOperation(self, evaluators, evaluatorIndex, operationLevel): 'Operate on two evaluators.' if operationLevel == 15: self.executePair(evaluators, evaluatorIndex) def getBooleanFromValuePair(self, leftValue, rightValue): 'Compare two values.' return leftValue == rightValue def getValueFromValuePair(self, leftValue, rightValue): 'Get value from comparison.' return self.getBooleanFromValuePair(leftValue, rightValue) class EvaluatorSubtraction(EvaluatorAddition): 'Class to subtract two evaluators.' def executeLeft( self, evaluators, evaluatorIndex ): 'Minus the value to the right.' 
leftIndex = evaluatorIndex - 1 rightIndex = evaluatorIndex + 1 leftValue = None if leftIndex >= 0: leftValue = evaluators[leftIndex].value if leftValue != None: return rightValue = evaluators[rightIndex].value if rightValue == None: print('Warning, can not minus.') print(evaluators[rightIndex].word) else: evaluators[rightIndex].value = self.getNegativeValue(rightValue) del evaluators[evaluatorIndex] def executeLeftOperation(self, evaluators, evaluatorIndex, operationLevel): 'Minus the value to the right.' if operationLevel == 200: self.executeLeft(evaluators, evaluatorIndex) def getNegativeValue( self, value ): 'Get the negative value.' keys = getKeys(value) if keys == None: return self.getValueFromSingleValue(value) for key in keys: value[key] = self.getNegativeValue(value[key]) return value def getValueFromSingleValue( self, value ): 'Minus value.' return -value def getValueFromValuePair(self, leftValue, rightValue): 'Subtract two values.' return leftValue - rightValue class EvaluatorAnd(EvaluatorAddition): 'Class to compare two evaluators.' def executePairOperation(self, evaluators, evaluatorIndex, operationLevel): 'Operate on two evaluators.' if operationLevel == 12: self.executePair(evaluators, evaluatorIndex) def getBooleanFromValuePair(self, leftValue, rightValue): 'And two values.' return leftValue and rightValue def getValueFromValuePair(self, leftValue, rightValue): 'Get value from comparison.' return self.getBooleanFromValuePair(leftValue, rightValue) class EvaluatorAttribute(Evaluator): 'Class to handle an attribute.' def executeFunction(self, evaluators, evaluatorIndex, nextEvaluator): 'Execute the function.' executeNextEvaluatorArguments(self, evaluators, evaluatorIndex, nextEvaluator) def executeRightOperation( self, evaluators, evaluatorIndex ): 'Execute operator which acts from the right.' 
attributeName = self.word[1 :] previousIndex = evaluatorIndex - 1 previousEvaluator = evaluators[previousIndex] if previousEvaluator.value.__class__ == dict: from fabmetheus_utilities.geometry.geometry_utilities.evaluate_enumerables import dictionary_attribute self.value = dictionary_attribute._getAccessibleAttribute(attributeName, previousEvaluator.value) elif previousEvaluator.value.__class__ == list: from fabmetheus_utilities.geometry.geometry_utilities.evaluate_enumerables import list_attribute self.value = list_attribute._getAccessibleAttribute(attributeName, previousEvaluator.value) elif previousEvaluator.value.__class__ == str: from fabmetheus_utilities.geometry.geometry_utilities.evaluate_enumerables import string_attribute self.value = string_attribute._getAccessibleAttribute(attributeName, previousEvaluator.value) else: attributeKeywords = attributeName.split('.') self.value = previousEvaluator.value for attributeKeyword in attributeKeywords: self.value = getattr(self.value, '_getAccessibleAttribute', None)(attributeKeyword) if self.value == None: print('Warning, EvaluatorAttribute in evaluate can not get a getAccessibleAttributeFunction for:') print(attributeName) print(previousEvaluator.value) print(self) return del evaluators[previousIndex] class EvaluatorBracketCurly(Evaluator): 'Class to evaluate a string.' def executeBracket(self, bracketBeginIndex, bracketEndIndex, evaluators): 'Execute the bracket.' 
for evaluatorIndex in xrange(bracketEndIndex - 3, bracketBeginIndex, - 1): bracketEndIndex = getEndIndexConvertEquationValue(bracketEndIndex, evaluatorIndex, evaluators) evaluatedExpressionValueEvaluators = getBracketEvaluators(bracketBeginIndex, bracketEndIndex, evaluators) self.value = {} for evaluatedExpressionValueEvaluator in evaluatedExpressionValueEvaluators: keyValue = evaluatedExpressionValueEvaluator.value self.value[keyValue.key] = keyValue.value del evaluators[bracketBeginIndex + 1: bracketEndIndex + 1] class EvaluatorBracketRound(Evaluator): 'Class to evaluate a string.' def __init__(self, elementNode, word): 'Set value to none.' self.arguments = [] self.value = None self.word = word def executeBracket( self, bracketBeginIndex, bracketEndIndex, evaluators ): 'Execute the bracket.' self.arguments = getBracketValuesDeleteEvaluator(bracketBeginIndex, bracketEndIndex, evaluators) if len( self.arguments ) < 1: return if len( self.arguments ) > 1: self.value = self.arguments else: self.value = self.arguments[0] def executeRightOperation( self, evaluators, evaluatorIndex ): 'Evaluate the statement and delete the evaluators.' previousIndex = evaluatorIndex - 1 if previousIndex < 0: return evaluators[ previousIndex ].executeFunction( evaluators, previousIndex, self ) class EvaluatorBracketSquare(Evaluator): 'Class to evaluate a string.' def executeBracket( self, bracketBeginIndex, bracketEndIndex, evaluators ): 'Execute the bracket.' self.value = getBracketValuesDeleteEvaluator(bracketBeginIndex, bracketEndIndex, evaluators) def executeRightOperation( self, evaluators, evaluatorIndex ): 'Evaluate the statement and delete the evaluators.' previousIndex = evaluatorIndex - 1 if previousIndex < 0: return if self.value.__class__ != list: return evaluators[ previousIndex ].executeKey( evaluators, self.value, previousIndex, self ) class EvaluatorClass(Evaluator): 'Class evaluator class.' def __init__(self, elementNode, word): 'Set value to none.' 
self.elementNode = elementNode self.value = None self.word = word def executeFunction(self, evaluators, evaluatorIndex, nextEvaluator): 'Execute the function.' if self.elementNode.xmlObject == None: self.elementNode.xmlObject = FunctionVariable(self.elementNode) nextEvaluator.value = ClassObject(self.elementNode) initializeFunction = None if '_init' in self.elementNode.xmlObject.functionDictionary: function = self.elementNode.xmlObject.functionDictionary['_init'] function.classObject = nextEvaluator.value setFunctionLocalDictionary(nextEvaluator.arguments, function) function.getReturnValue() del evaluators[evaluatorIndex] class EvaluatorComma(Evaluator): 'Class to join two evaluators.' def executePairOperation(self, evaluators, evaluatorIndex, operationLevel): 'Operate on two evaluators.' if operationLevel != 0: return previousIndex = evaluatorIndex - 1 if previousIndex < 0: evaluators[evaluatorIndex].value = None return if evaluators[previousIndex].word == ',': evaluators[evaluatorIndex].value = None return del evaluators[evaluatorIndex] class EvaluatorConcatenate(Evaluator): 'Class to join two evaluators.' def executePairOperation(self, evaluators, evaluatorIndex, operationLevel): 'Operate on two evaluators.' 
if operationLevel != 80: return leftIndex = evaluatorIndex - 1 if leftIndex < 0: del evaluators[evaluatorIndex] return rightIndex = evaluatorIndex + 1 if rightIndex >= len(evaluators): del evaluators[ leftIndex : rightIndex ] return leftValue = evaluators[leftIndex].value rightValue = evaluators[rightIndex].value if leftValue.__class__ == rightValue.__class__ and (leftValue.__class__ == list or rightValue.__class__ == str): evaluators[leftIndex].value = leftValue + rightValue del evaluators[ evaluatorIndex : evaluatorIndex + 2 ] return if leftValue.__class__ == list and rightValue.__class__ == int: if rightValue > 0: originalList = leftValue[:] for copyIndex in xrange( rightValue - 1 ): leftValue += originalList evaluators[leftIndex].value = leftValue del evaluators[ evaluatorIndex : evaluatorIndex + 2 ] return if leftValue.__class__ == dict and rightValue.__class__ == dict: leftValue.update(rightValue) evaluators[leftIndex].value = leftValue del evaluators[ evaluatorIndex : evaluatorIndex + 2 ] return del evaluators[ leftIndex : evaluatorIndex + 2 ] class EvaluatorDictionary(Evaluator): 'Class to join two evaluators.' def executePairOperation(self, evaluators, evaluatorIndex, operationLevel): 'Operate on two evaluators.' if operationLevel != 10: return leftEvaluatorIndex = evaluatorIndex - 1 if leftEvaluatorIndex < 0: print('Warning, leftEvaluatorIndex is less than zero in EvaluatorDictionary for:') print(self) print(evaluators) return rightEvaluatorIndex = evaluatorIndex + 1 if rightEvaluatorIndex >= len(evaluators): print('Warning, rightEvaluatorIndex too high in EvaluatorDictionary for:') print(rightEvaluatorIndex) print(self) print(evaluators) return evaluators[rightEvaluatorIndex].value = KeyValue(evaluators[leftEvaluatorIndex].value, evaluators[rightEvaluatorIndex].value) del evaluators[ leftEvaluatorIndex : rightEvaluatorIndex ] class EvaluatorDivision(EvaluatorAddition): 'Class to divide two evaluators.' 
def executePairOperation(self, evaluators, evaluatorIndex, operationLevel): 'Operate on two evaluators.' if operationLevel == 40: self.executePair(evaluators, evaluatorIndex) def getValueFromValuePair(self, leftValue, rightValue): 'Divide two values.' return leftValue / rightValue class EvaluatorElement(Evaluator): 'Element evaluator class.' def __init__(self, elementNode, word): 'Set value to none.' self.elementNode = elementNode self.value = None self.word = word def executeCenterOperation(self, evaluators, evaluatorIndex): 'Execute operator which acts on the center.' dotIndex = self.word.find('.') if dotIndex < 0: print('Warning, EvaluatorElement in evaluate can not find the dot for:') print(functionName) print(self) return attributeName = self.word[dotIndex + 1 :] moduleName = self.word[: dotIndex] if moduleName in globalModuleFunctionsDictionary: self.value = globalModuleFunctionsDictionary[moduleName](attributeName, self.elementNode) return pluginModule = None if moduleName in globalElementNameSet: pluginModule = archive.getModuleWithPath(archive.getElementsPath(moduleName)) if pluginModule == None: print('Warning, EvaluatorElement in evaluate can not get a pluginModule for:') print(moduleName) print(self) return getAccessibleAttributeFunction = pluginModule._getAccessibleAttribute globalModuleFunctionsDictionary[moduleName] = getAccessibleAttributeFunction self.value = getAccessibleAttributeFunction(attributeName, self.elementNode) def executeFunction(self, evaluators, evaluatorIndex, nextEvaluator): 'Execute the function.' executeNextEvaluatorArguments(self, evaluators, evaluatorIndex, nextEvaluator) class EvaluatorFalse(Evaluator): 'Class to evaluate a string.' def __init__(self, elementNode, word): 'Set value to zero.' self.value = False self.word = word class EvaluatorFunction(Evaluator): 'Function evaluator class.' def __init__(self, elementNode, word): 'Set value to none.' 
self.elementNode = elementNode self.value = None self.word = word def executeFunction(self, evaluators, evaluatorIndex, nextEvaluator): 'Execute the function.' if self.elementNode.xmlObject == None: if 'return' in self.elementNode.attributes: value = self.elementNode.attributes['return'] self.elementNode.xmlObject = getEvaluatorSplitWords(value) else: self.elementNode.xmlObject = [] self.function = Function(self.elementNode ) setFunctionLocalDictionary(nextEvaluator.arguments, self.function) nextEvaluator.value = self.function.getReturnValue() del evaluators[evaluatorIndex] class EvaluatorFundamental(Evaluator): 'Fundamental evaluator class.' def executeCenterOperation(self, evaluators, evaluatorIndex): 'Execute operator which acts on the center.' dotIndex = self.word.find('.') if dotIndex < 0: print('Warning, EvaluatorFundamental in evaluate can not find the dot for:') print(functionName) print(self) return attributeName = self.word[dotIndex + 1 :] moduleName = self.word[: dotIndex] if moduleName in globalModuleFunctionsDictionary: self.value = globalModuleFunctionsDictionary[moduleName](attributeName) return pluginModule = None if moduleName in globalFundamentalNameSet: pluginModule = archive.getModuleWithPath(archive.getFundamentalsPath(moduleName)) else: underscoredName = '_' + moduleName if underscoredName in globalFundamentalNameSet: pluginModule = archive.getModuleWithPath(archive.getFundamentalsPath(underscoredName)) if pluginModule == None: print('Warning, EvaluatorFundamental in evaluate can not get a pluginModule for:') print(moduleName) print(self) return getAccessibleAttributeFunction = pluginModule._getAccessibleAttribute globalModuleFunctionsDictionary[moduleName] = getAccessibleAttributeFunction self.value = getAccessibleAttributeFunction(attributeName) def executeFunction(self, evaluators, evaluatorIndex, nextEvaluator): 'Execute the function.' 
executeNextEvaluatorArguments(self, evaluators, evaluatorIndex, nextEvaluator) class EvaluatorGreaterEqual( EvaluatorEqual ): 'Class to compare two evaluators.' def getBooleanFromValuePair(self, leftValue, rightValue): 'Compare two values.' return leftValue >= rightValue class EvaluatorGreater( EvaluatorEqual ): 'Class to compare two evaluators.' def getBooleanFromValuePair(self, leftValue, rightValue): 'Compare two values.' return leftValue > rightValue class EvaluatorLessEqual( EvaluatorEqual ): 'Class to compare two evaluators.' def getBooleanFromValuePair(self, leftValue, rightValue): 'Compare two values.' return leftValue <= rightValue class EvaluatorLess( EvaluatorEqual ): 'Class to compare two evaluators.' def getBooleanFromValuePair(self, leftValue, rightValue): 'Compare two values.' return leftValue < rightValue class EvaluatorLocal(EvaluatorElement): 'Class to get a local variable.' def executeCenterOperation(self, evaluators, evaluatorIndex): 'Execute operator which acts on the center.' functions = self.elementNode.getXMLProcessor().functions if len(functions) < 1: print('Warning, there are no functions in EvaluatorLocal in evaluate for:') print(self.word) return attributeKeywords = self.word.split('.') self.value = functions[-1].localDictionary[attributeKeywords[0]] for attributeKeyword in attributeKeywords[1 :]: self.value = self.value._getAccessibleAttribute(attributeKeyword) class EvaluatorModulo( EvaluatorDivision ): 'Class to modulo two evaluators.' def getValueFromValuePair(self, leftValue, rightValue): 'Modulo two values.' return leftValue % rightValue class EvaluatorMultiplication( EvaluatorDivision ): 'Class to multiply two evaluators.' def getValueFromValuePair(self, leftValue, rightValue): 'Multiply two values.' return leftValue * rightValue class EvaluatorNone(Evaluator): 'Class to evaluate None.' def __init__(self, elementNode, word): 'Set value to none.' 
self.value = None self.word = str(word) class EvaluatorNot(EvaluatorSubtraction): 'Class to compare two evaluators.' def executeLeftOperation(self, evaluators, evaluatorIndex, operationLevel): 'Minus the value to the right.' if operationLevel == 13: self.executeLeft(evaluators, evaluatorIndex) def getValueFromSingleValue( self, value ): 'Minus value.' return not value class EvaluatorNotEqual( EvaluatorEqual ): 'Class to compare two evaluators.' def getBooleanFromValuePair(self, leftValue, rightValue): 'Compare two values.' return leftValue != rightValue class EvaluatorNumeric(Evaluator): 'Class to evaluate a string.' def __init__(self, elementNode, word): 'Set value.' self.value = None self.word = word try: if '.' in word: self.value = float(word) else: self.value = int(word) except: print('Warning, EvaluatorNumeric in evaluate could not get a numeric value for:') print(word) print(elementNode) class EvaluatorOr( EvaluatorAnd ): 'Class to compare two evaluators.' def getBooleanFromValuePair(self, leftValue, rightValue): 'Or two values.' return leftValue or rightValue class EvaluatorPower(EvaluatorAddition): 'Class to power two evaluators.' def executePairOperation(self, evaluators, evaluatorIndex, operationLevel): 'Operate on two evaluators.' if operationLevel == 60: self.executePair(evaluators, evaluatorIndex) def getValueFromValuePair(self, leftValue, rightValue): 'Power of two values.' return leftValue ** rightValue class EvaluatorSelf(EvaluatorElement): 'Class to handle self.' def executeCenterOperation(self, evaluators, evaluatorIndex): 'Execute operator which acts on the center.' 
functions = self.elementNode.getXMLProcessor().functions if len(functions) < 1: print('Warning, there are no functions in executeCenterOperation in EvaluatorSelf in evaluate for:') print(self.elementNode) return function = functions[-1] attributeKeywords = self.word.split('.') self.value = function.classObject for attributeKeyword in attributeKeywords[1 :]: self.value = self.value._getAccessibleAttribute(attributeKeyword) class EvaluatorTrue(Evaluator): 'Class to evaluate a string.' def __init__(self, elementNode, word): 'Set value to true.' self.value = True self.word = word class EvaluatorValue(Evaluator): 'Class to evaluate a string.' def __init__(self, word): 'Set value to none.' self.value = word self.word = str(word) class Function(BaseFunction): 'Class to get equation results.' def __init__(self, elementNode): 'Initialize.' self.elementNode = elementNode self.evaluatorSplitLine = elementNode.xmlObject self.localDictionary = {} self.xmlProcessor = elementNode.getXMLProcessor() def getReturnValueWithoutDeletion(self): 'Get return value without deleting last function.' self.returnValue = None self.xmlProcessor.functions.append(self) if len(self.evaluatorSplitLine) < 1: self.shouldReturn = False self.processChildNodes(self.elementNode) else: self.returnValue = getEvaluatedExpressionValueBySplitLine(self.elementNode, self.evaluatorSplitLine) return self.returnValue class FunctionVariable: 'Class to hold class functions and variable set.' def __init__(self, elementNode): 'Initialize.' self.functionDictionary = {} self.variables = [] self.processClass(elementNode) def addToVariableSet(self, elementNode): 'Add to variables.' setLocalAttribute(elementNode) keySplitLine = elementNode.xmlObject.key.split('.') if len(keySplitLine) == 2: if keySplitLine[0] == 'self': variable = keySplitLine[1] if variable not in self.variables: self.variables.append(variable) def processClass(self, elementNode): 'Add class to FunctionVariable.' 
for childNode in elementNode.childNodes: self.processFunction(childNode) if 'parentNode' in elementNode.attributes: self.processClass(elementNode.getElementNodeByID(elementNode.attributes['parentNode'])) def processFunction(self, elementNode): 'Add function to function dictionary.' if elementNode.getNodeName() != 'function': return idKey = elementNode.attributes['id'] if idKey in self.functionDictionary: return self.functionDictionary[idKey] = ClassFunction(elementNode) for childNode in elementNode.childNodes: self.processStatement(childNode) def processStatement(self, elementNode): 'Add self statement to variables.' if elementNode.getNodeName() == 'statement': self.addToVariableSet(elementNode) for childNode in elementNode.childNodes: self.processStatement(childNode) class KeyValue: 'Class to hold a key value.' def __init__(self, key=None, value=None): 'Get key value.' self.key = key self.value = value def __repr__(self): 'Get the string representation of this KeyValue.' return str(self.__dict__) def getByCharacter( self, character, line ): 'Get by character.' dotIndex = line.find( character ) if dotIndex < 0: self.key = line self.value = None return self self.key = line[: dotIndex] self.value = line[dotIndex + 1 :] return self def getByDot(self, line): 'Get by dot.' return self.getByCharacter('.', line ) def getByEqual(self, line): 'Get by dot.' return self.getByCharacter('=', line ) class ModuleElementNode: 'Class to get the in attribute, the index name and the value name.' def __init__( self, elementNode): 'Initialize.' 
self.conditionSplitWords = None self.elseElement = None if 'condition' in elementNode.attributes: self.conditionSplitWords = getEvaluatorSplitWords( elementNode.attributes['condition'] ) else: print('Warning, could not find the condition attribute in ModuleElementNode in evaluate for:') print(elementNode) return if len( self.conditionSplitWords ) < 1: self.conditionSplitWords = None print('Warning, could not get split words for the condition attribute in ModuleElementNode in evaluate for:') print(elementNode) nextIndex = getNextChildIndex(elementNode) if nextIndex >= len( elementNode.parentNode.childNodes ): return nextElementNode = elementNode.parentNode.childNodes[ nextIndex ] lowerLocalName = nextElementNode.getNodeName().lower() if lowerLocalName != 'else' and lowerLocalName != 'elif': return xmlProcessor = elementNode.getXMLProcessor() if lowerLocalName not in xmlProcessor.namePathDictionary: return self.pluginModule = archive.getModuleWithPath( xmlProcessor.namePathDictionary[ lowerLocalName ] ) if self.pluginModule == None: return self.elseElement = nextElementNode def processElse(self, elementNode): 'Process the else statement.' 
if self.elseElement != None: self.pluginModule.processElse( self.elseElement) globalCreationDictionary = archive.getGeometryDictionary('creation') globalDictionaryOperatorBegin = { '||' : EvaluatorConcatenate, '==' : EvaluatorEqual, '>=' : EvaluatorGreaterEqual, '<=' : EvaluatorLessEqual, '!=' : EvaluatorNotEqual, '**' : EvaluatorPower } globalModuleEvaluatorDictionary = {} globalFundamentalNameSet = set(archive.getPluginFileNamesFromDirectoryPath(archive.getFundamentalsPath())) addPrefixDictionary(globalModuleEvaluatorDictionary, globalFundamentalNameSet, EvaluatorFundamental) globalElementNameSet = set(archive.getPluginFileNamesFromDirectoryPath(archive.getElementsPath())) addPrefixDictionary(globalModuleEvaluatorDictionary, globalElementNameSet, EvaluatorElement) globalModuleEvaluatorDictionary['self'] = EvaluatorSelf globalSplitDictionaryOperator = { '+' : EvaluatorAddition, '{' : EvaluatorBracketCurly, '}' : Evaluator, '(' : EvaluatorBracketRound, ')' : Evaluator, '[' : EvaluatorBracketSquare, ']' : Evaluator, ',' : EvaluatorComma, ':' : EvaluatorDictionary, '/' : EvaluatorDivision, '>' : EvaluatorGreater, '<' : EvaluatorLess, '%' : EvaluatorModulo, '*' : EvaluatorMultiplication, '-' : EvaluatorSubtraction } globalSplitDictionary = getSplitDictionary() # must be after globalSplitDictionaryOperator
unknown
codeparrot/codeparrot-clean
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Modules required to work with ironic_inspector:
    https://pypi.python.org/pypi/ironic-inspector
"""

import eventlet
from futurist import periodics
from oslo_log import log as logging
from oslo_utils import importutils

from ironic.common import exception
from ironic.common.i18n import _
from ironic.common.i18n import _LE
from ironic.common.i18n import _LI
from ironic.common import keystone
from ironic.common import states
from ironic.conductor import task_manager
from ironic.conf import CONF
from ironic.drivers import base

LOG = logging.getLogger(__name__)

CONF.import_opt('auth_strategy', 'ironic.api.app')

# Optional dependency: None if python-ironic-inspector-client is not installed;
# Inspector.__init__ raises DriverLoadError in that case.
client = importutils.try_import('ironic_inspector_client')

INSPECTOR_API_VERSION = (1, 0)


class Inspector(base.InspectInterface):
    """In-band inspection via ironic-inspector project."""

    @classmethod
    def create_if_enabled(cls, driver_name):
        """Create instance of Inspector if it's enabled.

        Reports log warning with given driver_name if it's not.

        :return: Inspector instance or None
        """
        if CONF.inspector.enabled:
            return cls()
        else:
            # Implicitly returns None when disabled.
            LOG.info(_LI("Inspection via ironic-inspector is disabled in "
                         "configuration for driver %s. To enable, change "
                         "[inspector] enabled = True."), driver_name)

    def __init__(self):
        # Fail driver load early when inspection is disabled or the client
        # library is missing, rather than at first use.
        if not CONF.inspector.enabled:
            raise exception.DriverLoadError(
                _('ironic-inspector support is disabled'))
        if not client:
            raise exception.DriverLoadError(
                _('python-ironic-inspector-client Python module not found'))

    def get_properties(self):
        """Return the properties of the interface.

        :returns: dictionary of <property name>:<property description> entries.
        """
        return {}  # no properties

    def validate(self, task):
        """Validate the driver-specific inspection information.

        If invalid, raises an exception; otherwise returns None.

        :param task: a task from TaskManager.
        """
        # NOTE(deva): this is not callable if inspector is disabled
        #             so don't raise an exception -- just pass.
        pass

    def inspect_hardware(self, task):
        """Inspect hardware to obtain the hardware properties.

        This particular implementation only starts inspection using
        ironic-inspector. Results will be checked in a periodic task.

        :param task: a task from TaskManager.
        :returns: states.INSPECTING
        """
        LOG.debug('Starting inspection for node %(uuid)s using '
                  'ironic-inspector', {'uuid': task.node.uuid})

        # NOTE(dtantsur): we're spawning a short-living green thread so that
        # we can release a lock as soon as possible and allow ironic-inspector
        # to operate on a node.
        eventlet.spawn_n(_start_inspection, task.node.uuid, task.context)
        return states.INSPECTING

    # NOTE(review): the spacing/enabled options are read once at import time
    # when the decorator is evaluated.
    @periodics.periodic(spacing=CONF.inspector.status_check_period,
                        enabled=CONF.inspector.enabled)
    def _periodic_check_result(self, manager, context):
        """Periodic task checking results of inspection."""
        filters = {'provision_state': states.INSPECTING}
        node_iter = manager.iter_nodes(filters=filters)

        for node_uuid, driver in node_iter:
            try:
                lock_purpose = 'checking hardware inspection status'
                with task_manager.acquire(context, node_uuid,
                                          shared=True,
                                          purpose=lock_purpose) as task:
                    _check_status(task)
            except (exception.NodeLocked, exception.NodeNotFound):
                # Node is busy or already gone; it will be retried on the
                # next periodic pass.
                continue


def _call_inspector(func, uuid, context):
    """Wrapper around calls to inspector."""
    # NOTE(dtantsur): due to bug #1428652 None is not accepted for base_url.
    kwargs = {'api_version': INSPECTOR_API_VERSION}
    if CONF.inspector.service_url:
        kwargs['base_url'] = CONF.inspector.service_url
    return func(uuid, auth_token=context.auth_token, **kwargs)


def _start_inspection(node_uuid, context):
    """Call to inspector to start inspection."""
    context.ensure_thread_contain_context()
    try:
        _call_inspector(client.introspect, node_uuid, context)
    except Exception as exc:
        LOG.exception(_LE('Exception during contacting ironic-inspector '
                          'for inspection of node %(node)s: %(err)s'),
                      {'node': node_uuid, 'err': exc})
        # NOTE(dtantsur): if acquire fails our last option is to rely on
        # timeout
        lock_purpose = 'recording hardware inspection error'
        with task_manager.acquire(context, node_uuid,
                                  purpose=lock_purpose) as task:
            task.node.last_error = _('Failed to start inspection: %s') % exc
            task.process_event('fail')
    else:
        LOG.info(_LI('Node %s was sent to inspection to ironic-inspector'),
                 node_uuid)


def _check_status(task):
    """Check inspection status for node given by a task."""
    node = task.node
    # Only nodes still INSPECTING via this driver are of interest.
    if node.provision_state != states.INSPECTING:
        return
    if not isinstance(task.driver.inspect, Inspector):
        return

    LOG.debug('Calling to inspector to check status of node %s',
              task.node.uuid)

    # NOTE(dtantsur): periodic tasks do not have proper tokens in context
    if CONF.auth_strategy == 'keystone':
        task.context.auth_token = keystone.get_admin_auth_token()
    try:
        status = _call_inspector(client.get_status, node.uuid, task.context)
    except Exception:
        # NOTE(dtantsur): get_status should not normally raise
        # let's assume it's a transient failure and retry later
        LOG.exception(_LE('Unexpected exception while getting '
                          'inspection status for node %s, will retry later'),
                      node.uuid)
        return

    error = status.get('error')
    finished = status.get('finished')
    if not error and not finished:
        # Inspection still in progress; check again on the next pass.
        return

    # If the inspection has finished or failed, we need to update the node, so
    # upgrade our lock to an exclusive one.
    task.upgrade_lock()
    node = task.node

    if error:
        LOG.error(_LE('Inspection failed for node %(uuid)s '
                      'with error: %(err)s'),
                  {'uuid': node.uuid, 'err': error})
        node.last_error = (_('ironic-inspector inspection failed: %s')
                           % error)
        task.process_event('fail')
    elif finished:
        LOG.info(_LI('Inspection finished successfully for node %s'),
                 node.uuid)
        task.process_event('done')
unknown
codeparrot/codeparrot-clean
# Copyright (c) 2007 The Hewlett-Packard Development Company # All rights reserved. # # The license below extends only to copyright in the software and shall # not be construed as granting a license to any other intellectual # property including but not limited to intellectual property relating # to a hardware implementation of the functionality of the software # licensed hereunder. You may use the software subject to the license # terms below provided that you ensure that this notice is replicated # unmodified and in its entirety in all distributions of the software, # modified or unmodified, in source code or in binary form. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer; # redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution; # neither the name of the copyright holders nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # Authors: Gabe Black microcode = ''' def macroop PCMPEQB_XMM_XMM { mcmpi2r xmml, xmml, xmmlm, size=1, ext=0 mcmpi2r xmmh, xmmh, xmmhm, size=1, ext=0 }; def macroop PCMPEQB_XMM_M { ldfp ufp1, seg, sib, "DISPLACEMENT", dataSize=8 ldfp ufp2, seg, sib, "DISPLACEMENT + 8", dataSize=8 mcmpi2r xmml, xmml, ufp1, size=1, ext=0 mcmpi2r xmmh, xmmh, ufp2, size=1, ext=0 }; def macroop PCMPEQB_XMM_P { rdip t7 ldfp ufp1, seg, riprel, "DISPLACEMENT", dataSize=8 ldfp ufp2, seg, riprel, "DISPLACEMENT + 8", dataSize=8 mcmpi2r xmml, xmml, ufp1, size=1, ext=0 mcmpi2r xmmh, xmmh, ufp2, size=1, ext=0 }; def macroop PCMPEQW_XMM_XMM { mcmpi2r xmml, xmml, xmmlm, size=2, ext=0 mcmpi2r xmmh, xmmh, xmmhm, size=2, ext=0 }; def macroop PCMPEQW_XMM_M { ldfp ufp1, seg, sib, "DISPLACEMENT", dataSize=8 ldfp ufp2, seg, sib, "DISPLACEMENT + 8", dataSize=8 mcmpi2r xmml, xmml, ufp1, size=2, ext=0 mcmpi2r xmmh, xmmh, ufp2, size=2, ext=0 }; def macroop PCMPEQW_XMM_P { rdip t7 ldfp ufp1, seg, riprel, "DISPLACEMENT", dataSize=8 ldfp ufp2, seg, riprel, "DISPLACEMENT + 8", dataSize=8 mcmpi2r xmml, xmml, ufp1, size=2, ext=0 mcmpi2r xmmh, xmmh, ufp2, size=2, ext=0 }; def macroop PCMPEQD_XMM_XMM { mcmpi2r xmml, xmml, xmmlm, size=4, ext=0 mcmpi2r xmmh, xmmh, xmmhm, size=4, ext=0 }; def macroop PCMPEQD_XMM_M { ldfp ufp1, seg, sib, "DISPLACEMENT", dataSize=8 ldfp ufp2, seg, sib, "DISPLACEMENT + 8", dataSize=8 mcmpi2r xmml, xmml, ufp1, size=4, ext=0 mcmpi2r xmmh, xmmh, ufp2, size=4, ext=0 }; def 
macroop PCMPEQD_XMM_P { rdip t7 ldfp ufp1, seg, riprel, "DISPLACEMENT", dataSize=8 ldfp ufp2, seg, riprel, "DISPLACEMENT + 8", dataSize=8 mcmpi2r xmml, xmml, ufp1, size=4, ext=0 mcmpi2r xmmh, xmmh, ufp2, size=4, ext=0 }; def macroop PCMPGTB_XMM_XMM { mcmpi2r xmml, xmml, xmmlm, size=1, ext=2 mcmpi2r xmmh, xmmh, xmmhm, size=1, ext=2 }; def macroop PCMPGTB_XMM_M { ldfp ufp1, seg, sib, "DISPLACEMENT", dataSize=8 ldfp ufp2, seg, sib, "DISPLACEMENT + 8", dataSize=8 mcmpi2r xmml, xmml, ufp1, size=1, ext=2 mcmpi2r xmmh, xmmh, ufp2, size=1, ext=2 }; def macroop PCMPGTB_XMM_P { rdip t7 ldfp ufp1, seg, riprel, "DISPLACEMENT", dataSize=8 ldfp ufp2, seg, riprel, "DISPLACEMENT + 8", dataSize=8 mcmpi2r xmml, xmml, ufp1, size=1, ext=2 mcmpi2r xmmh, xmmh, ufp2, size=1, ext=2 }; def macroop PCMPGTW_XMM_XMM { mcmpi2r xmml, xmml, xmmlm, size=2, ext=2 mcmpi2r xmmh, xmmh, xmmhm, size=2, ext=2 }; def macroop PCMPGTW_XMM_M { ldfp ufp1, seg, sib, "DISPLACEMENT", dataSize=8 ldfp ufp2, seg, sib, "DISPLACEMENT + 8", dataSize=8 mcmpi2r xmml, xmml, ufp1, size=2, ext=2 mcmpi2r xmmh, xmmh, ufp2, size=2, ext=2 }; def macroop PCMPGTW_XMM_P { rdip t7 ldfp ufp1, seg, riprel, "DISPLACEMENT", dataSize=8 ldfp ufp2, seg, riprel, "DISPLACEMENT + 8", dataSize=8 mcmpi2r xmml, xmml, ufp1, size=2, ext=2 mcmpi2r xmmh, xmmh, ufp2, size=2, ext=2 }; def macroop PCMPGTD_XMM_XMM { mcmpi2r xmml, xmml, xmmlm, size=4, ext=2 mcmpi2r xmmh, xmmh, xmmhm, size=4, ext=2 }; def macroop PCMPGTD_XMM_M { ldfp ufp1, seg, sib, "DISPLACEMENT", dataSize=8 ldfp ufp2, seg, sib, "DISPLACEMENT + 8", dataSize=8 mcmpi2r xmml, xmml, ufp1, size=4, ext=2 mcmpi2r xmmh, xmmh, ufp2, size=4, ext=2 }; def macroop PCMPGTD_XMM_P { rdip t7 ldfp ufp1, seg, riprel, "DISPLACEMENT", dataSize=8 ldfp ufp2, seg, riprel, "DISPLACEMENT + 8", dataSize=8 mcmpi2r xmml, xmml, ufp1, size=4, ext=2 mcmpi2r xmmh, xmmh, ufp2, size=4, ext=2 }; '''
unknown
codeparrot/codeparrot-clean
# IMAP folder support # Copyright (C) 2002-2007 John Goerzen # <jgoerzen@complete.org> # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA from Base import BaseFolder import imaplib from offlineimap import imaputil, imaplibutil from offlineimap.ui import UIBase from offlineimap.version import versionstr import rfc822, time, string, random, binascii, re from StringIO import StringIO from copy import copy import time class IMAPFolder(BaseFolder): def __init__(self, imapserver, name, visiblename, accountname, repository): self.config = imapserver.config self.expunge = repository.getexpunge() self.name = imaputil.dequote(name) self.root = None # imapserver.root self.sep = imapserver.delim self.imapserver = imapserver self.messagelist = None self.visiblename = visiblename self.accountname = accountname self.repository = repository self.randomgenerator = random.Random() BaseFolder.__init__(self) def selectro(self, imapobj): """Select this folder when we do not need write access. 
Prefer SELECT to EXAMINE if we can, since some servers (Courier) do not stabilize UID validity until the folder is selected.""" try: imapobj.select(self.getfullname()) except imapobj.readonly: imapobj.select(self.getfullname(), readonly = 1) def getaccountname(self): return self.accountname def suggeststhreads(self): return 1 def waitforthread(self): self.imapserver.connectionwait() def getcopyinstancelimit(self): return 'MSGCOPY_' + self.repository.getname() def getvisiblename(self): return self.visiblename def getuidvalidity(self): imapobj = self.imapserver.acquireconnection() try: # Primes untagged_responses self.selectro(imapobj) return long(imapobj.untagged_responses['UIDVALIDITY'][0]) finally: self.imapserver.releaseconnection(imapobj) def quickchanged(self, statusfolder): # An IMAP folder has definitely changed if the number of # messages or the UID of the last message have changed. Otherwise # only flag changes could have occurred. imapobj = self.imapserver.acquireconnection() try: # Primes untagged_responses imapobj.select(self.getfullname(), readonly = 1, force = 1) try: # Some mail servers do not return an EXISTS response if # the folder is empty. maxmsgid = long(imapobj.untagged_responses['EXISTS'][0]) except KeyError: return True # Different number of messages than last time? if maxmsgid != len(statusfolder.getmessagelist()): return True if maxmsgid < 1: # No messages; return return False # Now, get the UID for the last message. response = imapobj.fetch('%d' % maxmsgid, '(UID)')[1] finally: self.imapserver.releaseconnection(imapobj) # Discard the message number. messagestr = string.split(response[0], maxsplit = 1)[1] options = imaputil.flags2hash(messagestr) if not options.has_key('UID'): return True uid = long(options['UID']) saveduids = statusfolder.getmessagelist().keys() saveduids.sort() if uid != saveduids[-1]: return True return False # TODO: Make this so that it can define a date that would be the oldest messages etc. 
def cachemessagelist(self): imapobj = self.imapserver.acquireconnection() self.messagelist = {} try: # Primes untagged_responses imapobj.select(self.getfullname(), readonly = 1, force = 1) maxage = self.config.getdefaultint("Account " + self.accountname, "maxage", -1) maxsize = self.config.getdefaultint("Account " + self.accountname, "maxsize", -1) if (maxage != -1) | (maxsize != -1): try: search_condition = "("; if(maxage != -1): #find out what the oldest message is that we should look at oldest_time_struct = time.gmtime(time.time() - (60*60*24*maxage)) #format this manually - otherwise locales could cause problems monthnames_standard = ["Jan", "Feb", "Mar", "Apr", "May", \ "June", "July", "Aug", "Sep", "Oct", "Nov", "Dec"] our_monthname = monthnames_standard[oldest_time_struct[1]-1] daystr = "%(day)02d" % {'day' : oldest_time_struct[2]} date_search_str = "SINCE " + daystr + "-" + our_monthname \ + "-" + str(oldest_time_struct[0]) search_condition += date_search_str if(maxsize != -1): if(maxage != 1): #There are two conditions - add a space search_condition += " " search_condition += "SMALLER " + self.config.getdefault("Account " + self.accountname, "maxsize", -1) search_condition += ")" searchresult = imapobj.search(None, search_condition) #result would come back seperated by space - to change into a fetch #statement we need to change space to comma messagesToFetch = searchresult[1][0].replace(" ", ",") except KeyError: return if len(messagesToFetch) < 1: # No messages; return return else: try: # Some mail servers do not return an EXISTS response if # the folder is empty. maxmsgid = long(imapobj.untagged_responses['EXISTS'][0]) messagesToFetch = '1:%d' % maxmsgid; except KeyError: return if maxmsgid < 1: #no messages; return return # Now, get the flags and UIDs for these. # We could conceivably get rid of maxmsgid and just say # '1:*' here. 
response = imapobj.fetch(messagesToFetch, '(FLAGS UID)')[1] finally: self.imapserver.releaseconnection(imapobj) for messagestr in response: # Discard the message number. messagestr = string.split(messagestr, maxsplit = 1)[1] options = imaputil.flags2hash(messagestr) if not options.has_key('UID'): UIBase.getglobalui().warn('No UID in message with options %s' %\ str(options), minor = 1) else: uid = long(options['UID']) flags = imaputil.flagsimap2maildir(options['FLAGS']) rtime = imaplibutil.Internaldate2epoch(messagestr) self.messagelist[uid] = {'uid': uid, 'flags': flags, 'time': rtime} def getmessagelist(self): return self.messagelist def getmessage(self, uid): ui = UIBase.getglobalui() imapobj = self.imapserver.acquireconnection() try: imapobj.select(self.getfullname(), readonly = 1) initialresult = imapobj.uid('fetch', '%d' % uid, '(BODY.PEEK[])') ui.debug('imap', 'Returned object from fetching %d: %s' % \ (uid, str(initialresult))) return initialresult[1][0][1].replace("\r\n", "\n") finally: self.imapserver.releaseconnection(imapobj) def getmessagetime(self, uid): return self.messagelist[uid]['time'] def getmessageflags(self, uid): return self.messagelist[uid]['flags'] def savemessage_getnewheader(self, content): headername = 'X-OfflineIMAP-%s-' % str(binascii.crc32(content)).replace('-', 'x') headername += binascii.hexlify(self.repository.getname()) + '-' headername += binascii.hexlify(self.getname()) headervalue= '%d-' % long(time.time()) headervalue += str(self.randomgenerator.random()).replace('.', '') headervalue += '-v' + versionstr return (headername, headervalue) def savemessage_addheader(self, content, headername, headervalue): ui = UIBase.getglobalui() ui.debug('imap', 'savemessage_addheader: called to add %s: %s' % (headername, headervalue)) insertionpoint = content.find("\r\n") ui.debug('imap', 'savemessage_addheader: insertionpoint = %d' % insertionpoint) leader = content[0:insertionpoint] ui.debug('imap', 'savemessage_addheader: leader = %s' % 
repr(leader)) if insertionpoint == 0 or insertionpoint == -1: newline = '' insertionpoint = 0 else: newline = "\r\n" newline += "%s: %s" % (headername, headervalue) ui.debug('imap', 'savemessage_addheader: newline = ' + repr(newline)) trailer = content[insertionpoint:] ui.debug('imap', 'savemessage_addheader: trailer = ' + repr(trailer)) return leader + newline + trailer def savemessage_searchforheader(self, imapobj, headername, headervalue): if imapobj.untagged_responses.has_key('APPENDUID'): return long(imapobj.untagged_responses['APPENDUID'][-1].split(' ')[1]) ui = UIBase.getglobalui() ui.debug('imap', 'savemessage_searchforheader called for %s: %s' % \ (headername, headervalue)) # Now find the UID it got. headervalue = imapobj._quote(headervalue) try: matchinguids = imapobj.uid('search', 'HEADER', headername, headervalue)[1][0] except imapobj.error, err: # IMAP server doesn't implement search or had a problem. ui.debug('imap', "savemessage_searchforheader: got IMAP error '%s' while attempting to UID SEARCH for message with header %s" % (err, headername)) return 0 ui.debug('imap', 'savemessage_searchforheader got initial matchinguids: ' + repr(matchinguids)) if matchinguids == '': ui.debug('imap', "savemessage_searchforheader: UID SEARCH for message with header %s yielded no results" % headername) return 0 matchinguids = matchinguids.split(' ') ui.debug('imap', 'savemessage_searchforheader: matchinguids now ' + \ repr(matchinguids)) if len(matchinguids) != 1 or matchinguids[0] == None: raise ValueError, "While attempting to find UID for message with header %s, got wrong-sized matchinguids of %s" % (headername, str(matchinguids)) matchinguids.sort() return long(matchinguids[0]) def savemessage(self, uid, content, flags, rtime): imapobj = self.imapserver.acquireconnection() ui = UIBase.getglobalui() ui.debug('imap', 'savemessage: called') try: try: imapobj.select(self.getfullname()) # Needed for search except imapobj.readonly: ui.msgtoreadonly(self, uid, content, 
flags) # Return indicating message taken, but no UID assigned. # Fudge it. return 0 # This backend always assigns a new uid, so the uid arg is ignored. # In order to get the new uid, we need to save off the message ID. message = rfc822.Message(StringIO(content)) datetuple_msg = rfc822.parsedate(message.getheader('Date')) # Will be None if missing or not in a valid format. # If time isn't known if rtime == None and datetuple_msg == None: datetuple = time.localtime() elif rtime == None: datetuple = datetuple_msg else: datetuple = time.localtime(rtime) try: if datetuple[0] < 1981: raise ValueError # Check for invalid date datetuple_check = time.localtime(time.mktime(datetuple)) if datetuple[:2] != datetuple_check[:2]: raise ValueError # This could raise a value error if it's not a valid format. date = imaplib.Time2Internaldate(datetuple) except (ValueError, OverflowError): # Argh, sometimes it's a valid format but year is 0102 # or something. Argh. It seems that Time2Internaldate # will rause a ValueError if the year is 0102 but not 1902, # but some IMAP servers nonetheless choke on 1902. date = imaplib.Time2Internaldate(time.localtime()) ui.debug('imap', 'savemessage: using date ' + str(date)) content = re.sub("(?<!\r)\n", "\r\n", content) ui.debug('imap', 'savemessage: initial content is: ' + repr(content)) (headername, headervalue) = self.savemessage_getnewheader(content) ui.debug('imap', 'savemessage: new headers are: %s: %s' % \ (headername, headervalue)) content = self.savemessage_addheader(content, headername, headervalue) ui.debug('imap', 'savemessage: new content is: ' + repr(content)) ui.debug('imap', 'savemessage: new content length is ' + \ str(len(content))) assert(imapobj.append(self.getfullname(), imaputil.flagsmaildir2imap(flags), date, content)[0] == 'OK') # Checkpoint. Let it write out the messages, etc. assert(imapobj.check()[0] == 'OK') # Keep trying until we get the UID. 
ui.debug('imap', 'savemessage: first attempt to get new UID') uid = self.savemessage_searchforheader(imapobj, headername, headervalue) # See docs for savemessage in Base.py for explanation of this and other return values if uid <= 0: ui.debug('imap', 'savemessage: first attempt to get new UID failed. Going to run a NOOP and try again.') assert(imapobj.noop()[0] == 'OK') uid = self.savemessage_searchforheader(imapobj, headername, headervalue) finally: self.imapserver.releaseconnection(imapobj) if uid: # avoid UID FETCH 0 crash happening later on self.messagelist[uid] = {'uid': uid, 'flags': flags} ui.debug('imap', 'savemessage: returning %d' % uid) return uid def savemessageflags(self, uid, flags): imapobj = self.imapserver.acquireconnection() try: try: imapobj.select(self.getfullname()) except imapobj.readonly: UIBase.getglobalui().flagstoreadonly(self, [uid], flags) return result = imapobj.uid('store', '%d' % uid, 'FLAGS', imaputil.flagsmaildir2imap(flags)) assert result[0] == 'OK', 'Error with store: ' + '. 
'.join(r[1]) finally: self.imapserver.releaseconnection(imapobj) result = result[1][0] if not result: self.messagelist[uid]['flags'] = flags else: flags = imaputil.flags2hash(imaputil.imapsplit(result)[1])['FLAGS'] self.messagelist[uid]['flags'] = imaputil.flagsimap2maildir(flags) def addmessageflags(self, uid, flags): self.addmessagesflags([uid], flags) def addmessagesflags_noconvert(self, uidlist, flags): self.processmessagesflags('+', uidlist, flags) def addmessagesflags(self, uidlist, flags): """This is here for the sake of UIDMaps.py -- deletemessages must add flags and get a converted UID, and if we don't have noconvert, then UIDMaps will try to convert it twice.""" self.addmessagesflags_noconvert(uidlist, flags) def deletemessageflags(self, uid, flags): self.deletemessagesflags([uid], flags) def deletemessagesflags(self, uidlist, flags): self.processmessagesflags('-', uidlist, flags) def processmessagesflags(self, operation, uidlist, flags): if len(uidlist) > 101: # Hack for those IMAP ervers with a limited line length self.processmessagesflags(operation, uidlist[:100], flags) self.processmessagesflags(operation, uidlist[100:], flags) return imapobj = self.imapserver.acquireconnection() try: try: imapobj.select(self.getfullname()) except imapobj.readonly: UIBase.getglobalui().flagstoreadonly(self, uidlist, flags) return r = imapobj.uid('store', imaputil.listjoin(uidlist), operation + 'FLAGS', imaputil.flagsmaildir2imap(flags)) assert r[0] == 'OK', 'Error with store: ' + '. '.join(r[1]) r = r[1] finally: self.imapserver.releaseconnection(imapobj) # Some IMAP servers do not always return a result. Therefore, # only update the ones that it talks about, and manually fix # the others. needupdate = copy(uidlist) for result in r: if result == None: # Compensate for servers that don't return anything from # STORE. 
continue attributehash = imaputil.flags2hash(imaputil.imapsplit(result)[1]) if not ('UID' in attributehash and 'FLAGS' in attributehash): # Compensate for servers that don't return a UID attribute. continue lflags = attributehash['FLAGS'] uid = long(attributehash['UID']) self.messagelist[uid]['flags'] = imaputil.flagsimap2maildir(lflags) try: needupdate.remove(uid) except ValueError: # Let it slide if it's not in the list pass for uid in needupdate: if operation == '+': for flag in flags: if not flag in self.messagelist[uid]['flags']: self.messagelist[uid]['flags'].append(flag) self.messagelist[uid]['flags'].sort() elif operation == '-': for flag in flags: if flag in self.messagelist[uid]['flags']: self.messagelist[uid]['flags'].remove(flag) def deletemessage(self, uid): self.deletemessages_noconvert([uid]) def deletemessages(self, uidlist): self.deletemessages_noconvert(uidlist) def deletemessages_noconvert(self, uidlist): # Weed out ones not in self.messagelist uidlist = [uid for uid in uidlist if uid in self.messagelist] if not len(uidlist): return self.addmessagesflags_noconvert(uidlist, ['T']) imapobj = self.imapserver.acquireconnection() try: try: imapobj.select(self.getfullname()) except imapobj.readonly: UIBase.getglobalui().deletereadonly(self, uidlist) return if self.expunge: assert(imapobj.expunge()[0] == 'OK') finally: self.imapserver.releaseconnection(imapobj) for uid in uidlist: del self.messagelist[uid]
unknown
codeparrot/codeparrot-clean
from django.conf import settings

"""
This file attempts to automatically load the denorm backend for your
chosen database adaptor. Currently only mysql, postgresql and sqlite3
are supported.

If your database is not detected then you can specify the backend in
your settings file:

    # Django < 1.2
    DATABASE_DENORM_BACKEND = 'denorm.db.postgresql'

    # Django >= 1.2
    DATABASES = {
        'default': {
            'DENORM_BACKEND': 'denorm.db.postgresql',
        }
    }
"""

# Default mappings from common postgresql equivalents
DB_GUESS_MAPPING = {
    'postgis': 'postgresql',
    'postgresql_psycopg2': 'postgresql',
}


def backend_for_dbname(db_name):
    # Map a Django engine short name to a denorm backend module path,
    # collapsing postgresql variants to the plain 'postgresql' backend.
    return 'denorm.db.%s' % DB_GUESS_MAPPING.get(db_name, db_name)


# Resolve the backend module path at import time, preferring an explicit
# setting over guessing from the configured database engine.
if hasattr(settings, 'DATABASE_ENGINE') and settings.DATABASE_ENGINE:
    # Django < 1.2 syntax
    if hasattr(settings, 'DATABASE_DENORM_BACKEND'):
        backend = settings.DATABASE_DENORM_BACKEND
    else:
        backend = backend_for_dbname(settings.DATABASE_ENGINE)
else:
    # Assume >= Django 1.2 syntax
    from django.db import connections, DEFAULT_DB_ALIAS
    if 'DENORM_BACKEND' in connections[DEFAULT_DB_ALIAS].settings_dict:
        backend = connections[DEFAULT_DB_ALIAS].settings_dict['DENORM_BACKEND']
    else:
        # ENGINE is a dotted path like 'django.db.backends.sqlite3';
        # only the last component is the engine name we map.
        engine = connections[DEFAULT_DB_ALIAS].settings_dict['ENGINE']
        backend = backend_for_dbname(engine.rsplit(".", 1)[1])

try:
    # Import <backend>.triggers; the empty fromlist entry makes __import__
    # return the leaf module rather than the top-level package.
    triggers = __import__('.'.join([backend, 'triggers']), {}, {}, [''])
except ImportError:
    raise ImportError("""There is no django-denorm database module for the engine '%s'. Please either choose a supported one, or remove 'denorm' from INSTALLED_APPS.\n""" % backend)
unknown
codeparrot/codeparrot-clean
""" NCL User Guide Python Example: PyNGL_curvilinear_contour.py - curvilinear data - colormap - draw edges 04.06.15 kmf """ import Ngl,Nio import os,sys #-- define variables diri = "./" #-- data directory fname = "tos_ocean_bipolar_grid.nc" #-- curvilinear data #---Test if file exists if(not os.path.exists(diri+fname)): print("You do not have the necessary file (%s) to run this example." % (diri+fname)) print("You can get the files from the NCL website at:") print("http://www.ncl.ucar.edu/Document/Manuals/NCL_User_Guide/Data/") sys.exit() #-- open file and read variables f = Nio.open_file(diri + fname,"r") var = f.variables["tos"][0,:,:] #-- first time step, reverse latitude lat2d = f.variables["lat"][:,:] #-- 2D latitudes lon2d = f.variables["lon"][:,:] #-- 2D longitudes #-- open a workstation wkres = Ngl.Resources() #-- generate an resources object for workstation wks_type = "png" #-- output type wks = Ngl.open_wks(wks_type,"plot_curvilinear_contour_ngl",wkres) #-- open workstation #-- set resources res = Ngl.Resources() #-- generate an resources object for plot res.cnFillOn = True #-- turn on contour fill res.cnLinesOn = False #-- don't draw contour lines res.cnLineLabelsOn = False #-- don't draw line labels res.cnFillPalette = "BlueWhiteOrangeRed" #-- set color map res.cnFillMode = "CellFill" #-- change contour fill mode res.cnCellFillEdgeColor = "black" #-- edges color res.cnCellFillMissingValEdgeColor = "gray50" #-- missing value edges color res.cnMissingValFillColor = "gray50" #-- missing value fill color res.lbOrientation = "Horizontal" #-- labelbar orientation res.tiMainString = "Curvilinear grid: MPI-ESM-LR (2D lat/lon arrays)" #-- title string res.tiMainFontHeightF = 0.022 #-- main title font size res.sfXArray = lon2d #-- longitude grid cell center res.sfYArray = lat2d #-- latitude grid cell center res.mpFillOn = False #-- don't draw filled map res.mpGridLatSpacingF = 10. #-- grid lat spacing res.mpGridLonSpacingF = 10. 
#-- grid lon spacing res.mpDataBaseVersion = "MediumRes" #-- map database res.mpLimitMode = "LatLon" #-- must be set using minLatF/maxLatF/minLonF/maxLonF res.mpMinLatF = -10. #-- sub-region minimum latitude res.mpMaxLatF = 80. #-- sub-region maximum latitude res.mpMinLonF = -120. #-- sub-region minimum longitude res.mpMaxLonF = 60. #-- sub-region maximum longitude #-- create the plot plot = Ngl.contour_map(wks,var,res) #-- create the contour plot #-- end Ngl.end()
unknown
codeparrot/codeparrot-clean
# Scanner produces tokens of the following types:
# STREAM-START
# STREAM-END
# DIRECTIVE(name, value)
# DOCUMENT-START
# DOCUMENT-END
# BLOCK-SEQUENCE-START
# BLOCK-MAPPING-START
# BLOCK-END
# FLOW-SEQUENCE-START
# FLOW-MAPPING-START
# FLOW-SEQUENCE-END
# FLOW-MAPPING-END
# BLOCK-ENTRY
# FLOW-ENTRY
# KEY
# VALUE
# ALIAS(value)
# ANCHOR(value)
# TAG(value)
# SCALAR(value, plain, style)
#
# Read comments in the Scanner code for more details.
#

__all__ = ['Scanner', 'ScannerError']

from error import MarkedYAMLError
from tokens import *

# Raised on ill-formed input; inherits the two-mark context formatting
# (problem mark + context mark) from MarkedYAMLError.
class ScannerError(MarkedYAMLError):
    pass

class SimpleKey(object):
    # See below simple keys treatment.
    # Plain record of a *potential* simple key position:
    #   token_number - index of the would-be KEY token in the overall stream
    #   required     - True if a ':' MUST follow (block context, first token
    #                  on the line), so failure to find one is an error
    #   index/line/column/mark - stream position where the key started

    def __init__(self, token_number, required, index, line, column, mark):
        self.token_number = token_number
        self.required = required
        self.index = index
        self.line = line
        self.column = column
        self.mark = mark

class Scanner(object):
    # Low-level lexer: consumes characters (via the Reader mixin) and emits
    # the tokens listed at the top of this file for the Parser.

    def __init__(self):
        """Initialize the scanner."""
        # It is assumed that Scanner and Reader will have a common descendant.
        # Reader does the dirty work of checking for BOM and converting the
        # input data to Unicode. It also adds NUL to the end.
        #
        # Reader supports the following methods
        #   self.peek(i=0)       # peek the next i-th character
        #   self.prefix(l=1)     # peek the next l characters
        #   self.forward(l=1)    # read the next l characters and move the pointer.

        # Had we reached the end of the stream?
        self.done = False

        # The number of unclosed '{' and '['. `flow_level == 0` means block
        # context.
        self.flow_level = 0

        # List of processed tokens that are not yet emitted.
        self.tokens = []

        # Add the STREAM-START token.
        self.fetch_stream_start()

        # Number of tokens that were emitted through the `get_token` method.
        self.tokens_taken = 0

        # The current indentation level.
        self.indent = -1

        # Past indentation levels.
        self.indents = []

        # Variables related to simple keys treatment.

        # A simple key is a key that is not denoted by the '?' indicator.
        # Example of simple keys:
        #   ---
        #   block simple key: value
        #   ? not a simple key:
        #   : { flow simple key: value }
        # We emit the KEY token before all keys, so when we find a potential
        # simple key, we try to locate the corresponding ':' indicator.
        # Simple keys should be limited to a single line and 1024 characters.

        # Can a simple key start at the current position? A simple key may
        # start:
        # - at the beginning of the line, not counting indentation spaces
        #   (in block context),
        # - after '{', '[', ',' (in the flow context),
        # - after '?', ':', '-' (in the block context).
        # In the block context, this flag also signifies if a block collection
        # may start at the current position.
        self.allow_simple_key = True

        # Keep track of possible simple keys. This is a dictionary. The key
        # is `flow_level`; there can be no more than one possible simple key
        # for each level. The value is a SimpleKey record:
        #   (token_number, required, index, line, column, mark)
        # A simple key may start with ALIAS, ANCHOR, TAG, SCALAR(flow),
        # '[', or '{' tokens.
        self.possible_simple_keys = {}

    # Public methods.

    def check_token(self, *choices):
        # Check if the next token is one of the given types.
        # With no arguments, just check that a token is available.
        while self.need_more_tokens():
            self.fetch_more_tokens()
        if self.tokens:
            if not choices:
                return True
            for choice in choices:
                if isinstance(self.tokens[0], choice):
                    return True
        return False

    def peek_token(self):
        # Return the next token, but do not delete it from the queue.
        # Returns None (implicitly) at the end of the token stream.
        while self.need_more_tokens():
            self.fetch_more_tokens()
        if self.tokens:
            return self.tokens[0]

    def get_token(self):
        # Return the next token and remove it from the queue.
        while self.need_more_tokens():
            self.fetch_more_tokens()
        if self.tokens:
            self.tokens_taken += 1
            return self.tokens.pop(0)

    # Private methods.

    def need_more_tokens(self):
        # Decide whether more input must be scanned before a token can be
        # handed out. Even if a token is queued, it may still be a pending
        # simple key whose ':' has not been located yet.
        if self.done:
            return False
        if not self.tokens:
            return True
        # The current token may be a potential simple key, so we
        # need to look further.
        self.stale_possible_simple_keys()
        if self.next_possible_simple_key() == self.tokens_taken:
            return True

    def fetch_more_tokens(self):
        # Scan exactly one more token (plus any implicit BLOCK-* tokens it
        # forces) and append it to self.tokens.

        # Eat whitespaces and comments until we reach the next token.
        self.scan_to_next_token()

        # Remove obsolete possible simple keys.
        self.stale_possible_simple_keys()

        # Compare the current indentation and column. It may add some tokens
        # and decrease the current indentation level.
        self.unwind_indent(self.column)

        # Peek the next character.
        ch = self.peek()

        # Is it the end of stream?
        if ch == u'\0':
            return self.fetch_stream_end()

        # Is it a directive?
        if ch == u'%' and self.check_directive():
            return self.fetch_directive()

        # Is it the document start?
        if ch == u'-' and self.check_document_start():
            return self.fetch_document_start()

        # Is it the document end?
        if ch == u'.' and self.check_document_end():
            return self.fetch_document_end()

        # TODO: support for BOM within a stream.
        #if ch == u'\uFEFF':
        #    return self.fetch_bom()    <-- issue BOMToken

        # Note: the order of the following checks is NOT significant.

        # Is it the flow sequence start indicator?
        if ch == u'[':
            return self.fetch_flow_sequence_start()

        # Is it the flow mapping start indicator?
        if ch == u'{':
            return self.fetch_flow_mapping_start()

        # Is it the flow sequence end indicator?
        if ch == u']':
            return self.fetch_flow_sequence_end()

        # Is it the flow mapping end indicator?
        if ch == u'}':
            return self.fetch_flow_mapping_end()

        # Is it the flow entry indicator?
        if ch == u',':
            return self.fetch_flow_entry()

        # Is it the block entry indicator?
        if ch == u'-' and self.check_block_entry():
            return self.fetch_block_entry()

        # Is it the key indicator?
        if ch == u'?' and self.check_key():
            return self.fetch_key()

        # Is it the value indicator?
        if ch == u':' and self.check_value():
            return self.fetch_value()

        # Is it an alias?
        if ch == u'*':
            return self.fetch_alias()

        # Is it an anchor?
        if ch == u'&':
            return self.fetch_anchor()

        # Is it a tag?
        if ch == u'!':
            return self.fetch_tag()

        # Is it a literal scalar?
        if ch == u'|' and not self.flow_level:
            return self.fetch_literal()

        # Is it a folded scalar?
        if ch == u'>' and not self.flow_level:
            return self.fetch_folded()

        # Is it a single quoted scalar?
        if ch == u'\'':
            return self.fetch_single()

        # Is it a double quoted scalar?
        if ch == u'\"':
            return self.fetch_double()

        # It must be a plain scalar then.
        if self.check_plain():
            return self.fetch_plain()

        # No? It's an error. Let's produce a nice error message.
        raise ScannerError("while scanning for the next token", None,
                "found character %r that cannot start any token"
                % ch.encode('utf-8'), self.get_mark())

    # Simple keys treatment.

    def next_possible_simple_key(self):
        # Return the number of the nearest possible simple key. Actually we
        # don't need to loop through the whole dictionary. We may replace it
        # with the following code:
        #   if not self.possible_simple_keys:
        #       return None
        #   return self.possible_simple_keys[
        #           min(self.possible_simple_keys.keys())].token_number
        min_token_number = None
        for level in self.possible_simple_keys:
            key = self.possible_simple_keys[level]
            if min_token_number is None or key.token_number < min_token_number:
                min_token_number = key.token_number
        return min_token_number

    def stale_possible_simple_keys(self):
        # Remove entries that are no longer possible simple keys. According to
        # the YAML specification, simple keys
        # - should be limited to a single line,
        # - should be no longer than 1024 characters.
        # Disabling this procedure will allow simple keys of any length and
        # height (may cause problems if indentation is broken though).
        # NOTE(review): deleting while iterating is safe here only because
        # Python 2 .keys() returns a list snapshot, not a view.
        for level in self.possible_simple_keys.keys():
            key = self.possible_simple_keys[level]
            if key.line != self.line  \
                    or self.index-key.index > 1024:
                if key.required:
                    # NOTE(review): "could not found" is a historical typo in
                    # the runtime message; left untouched to keep output stable.
                    raise ScannerError("while scanning a simple key", key.mark,
                            "could not found expected ':'", self.get_mark())
                del self.possible_simple_keys[level]

    def save_possible_simple_key(self):
        # The next token may start a simple key. We check if it's possible
        # and save its position. This function is called for
        #   ALIAS, ANCHOR, TAG, SCALAR(flow), '[', and '{'.

        # Check if a simple key is required at the current position.
        required = not self.flow_level and self.indent == self.column

        # A simple key is required only if it is the first token in the current
        # line. Therefore it is always allowed.
        assert self.allow_simple_key or not required

        # The next token might be a simple key. Let's save its number and
        # position.
        if self.allow_simple_key:
            self.remove_possible_simple_key()
            token_number = self.tokens_taken+len(self.tokens)
            key = SimpleKey(token_number, required,
                    self.index, self.line, self.column, self.get_mark())
            self.possible_simple_keys[self.flow_level] = key

    def remove_possible_simple_key(self):
        # Remove the saved possible key position at the current flow level.
        if self.flow_level in self.possible_simple_keys:
            key = self.possible_simple_keys[self.flow_level]
            if key.required:
                # NOTE(review): same preserved message typo as in
                # stale_possible_simple_keys above.
                raise ScannerError("while scanning a simple key", key.mark,
                        "could not found expected ':'", self.get_mark())
            del self.possible_simple_keys[self.flow_level]

    # Indentation functions.

    def unwind_indent(self, column):
        # Pop indentation levels above `column`, emitting a BLOCK-END token
        # for each closed block collection.

        ## In flow context, tokens should respect indentation.
        ## Actually the condition should be `self.indent >= column` according to
        ## the spec. But this condition will prohibit intuitively correct
        ## constructions such as
        ## key : {
        ## }
        #if self.flow_level and self.indent > column:
        #    raise ScannerError(None, None,
        #            "invalid indentation or unclosed '[' or '{'",
        #            self.get_mark())

        # In the flow context, indentation is ignored. We make the scanner less
        # restrictive than specification requires.
        if self.flow_level:
            return

        # In block context, we may need to issue the BLOCK-END tokens.
        while self.indent > column:
            mark = self.get_mark()
            self.indent = self.indents.pop()
            self.tokens.append(BlockEndToken(mark, mark))

    def add_indent(self, column):
        # Check if we need to increase indentation.
        # Returns True if a new indentation level was pushed.
        if self.indent < column:
            self.indents.append(self.indent)
            self.indent = column
            return True
        return False

    # Fetchers.

    def fetch_stream_start(self):
        # We always add STREAM-START as the first token and STREAM-END as the
        # last token.

        # Read the token.
        mark = self.get_mark()

        # Add STREAM-START.
        self.tokens.append(StreamStartToken(mark, mark,
            encoding=self.encoding))

    def fetch_stream_end(self):

        # Set the current indentation to -1.
        self.unwind_indent(-1)

        # Reset everything (not really needed).
        self.allow_simple_key = False
        self.possible_simple_keys = {}

        # Read the token.
        mark = self.get_mark()

        # Add STREAM-END.
        self.tokens.append(StreamEndToken(mark, mark))

        # The stream is finished.
        self.done = True

    def fetch_directive(self):

        # Set the current indentation to -1.
        self.unwind_indent(-1)

        # Reset simple keys.
        self.remove_possible_simple_key()
        self.allow_simple_key = False

        # Scan and add DIRECTIVE.
        self.tokens.append(self.scan_directive())

    def fetch_document_start(self):
        self.fetch_document_indicator(DocumentStartToken)

    def fetch_document_end(self):
        self.fetch_document_indicator(DocumentEndToken)

    def fetch_document_indicator(self, TokenClass):
        # Common code for '---' and '...' (both are 3-character indicators).

        # Set the current indentation to -1.
        self.unwind_indent(-1)

        # Reset simple keys. Note that there could not be a block collection
        # after '---'.
        self.remove_possible_simple_key()
        self.allow_simple_key = False

        # Add DOCUMENT-START or DOCUMENT-END.
        start_mark = self.get_mark()
        self.forward(3)
        end_mark = self.get_mark()
        self.tokens.append(TokenClass(start_mark, end_mark))

    def fetch_flow_sequence_start(self):
        self.fetch_flow_collection_start(FlowSequenceStartToken)

    def fetch_flow_mapping_start(self):
        self.fetch_flow_collection_start(FlowMappingStartToken)

    def fetch_flow_collection_start(self, TokenClass):

        # '[' and '{' may start a simple key.
        self.save_possible_simple_key()

        # Increase the flow level.
        self.flow_level += 1

        # Simple keys are allowed after '[' and '{'.
        self.allow_simple_key = True

        # Add FLOW-SEQUENCE-START or FLOW-MAPPING-START.
        start_mark = self.get_mark()
        self.forward()
        end_mark = self.get_mark()
        self.tokens.append(TokenClass(start_mark, end_mark))

    def fetch_flow_sequence_end(self):
        self.fetch_flow_collection_end(FlowSequenceEndToken)

    def fetch_flow_mapping_end(self):
        self.fetch_flow_collection_end(FlowMappingEndToken)

    def fetch_flow_collection_end(self, TokenClass):

        # Reset possible simple key on the current level.
        self.remove_possible_simple_key()

        # Decrease the flow level.
        self.flow_level -= 1

        # No simple keys after ']' or '}'.
        self.allow_simple_key = False

        # Add FLOW-SEQUENCE-END or FLOW-MAPPING-END.
        start_mark = self.get_mark()
        self.forward()
        end_mark = self.get_mark()
        self.tokens.append(TokenClass(start_mark, end_mark))

    def fetch_flow_entry(self):

        # Simple keys are allowed after ','.
        self.allow_simple_key = True

        # Reset possible simple key on the current level.
        self.remove_possible_simple_key()

        # Add FLOW-ENTRY.
        start_mark = self.get_mark()
        self.forward()
        end_mark = self.get_mark()
        self.tokens.append(FlowEntryToken(start_mark, end_mark))

    def fetch_block_entry(self):

        # Block context needs additional checks.
        if not self.flow_level:

            # Are we allowed to start a new entry?
            if not self.allow_simple_key:
                raise ScannerError(None, None,
                        "sequence entries are not allowed here",
                        self.get_mark())

            # We may need to add BLOCK-SEQUENCE-START.
            if self.add_indent(self.column):
                mark = self.get_mark()
                self.tokens.append(BlockSequenceStartToken(mark, mark))

        # It's an error for the block entry to occur in the flow context,
        # but we let the parser detect this.
        else:
            pass

        # Simple keys are allowed after '-'.
        self.allow_simple_key = True

        # Reset possible simple key on the current level.
        self.remove_possible_simple_key()

        # Add BLOCK-ENTRY.
        start_mark = self.get_mark()
        self.forward()
        end_mark = self.get_mark()
        self.tokens.append(BlockEntryToken(start_mark, end_mark))

    def fetch_key(self):

        # Block context needs additional checks.
        if not self.flow_level:

            # Are we allowed to start a key (not necessarily a simple one)?
            if not self.allow_simple_key:
                raise ScannerError(None, None,
                        "mapping keys are not allowed here",
                        self.get_mark())

            # We may need to add BLOCK-MAPPING-START.
            if self.add_indent(self.column):
                mark = self.get_mark()
                self.tokens.append(BlockMappingStartToken(mark, mark))

        # Simple keys are allowed after '?' in the block context.
        self.allow_simple_key = not self.flow_level

        # Reset possible simple key on the current level.
        self.remove_possible_simple_key()

        # Add KEY.
        start_mark = self.get_mark()
        self.forward()
        end_mark = self.get_mark()
        self.tokens.append(KeyToken(start_mark, end_mark))

    def fetch_value(self):

        # Do we determine a simple key?
        if self.flow_level in self.possible_simple_keys:

            # Add KEY.
            key = self.possible_simple_keys[self.flow_level]
            del self.possible_simple_keys[self.flow_level]
            self.tokens.insert(key.token_number-self.tokens_taken,
                    KeyToken(key.mark, key.mark))

            # If this key starts a new block mapping, we need to add
            # BLOCK-MAPPING-START.
            if not self.flow_level:
                if self.add_indent(key.column):
                    self.tokens.insert(key.token_number-self.tokens_taken,
                            BlockMappingStartToken(key.mark, key.mark))

            # There cannot be two simple keys one after another.
            self.allow_simple_key = False

        # It must be a part of a complex key.
        else:

            # Block context needs additional checks.
            # (Do we really need them? They will be caught by the parser
            # anyway.)
            if not self.flow_level:

                # We are allowed to start a complex value if and only if
                # we can start a simple key.
                if not self.allow_simple_key:
                    raise ScannerError(None, None,
                            "mapping values are not allowed here",
                            self.get_mark())

            # If this value starts a new block mapping, we need to add
            # BLOCK-MAPPING-START. It will be detected as an error later by
            # the parser.
            if not self.flow_level:
                if self.add_indent(self.column):
                    mark = self.get_mark()
                    self.tokens.append(BlockMappingStartToken(mark, mark))

        # Simple keys are allowed after ':' in the block context.
        self.allow_simple_key = not self.flow_level

        # Reset possible simple key on the current level.
        self.remove_possible_simple_key()

        # Add VALUE.
        start_mark = self.get_mark()
        self.forward()
        end_mark = self.get_mark()
        self.tokens.append(ValueToken(start_mark, end_mark))

    def fetch_alias(self):

        # ALIAS could be a simple key.
        self.save_possible_simple_key()

        # No simple keys after ALIAS.
        self.allow_simple_key = False

        # Scan and add ALIAS.
        self.tokens.append(self.scan_anchor(AliasToken))

    def fetch_anchor(self):

        # ANCHOR could start a simple key.
        self.save_possible_simple_key()

        # No simple keys after ANCHOR.
        self.allow_simple_key = False

        # Scan and add ANCHOR.
        self.tokens.append(self.scan_anchor(AnchorToken))

    def fetch_tag(self):

        # TAG could start a simple key.
        self.save_possible_simple_key()

        # No simple keys after TAG.
        self.allow_simple_key = False

        # Scan and add TAG.
        self.tokens.append(self.scan_tag())

    def fetch_literal(self):
        self.fetch_block_scalar(style='|')

    def fetch_folded(self):
        self.fetch_block_scalar(style='>')

    def fetch_block_scalar(self, style):

        # A simple key may follow a block scalar.
        self.allow_simple_key = True

        # Reset possible simple key on the current level.
        self.remove_possible_simple_key()

        # Scan and add SCALAR.
        self.tokens.append(self.scan_block_scalar(style))

    def fetch_single(self):
        self.fetch_flow_scalar(style='\'')

    def fetch_double(self):
        self.fetch_flow_scalar(style='"')

    def fetch_flow_scalar(self, style):

        # A flow scalar could be a simple key.
        self.save_possible_simple_key()

        # No simple keys after flow scalars.
        self.allow_simple_key = False

        # Scan and add SCALAR.
        self.tokens.append(self.scan_flow_scalar(style))

    def fetch_plain(self):

        # A plain scalar could be a simple key.
        self.save_possible_simple_key()

        # No simple keys after plain scalars. But note that `scan_plain` will
        # change this flag if the scan is finished at the beginning of the
        # line.
        self.allow_simple_key = False

        # Scan and add SCALAR. May change `allow_simple_key`.
        self.tokens.append(self.scan_plain())

    # Checkers.

    def check_directive(self):

        # DIRECTIVE:        ^ '%' ...
        # The '%' indicator is already checked.
        if self.column == 0:
            return True

    def check_document_start(self):

        # DOCUMENT-START:   ^ '---' (' '|'\n')
        if self.column == 0:
            if self.prefix(3) == u'---'  \
                    and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
                return True

    def check_document_end(self):

        # DOCUMENT-END:     ^ '...' (' '|'\n')
        if self.column == 0:
            if self.prefix(3) == u'...'  \
                    and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
                return True

    def check_block_entry(self):

        # BLOCK-ENTRY:      '-' (' '|'\n')
        return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029'

    def check_key(self):

        # KEY(flow context):    '?'
        if self.flow_level:
            return True

        # KEY(block context):   '?' (' '|'\n')
        else:
            return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029'

    def check_value(self):

        # VALUE(flow context):      ':'
        if self.flow_level:
            return True

        # VALUE(block context):     ':' (' '|'\n')
        else:
            return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029'

    def check_plain(self):

        # A plain scalar may start with any non-space character except:
        #   '-', '?', ':', ',', '[', ']', '{', '}',
        #   '#', '&', '*', '!', '|', '>', '\'', '\"',
        #   '%', '@', '`'.
        #
        # It may also start with
        #   '-', '?', ':'
        # if it is followed by a non-space character.
        #
        # Note that we limit the last rule to the block context (except the
        # '-' character) because we want the flow context to be space
        # independent.
        ch = self.peek()
        return ch not in u'\0 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>\'\"%@`'  \
                or (self.peek(1) not in u'\0 \t\r\n\x85\u2028\u2029'
                        and (ch == u'-' or (not self.flow_level and ch in u'?:')))

    # Scanners.

    def scan_to_next_token(self):
        # We ignore spaces, line breaks and comments.
        # If we find a line break in the block context, we set the flag
        # `allow_simple_key` on.
        # The byte order mark is stripped if it's the first character in the
        # stream. We do not yet support BOM inside the stream as the
        # specification requires. Any such mark will be considered as a part
        # of the document.
        #
        # TODO: We need to make tab handling rules more sane. A good rule is
        #   Tabs cannot precede tokens
        #   BLOCK-SEQUENCE-START, BLOCK-MAPPING-START, BLOCK-END,
        #   KEY(block), VALUE(block), BLOCK-ENTRY
        # So the checking code is
        #   if <TAB>:
        #       self.allow_simple_keys = False
        # We also need to add the check for `allow_simple_keys == True` to
        # `unwind_indent` before issuing BLOCK-END.
        # Scanners for block, flow, and plain scalars need to be modified.

        if self.index == 0 and self.peek() == u'\uFEFF':
            self.forward()
        found = False
        while not found:
            while self.peek() == u' ':
                self.forward()
            if self.peek() == u'#':
                while self.peek() not in u'\0\r\n\x85\u2028\u2029':
                    self.forward()
            if self.scan_line_break():
                if not self.flow_level:
                    self.allow_simple_key = True
            else:
                found = True

    def scan_directive(self):
        # See the specification for details.
        # Handles '%YAML x.y', '%TAG handle prefix', and skips unknown
        # directives (value stays None for those).
        start_mark = self.get_mark()
        self.forward()
        name = self.scan_directive_name(start_mark)
        value = None
        if name == u'YAML':
            value = self.scan_yaml_directive_value(start_mark)
            end_mark = self.get_mark()
        elif name == u'TAG':
            value = self.scan_tag_directive_value(start_mark)
            end_mark = self.get_mark()
        else:
            end_mark = self.get_mark()
            while self.peek() not in u'\0\r\n\x85\u2028\u2029':
                self.forward()
        self.scan_directive_ignored_line(start_mark)
        return DirectiveToken(name, value, start_mark, end_mark)

    def scan_directive_name(self, start_mark):
        # See the specification for details.
        # Accepts [0-9A-Za-z_-]+ followed by NUL, space or a line break.
        length = 0
        ch = self.peek(length)
        while u'0' <= ch <= u'9' or u'A' <= ch <= 'Z' or u'a' <= ch <= 'z'  \
                or ch in u'-_':
            length += 1
            ch = self.peek(length)
        if not length:
            raise ScannerError("while scanning a directive", start_mark,
                    "expected alphabetic or numeric character, but found %r"
                    % ch.encode('utf-8'), self.get_mark())
        value = self.prefix(length)
        self.forward(length)
        ch = self.peek()
        if ch not in u'\0 \r\n\x85\u2028\u2029':
            raise ScannerError("while scanning a directive", start_mark,
                    "expected alphabetic or numeric character, but found %r"
                    % ch.encode('utf-8'), self.get_mark())
        return value

    def scan_yaml_directive_value(self, start_mark):
        # See the specification for details.
        # Returns a (major, minor) integer pair.
        while self.peek() == u' ':
            self.forward()
        major = self.scan_yaml_directive_number(start_mark)
        if self.peek() != '.':
            raise ScannerError("while scanning a directive", start_mark,
                    "expected a digit or '.', but found %r"
                    % self.peek().encode('utf-8'),
                    self.get_mark())
        self.forward()
        minor = self.scan_yaml_directive_number(start_mark)
        if self.peek() not in u'\0 \r\n\x85\u2028\u2029':
            raise ScannerError("while scanning a directive", start_mark,
                    "expected a digit or ' ', but found %r"
                    % self.peek().encode('utf-8'),
                    self.get_mark())
        return (major, minor)

    def scan_yaml_directive_number(self, start_mark):
        # See the specification for details.
        # Scans a run of digits and returns it as an int.
        ch = self.peek()
        if not (u'0' <= ch <= '9'):
            raise ScannerError("while scanning a directive", start_mark,
                    "expected a digit, but found %r" % ch.encode('utf-8'),
                    self.get_mark())
        length = 0
        while u'0' <= self.peek(length) <= u'9':
            length += 1
        value = int(self.prefix(length))
        self.forward(length)
        return value

    def scan_tag_directive_value(self, start_mark):
        # See the specification for details.
        # Returns a (handle, prefix) pair.
        while self.peek() == u' ':
            self.forward()
        handle = self.scan_tag_directive_handle(start_mark)
        while self.peek() == u' ':
            self.forward()
        prefix = self.scan_tag_directive_prefix(start_mark)
        return (handle, prefix)

    def scan_tag_directive_handle(self, start_mark):
        # See the specification for details.
        value = self.scan_tag_handle('directive', start_mark)
        ch = self.peek()
        if ch != u' ':
            raise ScannerError("while scanning a directive", start_mark,
                    "expected ' ', but found %r" % ch.encode('utf-8'),
                    self.get_mark())
        return value

    def scan_tag_directive_prefix(self, start_mark):
        # See the specification for details.
        value = self.scan_tag_uri('directive', start_mark)
        ch = self.peek()
        if ch not in u'\0 \r\n\x85\u2028\u2029':
            raise ScannerError("while scanning a directive", start_mark,
                    "expected ' ', but found %r" % ch.encode('utf-8'),
                    self.get_mark())
        return value

    def scan_directive_ignored_line(self, start_mark):
        # See the specification for details.
        # Consumes trailing spaces, an optional comment, and the line break.
        while self.peek() == u' ':
            self.forward()
        if self.peek() == u'#':
            while self.peek() not in u'\0\r\n\x85\u2028\u2029':
                self.forward()
        ch = self.peek()
        if ch not in u'\0\r\n\x85\u2028\u2029':
            raise ScannerError("while scanning a directive", start_mark,
                    "expected a comment or a line break, but found %r"
                    % ch.encode('utf-8'), self.get_mark())
        self.scan_line_break()

    def scan_anchor(self, TokenClass):
        # The specification does not restrict characters for anchors and
        # aliases. This may lead to problems, for instance, the document:
        #   [ *alias, value ]
        # can be interpreted in two ways, as
        #   [ "value" ]
        # and
        #   [ *alias , "value" ]
        # Therefore we restrict aliases to numbers and ASCII letters.
        start_mark = self.get_mark()
        indicator = self.peek()
        if indicator == '*':
            name = 'alias'
        else:
            name = 'anchor'
        self.forward()
        length = 0
        ch = self.peek(length)
        while u'0' <= ch <= u'9' or u'A' <= ch <= 'Z' or u'a' <= ch <= 'z'  \
                or ch in u'-_':
            length += 1
            ch = self.peek(length)
        if not length:
            raise ScannerError("while scanning an %s" % name, start_mark,
                    "expected alphabetic or numeric character, but found %r"
                    % ch.encode('utf-8'), self.get_mark())
        value = self.prefix(length)
        self.forward(length)
        ch = self.peek()
        if ch not in u'\0 \t\r\n\x85\u2028\u2029?:,]}%@`':
            raise ScannerError("while scanning an %s" % name, start_mark,
                    "expected alphabetic or numeric character, but found %r"
                    % ch.encode('utf-8'), self.get_mark())
        end_mark = self.get_mark()
        return TokenClass(value, start_mark, end_mark)

    def scan_tag(self):
        # See the specification for details.
        # Three forms: verbatim '!<uri>', the bare '!' non-specific tag,
        # and 'handle!suffix' / '!suffix'.
        start_mark = self.get_mark()
        ch = self.peek(1)
        if ch == u'<':
            handle = None
            self.forward(2)
            suffix = self.scan_tag_uri('tag', start_mark)
            if self.peek() != u'>':
                raise ScannerError("while parsing a tag", start_mark,
                        "expected '>', but found %r" % self.peek().encode('utf-8'),
                        self.get_mark())
            self.forward()
        elif ch in u'\0 \t\r\n\x85\u2028\u2029':
            handle = None
            suffix = u'!'
            self.forward()
        else:
            length = 1
            use_handle = False
            while ch not in u'\0 \r\n\x85\u2028\u2029':
                if ch == u'!':
                    use_handle = True
                    break
                length += 1
                ch = self.peek(length)
            handle = u'!'
            if use_handle:
                handle = self.scan_tag_handle('tag', start_mark)
            else:
                handle = u'!'
                self.forward()
            suffix = self.scan_tag_uri('tag', start_mark)
        ch = self.peek()
        if ch not in u'\0 \r\n\x85\u2028\u2029':
            raise ScannerError("while scanning a tag", start_mark,
                    "expected ' ', but found %r" % ch.encode('utf-8'),
                    self.get_mark())
        value = (handle, suffix)
        end_mark = self.get_mark()
        return TagToken(value, start_mark, end_mark)

    def scan_block_scalar(self, style):
        # See the specification for details.
        # Scans a literal ('|') or folded ('>') block scalar, including the
        # header (chomping/indentation indicators) and the indented body.

        if style == '>':
            folded = True
        else:
            folded = False

        chunks = []
        start_mark = self.get_mark()

        # Scan the header.
        self.forward()
        chomping, increment = self.scan_block_scalar_indicators(start_mark)
        self.scan_block_scalar_ignored_line(start_mark)

        # Determine the indentation level and go to the first non-empty line.
        min_indent = self.indent+1
        if min_indent < 1:
            min_indent = 1
        if increment is None:
            breaks, max_indent, end_mark = self.scan_block_scalar_indentation()
            indent = max(min_indent, max_indent)
        else:
            indent = min_indent+increment-1
            breaks, end_mark = self.scan_block_scalar_breaks(indent)
        line_break = u''

        # Scan the inner part of the block scalar.
        while self.column == indent and self.peek() != u'\0':
            chunks.extend(breaks)
            leading_non_space = self.peek() not in u' \t'
            length = 0
            while self.peek(length) not in u'\0\r\n\x85\u2028\u2029':
                length += 1
            chunks.append(self.prefix(length))
            self.forward(length)
            line_break = self.scan_line_break()
            breaks, end_mark = self.scan_block_scalar_breaks(indent)
            if self.column == indent and self.peek() != u'\0':

                # Unfortunately, folding rules are ambiguous.
                #
                # This is the folding according to the specification:

                if folded and line_break == u'\n'   \
                        and leading_non_space and self.peek() not in u' \t':
                    if not breaks:
                        chunks.append(u' ')
                else:
                    chunks.append(line_break)

                # This is Clark Evans's interpretation (also in the spec
                # examples):
                #
                #if folded and line_break == u'\n':
                #    if not breaks:
                #        if self.peek() not in ' \t':
                #            chunks.append(u' ')
                #        else:
                #            chunks.append(line_break)
                #else:
                #    chunks.append(line_break)
            else:
                break

        # Chomp the tail.
        if chomping is not False:
            chunks.append(line_break)
        if chomping is True:
            chunks.extend(breaks)

        # We are done.
        return ScalarToken(u''.join(chunks), False, start_mark, end_mark,
                style)

    def scan_block_scalar_indicators(self, start_mark):
        # See the specification for details.
        # The chomping ('+'/'-') and indentation (1-9) indicators may appear
        # in either order; both are optional.
        chomping = None
        increment = None
        ch = self.peek()
        if ch in u'+-':
            if ch == '+':
                chomping = True
            else:
                chomping = False
            self.forward()
            ch = self.peek()
            if ch in u'0123456789':
                increment = int(ch)
                if increment == 0:
                    raise ScannerError("while scanning a block scalar", start_mark,
                            "expected indentation indicator in the range 1-9, but found 0",
                            self.get_mark())
                self.forward()
        elif ch in u'0123456789':
            increment = int(ch)
            if increment == 0:
                raise ScannerError("while scanning a block scalar", start_mark,
                        "expected indentation indicator in the range 1-9, but found 0",
                        self.get_mark())
            self.forward()
            ch = self.peek()
            if ch in u'+-':
                if ch == '+':
                    chomping = True
                else:
                    chomping = False
                self.forward()
        ch = self.peek()
        if ch not in u'\0 \r\n\x85\u2028\u2029':
            raise ScannerError("while scanning a block scalar", start_mark,
                    "expected chomping or indentation indicators, but found %r"
                    % ch.encode('utf-8'), self.get_mark())
        return chomping, increment

    def scan_block_scalar_ignored_line(self, start_mark):
        # See the specification for details.
        # Consumes the rest of the header line (spaces, optional comment,
        # line break).
        while self.peek() == u' ':
            self.forward()
        if self.peek() == u'#':
            while self.peek() not in u'\0\r\n\x85\u2028\u2029':
                self.forward()
        ch = self.peek()
        if ch not in u'\0\r\n\x85\u2028\u2029':
            raise ScannerError("while scanning a block scalar", start_mark,
                    "expected a comment or a line break, but found %r"
                    % ch.encode('utf-8'), self.get_mark())
        self.scan_line_break()

    def scan_block_scalar_indentation(self):
        # See the specification for details.
        # Skips leading empty lines and records the deepest column seen, used
        # to auto-detect the scalar's indentation.
        chunks = []
        max_indent = 0
        end_mark = self.get_mark()
        while self.peek() in u' \r\n\x85\u2028\u2029':
            if self.peek() != u' ':
                chunks.append(self.scan_line_break())
                end_mark = self.get_mark()
            else:
                self.forward()
                if self.column > max_indent:
                    max_indent = self.column
        return chunks, max_indent, end_mark

    def scan_block_scalar_breaks(self, indent):
        # See the specification for details.
        # Collects line breaks between scalar lines, skipping up to `indent`
        # leading spaces on each line.
        chunks = []
        end_mark = self.get_mark()
        while self.column < indent and self.peek() == u' ':
            self.forward()
        while self.peek() in u'\r\n\x85\u2028\u2029':
            chunks.append(self.scan_line_break())
            end_mark = self.get_mark()
            while self.column < indent and self.peek() == u' ':
                self.forward()
        return chunks, end_mark

    def scan_flow_scalar(self, style):
        # See the specification for details.
        # Note that we lose indentation rules for quoted scalars. Quoted
        # scalars don't need to adhere indentation because " and ' clearly
        # mark the beginning and the end of them. Therefore we are less
        # restrictive than the specification requires. We only need to check
        # that document separators are not included in scalars.
        if style == '"':
            double = True
        else:
            double = False
        chunks = []
        start_mark = self.get_mark()
        quote = self.peek()
        self.forward()
        chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
        while self.peek() != quote:
            chunks.extend(self.scan_flow_scalar_spaces(double, start_mark))
            chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
        self.forward()
        end_mark = self.get_mark()
        return ScalarToken(u''.join(chunks), False, start_mark, end_mark,
                style)

    # Single-character escapes recognized in double-quoted scalars.
    ESCAPE_REPLACEMENTS = {
        u'0':   u'\0',
        u'a':   u'\x07',
        u'b':   u'\x08',
        u't':   u'\x09',
        u'\t':  u'\x09',
        u'n':   u'\x0A',
        u'v':   u'\x0B',
        u'f':   u'\x0C',
        u'r':   u'\x0D',
        u'e':   u'\x1B',
        u' ':   u'\x20',
        u'\"':  u'\"',
        u'\\':  u'\\',
        u'N':   u'\x85',
        u'_':   u'\xA0',
        u'L':   u'\u2028',
        u'P':   u'\u2029',
    }

    # Numeric escapes: introducer -> number of hex digits that follow.
    ESCAPE_CODES = {
        u'x':   2,
        u'u':   4,
        u'U':   8,
    }

    def scan_flow_scalar_non_spaces(self, double, start_mark):
        # See the specification for details.
        # Scans a run of non-space content, resolving '' (single-quoted) and
        # backslash escapes (double-quoted) as it goes.
        chunks = []
        while True:
            length = 0
            while self.peek(length) not in u'\'\"\\\0 \t\r\n\x85\u2028\u2029':
                length += 1
            if length:
                chunks.append(self.prefix(length))
                self.forward(length)
            ch = self.peek()
            if not double and ch == u'\'' and self.peek(1) == u'\'':
                chunks.append(u'\'')
                self.forward(2)
            elif (double and ch == u'\'') or (not double and ch in u'\"\\'):
                chunks.append(ch)
                self.forward()
            elif double and ch == u'\\':
                self.forward()
                ch = self.peek()
                if ch in self.ESCAPE_REPLACEMENTS:
                    chunks.append(self.ESCAPE_REPLACEMENTS[ch])
                    self.forward()
                elif ch in self.ESCAPE_CODES:
                    length = self.ESCAPE_CODES[ch]
                    self.forward()
                    for k in range(length):
                        if self.peek(k) not in u'0123456789ABCDEFabcdef':
                            raise ScannerError("while scanning a double-quoted scalar", start_mark,
                                    "expected escape sequence of %d hexdecimal numbers, but found %r" %
                                        (length, self.peek(k).encode('utf-8')), self.get_mark())
                    code = int(self.prefix(length), 16)
                    chunks.append(unichr(code))
                    self.forward(length)
                elif ch in u'\r\n\x85\u2028\u2029':
                    self.scan_line_break()
                    chunks.extend(self.scan_flow_scalar_breaks(double, start_mark))
                else:
                    raise ScannerError("while scanning a double-quoted scalar", start_mark,
                            "found unknown escape character %r" % ch.encode('utf-8'), self.get_mark())
            else:
                return chunks

    def scan_flow_scalar_spaces(self, double, start_mark):
        # See the specification for details.
        # Handles whitespace inside a quoted scalar, applying line folding:
        # a single '\n' folds to a space unless followed by more breaks.
        chunks = []
        length = 0
        while self.peek(length) in u' \t':
            length += 1
        whitespaces = self.prefix(length)
        self.forward(length)
        ch = self.peek()
        if ch == u'\0':
            raise ScannerError("while scanning a quoted scalar", start_mark,
                    "found unexpected end of stream", self.get_mark())
        elif ch in u'\r\n\x85\u2028\u2029':
            line_break = self.scan_line_break()
            breaks = self.scan_flow_scalar_breaks(double, start_mark)
            if line_break != u'\n':
                chunks.append(line_break)
            elif not breaks:
                chunks.append(u' ')
            chunks.extend(breaks)
        else:
            chunks.append(whitespaces)
        return chunks

    def scan_flow_scalar_breaks(self, double, start_mark):
        # See the specification for details.
        chunks = []
        while True:
            # Instead of checking indentation, we check for document
            # separators.
            prefix = self.prefix(3)
            if (prefix == u'---' or prefix == u'...')   \
                    and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
                raise ScannerError("while scanning a quoted scalar", start_mark,
                        "found unexpected document separator", self.get_mark())
            while self.peek() in u' \t':
                self.forward()
            if self.peek() in u'\r\n\x85\u2028\u2029':
                chunks.append(self.scan_line_break())
            else:
                return chunks

    def scan_plain(self):
        # See the specification for details.
        # We add an additional restriction for the flow context:
        #   plain scalars in the flow context cannot contain ',', ':' and '?'.
        # We also keep track of the `allow_simple_key` flag here.
        # Indentation rules are relaxed for the flow context.
        chunks = []
        start_mark = self.get_mark()
        end_mark = start_mark
        indent = self.indent+1
        # We allow zero indentation for scalars, but then we need to check for
        # document separators at the beginning of the line.
        #if indent == 0:
        #    indent = 1
        spaces = []
        while True:
            length = 0
            if self.peek() == u'#':
                break
            while True:
                ch = self.peek(length)
                if ch in u'\0 \t\r\n\x85\u2028\u2029'   \
                        or (not self.flow_level and ch == u':' and
                                self.peek(length+1) in u'\0 \t\r\n\x85\u2028\u2029') \
                        or (self.flow_level and ch in u',:?[]{}'):
                    break
                length += 1
            # It's not clear what we should do with ':' in the flow context.
            if (self.flow_level and ch == u':'
                    and self.peek(length+1) not in u'\0 \t\r\n\x85\u2028\u2029,[]{}'):
                self.forward(length)
                raise ScannerError("while scanning a plain scalar", start_mark,
                    "found unexpected ':'", self.get_mark(),
                    "Please check http://pyyaml.org/wiki/YAMLColonInFlowContext for details.")
            if length == 0:
                break
            self.allow_simple_key = False
            chunks.extend(spaces)
            chunks.append(self.prefix(length))
            self.forward(length)
            end_mark = self.get_mark()
            spaces = self.scan_plain_spaces(indent, start_mark)
            if not spaces or self.peek() == u'#' \
                    or (not self.flow_level and self.column < indent):
                break
        return ScalarToken(u''.join(chunks), True, start_mark, end_mark)

    def scan_plain_spaces(self, indent, start_mark):
        # See the specification for details.
        # The specification is really confusing about tabs in plain scalars.
        # We just forbid them completely. Do not use tabs in YAML!
        # Returns None (instead of a chunk list) when a document separator
        # terminates the scalar; scan_plain then stops on the falsy result.
        chunks = []
        length = 0
        while self.peek(length) in u' ':
            length += 1
        whitespaces = self.prefix(length)
        self.forward(length)
        ch = self.peek()
        if ch in u'\r\n\x85\u2028\u2029':
            line_break = self.scan_line_break()
            self.allow_simple_key = True
            prefix = self.prefix(3)
            if (prefix == u'---' or prefix == u'...')   \
                    and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
                return
            breaks = []
            while self.peek() in u' \r\n\x85\u2028\u2029':
                if self.peek() == ' ':
                    self.forward()
                else:
                    breaks.append(self.scan_line_break())
                    prefix = self.prefix(3)
                    if (prefix == u'---' or prefix == u'...')   \
                            and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
                        return
            if line_break != u'\n':
                chunks.append(line_break)
            elif not breaks:
                chunks.append(u' ')
            chunks.extend(breaks)
        elif whitespaces:
            chunks.append(whitespaces)
        return chunks

    def scan_tag_handle(self, name, start_mark):
        # See the specification for details.
        # For some strange reasons, the specification does not allow '_' in
        # tag handles. I have allowed it anyway.
        ch = self.peek()
        if ch != u'!':
            raise ScannerError("while scanning a %s" % name, start_mark,
                    "expected '!', but found %r" % ch.encode('utf-8'),
                    self.get_mark())
        length = 1
        ch = self.peek(length)
        if ch != u' ':
            while u'0' <= ch <= u'9' or u'A' <= ch <= 'Z' or u'a' <= ch <= 'z'  \
                    or ch in u'-_':
                length += 1
                ch = self.peek(length)
            if ch != u'!':
                self.forward(length)
                raise ScannerError("while scanning a %s" % name, start_mark,
                        "expected '!', but found %r" % ch.encode('utf-8'),
                        self.get_mark())
            length += 1
        value = self.prefix(length)
        self.forward(length)
        return value

    def scan_tag_uri(self, name, start_mark):
        # See the specification for details.
        # Note: we do not check if URI is well-formed.
chunks = [] length = 0 ch = self.peek(length) while u'0' <= ch <= u'9' or u'A' <= ch <= 'Z' or u'a' <= ch <= 'z' \ or ch in u'-;/?:@&=+$,_.!~*\'()[]%': if ch == u'%': chunks.append(self.prefix(length)) self.forward(length) length = 0 chunks.append(self.scan_uri_escapes(name, start_mark)) else: length += 1 ch = self.peek(length) if length: chunks.append(self.prefix(length)) self.forward(length) length = 0 if not chunks: raise ScannerError("while parsing a %s" % name, start_mark, "expected URI, but found %r" % ch.encode('utf-8'), self.get_mark()) return u''.join(chunks) def scan_uri_escapes(self, name, start_mark): # See the specification for details. bytes = [] mark = self.get_mark() while self.peek() == u'%': self.forward() for k in range(2): if self.peek(k) not in u'0123456789ABCDEFabcdef': raise ScannerError("while scanning a %s" % name, start_mark, "expected URI escape sequence of 2 hexdecimal numbers, but found %r" % (self.peek(k).encode('utf-8')), self.get_mark()) bytes.append(chr(int(self.prefix(2), 16))) self.forward(2) try: value = unicode(''.join(bytes), 'utf-8') except UnicodeDecodeError, exc: raise ScannerError("while scanning a %s" % name, start_mark, str(exc), mark) return value def scan_line_break(self): # Transforms: # '\r\n' : '\n' # '\r' : '\n' # '\n' : '\n' # '\x85' : '\n' # '\u2028' : '\u2028' # '\u2029 : '\u2029' # default : '' ch = self.peek() if ch in u'\r\n\x85': if self.prefix(2) == u'\r\n': self.forward(2) else: self.forward() return u'\n' elif ch in u'\u2028\u2029': self.forward() return ch return u'' #try: # import psyco # psyco.bind(Scanner) #except ImportError: # pass
unknown
codeparrot/codeparrot-clean
// Copyright 2020 The Cockroach Authors. // // Use of this software is governed by the CockroachDB Software License // included in the /LICENSE file. package server import ( "context" "fmt" "sort" "strconv" "strings" "testing" "time" "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/liveness/livenesspb" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/server/serverpb" "github.com/cockroachdb/cockroach/pkg/testutils/datapathutils" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/datadriven" "github.com/cockroachdb/errors" "github.com/stretchr/testify/require" ) // TestSimplePaginate is a datadriven-based test for simplePaginate(). // Commands: // // paginate <limit> <offset> // <input> // ---- // result=<result> // next=<next> // // Calls paginate(). // input args: // - limit: max number of elements to return. // - offset: index offset since the start of slice. // - input: comma-separated list of ints used as input to simplePaginate. // // output args: // - result: the sub-sliced input returned from simplePaginate. // - next: the next offset. 
func TestSimplePaginate(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)

	// Datadriven runner: each "paginate <limit> <offset>" directive feeds a
	// comma-separated int list through simplePaginate and prints the result.
	datadriven.RunTest(t, datapathutils.TestDataPath(t, "simple_paginate"), func(t *testing.T, d *datadriven.TestData) string {
		switch d.Cmd {
		case "paginate":
			var input []int
			if len(d.CmdArgs) != 2 {
				return "expected 2 args: paginate <limit> <offset>"
			}
			// Bare positional args arrive in CmdArgs[i].Key.
			limit, err := strconv.Atoi(d.CmdArgs[0].Key)
			if err != nil {
				return err.Error()
			}
			offset, err := strconv.Atoi(d.CmdArgs[1].Key)
			if err != nil {
				return err.Error()
			}
			inputString := strings.TrimSpace(d.Input)
			if len(inputString) > 0 {
				// Parse the comma-separated input list; an empty input body
				// deliberately leaves `input` nil.
				var inputSlice []int
				for _, part := range strings.Split(inputString, ",") {
					val, err := strconv.Atoi(strings.TrimSpace(part))
					if err != nil {
						return err.Error()
					}
					inputSlice = append(inputSlice, val)
				}
				input = inputSlice
			}
			result, next := simplePaginate(input, limit, offset)
			return fmt.Sprintf("result=%v\nnext=%d", result, next)
		default:
			return fmt.Sprintf("unexpected command: %s", d.Cmd)
		}
	})
}
// // paginate // limit=<limit> // nodeID=<nodeID> // length=<length> // ---- // start: <start> // end: <end> // newLimit: <newlimit> // state: <printed-state> // // Calls paginate() // input args: // - limit: Max objects to return from paginate(). // - nodeID: ID of node the response is coming from. // - length: length of values in current node's response. // // output args: // - start: Start idx of response slice. // - end: End idx of response slice. // - newLimit: Limit to be used on next call to paginate(), if current slice // doesn't have `limit` remaining items. 0 if `limit` was reached. // - printed-state: textual representation of current pagination state. // // unmarshal // <input> // ---- // <printed-state> // // Unmarshals base64-encoded string into a paginationState. Opposite of marshal. // input args: // - input: base64-encoded string to unmarshal. // // output args: // - printed-state: textual representation of unmarshalled pagination state. // // marshal // ---- // <text> // // Marshals current state to base64-encoded string. // output args: // - text: base64-encoded string that can be passed to unmarshal. 
func TestPaginationState(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)

	// parseNodesString turns "1, 2, 3" into []roachpb.NodeID{1, 2, 3}.
	parseNodesString := func(t *testing.T, nodesString string) []roachpb.NodeID {
		var res []roachpb.NodeID
		for _, node := range strings.Split(nodesString, ",") {
			i, err := strconv.Atoi(strings.TrimSpace(node))
			require.NoError(t, err)
			res = append(res, roachpb.NodeID(i))
		}
		return res
	}
	// printState renders the paginationState in the textual form the
	// datadriven expectations are written in.
	printState := func(state paginationState) string {
		var builder strings.Builder
		fmt.Fprintf(&builder, "nodesQueried:")
		for i, node := range state.nodesQueried {
			if i > 0 {
				fmt.Fprintf(&builder, ",")
			} else {
				fmt.Fprintf(&builder, " ")
			}
			fmt.Fprintf(&builder, "%d", node)
		}
		fmt.Fprintf(&builder, "\ninProgress: %d", state.inProgress)
		fmt.Fprintf(&builder, "\ninProgressIndex: %d", state.inProgressIndex)
		fmt.Fprintf(&builder, "\nnodesToQuery:")
		for i, node := range state.nodesToQuery {
			if i > 0 {
				fmt.Fprintf(&builder, ",")
			} else {
				fmt.Fprintf(&builder, " ")
			}
			fmt.Fprintf(&builder, "%d", node)
		}
		return builder.String()
	}
	// state persists across directives so later commands operate on the
	// state built up by earlier ones.
	var state paginationState
	datadriven.RunTest(t, datapathutils.TestDataPath(t, "pagination_state"), func(t *testing.T, d *datadriven.TestData) string {
		switch d.Cmd {
		case "define":
			state = paginationState{}
			for _, line := range strings.Split(d.Input, "\n") {
				parts := strings.Split(line, ":")
				switch parts[0] {
				case "queried":
					state.nodesQueried = parseNodesString(t, parts[1])
				case "to-query":
					state.nodesToQuery = parseNodesString(t, parts[1])
				case "in-progress":
					inProgress, err := strconv.Atoi(strings.TrimSpace(parts[1]))
					require.NoError(t, err)
					state.inProgress = roachpb.NodeID(inProgress)
				case "in-progress-index":
					inProgressIdx, err := strconv.Atoi(strings.TrimSpace(parts[1]))
					require.NoError(t, err)
					state.inProgressIndex = inProgressIdx
				default:
					return fmt.Sprintf("unexpected keyword: %s", parts[0])
				}
			}
			return "ok"
		case "merge-node-ids":
			state.mergeNodeIDs(parseNodesString(t, d.Input))
			return printState(state)
		case "paginate":
			var limit, nodeID, length int
			var err error
			for _, line := range strings.Split(d.Input, "\n") {
				fields := strings.Fields(line)
				if len(fields) != 2 {
					return "expected lines in the format <field> <value>"
				}
				switch fields[0] {
				case "limit":
					limit, err = strconv.Atoi(fields[1])
				case "nodeID":
					nodeID, err = strconv.Atoi(fields[1])
				case "length":
					length, err = strconv.Atoi(fields[1])
				default:
					return fmt.Sprintf("unexpected field: %s", fields[0])
				}
				require.NoError(t, err)
			}
			start, end, newLimit, err := state.paginate(limit, roachpb.NodeID(nodeID), length)
			if err != nil {
				return err.Error()
			}
			return fmt.Sprintf("start: %d\nend: %d\nnewLimit: %d\nstate:\n%s", start, end, newLimit, printState(state))
		case "marshal":
			textState, err := state.MarshalText()
			require.NoError(t, err)
			return string(textState)
		case "unmarshal":
			require.NoError(t, state.UnmarshalText([]byte(d.Input)))
			return printState(state)
		default:
			return fmt.Sprintf("unexpected command: %s", d.Cmd)
		}
	})
}

// testNodeResponse is one fake element of a per-node RPC response:
// the originating node plus an int payload.
type testNodeResponse struct {
	nodeID roachpb.NodeID
	val    int
}

// TestRPCPaginator tests the rpcPaginator struct. It constructs a hypothetical
// underlying RPC response from nodes, then sends off fake RPCs to them in
// parallel, while letting rpcPaginator merge and truncate those replies
// according to set limits. The total elements returned should match across
// different limit values.
func TestRPCPaginator(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)

	// Each test case consists of a set of limit values to be tested, and a
	// map of node IDs and the number of elements to be returned by the nodeFn's
	// of each of those nodes. The test repeatedly calls rpcPaginator with a limit
	// until there are no more values to return. The total set of values returned
	// by the paginator should match across all values of limits in a given test
	// case.
	//
	// If numResponses has a negative int for a particular node, that node returns
	// an error instead. The number of expected errors is stored in `errors`.
	testCases := []struct {
		limits       []int
		numResponses map[roachpb.NodeID]int
		errors       int
	}{
		{[]int{3, 1, 5, 7, 9}, map[roachpb.NodeID]int{1: 5, 2: 10, 3: 7, 5: 10}, 0},
		{[]int{1, 5, 10}, map[roachpb.NodeID]int{1: 5, 2: 0, 3: -1, 5: 2}, 1},
	}

	ctx, done := context.WithTimeout(context.Background(), 10*time.Second)
	defer done()
	for i, tc := range testCases {
		tc := tc
		t.Run(fmt.Sprintf("testCase=%d", i), func(t *testing.T) {
			// Build a reference response first, to compare each potential limit with.
			var referenceResp []testNodeResponse
			for nodeID, numResponses := range tc.numResponses {
				for i := 0; i < numResponses; i++ {
					referenceResp = append(referenceResp, testNodeResponse{nodeID, i})
				}
			}
			// Build a reference response, consisting of all the fake node responses
			// sorted by node IDs and then vals. The paginator will be expected to
			// return a sub-slice of this larger response each time it's called, and
			// by the end, appending the paginator's responses to one another should
			// yield a response that matches it exactly.
			sort.Slice(referenceResp, func(i, j int) bool {
				if referenceResp[i].nodeID == referenceResp[j].nodeID {
					return referenceResp[i].val < referenceResp[j].val
				}
				return referenceResp[i].nodeID < referenceResp[j].nodeID
			})

			// dialFn hands the node ID back as the "client".
			dialFn := func(ctx context.Context, id roachpb.NodeID) (client interface{}, err error) {
				return id, nil
			}
			nodeFn := func(ctx context.Context, client interface{}, nodeID roachpb.NodeID) (res []testNodeResponse, err error) {
				numResponses := tc.numResponses[nodeID]
				// If a negative value is stored, return an error instead.
				if numResponses < 0 {
					return nil, errors.New("injected")
				}
				var response []testNodeResponse
				// For positive values of numResponses, return slices of ints that go
				// [0, 1, 2, ..., numResponses-1].
				for i := 0; i < numResponses; i++ {
					response = append(response, testNodeResponse{nodeID, i})
				}
				return response, nil
			}

			// For each limit specified in tc.limits, run the paginator until
			// all values are exhausted.
			for _, limit := range tc.limits {
				t.Run(fmt.Sprintf("limit=%d", limit), func(t *testing.T) {
					var response []testNodeResponse
					errorsDetected := 0
					responseFn := func(nodeID roachpb.NodeID, resp []testNodeResponse) {
						response = append(response, resp...)
					}
					errorFn := func(nodeID roachpb.NodeID, nodeFnError error) {
						errorsDetected++
					}

					var pagState paginationState
					sortedNodeIDs := make([]roachpb.NodeID, 0, len(tc.numResponses))
					for nodeID := range tc.numResponses {
						sortedNodeIDs = append(sortedNodeIDs, nodeID)
					}
					// mergeNodeIDs expects its input sorted.
					sort.Slice(sortedNodeIDs, func(i, j int) bool {
						return sortedNodeIDs[i] < sortedNodeIDs[j]
					})
					pagState.mergeNodeIDs(sortedNodeIDs)
					for {
						nodesToQuery := []roachpb.NodeID{pagState.inProgress}
						nodesToQuery = append(nodesToQuery, pagState.nodesToQuery...)
						paginator := rpcNodePaginator[interface{}, testNodeResponse]{
							limit:        limit,
							numNodes:     len(nodesToQuery),
							errorCtx:     "test",
							pagState:     pagState,
							nodeStatuses: make(map[serverID]livenesspb.NodeLivenessStatus),
							dialFn:       dialFn,
							nodeFn:       nodeFn,
							responseFn:   responseFn,
							errorFn:      errorFn,
						}
						paginator.init()
						// Issue requests in parallel.
						for idx, nodeID := range nodesToQuery {
							go func(nodeID roachpb.NodeID, idx int) {
								paginator.queryNode(ctx, nodeID, idx, noTimeout)
							}(nodeID, idx)
						}
						var err error
						pagState, err = paginator.processResponses(ctx)
						require.NoError(t, err)

						// When no node is "in progress", we've gotten all values and can
						// break out of this loop.
						if pagState.inProgress == 0 {
							break
						}
					}

					// The chained paginated responses should match the reference response
					// that was built earlier.
					require.Equal(t, referenceResp, response)
					require.Equal(t, tc.errors, errorsDetected)
				})
			}
		})
	}
}

func TestRPCPaginatorWithTimeout(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)

	ctx := context.Background()
	server := serverutils.StartServerOnly(t, base.TestServerArgs{})
	defer server.Stopper().Stop(ctx)
	s := server.StatusServer().(*systemStatusServer)

	// The nodeFn blocks far longer than the 2s timeout below, so it only
	// returns once its context is done.
	nodeFn := func(ctx context.Context, client serverpb.RPCStatusClient, nodeID roachpb.NodeID) ([]interface{}, error) {
		select {
		case <-time.After(time.Second * 10):
		case <-ctx.Done():
			break
		}
		// Return an error that mimics the error returned when a rpc's context is cancelled:
		return nil, errors.New("some error")
	}
	responseFn := func(nodeID roachpb.NodeID, resp []interface{}) {
		// noop
	}
	var timeoutError error
	errorFn := func(nodeID roachpb.NodeID, err error) {
		timeoutError = err
		log.Dev.Infof(ctx, "error from node %d: %v", nodeID, err)
	}

	pagState := paginationState{}
	_, _ = paginatedIterateNodes(
		ctx, s.statusServer, "test-paginate-with-timeout", 1000, pagState, []roachpb.NodeID{}, time.Second*2,
		nodeFn,
		responseFn,
		errorFn,
	)

	// The per-node timeout, not the injected error, must be what surfaces.
	require.ErrorContains(t, timeoutError, "operation \"node fn\" timed out")
}
go
github
https://github.com/cockroachdb/cockroach
pkg/server/pagination_test.go
from twisted.plugin import IPlugin
from txircd.module_interface import IMode, IModuleData, Mode, ModuleData
from txircd.utils import ModeType
from zope.interface import implements


class InvisibleMode(ModuleData, Mode):
    """Implements user mode +i: invisible users are hidden from other users
    unless the viewer shares a channel with them (or is looking at
    themselves)."""
    implements(IPlugin, IModuleData, IMode)

    name = "InvisibleMode"
    core = True
    affectedActions = {"showchanneluser": 1, "showuser": 1}

    def actions(self):
        # Only run our visibility checks for users that actually have +i set.
        return [
            ("modeactioncheck-user-i-showchanneluser", 1, self.isInvisibleChan),
            ("modeactioncheck-user-i-showuser", 1, self.isInvisibleUser),
        ]

    def userModes(self):
        # Register "i" as a parameterless user mode handled by this module.
        return [("i", ModeType.NoParam, self)]

    def isInvisibleChan(self, user, channel, fromUser, userSeeing):
        # Mode-action gate: participate only when the target user is +i.
        return True if "i" in user.modes else None

    def isInvisibleUser(self, user, fromUser, userSeeing):
        # Same gate as isInvisibleChan, for the channel-less "showuser" action.
        return True if "i" in user.modes else None

    def apply(self, actionName, user, param, *params):
        # Dispatch to the handler matching the action being checked.
        handler = self.applyChannels if actionName == "showchanneluser" else self.applyUsers
        return handler(user, *params)

    def applyChannels(self, user, channel, fromUser, sameUser):
        # Visibility in a channel context: users always see themselves.
        if user != sameUser:
            return None
        # Hide unless the viewer is actually present in the channel.
        visible = bool(channel) and fromUser in channel.users
        return None if visible else False

    def applyUsers(self, user, fromUser, sameUser):
        # Visibility without a channel context: users always see themselves.
        if user != sameUser:
            return None
        # Get the set intersection to see if there is any overlap
        sharesChannel = bool(set(fromUser.channels).intersection(user.channels))
        return None if sharesChannel else False


invisibleMode = InvisibleMode()
unknown
codeparrot/codeparrot-clean
{ "compilerOptions": { "target": "es5", "module": "esnext", "jsx": "react-jsx", "strict": false, "esModuleInterop": true, "skipLibCheck": true, "forceConsistentCasingInFileNames": true, "lib": ["dom", "dom.iterable", "esnext"], "allowJs": true, "noEmit": true, "moduleResolution": "node", "resolveJsonModule": true, "isolatedModules": true, "incremental": true }, "exclude": ["node_modules"], "include": ["next-env.d.ts", "**/*.ts", "**/*.tsx"] }
json
github
https://github.com/vercel/next.js
examples/cms-sitefinity/tsconfig.json
from browser import window, alert


def _randint(a, b):
    # Uniform integer in [a, b] built from JS Math.random (not seedable).
    return int(window.Math.random()*(b-a+1)+a)


def _rand_with_seed(x, rand_obj):
    # Deterministic pseudo-random float in [0, 1) derived from
    # rand_obj._state; the state is advanced on every call.
    # NOTE(review): the `x` parameter is immediately overwritten and only the
    # state matters — callers pass a dummy value.
    x = window.Math.sin(rand_obj._state) * 10000
    # Adding 1 is not reliable because of current integer implementation
    # If rand_obj._state is not a "safe integer" in the range [-2**53, 2**53]
    # the increment between 2 different values is a power of 2
    # It is stored in an attribute of rand_obj to avoid having to compute it
    # for each iteration
    if not hasattr(rand_obj, 'incr'):
        rand_obj.incr = 1
        n = rand_obj._state
        while n+rand_obj.incr==n:
            # increase the increment until the increment value is different
            rand_obj.incr *= 2
    rand_obj._state += rand_obj.incr
    return x - window.Math.floor(x)


def _urandom(n, rand_obj=None):
    """urandom(n) -> str

    Return n random bytes suitable for cryptographic use."""
    # NOTE(review): both branches ultimately derive from window.Math.random /
    # Math.sin, which are not CSPRNGs — despite the docstring (kept verbatim
    # from CPython), this is NOT suitable for real cryptographic use.
    if rand_obj is None or rand_obj._state is None:
        # Unseeded: draw each byte directly from Math.random.
        randbytes= [_randint(0,255) for i in range(n)]
    else:
        # Seeded: reproducible bytes from the seeded generator.
        randbytes= []
        for i in range(n):
            randbytes.append(int(256*_rand_with_seed(i, rand_obj)))
    return bytes(randbytes)


class Random:
    """Random number generator base class used by bound module functions.

    Used to instantiate instances of Random to get generators that don't
    share state.

    Class Random can also be subclassed if you want to use a different basic
    generator of your own devising: in that case, override the following
    methods:  random(), seed(), getstate(), and setstate().
    Optionally, implement a getrandbits() method so that randrange()
    can cover arbitrarily large ranges.
    """

    #random
    #seed
    #getstate
    #setstate

    VERSION = 3     # used by getstate/setstate

    def __init__(self, x=None):
        """Initialize an instance.

        Optional argument x controls seeding, as for Random.seed().
        """
        # _state doubles as the seed; None means "unseeded" (see _urandom).
        self._state=x

    def seed(self, a=None, version=2):
        """Initialize internal state from hashable object.

        None or no argument seeds from current time or from an operating
        system specific randomness source if available.

        For version 2 (the default), all of the bits are used if *a* is a
        str, bytes, or bytearray.  For version 1, the hash() of *a* is
        used instead.

        If *a* is an int, all bits are used.
        """
        # NOTE(review): unlike CPython, the seed object is stored as-is and
        # `version` is ignored; None keeps the generator unseeded rather than
        # seeding from the OS.
        self._state=a
        self.gauss_next = None

    def getstate(self):
        """Return internal state; can be passed to setstate() later."""
        return self._state

    def setstate(self, state):
        """Restore internal state from object returned by getstate()."""
        self._state=state

    def random(self):
        """Get the next random number in the range [0.0, 1.0)."""
        # Delegates to the browser's Math.random; ignores any seed.
        return window.Math.random()

    def getrandbits(self, k):
        """getrandbits(k) -> x.  Generates a long int with k random bits."""
        if k <= 0:
            raise ValueError('number of bits must be greater than zero')
        if k != int(k):
            raise TypeError('number of bits should be an integer')
        numbytes = (k + 7) // 8                       # bits / 8 and rounded up
        x = int.from_bytes(_urandom(numbytes, self), 'big')
        # Drop the excess low-order bits from the rounded-up byte count.
        return x >> (numbytes * 8 - k)        # trim excess bits
unknown
codeparrot/codeparrot-clean
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

# Unit tests for nova.virt.xenapi.vm_utils (Python 2, mox record/replay style).

import contextlib
import uuid

from eventlet import greenthread
import fixtures
import mock
from mox3 import mox
from oslo_concurrency import lockutils
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_config import fixture as config_fixture
from oslo_utils import timeutils
from oslo_utils import units
import six

from nova.compute import flavors
from nova.compute import power_state
from nova.compute import vm_mode
from nova import context
from nova import exception
from nova import objects
from nova import test
from nova.tests.unit import fake_instance
from nova.tests.unit.objects import test_flavor
from nova.tests.unit.virt.xenapi import stubs
from nova.tests.unit.virt.xenapi import test_xenapi
from nova import utils
from nova.virt import hardware
from nova.virt.xenapi.client import session as xenapi_session
from nova.virt.xenapi import driver as xenapi_conn
from nova.virt.xenapi import fake
from nova.virt.xenapi import vm_utils

CONF = cfg.CONF

# Storage backend discriminators for get_fake_connection_data.
XENSM_TYPE = 'xensm'
ISCSI_TYPE = 'iscsi'


def get_fake_connection_data(sr_type):
    # Canned volume connection_data dicts keyed by backend type.
    fakes = {XENSM_TYPE: {'sr_uuid': 'falseSR',
                          'name_label': 'fake_storage',
                          'name_description': 'test purposes',
                          'server': 'myserver',
                          'serverpath': '/local/scratch/myname',
                          'sr_type': 'nfs',
                          'introduce_sr_keys': ['server',
                                                'serverpath',
                                                'sr_type'],
                          'vdi_uuid': 'falseVDI'},
             ISCSI_TYPE: {'volume_id': 'fake_volume_id',
                          'target_lun': 1,
                          'target_iqn': 'fake_iqn:volume-fake_volume_id',
                          'target_portal': u'localhost:3260',
                          'target_discovered': False}, }
    return fakes[sr_type]


def _get_fake_session(error=None):
    # Mock XenAPI session; if `error` is given, every call_xenapi raises a
    # XenAPI.Failure whose first detail is `error`.
    session = mock.Mock()
    xenapi_session.apply_session_helpers(session)

    if error is not None:
        class FakeException(Exception):
            details = [error, "a", "b", "c"]

        session.XenAPI.Failure = FakeException
        session.call_xenapi.side_effect = FakeException

    return session


@contextlib.contextmanager
def contextified(result):
    # Wrap a plain value so it can stand in for a context-manager return.
    yield result


def _fake_noop(*args, **kwargs):
    return


class VMUtilsTestBase(stubs.XenAPITestBaseNoDB):
    pass


class LookupTestCase(VMUtilsTestBase):
    # Tests for vm_utils.lookup (VM lookup by name label, with optional
    # "-rescue" variant checking).
    def setUp(self):
        super(LookupTestCase, self).setUp()
        self.session = self.mox.CreateMockAnything('Fake Session')
        self.name_label = 'my_vm'

    def _do_mock(self, result):
        # Expect one get_by_name_label call returning `result`.
        self.session.call_xenapi(
            "VM.get_by_name_label", self.name_label).AndReturn(result)
        self.mox.ReplayAll()

    def test_normal(self):
        self._do_mock(['x'])
        result = vm_utils.lookup(self.session, self.name_label)
        self.assertEqual('x', result)

    def test_no_result(self):
        self._do_mock([])
        result = vm_utils.lookup(self.session, self.name_label)
        self.assertIsNone(result)

    def test_too_many(self):
        # More than one match is ambiguous and must raise.
        self._do_mock(['a', 'b'])
        self.assertRaises(exception.InstanceExists,
                          vm_utils.lookup,
                          self.session, self.name_label)

    def test_rescue_none(self):
        # No rescue VM: falls through to the plain name lookup.
        self.session.call_xenapi(
            "VM.get_by_name_label", self.name_label + '-rescue').AndReturn([])
        self._do_mock(['x'])
        result = vm_utils.lookup(self.session, self.name_label,
                                 check_rescue=True)
        self.assertEqual('x', result)

    def test_rescue_found(self):
        # The rescue VM takes priority when present.
        self.session.call_xenapi(
            "VM.get_by_name_label",
            self.name_label + '-rescue').AndReturn(['y'])
        self.mox.ReplayAll()
        result = vm_utils.lookup(self.session, self.name_label,
                                 check_rescue=True)
        self.assertEqual('y', result)

    def test_rescue_too_many(self):
        self.session.call_xenapi(
            "VM.get_by_name_label",
            self.name_label + '-rescue').AndReturn(['a', 'b', 'c'])
        self.mox.ReplayAll()
        self.assertRaises(exception.InstanceExists,
                          vm_utils.lookup,
                          self.session, self.name_label,
                          check_rescue=True)


class GenerateConfigDriveTestCase(VMUtilsTestBase):
    # Tests for vm_utils.generate_configdrive (config-drive ISO creation).
    def test_no_admin_pass(self):
        instance = {}

        self.mox.StubOutWithMock(vm_utils, 'safe_find_sr')
        vm_utils.safe_find_sr('session').AndReturn('sr_ref')

        self.mox.StubOutWithMock(vm_utils, 'create_vdi')
        vm_utils.create_vdi('session', 'sr_ref', instance, 'config-2',
                            'configdrive', 64 * units.Mi).AndReturn('vdi_ref')

        self.mox.StubOutWithMock(vm_utils, 'vdi_attached_here')
        vm_utils.vdi_attached_here(
            'session', 'vdi_ref', read_only=False).AndReturn(
                contextified('mounted_dev'))

        # Fake InstanceMetadata that also asserts the network_info
        # passed through generate_configdrive.
        class FakeInstanceMetadata(object):
            def __init__(_self, instance, content=None, extra_md=None,
                         network_info=None):
                self.assertEqual(network_info, "nw_info")

            def metadata_for_config_drive(_self):
                return []

        self.useFixture(fixtures.MonkeyPatch(
            'nova.api.metadata.base.InstanceMetadata', FakeInstanceMetadata))

        self.mox.StubOutWithMock(utils, 'execute')
        utils.execute('genisoimage', '-o', mox.IgnoreArg(), '-ldots',
                      '-allow-lowercase', '-allow-multidot', '-l',
                      '-publisher', mox.IgnoreArg(), '-quiet',
                      '-J', '-r', '-V', 'config-2', mox.IgnoreArg(),
                      attempts=1, run_as_root=False).AndReturn(None)
        utils.execute('dd', mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg(),
                      run_as_root=True).AndReturn(None)

        self.mox.StubOutWithMock(vm_utils, 'create_vbd')
        vm_utils.create_vbd('session', 'vm_ref', 'vdi_ref', mox.IgnoreArg(),
                            bootable=False, read_only=True).AndReturn(None)

        self.mox.ReplayAll()

        # And the actual call we're testing
        vm_utils.generate_configdrive('session', instance, 'vm_ref',
                                      'userdevice', "nw_info")

    @mock.patch.object(vm_utils, "destroy_vdi")
    @mock.patch.object(vm_utils, "vdi_attached_here")
    @mock.patch.object(vm_utils, "create_vdi")
    @mock.patch.object(vm_utils, "safe_find_sr")
    def test_vdi_cleaned_up(self, mock_find, mock_create_vdi,
                            mock_attached, mock_destroy):
        # The VDI must be destroyed even when generation fails, and a
        # failing destroy must not mask the original exception.
        mock_create_vdi.return_value = 'vdi_ref'
        mock_attached.side_effect = test.TestingException
        mock_destroy.side_effect = exception.StorageError(reason="")

        instance = {"uuid": "asdf"}
        self.assertRaises(test.TestingException,
                          vm_utils.generate_configdrive,
                          'session', instance, 'vm_ref', 'userdevice',
                          'nw_info')
        mock_destroy.assert_called_once_with('session', 'vdi_ref')


class XenAPIGetUUID(VMUtilsTestBase):
    # Tests for vm_utils.get_this_vm_uuid across kernel variants.
    def test_get_this_vm_uuid_new_kernel(self):
        self.mox.StubOutWithMock(vm_utils, '_get_sys_hypervisor_uuid')

        vm_utils._get_sys_hypervisor_uuid().AndReturn(
            '2f46f0f5-f14c-ef1b-1fac-9eeca0888a3f')

        self.mox.ReplayAll()
        self.assertEqual('2f46f0f5-f14c-ef1b-1fac-9eeca0888a3f',
                         vm_utils.get_this_vm_uuid(None))
        self.mox.VerifyAll()

    def test_get_this_vm_uuid_old_kernel_reboot(self):
        # Old kernels: /sys read fails with EACCES, so the UUID is fetched
        # via two xenstore-read invocations instead.
        self.mox.StubOutWithMock(vm_utils, '_get_sys_hypervisor_uuid')
        self.mox.StubOutWithMock(utils, 'execute')

        vm_utils._get_sys_hypervisor_uuid().AndRaise(
            IOError(13, 'Permission denied'))
        utils.execute('xenstore-read', 'domid', run_as_root=True).AndReturn(
            ('27', ''))
        utils.execute('xenstore-read', '/local/domain/27/vm',
                      run_as_root=True).AndReturn(
            ('/vm/2f46f0f5-f14c-ef1b-1fac-9eeca0888a3f', ''))

        self.mox.ReplayAll()
        self.assertEqual('2f46f0f5-f14c-ef1b-1fac-9eeca0888a3f',
                         vm_utils.get_this_vm_uuid(None))
        self.mox.VerifyAll()


class FakeSession(object):
    # Minimal no-op stand-in for a XenAPI session; individual methods are
    # stubbed per-test with mox as needed.
    def call_xenapi(self, *args):
        pass

    def call_plugin(self, *args):
        pass

    def call_plugin_serialized(self, plugin, fn, *args, **kwargs):
        pass

    def call_plugin_serialized_with_retry(self, plugin, fn, num_retries,
                                          callback, *args, **kwargs):
        pass


class FetchVhdImageTestCase(VMUtilsTestBase):
    # Tests for vm_utils._fetch_vhd_image: glance vs. bittorrent download
    # handlers, fallback behavior, and VDI cleanup on failure.
    def setUp(self):
        super(FetchVhdImageTestCase, self).setUp()
        self.context = context.get_admin_context()
        self.context.auth_token = 'auth_token'
        self.session = FakeSession()
        self.instance = {"uuid": "uuid"}

        # Common expectations shared by every test in this class.
        self.mox.StubOutWithMock(vm_utils, '_make_uuid_stack')
        vm_utils._make_uuid_stack().AndReturn(["uuid_stack"])

        self.mox.StubOutWithMock(vm_utils, 'get_sr_path')
        vm_utils.get_sr_path(self.session).AndReturn('sr_path')

    def _stub_glance_download_vhd(self, raise_exc=None):
        # Expect one glance download_vhd plugin call; either return a fake
        # VDI record or raise `raise_exc`.
        self.mox.StubOutWithMock(
            self.session, 'call_plugin_serialized_with_retry')
        func = self.session.call_plugin_serialized_with_retry(
            'glance', 'download_vhd', 0, mox.IgnoreArg(), mox.IgnoreArg(),
            extra_headers={'X-Service-Catalog': '[]',
                           'X-Auth-Token': 'auth_token',
                           'X-Roles': '',
                           'X-Tenant-Id': None,
                           'X-User-Id': None,
                           'X-Identity-Status': 'Confirmed'},
            image_id='image_id',
            uuid_stack=["uuid_stack"],
            sr_path='sr_path')

        if raise_exc:
            func.AndRaise(raise_exc)
        else:
            func.AndReturn({'root': {'uuid': 'vdi'}})

    def _stub_bittorrent_download_vhd(self, raise_exc=None):
        # Same as above for the bittorrent plugin path.
        self.mox.StubOutWithMock(
            self.session, 'call_plugin_serialized')
        func = self.session.call_plugin_serialized(
            'bittorrent', 'download_vhd',
            image_id='image_id',
            uuid_stack=["uuid_stack"],
            sr_path='sr_path',
            torrent_download_stall_cutoff=600,
            torrent_listen_port_start=6881,
            torrent_listen_port_end=6891,
            torrent_max_last_accessed=86400,
            torrent_max_seeder_processes_per_host=1,
            torrent_seed_chance=1.0,
            torrent_seed_duration=3600,
            torrent_url='http://foo/image_id.torrent'
        )
        if raise_exc:
            func.AndRaise(raise_exc)
        else:
            func.AndReturn({'root': {'uuid': 'vdi'}})

    def test_fetch_vhd_image_works_with_glance(self):
        self.mox.StubOutWithMock(vm_utils, '_image_uses_bittorrent')
        vm_utils._image_uses_bittorrent(
            self.context, self.instance).AndReturn(False)

        self._stub_glance_download_vhd()

        self.mox.StubOutWithMock(vm_utils, 'safe_find_sr')
        vm_utils.safe_find_sr(self.session).AndReturn("sr")

        self.mox.StubOutWithMock(vm_utils, '_scan_sr')
        vm_utils._scan_sr(self.session, "sr")

        self.mox.StubOutWithMock(vm_utils, '_check_vdi_size')
        vm_utils._check_vdi_size(
            self.context, self.session, self.instance, "vdi")

        self.mox.ReplayAll()

        self.assertEqual("vdi", vm_utils._fetch_vhd_image(self.context,
            self.session, self.instance, 'image_id')['root']['uuid'])

        self.mox.VerifyAll()

    def test_fetch_vhd_image_works_with_bittorrent(self):
        cfg.CONF.import_opt('torrent_base_url',
                            'nova.virt.xenapi.image.bittorrent',
                            group='xenserver')
        self.flags(torrent_base_url='http://foo', group='xenserver')

        self.mox.StubOutWithMock(vm_utils, '_image_uses_bittorrent')
        vm_utils._image_uses_bittorrent(
            self.context, self.instance).AndReturn(True)

        self._stub_bittorrent_download_vhd()

        self.mox.StubOutWithMock(vm_utils, 'safe_find_sr')
        vm_utils.safe_find_sr(self.session).AndReturn("sr")

        self.mox.StubOutWithMock(vm_utils, '_scan_sr')
        vm_utils._scan_sr(self.session, "sr")

        self.mox.StubOutWithMock(vm_utils, '_check_vdi_size')
        vm_utils._check_vdi_size(self.context, self.session, self.instance,
                                 "vdi")

        self.mox.ReplayAll()

        self.assertEqual("vdi", vm_utils._fetch_vhd_image(self.context,
            self.session, self.instance, 'image_id')['root']['uuid'])

        self.mox.VerifyAll()

    def test_fetch_vhd_image_cleans_up_vdi_on_fail(self):
        # If the size check fails after download, the fetched VDI must be
        # destroyed; a failing destroy is swallowed so the original
        # FlavorDiskTooSmall still propagates.
        self.mox.StubOutWithMock(vm_utils, '_image_uses_bittorrent')
        vm_utils._image_uses_bittorrent(
            self.context, self.instance).AndReturn(False)

        self._stub_glance_download_vhd()

        self.mox.StubOutWithMock(vm_utils, 'safe_find_sr')
        vm_utils.safe_find_sr(self.session).AndReturn("sr")

        self.mox.StubOutWithMock(vm_utils, '_scan_sr')
        vm_utils._scan_sr(self.session, "sr")

        self.mox.StubOutWithMock(vm_utils, '_check_vdi_size')
        vm_utils._check_vdi_size(self.context, self.session, self.instance,
                                 "vdi").AndRaise(exception.FlavorDiskTooSmall)

        self.mox.StubOutWithMock(self.session, 'call_xenapi')
        self.session.call_xenapi("VDI.get_by_uuid", "vdi").AndReturn("ref")

        self.mox.StubOutWithMock(vm_utils, 'destroy_vdi')
        vm_utils.destroy_vdi(self.session,
                             "ref").AndRaise(exception.StorageError(reason=""))

        self.mox.ReplayAll()

        self.assertRaises(exception.FlavorDiskTooSmall,
                          vm_utils._fetch_vhd_image, self.context,
                          self.session, self.instance, 'image_id')

        self.mox.VerifyAll()

    def test_fallback_to_default_handler(self):
        # When the bittorrent handler errors, fetching retries via glance.
        cfg.CONF.import_opt('torrent_base_url',
                            'nova.virt.xenapi.image.bittorrent',
                            group='xenserver')
        self.flags(torrent_base_url='http://foo', group='xenserver')

        self.mox.StubOutWithMock(vm_utils, '_image_uses_bittorrent')
        vm_utils._image_uses_bittorrent(
            self.context, self.instance).AndReturn(True)

        self._stub_bittorrent_download_vhd(raise_exc=RuntimeError)

        # The fallback path re-fetches the uuid stack and SR path.
        vm_utils._make_uuid_stack().AndReturn(["uuid_stack"])
        vm_utils.get_sr_path(self.session).AndReturn('sr_path')

        self._stub_glance_download_vhd()

        self.mox.StubOutWithMock(vm_utils, 'safe_find_sr')
        vm_utils.safe_find_sr(self.session).AndReturn("sr")

        self.mox.StubOutWithMock(vm_utils, '_scan_sr')
        vm_utils._scan_sr(self.session, "sr")

        self.mox.StubOutWithMock(vm_utils, '_check_vdi_size')
        vm_utils._check_vdi_size(self.context, self.session, self.instance,
                                 "vdi")

        self.mox.ReplayAll()

        self.assertEqual("vdi", vm_utils._fetch_vhd_image(self.context,
            self.session, self.instance, 'image_id')['root']['uuid'])

        self.mox.VerifyAll()

    def test_default_handler_does_not_fallback_to_itself(self):
        # A glance failure with no alternate handler must propagate.
        cfg.CONF.import_opt('torrent_base_url',
                            'nova.virt.xenapi.image.bittorrent',
                            group='xenserver')
        self.flags(torrent_base_url='http://foo', group='xenserver')

        self.mox.StubOutWithMock(vm_utils, '_image_uses_bittorrent')
        vm_utils._image_uses_bittorrent(
            self.context, self.instance).AndReturn(False)

        self._stub_glance_download_vhd(raise_exc=RuntimeError)

        self.mox.ReplayAll()

        self.assertRaises(RuntimeError, vm_utils._fetch_vhd_image,
                self.context, self.session, self.instance, 'image_id')

        self.mox.VerifyAll()


class TestImageCompression(VMUtilsTestBase):
    # NOTE(review): this class is truncated at the end of the chunk; the
    # method body continues beyond the visible source.
    def test_image_compression(self):
        # Testing for nova.conf, too low, negative, and a correct value.
self.assertIsNone(vm_utils.get_compression_level()) self.flags(image_compression_level=0, group='xenserver') self.assertIsNone(vm_utils.get_compression_level()) self.flags(image_compression_level=-6, group='xenserver') self.assertIsNone(vm_utils.get_compression_level()) self.flags(image_compression_level=6, group='xenserver') self.assertEqual(vm_utils.get_compression_level(), 6) class ResizeHelpersTestCase(VMUtilsTestBase): def test_repair_filesystem(self): self.mox.StubOutWithMock(utils, 'execute') utils.execute('e2fsck', '-f', "-y", "fakepath", run_as_root=True, check_exit_code=[0, 1, 2]).AndReturn( ("size is: 42", "")) self.mox.ReplayAll() vm_utils._repair_filesystem("fakepath") def _call_tune2fs_remove_journal(self, path): utils.execute("tune2fs", "-O ^has_journal", path, run_as_root=True) def _call_tune2fs_add_journal(self, path): utils.execute("tune2fs", "-j", path, run_as_root=True) def _call_parted_mkpart(self, path, start, end): utils.execute('parted', '--script', path, 'rm', '1', run_as_root=True) utils.execute('parted', '--script', path, 'mkpart', 'primary', '%ds' % start, '%ds' % end, run_as_root=True) def _call_parted_boot_flag(sef, path): utils.execute('parted', '--script', path, 'set', '1', 'boot', 'on', run_as_root=True) def test_resize_part_and_fs_down_succeeds(self): self.mox.StubOutWithMock(vm_utils, "_repair_filesystem") self.mox.StubOutWithMock(utils, 'execute') dev_path = "/dev/fake" partition_path = "%s1" % dev_path vm_utils._repair_filesystem(partition_path) self._call_tune2fs_remove_journal(partition_path) utils.execute("resize2fs", partition_path, "10s", run_as_root=True) self._call_parted_mkpart(dev_path, 0, 9) self._call_parted_boot_flag(dev_path) self._call_tune2fs_add_journal(partition_path) self.mox.ReplayAll() vm_utils._resize_part_and_fs("fake", 0, 20, 10, "boot") def test_log_progress_if_required(self): self.mox.StubOutWithMock(vm_utils.LOG, "debug") vm_utils.LOG.debug("Sparse copy in progress, " "%(complete_pct).2f%% complete. 
" "%(left)s bytes left to copy", {"complete_pct": 50.0, "left": 1}) current = timeutils.utcnow() timeutils.set_time_override(current) timeutils.advance_time_seconds(vm_utils.PROGRESS_INTERVAL_SECONDS + 1) self.mox.ReplayAll() vm_utils._log_progress_if_required(1, current, 2) def test_log_progress_if_not_required(self): self.mox.StubOutWithMock(vm_utils.LOG, "debug") current = timeutils.utcnow() timeutils.set_time_override(current) timeutils.advance_time_seconds(vm_utils.PROGRESS_INTERVAL_SECONDS - 1) self.mox.ReplayAll() vm_utils._log_progress_if_required(1, current, 2) def test_resize_part_and_fs_down_fails_disk_too_big(self): self.mox.StubOutWithMock(vm_utils, "_repair_filesystem") self.mox.StubOutWithMock(utils, 'execute') dev_path = "/dev/fake" partition_path = "%s1" % dev_path new_sectors = 10 vm_utils._repair_filesystem(partition_path) self._call_tune2fs_remove_journal(partition_path) mobj = utils.execute("resize2fs", partition_path, "%ss" % new_sectors, run_as_root=True) mobj.AndRaise(processutils.ProcessExecutionError) self.mox.ReplayAll() self.assertRaises(exception.ResizeError, vm_utils._resize_part_and_fs, "fake", 0, 20, 10, "boot") def test_resize_part_and_fs_up_succeeds(self): self.mox.StubOutWithMock(vm_utils, "_repair_filesystem") self.mox.StubOutWithMock(utils, 'execute') dev_path = "/dev/fake" partition_path = "%s1" % dev_path vm_utils._repair_filesystem(partition_path) self._call_tune2fs_remove_journal(partition_path) self._call_parted_mkpart(dev_path, 0, 29) utils.execute("resize2fs", partition_path, run_as_root=True) self._call_tune2fs_add_journal(partition_path) self.mox.ReplayAll() vm_utils._resize_part_and_fs("fake", 0, 20, 30, "") def test_resize_disk_throws_on_zero_size(self): self.assertRaises(exception.ResizeError, vm_utils.resize_disk, "session", "instance", "vdi_ref", {"root_gb": 0}) def test_auto_config_disk_returns_early_on_zero_size(self): vm_utils.try_auto_configure_disk("bad_session", "bad_vdi_ref", 0) @mock.patch.object(utils, 
"execute") def test_get_partitions(self, mock_execute): parted_return = "BYT;\n...\n" parted_return += "1:2s:11s:10s:ext3::boot;\n" parted_return += "2:20s:11s:10s::bob:;\n" mock_execute.return_value = (parted_return, None) partitions = vm_utils._get_partitions("abc") self.assertEqual(2, len(partitions)) self.assertEqual((1, 2, 10, "ext3", "", "boot"), partitions[0]) self.assertEqual((2, 20, 10, "", "bob", ""), partitions[1]) class CheckVDISizeTestCase(VMUtilsTestBase): def setUp(self): super(CheckVDISizeTestCase, self).setUp() self.context = 'fakecontext' self.session = 'fakesession' self.instance = objects.Instance(uuid=str(uuid.uuid4())) self.flavor = objects.Flavor() self.vdi_uuid = 'fakeuuid' def test_not_too_large(self): self.mox.StubOutWithMock(vm_utils, '_get_vdi_chain_size') vm_utils._get_vdi_chain_size(self.session, self.vdi_uuid).AndReturn(1073741824) self.mox.ReplayAll() with mock.patch.object(self.instance, 'get_flavor') as get: self.flavor.root_gb = 1 get.return_value = self.flavor vm_utils._check_vdi_size(self.context, self.session, self.instance, self.vdi_uuid) def test_too_large(self): self.mox.StubOutWithMock(vm_utils, '_get_vdi_chain_size') vm_utils._get_vdi_chain_size(self.session, self.vdi_uuid).AndReturn(11811160065) # 10GB overhead allowed self.mox.ReplayAll() with mock.patch.object(self.instance, 'get_flavor') as get: self.flavor.root_gb = 1 get.return_value = self.flavor self.assertRaises(exception.FlavorDiskTooSmall, vm_utils._check_vdi_size, self.context, self.session, self.instance, self.vdi_uuid) def test_zero_root_gb_disables_check(self): with mock.patch.object(self.instance, 'get_flavor') as get: self.flavor.root_gb = 0 get.return_value = self.flavor vm_utils._check_vdi_size(self.context, self.session, self.instance, self.vdi_uuid) class GetInstanceForVdisForSrTestCase(VMUtilsTestBase): def setUp(self): super(GetInstanceForVdisForSrTestCase, self).setUp() self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF)) 
        self.fixture.config(disable_process_locking=True,
                            group='oslo_concurrency')
        self.flags(instance_name_template='%d',
                   firewall_driver='nova.virt.xenapi.firewall.'
                                   'Dom0IptablesFirewallDriver')
        self.flags(connection_url='test_url',
                   connection_password='test_pass',
                   group='xenserver')

    def test_get_instance_vdis_for_sr(self):
        vm_ref = fake.create_vm("foo", "Running")
        sr_ref = fake.create_sr()

        vdi_1 = fake.create_vdi('vdiname1', sr_ref)
        vdi_2 = fake.create_vdi('vdiname2', sr_ref)

        for vdi_ref in [vdi_1, vdi_2]:
            fake.create_vbd(vm_ref, vdi_ref)

        stubs.stubout_session(self.stubs, fake.SessionBase)
        driver = xenapi_conn.XenAPIDriver(False)

        result = list(vm_utils.get_instance_vdis_for_sr(
            driver._session, vm_ref, sr_ref))

        self.assertEqual([vdi_1, vdi_2], result)

    def test_get_instance_vdis_for_sr_no_vbd(self):
        # A VM with no VBDs on the SR yields an empty result.
        vm_ref = fake.create_vm("foo", "Running")
        sr_ref = fake.create_sr()

        stubs.stubout_session(self.stubs, fake.SessionBase)
        driver = xenapi_conn.XenAPIDriver(False)

        result = list(vm_utils.get_instance_vdis_for_sr(
            driver._session, vm_ref, sr_ref))

        self.assertEqual([], result)


class VMRefOrRaiseVMFoundTestCase(VMUtilsTestBase):
    """vm_ref_or_raise when lookup finds the VM."""

    def test_lookup_call(self):
        mock = mox.Mox()
        mock.StubOutWithMock(vm_utils, 'lookup')

        vm_utils.lookup('session', 'somename').AndReturn('ignored')

        mock.ReplayAll()
        vm_utils.vm_ref_or_raise('session', 'somename')
        mock.VerifyAll()

    def test_return_value(self):
        mock = mox.Mox()
        mock.StubOutWithMock(vm_utils, 'lookup')

        vm_utils.lookup(mox.IgnoreArg(), mox.IgnoreArg()).AndReturn('vmref')

        mock.ReplayAll()
        self.assertEqual(
            'vmref', vm_utils.vm_ref_or_raise('session', 'somename'))
        mock.VerifyAll()


class VMRefOrRaiseVMNotFoundTestCase(VMUtilsTestBase):
    """vm_ref_or_raise when lookup returns None."""

    def test_exception_raised(self):
        mock = mox.Mox()
        mock.StubOutWithMock(vm_utils, 'lookup')

        vm_utils.lookup('session', 'somename').AndReturn(None)

        mock.ReplayAll()
        self.assertRaises(
            exception.InstanceNotFound,
            lambda: vm_utils.vm_ref_or_raise('session', 'somename')
        )
        mock.VerifyAll()

    def test_exception_msg_contains_vm_name(self):
        mock = mox.Mox()
        mock.StubOutWithMock(vm_utils, 'lookup')

        vm_utils.lookup('session', 'somename').AndReturn(None)

        mock.ReplayAll()
        try:
            vm_utils.vm_ref_or_raise('session', 'somename')
        except exception.InstanceNotFound as e:
            self.assertIn('somename', six.text_type(e))
        mock.VerifyAll()


@mock.patch.object(vm_utils, 'safe_find_sr', return_value='safe_find_sr')
class CreateCachedImageTestCase(VMUtilsTestBase):
    """Tests for vm_utils._create_cached_image.

    The class decorator injects mock_safe_find_sr into every test; the
    call_xenapi side_effect lists encode the sequence of XenAPI replies
    each code path consumes.
    """

    def setUp(self):
        super(CreateCachedImageTestCase, self).setUp()
        self.session = _get_fake_session()

    @mock.patch.object(vm_utils, '_clone_vdi', return_value='new_vdi_ref')
    def test_cached(self, mock_clone_vdi, mock_safe_find_sr):
        self.session.call_xenapi.side_effect = ['ext', {'vdi_ref': 2},
                                                None, None, None, 'vdi_uuid']
        self.assertEqual((False, {'root': {'uuid': 'vdi_uuid',
                                           'file': None}}),
                         vm_utils._create_cached_image('context',
                                    self.session,
                                    'instance', 'name', 'uuid',
                                    vm_utils.ImageType.DISK_VHD))

    @mock.patch.object(vm_utils, '_safe_copy_vdi', return_value='new_vdi_ref')
    def test_no_cow(self, mock_safe_copy_vdi, mock_safe_find_sr):
        # use_cow_images=False copies the VDI instead of cloning it.
        self.flags(use_cow_images=False)
        self.session.call_xenapi.side_effect = ['ext', {'vdi_ref': 2},
                                                None, None, None, 'vdi_uuid']
        self.assertEqual((False, {'root': {'uuid': 'vdi_uuid',
                                           'file': None}}),
                         vm_utils._create_cached_image('context',
                                    self.session,
                                    'instance', 'name', 'uuid',
                                    vm_utils.ImageType.DISK_VHD))

    def test_no_cow_no_ext(self, mock_safe_find_sr):
        self.flags(use_cow_images=False)
        self.session.call_xenapi.side_effect = ['non-ext', {'vdi_ref': 2},
                                                'vdi_ref', None, None, None,
                                                'vdi_uuid']
        self.assertEqual((False, {'root': {'uuid': 'vdi_uuid',
                                           'file': None}}),
                         vm_utils._create_cached_image('context',
                                    self.session,
                                    'instance', 'name', 'uuid',
                                    vm_utils.ImageType.DISK_VHD))

    @mock.patch.object(vm_utils, '_clone_vdi', return_value='new_vdi_ref')
    @mock.patch.object(vm_utils, '_fetch_image',
                       return_value={'root': {'uuid': 'vdi_uuid',
                                              'file': None}})
    def test_noncached(self, mock_fetch_image, mock_clone_vdi,
                       mock_safe_find_sr):
        # Empty cache lookup ({}): the image is fetched and cached anew,
        # hence the True "downloaded" flag in the result.
        self.session.call_xenapi.side_effect = ['ext', {}, 'cache_vdi_ref',
                                                None, None, None, None, None,
                                                None, 'vdi_uuid']
        self.assertEqual((True, {'root': {'uuid': 'vdi_uuid',
                                          'file': None}}),
                         vm_utils._create_cached_image('context',
                                    self.session,
                                    'instance', 'name', 'uuid',
                                    vm_utils.ImageType.DISK_VHD))


class BittorrentTestCase(VMUtilsTestBase):
    """Tests for bittorrent image selection and create_image dispatch."""

    def setUp(self):
        super(BittorrentTestCase, self).setUp()
        self.context = context.get_admin_context()

    def test_image_uses_bittorrent(self):
        instance = {'system_metadata': {'image_bittorrent': True}}
        self.flags(torrent_images='some', group='xenserver')
        self.assertTrue(vm_utils._image_uses_bittorrent(self.context,
                                                        instance))

    def _test_create_image(self, cache_type):
        # Records which path create_image took ('some' -> cached,
        # 'none' -> direct fetch) and asserts it matches cache_type.
        instance = {'system_metadata': {'image_cache_in_nova': True}}
        self.flags(cache_images=cache_type, group='xenserver')

        was = {'called': None}

        def fake_create_cached_image(*args):
            was['called'] = 'some'
            return (False, {})

        self.stubs.Set(vm_utils, '_create_cached_image',
                       fake_create_cached_image)

        def fake_fetch_image(*args):
            was['called'] = 'none'
            return {}

        self.stubs.Set(vm_utils, '_fetch_image', fake_fetch_image)

        vm_utils.create_image(self.context, None, instance,
                              'foo', 'bar', 'baz')

        self.assertEqual(was['called'], cache_type)

    def test_create_image_cached(self):
        self._test_create_image('some')

    def test_create_image_uncached(self):
        self._test_create_image('none')


class ShutdownTestCase(VMUtilsTestBase):
    """hard/clean shutdown short-circuit when the VM is already down."""

    def test_hardshutdown_should_return_true_when_vm_is_shutdown(self):
        self.mock = mox.Mox()
        session = FakeSession()
        instance = "instance"
        vm_ref = "vm-ref"
        self.mock.StubOutWithMock(vm_utils, 'is_vm_shutdown')
        vm_utils.is_vm_shutdown(session, vm_ref).AndReturn(True)
        self.mock.StubOutWithMock(vm_utils, 'LOG')
        self.assertTrue(vm_utils.hard_shutdown_vm(
            session, instance, vm_ref))

    def test_cleanshutdown_should_return_true_when_vm_is_shutdown(self):
        self.mock = mox.Mox()
        session = FakeSession()
        instance = "instance"
        vm_ref = "vm-ref"
self.mock.StubOutWithMock(vm_utils, 'is_vm_shutdown') vm_utils.is_vm_shutdown(session, vm_ref).AndReturn(True) self.mock.StubOutWithMock(vm_utils, 'LOG') self.assertTrue(vm_utils.clean_shutdown_vm( session, instance, vm_ref)) class CreateVBDTestCase(VMUtilsTestBase): def setUp(self): super(CreateVBDTestCase, self).setUp() self.session = FakeSession() self.mock = mox.Mox() self.mock.StubOutWithMock(self.session, 'call_xenapi') self.vbd_rec = self._generate_vbd_rec() def _generate_vbd_rec(self): vbd_rec = {} vbd_rec['VM'] = 'vm_ref' vbd_rec['VDI'] = 'vdi_ref' vbd_rec['userdevice'] = '0' vbd_rec['bootable'] = False vbd_rec['mode'] = 'RW' vbd_rec['type'] = 'disk' vbd_rec['unpluggable'] = True vbd_rec['empty'] = False vbd_rec['other_config'] = {} vbd_rec['qos_algorithm_type'] = '' vbd_rec['qos_algorithm_params'] = {} vbd_rec['qos_supported_algorithms'] = [] return vbd_rec def test_create_vbd_default_args(self): self.session.call_xenapi('VBD.create', self.vbd_rec).AndReturn("vbd_ref") self.mock.ReplayAll() result = vm_utils.create_vbd(self.session, "vm_ref", "vdi_ref", 0) self.assertEqual(result, "vbd_ref") self.mock.VerifyAll() def test_create_vbd_osvol(self): self.session.call_xenapi('VBD.create', self.vbd_rec).AndReturn("vbd_ref") self.session.call_xenapi('VBD.add_to_other_config', "vbd_ref", "osvol", "True") self.mock.ReplayAll() result = vm_utils.create_vbd(self.session, "vm_ref", "vdi_ref", 0, osvol=True) self.assertEqual(result, "vbd_ref") self.mock.VerifyAll() def test_create_vbd_extra_args(self): self.vbd_rec['VDI'] = 'OpaqueRef:NULL' self.vbd_rec['type'] = 'a' self.vbd_rec['mode'] = 'RO' self.vbd_rec['bootable'] = True self.vbd_rec['empty'] = True self.vbd_rec['unpluggable'] = False self.session.call_xenapi('VBD.create', self.vbd_rec).AndReturn("vbd_ref") self.mock.ReplayAll() result = vm_utils.create_vbd(self.session, "vm_ref", None, 0, vbd_type="a", read_only=True, bootable=True, empty=True, unpluggable=False) self.assertEqual(result, "vbd_ref") 
        self.mock.VerifyAll()

    def test_attach_cd(self):
        self.mock.StubOutWithMock(vm_utils, 'create_vbd')

        # attach_cd creates an empty CD VBD and then inserts the VDI.
        vm_utils.create_vbd(self.session, "vm_ref", None, 1,
                vbd_type='cd', read_only=True, bootable=True,
                empty=True, unpluggable=False).AndReturn("vbd_ref")
        self.session.call_xenapi('VBD.insert', "vbd_ref", "vdi_ref")
        self.mock.ReplayAll()

        result = vm_utils.attach_cd(self.session, "vm_ref", "vdi_ref", 1)
        self.assertEqual(result, "vbd_ref")
        self.mock.VerifyAll()


class UnplugVbdTestCase(VMUtilsTestBase):
    """Tests for vm_utils.unplug_vbd error handling and retries."""

    @mock.patch.object(greenthread, 'sleep')
    def test_unplug_vbd_works(self, mock_sleep):
        session = _get_fake_session()
        vbd_ref = "vbd_ref"
        vm_ref = 'vm_ref'

        vm_utils.unplug_vbd(session, vbd_ref, vm_ref)

        session.call_xenapi.assert_called_once_with('VBD.unplug', vbd_ref)
        self.assertEqual(0, mock_sleep.call_count)

    def test_unplug_vbd_raises_unexpected_error(self):
        session = _get_fake_session()
        vbd_ref = "vbd_ref"
        vm_ref = 'vm_ref'
        session.call_xenapi.side_effect = test.TestingException()

        # NOTE(review): vm_ref/vbd_ref are passed in the opposite order to
        # the other tests here; harmless for this assertion since the fake
        # raises regardless, but worth confirming against unplug_vbd's
        # signature.
        self.assertRaises(test.TestingException, vm_utils.unplug_vbd,
                          session, vm_ref, vbd_ref)
        self.assertEqual(1, session.call_xenapi.call_count)

    def test_unplug_vbd_already_detached_works(self):
        # DEVICE_ALREADY_DETACHED is treated as success, no retry.
        error = "DEVICE_ALREADY_DETACHED"
        session = _get_fake_session(error)
        vbd_ref = "vbd_ref"
        vm_ref = 'vm_ref'

        vm_utils.unplug_vbd(session, vbd_ref, vm_ref)
        self.assertEqual(1, session.call_xenapi.call_count)

    def test_unplug_vbd_already_raises_unexpected_xenapi_error(self):
        session = _get_fake_session("")
        vbd_ref = "vbd_ref"
        vm_ref = 'vm_ref'

        self.assertRaises(exception.StorageError, vm_utils.unplug_vbd,
                          session, vbd_ref, vm_ref)
        self.assertEqual(1, session.call_xenapi.call_count)

    def _test_uplug_vbd_retries(self, mock_sleep, error):
        # Retryable errors: 1 initial attempt + 10 retries, sleeping
        # between each retry.
        session = _get_fake_session(error)
        vbd_ref = "vbd_ref"
        vm_ref = 'vm_ref'

        self.assertRaises(exception.StorageError, vm_utils.unplug_vbd,
                          session, vm_ref, vbd_ref)

        self.assertEqual(11, session.call_xenapi.call_count)
        self.assertEqual(10, mock_sleep.call_count)

    # NOTE(review): the leading underscore means this is never collected as
    # a test; it looks like it was meant to be test_..._with_neg_val —
    # confirm whether disabling it was intentional.
    def _test_uplug_vbd_retries_with_neg_val(self):
        session = _get_fake_session()
        self.flags(num_vbd_unplug_retries=-1, group='xenserver')
        vbd_ref = "vbd_ref"
        vm_ref = 'vm_ref'

        vm_utils.unplug_vbd(session, vbd_ref, vm_ref)
        self.assertEqual(1, session.call_xenapi.call_count)

    @mock.patch.object(greenthread, 'sleep')
    def test_uplug_vbd_retries_on_rejected(self, mock_sleep):
        self._test_uplug_vbd_retries(mock_sleep,
                                     "DEVICE_DETACH_REJECTED")

    @mock.patch.object(greenthread, 'sleep')
    def test_uplug_vbd_retries_on_internal_error(self, mock_sleep):
        self._test_uplug_vbd_retries(mock_sleep,
                                     "INTERNAL_ERROR")


class VDIOtherConfigTestCase(VMUtilsTestBase):
    """Tests to ensure that the code is populating VDI's `other_config`
    attribute with the correct metadata.
    """

    def setUp(self):
        super(VDIOtherConfigTestCase, self).setUp()

        class _FakeSession(object):
            def call_xenapi(self, operation, *args, **kwargs):
                # VDI.add_to_other_config -> VDI_add_to_other_config
                method = getattr(self, operation.replace('.', '_'), None)
                if method:
                    return method(*args, **kwargs)

                # Unhandled operations are recorded for later inspection.
                self.operation = operation
                self.args = args
                self.kwargs = kwargs

        self.session = _FakeSession()
        self.context = context.get_admin_context()
        self.fake_instance = {'uuid': 'aaaa-bbbb-cccc-dddd',
                              'name': 'myinstance'}

    def test_create_vdi(self):
        # Some images are registered with XenServer explicitly by calling
        # `create_vdi`
        vm_utils.create_vdi(self.session, 'sr_ref', self.fake_instance,
                            'myvdi', 'root', 1024, read_only=True)

        expected = {'nova_disk_type': 'root',
                    'nova_instance_uuid': 'aaaa-bbbb-cccc-dddd'}

        self.assertEqual(expected, self.session.args[0]['other_config'])

    def test_create_image(self):
        # Other images are registered implicitly when they are dropped into
        # the SR by a dom0 plugin or some other process
        self.flags(cache_images='none', group='xenserver')

        def fake_fetch_image(*args):
            return {'root': {'uuid': 'fake-uuid'}}

        self.stubs.Set(vm_utils, '_fetch_image', fake_fetch_image)

        other_config = {}

        def VDI_add_to_other_config(ref, key, value):
            other_config[key] = value

        # Stubbing on the session object and not class so we don't pollute
        # other tests
        self.session.VDI_add_to_other_config = VDI_add_to_other_config
        self.session.VDI_get_other_config = lambda vdi: {}

        vm_utils.create_image(self.context, self.session, self.fake_instance,
                              'myvdi', 'image1', vm_utils.ImageType.DISK_VHD)

        expected = {'nova_disk_type': 'root',
                    'nova_instance_uuid': 'aaaa-bbbb-cccc-dddd'}

        self.assertEqual(expected, other_config)

    def test_import_migrated_vhds(self):
        # Migrated images should preserve the `other_config`
        other_config = {}

        def VDI_add_to_other_config(ref, key, value):
            other_config[key] = value

        def call_plugin_serialized(*args, **kwargs):
            return {'root': {'uuid': 'aaaa-bbbb-cccc-dddd'}}

        # Stubbing on the session object and not class so we don't pollute
        # other tests
        self.session.VDI_add_to_other_config = VDI_add_to_other_config
        self.session.VDI_get_other_config = lambda vdi: {}
        self.session.call_plugin_serialized = call_plugin_serialized

        self.stubs.Set(vm_utils, 'get_sr_path', lambda *a, **k: None)
        self.stubs.Set(vm_utils, 'scan_default_sr', lambda *a, **k: None)

        vm_utils._import_migrated_vhds(self.session, self.fake_instance,
                                       "disk_label", "root", "vdi_label")

        expected = {'nova_disk_type': 'root',
                    'nova_instance_uuid': 'aaaa-bbbb-cccc-dddd'}

        self.assertEqual(expected, other_config)


class GenerateDiskTestCase(VMUtilsTestBase):
    """Tests for vm_utils._generate_disk partitioning/formatting."""

    def setUp(self):
        super(GenerateDiskTestCase, self).setUp()
        self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF))
        self.fixture.config(disable_process_locking=True,
                            group='oslo_concurrency')
        self.flags(instance_name_template='%d',
                   firewall_driver='nova.virt.xenapi.firewall.'
                                   'Dom0IptablesFirewallDriver')
        self.flags(connection_url='test_url',
                   connection_password='test_pass',
                   group='xenserver')
        stubs.stubout_session(self.stubs, fake.SessionBase)
        driver = xenapi_conn.XenAPIDriver(False)
        self.session = driver._session
        self.session.is_local_connection = False
        self.vm_ref = fake.create_vm("foo", "Running")

    def tearDown(self):
        super(GenerateDiskTestCase, self).tearDown()
        fake.destroy_vm(self.vm_ref)

    def _expect_parted_calls(self):
        # Local connections go through kpartx and tolerate non-zero parted
        # exit codes; remote connections require success.
        self.mox.StubOutWithMock(utils, "execute")
        self.mox.StubOutWithMock(utils, "trycmd")
        self.mox.StubOutWithMock(vm_utils, "destroy_vdi")
        self.mox.StubOutWithMock(vm_utils.os.path, "exists")
        if self.session.is_local_connection:
            utils.execute('parted', '--script', '/dev/fakedev', 'mklabel',
                          'msdos', check_exit_code=False, run_as_root=True)
            utils.execute('parted', '--script', '/dev/fakedev', '--', 'mkpart',
                          'primary', '0', '-0', check_exit_code=False,
                          run_as_root=True)
            vm_utils.os.path.exists('/dev/mapper/fakedev1').AndReturn(True)
            utils.trycmd('kpartx', '-a', '/dev/fakedev',
                         discard_warnings=True, run_as_root=True)
        else:
            utils.execute('parted', '--script', '/dev/fakedev', 'mklabel',
                          'msdos', check_exit_code=True, run_as_root=True)
            utils.execute('parted', '--script', '/dev/fakedev', '--', 'mkpart',
                          'primary', '0', '-0', check_exit_code=True,
                          run_as_root=True)

    def _check_vdi(self, vdi_ref, check_attached=True):
        vdi_rec = self.session.call_xenapi("VDI.get_record", vdi_ref)
        self.assertEqual(str(10 * units.Mi), vdi_rec["virtual_size"])
        if check_attached:
            vbd_ref = vdi_rec["VBDs"][0]
            vbd_rec = self.session.call_xenapi("VBD.get_record", vbd_ref)
            self.assertEqual(self.vm_ref, vbd_rec['VM'])
        else:
            self.assertEqual(0, len(vdi_rec["VBDs"]))

    @test_xenapi.stub_vm_utils_with_vdi_attached_here
    def test_generate_disk_with_no_fs_given(self):
        self._expect_parted_calls()

        self.mox.ReplayAll()
        vdi_ref = vm_utils._generate_disk(self.session, {"uuid": "fake_uuid"},
            self.vm_ref, "2", "name", "user", 10, None)
        self._check_vdi(vdi_ref)
    @test_xenapi.stub_vm_utils_with_vdi_attached_here
    def test_generate_disk_swap(self):
        # Swap disks get an extra mkswap on the new partition, on top of the
        # parted calls set up by _expect_parted_calls().
        self._expect_parted_calls()
        utils.execute('mkswap', '/dev/fakedev1', run_as_root=True)

        self.mox.ReplayAll()
        vdi_ref = vm_utils._generate_disk(self.session, {"uuid": "fake_uuid"},
            self.vm_ref, "2", "name", "swap", 10, "linux-swap")
        self._check_vdi(vdi_ref)

    @test_xenapi.stub_vm_utils_with_vdi_attached_here
    def test_generate_disk_ephemeral(self):
        # Ephemeral disks are formatted with mkfs using the requested
        # filesystem type ("ext4" here).
        self._expect_parted_calls()
        utils.execute('mkfs', '-t', 'ext4', '/dev/fakedev1',
                      run_as_root=True)

        self.mox.ReplayAll()
        vdi_ref = vm_utils._generate_disk(self.session, {"uuid": "fake_uuid"},
            self.vm_ref, "2", "name", "ephemeral", 10, "ext4")
        self._check_vdi(vdi_ref)

    @test_xenapi.stub_vm_utils_with_vdi_attached_here
    def test_generate_disk_ensure_cleanup_called(self):
        # If mkfs fails, the partially-created VDI must be destroyed, and the
        # original mkfs exception is re-raised even when destroy_vdi itself
        # fails with a StorageError.
        self._expect_parted_calls()
        utils.execute('mkfs', '-t', 'ext4', '/dev/fakedev1',
                      run_as_root=True).AndRaise(test.TestingException)
        vm_utils.destroy_vdi(
            self.session,
            mox.IgnoreArg()).AndRaise(exception.StorageError(reason=""))

        self.mox.ReplayAll()
        self.assertRaises(test.TestingException, vm_utils._generate_disk,
                          self.session, {"uuid": "fake_uuid"},
                          self.vm_ref, "2", "name", "ephemeral", 10, "ext4")

    @test_xenapi.stub_vm_utils_with_vdi_attached_here
    def test_generate_disk_ephemeral_local_not_attached(self):
        # With a local connection the partition shows up under /dev/mapper
        # (via kpartx), and passing vm_ref=None means the new VDI is left
        # unattached to any VM.
        self.session.is_local_connection = True
        self._expect_parted_calls()
        utils.execute('mkfs', '-t', 'ext4', '/dev/mapper/fakedev1',
                      run_as_root=True)

        self.mox.ReplayAll()
        vdi_ref = vm_utils._generate_disk(self.session, {"uuid": "fake_uuid"},
            None, "2", "name", "ephemeral", 10, "ext4")
        self._check_vdi(vdi_ref, check_attached=False)


class GenerateEphemeralTestCase(VMUtilsTestBase):
    """Tests for generate_ephemeral / get_ephemeral_disk_sizes.

    Exercises how one large ephemeral size is split into several generated
    disks, with _generate_disk mocked out.
    """

    def setUp(self):
        super(GenerateEphemeralTestCase, self).setUp()
        # Plain placeholder values: the mocked-out _generate_disk only has to
        # see them passed through, never use them.
        self.session = "session"
        self.instance = "instance"
        self.vm_ref = "vm_ref"
        self.name_label = "name"
        self.ephemeral_name_label = "name ephemeral"
        self.userdevice = 4
        self.mox.StubOutWithMock(vm_utils, "_generate_disk")
self.mox.StubOutWithMock(vm_utils, "safe_destroy_vdis") def test_get_ephemeral_disk_sizes_simple(self): result = vm_utils.get_ephemeral_disk_sizes(20) expected = [20] self.assertEqual(expected, list(result)) def test_get_ephemeral_disk_sizes_three_disks_2000(self): result = vm_utils.get_ephemeral_disk_sizes(4030) expected = [2000, 2000, 30] self.assertEqual(expected, list(result)) def test_get_ephemeral_disk_sizes_two_disks_1024(self): result = vm_utils.get_ephemeral_disk_sizes(2048) expected = [1024, 1024] self.assertEqual(expected, list(result)) def _expect_generate_disk(self, size, device, name_label): vm_utils._generate_disk(self.session, self.instance, self.vm_ref, str(device), name_label, 'ephemeral', size * 1024, None).AndReturn(device) def test_generate_ephemeral_adds_one_disk(self): self._expect_generate_disk(20, self.userdevice, self.ephemeral_name_label) self.mox.ReplayAll() vm_utils.generate_ephemeral(self.session, self.instance, self.vm_ref, str(self.userdevice), self.name_label, 20) def test_generate_ephemeral_adds_multiple_disks(self): self._expect_generate_disk(2000, self.userdevice, self.ephemeral_name_label) self._expect_generate_disk(2000, self.userdevice + 1, self.ephemeral_name_label + " (1)") self._expect_generate_disk(30, self.userdevice + 2, self.ephemeral_name_label + " (2)") self.mox.ReplayAll() vm_utils.generate_ephemeral(self.session, self.instance, self.vm_ref, str(self.userdevice), self.name_label, 4030) def test_generate_ephemeral_cleans_up_on_error(self): self._expect_generate_disk(1024, self.userdevice, self.ephemeral_name_label) self._expect_generate_disk(1024, self.userdevice + 1, self.ephemeral_name_label + " (1)") vm_utils._generate_disk(self.session, self.instance, self.vm_ref, str(self.userdevice + 2), "name ephemeral (2)", 'ephemeral', units.Mi, None).AndRaise(exception.NovaException) vm_utils.safe_destroy_vdis(self.session, [4, 5]) self.mox.ReplayAll() self.assertRaises(exception.NovaException, vm_utils.generate_ephemeral, 
                          self.session, self.instance, self.vm_ref,
                          str(self.userdevice), self.name_label, 4096)


class FakeFile(object):
    """Minimal file stand-in that records seek() calls for later assertion."""

    def __init__(self):
        self._file_operations = []

    def seek(self, offset):
        # Record the bound method together with its argument so tests can
        # assert on exactly which file operations happened.
        self._file_operations.append((self.seek, offset))


class StreamDiskTestCase(VMUtilsTestBase):
    """Tests for vm_utils._stream_disk with builtin open() mocked out."""

    def setUp(self):
        import __builtin__
        super(StreamDiskTestCase, self).setUp()
        self.mox.StubOutWithMock(vm_utils.utils, 'make_dev_path')
        self.mox.StubOutWithMock(vm_utils.utils, 'temporary_chown')
        self.mox.StubOutWithMock(vm_utils, '_write_partition')

        # NOTE(matelakat): This might hide the fail reason, as test runners
        # are unhappy with a mocked out open.
        self.mox.StubOutWithMock(__builtin__, 'open')
        self.image_service_func = self.mox.CreateMockAnything()

    def test_non_ami(self):
        # Non-AMI images (e.g. a kernel) are streamed starting at offset 0
        # and no partition table is written first.
        fake_file = FakeFile()

        vm_utils.utils.make_dev_path('dev').AndReturn('some_path')
        vm_utils.utils.temporary_chown(
            'some_path').AndReturn(contextified(None))
        open('some_path', 'wb').AndReturn(contextified(fake_file))
        self.image_service_func(fake_file)

        self.mox.ReplayAll()

        vm_utils._stream_disk("session", self.image_service_func,
                              vm_utils.ImageType.KERNEL, None, 'dev')

        self.assertEqual([(fake_file.seek, 0)], fake_file._file_operations)

    def test_ami_disk(self):
        # AMI-style disks get a partition written first, then the image data
        # is streamed after the MBR (seek to MBR_SIZE_BYTES).
        fake_file = FakeFile()

        vm_utils._write_partition("session", 100, 'dev')
        vm_utils.utils.make_dev_path('dev').AndReturn('some_path')
        vm_utils.utils.temporary_chown(
            'some_path').AndReturn(contextified(None))
        open('some_path', 'wb').AndReturn(contextified(fake_file))
        self.image_service_func(fake_file)

        self.mox.ReplayAll()

        vm_utils._stream_disk("session", self.image_service_func,
                              vm_utils.ImageType.DISK, 100, 'dev')

        self.assertEqual(
            [(fake_file.seek, vm_utils.MBR_SIZE_BYTES)],
            fake_file._file_operations)


class VMUtilsSRPath(VMUtilsTestBase):
    """Tests for vm_utils.get_sr_path against a stubbed XenAPI session."""

    def setUp(self):
        super(VMUtilsSRPath, self).setUp()
        self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF))
        self.fixture.config(disable_process_locking=True,
                            group='oslo_concurrency')
self.flags(instance_name_template='%d', firewall_driver='nova.virt.xenapi.firewall.' 'Dom0IptablesFirewallDriver') self.flags(connection_url='test_url', connection_password='test_pass', group='xenserver') stubs.stubout_session(self.stubs, fake.SessionBase) driver = xenapi_conn.XenAPIDriver(False) self.session = driver._session self.session.is_local_connection = False def test_defined(self): self.mox.StubOutWithMock(vm_utils, "safe_find_sr") self.mox.StubOutWithMock(self.session, "call_xenapi") vm_utils.safe_find_sr(self.session).AndReturn("sr_ref") self.session.host_ref = "host_ref" self.session.call_xenapi('PBD.get_all_records_where', 'field "host"="host_ref" and field "SR"="sr_ref"').AndReturn( {'pbd_ref': {'device_config': {'path': 'sr_path'}}}) self.mox.ReplayAll() self.assertEqual(vm_utils.get_sr_path(self.session), "sr_path") def test_default(self): self.mox.StubOutWithMock(vm_utils, "safe_find_sr") self.mox.StubOutWithMock(self.session, "call_xenapi") vm_utils.safe_find_sr(self.session).AndReturn("sr_ref") self.session.host_ref = "host_ref" self.session.call_xenapi('PBD.get_all_records_where', 'field "host"="host_ref" and field "SR"="sr_ref"').AndReturn( {'pbd_ref': {'device_config': {}}}) self.session.call_xenapi("SR.get_record", "sr_ref").AndReturn( {'uuid': 'sr_uuid', 'type': 'ext'}) self.mox.ReplayAll() self.assertEqual(vm_utils.get_sr_path(self.session), "/var/run/sr-mount/sr_uuid") class CreateKernelRamdiskTestCase(VMUtilsTestBase): def setUp(self): super(CreateKernelRamdiskTestCase, self).setUp() self.context = "context" self.session = FakeSession() self.instance = {"kernel_id": None, "ramdisk_id": None} self.name_label = "name" self.mox.StubOutWithMock(self.session, "call_plugin") self.mox.StubOutWithMock(uuid, "uuid4") self.mox.StubOutWithMock(vm_utils, "_fetch_disk_image") def test_create_kernel_and_ramdisk_no_create(self): self.mox.ReplayAll() result = vm_utils.create_kernel_and_ramdisk(self.context, self.session, self.instance, self.name_label) 
        self.assertEqual((None, None), result)

    def test_create_kernel_and_ramdisk_create_both_cached(self):
        # When both images are already cached, the create_kernel_ramdisk
        # plugin call returns a path for each, and _fetch_disk_image is never
        # needed.
        kernel_id = "kernel"
        ramdisk_id = "ramdisk"
        self.instance["kernel_id"] = kernel_id
        self.instance["ramdisk_id"] = ramdisk_id

        args_kernel = {}
        args_kernel['cached-image'] = kernel_id
        args_kernel['new-image-uuid'] = "fake_uuid1"
        uuid.uuid4().AndReturn("fake_uuid1")
        self.session.call_plugin('kernel', 'create_kernel_ramdisk',
                                 args_kernel).AndReturn("k")

        args_ramdisk = {}
        args_ramdisk['cached-image'] = ramdisk_id
        args_ramdisk['new-image-uuid'] = "fake_uuid2"
        uuid.uuid4().AndReturn("fake_uuid2")
        self.session.call_plugin('kernel', 'create_kernel_ramdisk',
                                 args_ramdisk).AndReturn("r")

        self.mox.ReplayAll()
        result = vm_utils.create_kernel_and_ramdisk(self.context,
                    self.session, self.instance, self.name_label)
        self.assertEqual(("k", "r"), result)

    def test_create_kernel_and_ramdisk_create_kernel_not_cached(self):
        # A cache miss (plugin returns "") falls back to fetching the kernel
        # image; no ramdisk_id is set, so the ramdisk slot stays None.
        kernel_id = "kernel"
        self.instance["kernel_id"] = kernel_id

        args_kernel = {}
        args_kernel['cached-image'] = kernel_id
        args_kernel['new-image-uuid'] = "fake_uuid1"
        uuid.uuid4().AndReturn("fake_uuid1")
        self.session.call_plugin('kernel', 'create_kernel_ramdisk',
                                 args_kernel).AndReturn("")

        kernel = {"kernel": {"file": "k"}}
        vm_utils._fetch_disk_image(self.context, self.session, self.instance,
                    self.name_label, kernel_id, 0).AndReturn(kernel)

        self.mox.ReplayAll()
        result = vm_utils.create_kernel_and_ramdisk(self.context,
                    self.session, self.instance, self.name_label)
        self.assertEqual(("k", None), result)


class ScanSrTestCase(VMUtilsTestBase):
    """Tests for vm_utils._scan_sr / scan_default_sr retry behaviour."""

    @mock.patch.object(vm_utils, "_scan_sr")
    @mock.patch.object(vm_utils, "safe_find_sr")
    def test_scan_default_sr(self, mock_safe_find_sr, mock_scan_sr):
        # scan_default_sr finds the default SR, scans it, and returns its ref.
        mock_safe_find_sr.return_value = "sr_ref"

        self.assertEqual("sr_ref", vm_utils.scan_default_sr("fake_session"))

        mock_scan_sr.assert_called_once_with("fake_session", "sr_ref")

    def test_scan_sr_works(self):
        session = mock.Mock()

        vm_utils._scan_sr(session, "sr_ref")
session.call_xenapi.assert_called_once_with('SR.scan', "sr_ref") def test_scan_sr_unknown_error_fails_once(self): session = mock.Mock() session.call_xenapi.side_effect = test.TestingException self.assertRaises(test.TestingException, vm_utils._scan_sr, session, "sr_ref") session.call_xenapi.assert_called_once_with('SR.scan', "sr_ref") @mock.patch.object(greenthread, 'sleep') def test_scan_sr_known_error_retries_then_throws(self, mock_sleep): session = mock.Mock() class FakeException(Exception): details = ['SR_BACKEND_FAILURE_40', "", "", ""] session.XenAPI.Failure = FakeException session.call_xenapi.side_effect = FakeException self.assertRaises(FakeException, vm_utils._scan_sr, session, "sr_ref") session.call_xenapi.assert_called_with('SR.scan', "sr_ref") self.assertEqual(4, session.call_xenapi.call_count) mock_sleep.assert_has_calls([mock.call(2), mock.call(4), mock.call(8)]) @mock.patch.object(greenthread, 'sleep') def test_scan_sr_known_error_retries_then_succeeds(self, mock_sleep): session = mock.Mock() class FakeException(Exception): details = ['SR_BACKEND_FAILURE_40', "", "", ""] session.XenAPI.Failure = FakeException def fake_call_xenapi(*args): fake_call_xenapi.count += 1 if fake_call_xenapi.count != 2: raise FakeException() fake_call_xenapi.count = 0 session.call_xenapi.side_effect = fake_call_xenapi vm_utils._scan_sr(session, "sr_ref") session.call_xenapi.assert_called_with('SR.scan', "sr_ref") self.assertEqual(2, session.call_xenapi.call_count) mock_sleep.assert_called_once_with(2) @mock.patch.object(flavors, 'extract_flavor', return_value={ 'memory_mb': 1024, 'vcpus': 1, 'vcpu_weight': 1.0, }) class CreateVmTestCase(VMUtilsTestBase): def test_vss_provider(self, mock_extract): self.flags(vcpu_pin_set="2,3") session = _get_fake_session() instance = objects.Instance(uuid="uuid", os_type="windows", system_metadata={}) with mock.patch.object(instance, 'get_flavor') as get: get.return_value = objects.Flavor._from_db_object( None, objects.Flavor(), 
test_flavor.fake_flavor) vm_utils.create_vm(session, instance, "label", "kernel", "ramdisk") vm_rec = { 'VCPUs_params': {'cap': '0', 'mask': '2,3', 'weight': '1'}, 'PV_args': '', 'memory_static_min': '0', 'ha_restart_priority': '', 'HVM_boot_policy': 'BIOS order', 'PV_bootloader': '', 'tags': [], 'VCPUs_max': '4', 'memory_static_max': '1073741824', 'actions_after_shutdown': 'destroy', 'memory_dynamic_max': '1073741824', 'user_version': '0', 'xenstore_data': {'vm-data/allowvssprovider': 'false'}, 'blocked_operations': {}, 'is_a_template': False, 'name_description': '', 'memory_dynamic_min': '1073741824', 'actions_after_crash': 'destroy', 'memory_target': '1073741824', 'PV_ramdisk': '', 'PV_bootloader_args': '', 'PCI_bus': '', 'other_config': {'nova_uuid': 'uuid'}, 'name_label': 'label', 'actions_after_reboot': 'restart', 'VCPUs_at_startup': '4', 'HVM_boot_params': {'order': 'dc'}, 'platform': {'nx': 'true', 'pae': 'true', 'apic': 'true', 'timeoffset': '0', 'viridian': 'true', 'acpi': 'true'}, 'PV_legacy_args': '', 'PV_kernel': '', 'affinity': '', 'recommendations': '', 'ha_always_run': False } session.call_xenapi.assert_called_once_with("VM.create", vm_rec) def test_invalid_cpu_mask_raises(self, mock_extract): self.flags(vcpu_pin_set="asdf") session = mock.Mock() instance = objects.Instance(uuid=str(uuid.uuid4()), system_metadata={}) with mock.patch.object(instance, 'get_flavor') as get: get.return_value = objects.Flavor._from_db_object( None, objects.Flavor(), test_flavor.fake_flavor) self.assertRaises(exception.Invalid, vm_utils.create_vm, session, instance, "label", "kernel", "ramdisk") def test_destroy_vm(self, mock_extract): session = mock.Mock() instance = objects.Instance(uuid=str(uuid.uuid4())) vm_utils.destroy_vm(session, instance, "vm_ref") session.VM.destroy.assert_called_once_with("vm_ref") def test_destroy_vm_silently_fails(self, mock_extract): session = mock.Mock() exc = test.TestingException() session.XenAPI.Failure = test.TestingException 
session.VM.destroy.side_effect = exc instance = objects.Instance(uuid=str(uuid.uuid4())) vm_utils.destroy_vm(session, instance, "vm_ref") session.VM.destroy.assert_called_once_with("vm_ref") class DetermineVmModeTestCase(VMUtilsTestBase): def _fake_object(self, updates): return fake_instance.fake_instance_obj(None, **updates) def test_determine_vm_mode_returns_xen_mode(self): instance = self._fake_object({"vm_mode": "xen"}) self.assertEqual(vm_mode.XEN, vm_utils.determine_vm_mode(instance, None)) def test_determine_vm_mode_returns_hvm_mode(self): instance = self._fake_object({"vm_mode": "hvm"}) self.assertEqual(vm_mode.HVM, vm_utils.determine_vm_mode(instance, None)) def test_determine_vm_mode_returns_xen_for_linux(self): instance = self._fake_object({"vm_mode": None, "os_type": "linux"}) self.assertEqual(vm_mode.XEN, vm_utils.determine_vm_mode(instance, None)) def test_determine_vm_mode_returns_hvm_for_windows(self): instance = self._fake_object({"vm_mode": None, "os_type": "windows"}) self.assertEqual(vm_mode.HVM, vm_utils.determine_vm_mode(instance, None)) def test_determine_vm_mode_returns_hvm_by_default(self): instance = self._fake_object({"vm_mode": None, "os_type": None}) self.assertEqual(vm_mode.HVM, vm_utils.determine_vm_mode(instance, None)) def test_determine_vm_mode_returns_xen_for_VHD(self): instance = self._fake_object({"vm_mode": None, "os_type": None}) self.assertEqual(vm_mode.XEN, vm_utils.determine_vm_mode(instance, vm_utils.ImageType.DISK_VHD)) def test_determine_vm_mode_returns_xen_for_DISK(self): instance = self._fake_object({"vm_mode": None, "os_type": None}) self.assertEqual(vm_mode.XEN, vm_utils.determine_vm_mode(instance, vm_utils.ImageType.DISK)) class CallXenAPIHelpersTestCase(VMUtilsTestBase): def test_vm_get_vbd_refs(self): session = mock.Mock() session.call_xenapi.return_value = "foo" self.assertEqual("foo", vm_utils._vm_get_vbd_refs(session, "vm_ref")) session.call_xenapi.assert_called_once_with("VM.get_VBDs", "vm_ref") def 
test_vbd_get_rec(self): session = mock.Mock() session.call_xenapi.return_value = "foo" self.assertEqual("foo", vm_utils._vbd_get_rec(session, "vbd_ref")) session.call_xenapi.assert_called_once_with("VBD.get_record", "vbd_ref") def test_vdi_get_rec(self): session = mock.Mock() session.call_xenapi.return_value = "foo" self.assertEqual("foo", vm_utils._vdi_get_rec(session, "vdi_ref")) session.call_xenapi.assert_called_once_with("VDI.get_record", "vdi_ref") def test_vdi_snapshot(self): session = mock.Mock() session.call_xenapi.return_value = "foo" self.assertEqual("foo", vm_utils._vdi_snapshot(session, "vdi_ref")) session.call_xenapi.assert_called_once_with("VDI.snapshot", "vdi_ref", {}) def test_vdi_get_virtual_size(self): session = mock.Mock() session.call_xenapi.return_value = "123" self.assertEqual(123, vm_utils._vdi_get_virtual_size(session, "ref")) session.call_xenapi.assert_called_once_with("VDI.get_virtual_size", "ref") @mock.patch.object(vm_utils, '_get_resize_func_name') def test_vdi_resize(self, mock_get_resize_func_name): session = mock.Mock() mock_get_resize_func_name.return_value = "VDI.fake" vm_utils._vdi_resize(session, "ref", 123) session.call_xenapi.assert_called_once_with("VDI.fake", "ref", "123") @mock.patch.object(vm_utils, '_vdi_resize') @mock.patch.object(vm_utils, '_vdi_get_virtual_size') def test_update_vdi_virtual_size_works(self, mock_get_size, mock_resize): mock_get_size.return_value = (1024 ** 3) - 1 instance = {"uuid": "a"} vm_utils.update_vdi_virtual_size("s", instance, "ref", 1) mock_get_size.assert_called_once_with("s", "ref") mock_resize.assert_called_once_with("s", "ref", 1024 ** 3) @mock.patch.object(vm_utils, '_vdi_resize') @mock.patch.object(vm_utils, '_vdi_get_virtual_size') def test_update_vdi_virtual_size_skips_resize_down(self, mock_get_size, mock_resize): mock_get_size.return_value = 1024 ** 3 instance = {"uuid": "a"} vm_utils.update_vdi_virtual_size("s", instance, "ref", 1) mock_get_size.assert_called_once_with("s", "ref") 
self.assertFalse(mock_resize.called) @mock.patch.object(vm_utils, '_vdi_resize') @mock.patch.object(vm_utils, '_vdi_get_virtual_size') def test_update_vdi_virtual_size_raise_if_disk_big(self, mock_get_size, mock_resize): mock_get_size.return_value = 1024 ** 3 + 1 instance = {"uuid": "a"} self.assertRaises(exception.ResizeError, vm_utils.update_vdi_virtual_size, "s", instance, "ref", 1) mock_get_size.assert_called_once_with("s", "ref") self.assertFalse(mock_resize.called) @mock.patch.object(vm_utils, '_vdi_get_rec') @mock.patch.object(vm_utils, '_vbd_get_rec') @mock.patch.object(vm_utils, '_vm_get_vbd_refs') class GetVdiForVMTestCase(VMUtilsTestBase): def test_get_vdi_for_vm_safely(self, vm_get_vbd_refs, vbd_get_rec, vdi_get_rec): session = "session" vm_get_vbd_refs.return_value = ["a", "b"] vbd_get_rec.return_value = {'userdevice': '0', 'VDI': 'vdi_ref'} vdi_get_rec.return_value = {} result = vm_utils.get_vdi_for_vm_safely(session, "vm_ref") self.assertEqual(('vdi_ref', {}), result) vm_get_vbd_refs.assert_called_once_with(session, "vm_ref") vbd_get_rec.assert_called_once_with(session, "a") vdi_get_rec.assert_called_once_with(session, "vdi_ref") def test_get_vdi_for_vm_safely_fails(self, vm_get_vbd_refs, vbd_get_rec, vdi_get_rec): session = "session" vm_get_vbd_refs.return_value = ["a", "b"] vbd_get_rec.return_value = {'userdevice': '0', 'VDI': 'vdi_ref'} self.assertRaises(exception.NovaException, vm_utils.get_vdi_for_vm_safely, session, "vm_ref", userdevice='1') self.assertEqual([], vdi_get_rec.call_args_list) self.assertEqual(2, len(vbd_get_rec.call_args_list)) @mock.patch.object(vm_utils, '_vdi_get_uuid') @mock.patch.object(vm_utils, '_vbd_get_rec') @mock.patch.object(vm_utils, '_vm_get_vbd_refs') class GetAllVdiForVMTestCase(VMUtilsTestBase): def _setup_get_all_vdi_uuids_for_vm(self, vm_get_vbd_refs, vbd_get_rec, vdi_get_uuid): def fake_vbd_get_rec(session, vbd_ref): return {'userdevice': vbd_ref, 'VDI': "vdi_ref_%s" % vbd_ref} def fake_vdi_get_uuid(session, 
vdi_ref): return vdi_ref vm_get_vbd_refs.return_value = ["0", "2"] vbd_get_rec.side_effect = fake_vbd_get_rec vdi_get_uuid.side_effect = fake_vdi_get_uuid def test_get_all_vdi_uuids_for_vm_works(self, vm_get_vbd_refs, vbd_get_rec, vdi_get_uuid): self._setup_get_all_vdi_uuids_for_vm(vm_get_vbd_refs, vbd_get_rec, vdi_get_uuid) result = vm_utils.get_all_vdi_uuids_for_vm('session', "vm_ref") expected = ['vdi_ref_0', 'vdi_ref_2'] self.assertEqual(expected, list(result)) def test_get_all_vdi_uuids_for_vm_finds_none(self, vm_get_vbd_refs, vbd_get_rec, vdi_get_uuid): self._setup_get_all_vdi_uuids_for_vm(vm_get_vbd_refs, vbd_get_rec, vdi_get_uuid) result = vm_utils.get_all_vdi_uuids_for_vm('session', "vm_ref", min_userdevice=1) expected = ["vdi_ref_2"] self.assertEqual(expected, list(result)) class GetAllVdisTestCase(VMUtilsTestBase): def test_get_all_vdis_in_sr(self): def fake_get_rec(record_type, ref): if ref == "2": return "vdi_rec_2" session = mock.Mock() session.call_xenapi.return_value = ["1", "2"] session.get_rec.side_effect = fake_get_rec sr_ref = "sr_ref" actual = list(vm_utils._get_all_vdis_in_sr(session, sr_ref)) self.assertEqual(actual, [('2', 'vdi_rec_2')]) session.call_xenapi.assert_called_once_with("SR.get_VDIs", sr_ref) class VDIAttachedHere(VMUtilsTestBase): @mock.patch.object(vm_utils, 'destroy_vbd') @mock.patch.object(vm_utils, '_get_this_vm_ref') @mock.patch.object(vm_utils, 'create_vbd') @mock.patch.object(vm_utils, '_remap_vbd_dev') @mock.patch.object(vm_utils, '_wait_for_device') @mock.patch.object(utils, 'execute') def test_sync_called(self, mock_execute, mock_wait_for_device, mock_remap_vbd_dev, mock_create_vbd, mock_get_this_vm_ref, mock_destroy_vbd): session = _get_fake_session() with vm_utils.vdi_attached_here(session, 'vdi_ref'): pass mock_execute.assert_called_with('sync', run_as_root=True) class SnapshotAttachedHereTestCase(VMUtilsTestBase): @mock.patch.object(vm_utils, '_snapshot_attached_here_impl') def test_snapshot_attached_here(self, 
mock_impl): def fake_impl(session, instance, vm_ref, label, userdevice, post_snapshot_callback): self.assertEqual("session", session) self.assertEqual("instance", instance) self.assertEqual("vm_ref", vm_ref) self.assertEqual("label", label) self.assertEqual('0', userdevice) self.assertIsNone(post_snapshot_callback) yield "fake" mock_impl.side_effect = fake_impl with vm_utils.snapshot_attached_here("session", "instance", "vm_ref", "label") as result: self.assertEqual("fake", result) mock_impl.assert_called_once_with("session", "instance", "vm_ref", "label", '0', None) @mock.patch.object(vm_utils, '_delete_snapshots_in_vdi_chain') @mock.patch.object(vm_utils, 'safe_destroy_vdis') @mock.patch.object(vm_utils, '_walk_vdi_chain') @mock.patch.object(vm_utils, '_wait_for_vhd_coalesce') @mock.patch.object(vm_utils, '_vdi_get_uuid') @mock.patch.object(vm_utils, '_vdi_snapshot') @mock.patch.object(vm_utils, 'get_vdi_for_vm_safely') def test_snapshot_attached_here_impl(self, mock_get_vdi_for_vm_safely, mock_vdi_snapshot, mock_vdi_get_uuid, mock_wait_for_vhd_coalesce, mock_walk_vdi_chain, mock_safe_destroy_vdis, mock_delete_snapshots_in_vdi_chain): session = "session" instance = {"uuid": "uuid"} mock_callback = mock.Mock() mock_get_vdi_for_vm_safely.return_value = ("vdi_ref", {"SR": "sr_ref", "uuid": "vdi_uuid"}) mock_vdi_snapshot.return_value = "snap_ref" mock_vdi_get_uuid.return_value = "snap_uuid" mock_walk_vdi_chain.return_value = [{"uuid": "a"}, {"uuid": "b"}] try: with vm_utils.snapshot_attached_here(session, instance, "vm_ref", "label", '2', mock_callback) as result: self.assertEqual(["a", "b"], result) raise test.TestingException() self.assertTrue(False) except test.TestingException: pass mock_get_vdi_for_vm_safely.assert_called_once_with(session, "vm_ref", '2') mock_vdi_snapshot.assert_called_once_with(session, "vdi_ref") mock_wait_for_vhd_coalesce.assert_called_once_with(session, instance, "sr_ref", "vdi_ref", ['a', 'b']) 
mock_vdi_get_uuid.assert_called_once_with(session, "snap_ref") mock_walk_vdi_chain.assert_has_calls([mock.call(session, "vdi_uuid"), mock.call(session, "snap_uuid")]) mock_callback.assert_called_once_with( task_state="image_pending_upload") mock_safe_destroy_vdis.assert_called_once_with(session, ["snap_ref"]) mock_delete_snapshots_in_vdi_chain.assert_called_once_with(session, instance, ['a', 'b'], "sr_ref") @mock.patch.object(greenthread, 'sleep') def test_wait_for_vhd_coalesce_leaf_node(self, mock_sleep): instance = {"uuid": "fake"} vm_utils._wait_for_vhd_coalesce("session", instance, "sr_ref", "vdi_ref", ["uuid"]) self.assertFalse(mock_sleep.called) @mock.patch.object(vm_utils, '_count_children') @mock.patch.object(greenthread, 'sleep') def test_wait_for_vhd_coalesce_parent_snapshot(self, mock_sleep, mock_count): mock_count.return_value = 2 instance = {"uuid": "fake"} vm_utils._wait_for_vhd_coalesce("session", instance, "sr_ref", "vdi_ref", ["uuid1", "uuid2"]) self.assertFalse(mock_sleep.called) self.assertTrue(mock_count.called) @mock.patch.object(greenthread, 'sleep') @mock.patch.object(vm_utils, '_get_vhd_parent_uuid') @mock.patch.object(vm_utils, '_count_children') @mock.patch.object(vm_utils, '_scan_sr') def test_wait_for_vhd_coalesce_raises(self, mock_scan_sr, mock_count, mock_get_vhd_parent_uuid, mock_sleep): mock_count.return_value = 1 instance = {"uuid": "fake"} self.assertRaises(exception.NovaException, vm_utils._wait_for_vhd_coalesce, "session", instance, "sr_ref", "vdi_ref", ["uuid1", "uuid2"]) self.assertTrue(mock_count.called) self.assertEqual(20, mock_sleep.call_count) self.assertEqual(20, mock_scan_sr.call_count) @mock.patch.object(greenthread, 'sleep') @mock.patch.object(vm_utils, '_get_vhd_parent_uuid') @mock.patch.object(vm_utils, '_count_children') @mock.patch.object(vm_utils, '_scan_sr') def test_wait_for_vhd_coalesce_success(self, mock_scan_sr, mock_count, mock_get_vhd_parent_uuid, mock_sleep): mock_count.return_value = 1 instance = {"uuid": 
"fake"} mock_get_vhd_parent_uuid.side_effect = ["bad", "uuid2"] vm_utils._wait_for_vhd_coalesce("session", instance, "sr_ref", "vdi_ref", ["uuid1", "uuid2"]) self.assertEqual(1, mock_sleep.call_count) self.assertEqual(2, mock_scan_sr.call_count) @mock.patch.object(vm_utils, '_get_all_vdis_in_sr') def test_count_children(self, mock_get_all_vdis_in_sr): vdis = [('child1', {'sm_config': {'vhd-parent': 'parent1'}}), ('child2', {'sm_config': {'vhd-parent': 'parent2'}}), ('child3', {'sm_config': {'vhd-parent': 'parent1'}})] mock_get_all_vdis_in_sr.return_value = vdis self.assertEqual(2, vm_utils._count_children('session', 'parent1', 'sr')) class ImportMigratedDisksTestCase(VMUtilsTestBase): @mock.patch.object(vm_utils, '_import_migrate_ephemeral_disks') @mock.patch.object(vm_utils, '_import_migrated_root_disk') def test_import_all_migrated_disks(self, mock_root, mock_ephemeral): session = "session" instance = "instance" mock_root.return_value = "root_vdi" mock_ephemeral.return_value = ["a", "b"] result = vm_utils.import_all_migrated_disks(session, instance) expected = {'root': 'root_vdi', 'ephemerals': ["a", "b"]} self.assertEqual(expected, result) mock_root.assert_called_once_with(session, instance) mock_ephemeral.assert_called_once_with(session, instance) @mock.patch.object(vm_utils, '_import_migrate_ephemeral_disks') @mock.patch.object(vm_utils, '_import_migrated_root_disk') def test_import_all_migrated_disks_import_root_false(self, mock_root, mock_ephemeral): session = "session" instance = "instance" mock_root.return_value = "root_vdi" mock_ephemeral.return_value = ["a", "b"] result = vm_utils.import_all_migrated_disks(session, instance, import_root=False) expected = {'root': None, 'ephemerals': ["a", "b"]} self.assertEqual(expected, result) self.assertEqual(0, mock_root.call_count) mock_ephemeral.assert_called_once_with(session, instance) @mock.patch.object(vm_utils, '_import_migrated_vhds') def test_import_migrated_root_disk(self, mock_migrate): 
        mock_migrate.return_value = "foo"
        instance = {"uuid": "uuid", "name": "name"}

        result = vm_utils._import_migrated_root_disk("s", instance)

        self.assertEqual("foo", result)
        mock_migrate.assert_called_once_with("s", instance, "uuid", "root",
                                             "name")

    @mock.patch.object(vm_utils, '_import_migrated_vhds')
    def test_import_migrate_ephemeral_disks(self, mock_migrate):
        # 4000GB of ephemeral space is split into two 2000GB disks, imported
        # as userdevices 4 and 5 under numbered "<uuid>_ephemeral_N" chain
        # labels.
        mock_migrate.return_value = "foo"
        instance = {"uuid": "uuid", "name": "name", "ephemeral_gb": 4000}

        result = vm_utils._import_migrate_ephemeral_disks("s", instance)

        self.assertEqual({'4': 'foo', '5': 'foo'}, result)
        expected_calls = [mock.call("s", instance, "uuid_ephemeral_1",
                                    "ephemeral", "name ephemeral (1)"),
                          mock.call("s", instance, "uuid_ephemeral_2",
                                    "ephemeral", "name ephemeral (2)")]
        self.assertEqual(expected_calls, mock_migrate.call_args_list)

    @mock.patch.object(vm_utils, '_set_vdi_info')
    @mock.patch.object(vm_utils, 'scan_default_sr')
    @mock.patch.object(vm_utils, 'get_sr_path')
    def test_import_migrated_vhds(self, mock_get_sr_path, mock_scan_sr,
                                  mock_set_info):
        # Full path through _import_migrated_vhds: the migration plugin moves
        # the VHD chain into the SR, the SR is rescanned, and the new VDI is
        # looked up by uuid and labelled.
        session = mock.Mock()
        instance = {"uuid": "uuid"}
        session.call_plugin_serialized.return_value = {"root": {"uuid": "a"}}
        session.call_xenapi.return_value = "vdi_ref"
        mock_get_sr_path.return_value = "sr_path"

        result = vm_utils._import_migrated_vhds(session, instance,
                    'chain_label', 'disk_type', 'vdi_label')

        expected = {'uuid': "a", 'ref': "vdi_ref"}
        self.assertEqual(expected, result)
        mock_get_sr_path.assert_called_once_with(session)
        session.call_plugin_serialized.assert_called_once_with('migration',
                'move_vhds_into_sr', instance_uuid='chain_label',
                sr_path='sr_path', uuid_stack=mock.ANY)
        mock_scan_sr.assert_called_once_with(session)
        session.call_xenapi.assert_called_once_with('VDI.get_by_uuid', 'a')
        mock_set_info.assert_called_once_with(session, 'vdi_ref', 'disk_type',
                    'vdi_label', 'disk_type', instance)

    def test_get_vhd_parent_uuid_rec_provided(self):
        # When a vdi_rec is supplied, _get_vhd_parent_uuid must not hit the
        # XenAPI session at all.
        session = mock.Mock()
        vdi_ref = 'vdi_ref'
        vdi_rec = {'sm_config': {}}
        self.assertIsNone(vm_utils._get_vhd_parent_uuid(session,
                                                        vdi_ref, vdi_rec))
        self.assertFalse(session.call_xenapi.called)


class MigrateVHDTestCase(VMUtilsTestBase):
    """Tests for vm_utils.migrate_vhd's transfer_vhd plugin call."""

    def _assert_transfer_called(self, session, label):
        # Shared assertion: the migration plugin's transfer_vhd was invoked
        # exactly once with the expected instance label.
        session.call_plugin_serialized.assert_called_once_with(
            'migration', 'transfer_vhd', instance_uuid=label, host="dest",
            vdi_uuid="vdi_uuid", sr_path="sr_path", seq_num=2)

    def test_migrate_vhd_root(self):
        session = mock.Mock()
        instance = {"uuid": "a"}

        vm_utils.migrate_vhd(session, instance, "vdi_uuid", "dest",
                             "sr_path", 2)

        self._assert_transfer_called(session, "a")

    def test_migrate_vhd_ephemeral(self):
        # Ephemeral transfers suffix the instance uuid with the disk number.
        session = mock.Mock()
        instance = {"uuid": "a"}

        vm_utils.migrate_vhd(session, instance, "vdi_uuid", "dest",
                             "sr_path", 2, 2)

        self._assert_transfer_called(session, "a_ephemeral_2")

    def test_migrate_vhd_converts_exceptions(self):
        # XenAPI failures from the plugin call are converted into
        # MigrationError.
        session = mock.Mock()
        session.XenAPI.Failure = test.TestingException
        session.call_plugin_serialized.side_effect = test.TestingException()
        instance = {"uuid": "a"}

        self.assertRaises(exception.MigrationError, vm_utils.migrate_vhd,
                          session, instance, "vdi_uuid", "dest", "sr_path", 2)
        self._assert_transfer_called(session, "a")


class StripBaseMirrorTestCase(VMUtilsTestBase):
    """Tests for stripping the base_mirror key from VDI sm_config."""

    def test_strip_base_mirror_from_vdi_works(self):
        session = mock.Mock()
        vm_utils._try_strip_base_mirror_from_vdi(session, "vdi_ref")
        session.call_xenapi.assert_called_once_with(
            "VDI.remove_from_sm_config", "vdi_ref", "base_mirror")

    def test_strip_base_mirror_from_vdi_hides_error(self):
        # XenAPI failures while stripping are deliberately swallowed (the
        # strip is best-effort); the call must still have been attempted.
        session = mock.Mock()
        session.XenAPI.Failure = test.TestingException
        session.call_xenapi.side_effect = test.TestingException()

        vm_utils._try_strip_base_mirror_from_vdi(session, "vdi_ref")

        session.call_xenapi.assert_called_once_with(
            "VDI.remove_from_sm_config", "vdi_ref", "base_mirror")

    @mock.patch.object(vm_utils, '_try_strip_base_mirror_from_vdi')
    def test_strip_base_mirror_from_vdis(self, mock_strip):
        # Fake session walks VM -> VBDs -> VDIs; each resolved VDI ref should
        # then get the strip helper applied.
        def call_xenapi(method, arg):
            if method == "VM.get_VBDs":
                return ['VBD_ref_1',
'VBD_ref_2'] if method == "VBD.get_VDI": return 'VDI' + arg[3:] return "Unexpected call_xenapi: %s.%s" % (method, arg) session = mock.Mock() session.call_xenapi.side_effect = call_xenapi vm_utils.strip_base_mirror_from_vdis(session, "vm_ref") expected = [mock.call('VM.get_VBDs', "vm_ref"), mock.call('VBD.get_VDI', "VBD_ref_1"), mock.call('VBD.get_VDI', "VBD_ref_2")] self.assertEqual(expected, session.call_xenapi.call_args_list) expected = [mock.call(session, "VDI_ref_1"), mock.call(session, "VDI_ref_2")] self.assertEqual(expected, mock_strip.call_args_list) class DeviceIdTestCase(VMUtilsTestBase): def test_device_id_is_none_if_not_specified_in_meta_data(self): image_meta = {} session = mock.Mock() session.product_version = (6, 1, 0) self.assertIsNone(vm_utils.get_vm_device_id(session, image_meta)) def test_get_device_id_if_hypervisor_version_is_greater_than_6_1(self): image_meta = {'xenapi_device_id': '0002'} session = mock.Mock() session.product_version = (6, 2, 0) self.assertEqual('0002', vm_utils.get_vm_device_id(session, image_meta)) session.product_version = (6, 3, 1) self.assertEqual('0002', vm_utils.get_vm_device_id(session, image_meta)) def test_raise_exception_if_device_id_not_supported_by_hyp_version(self): image_meta = {'xenapi_device_id': '0002'} session = mock.Mock() session.product_version = (6, 0) exc = self.assertRaises(exception.NovaException, vm_utils.get_vm_device_id, session, image_meta) self.assertEqual("Device id 0002 specified is not supported by " "hypervisor version (6, 0)", exc.message) session.product_version = ('6a') exc = self.assertRaises(exception.NovaException, vm_utils.get_vm_device_id, session, image_meta) self.assertEqual("Device id 0002 specified is not supported by " "hypervisor version 6a", exc.message) class CreateVmRecordTestCase(VMUtilsTestBase): @mock.patch.object(flavors, 'extract_flavor') def test_create_vm_record_linux(self, mock_extract_flavor): instance = objects.Instance(uuid="uuid123", os_type="linux") 
self._test_create_vm_record(mock_extract_flavor, instance, False) @mock.patch.object(flavors, 'extract_flavor') def test_create_vm_record_windows(self, mock_extract_flavor): instance = objects.Instance(uuid="uuid123", os_type="windows") with mock.patch.object(instance, 'get_flavor') as get: get.return_value = objects.Flavor._from_db_object( None, objects.Flavor(), test_flavor.fake_flavor) self._test_create_vm_record(mock_extract_flavor, instance, True) def _test_create_vm_record(self, mock_extract_flavor, instance, is_viridian): session = _get_fake_session() flavor = {"memory_mb": 1024, "vcpus": 1, "vcpu_weight": 2} mock_extract_flavor.return_value = flavor with mock.patch.object(instance, 'get_flavor') as get: get.return_value = objects.Flavor(memory_mb=1024, vcpus=1, vcpu_weight=2) vm_utils.create_vm(session, instance, "name", "kernel", "ramdisk", device_id="0002") is_viridian_str = str(is_viridian).lower() expected_vm_rec = { 'VCPUs_params': {'cap': '0', 'weight': '2'}, 'PV_args': '', 'memory_static_min': '0', 'ha_restart_priority': '', 'HVM_boot_policy': 'BIOS order', 'PV_bootloader': '', 'tags': [], 'VCPUs_max': '1', 'memory_static_max': '1073741824', 'actions_after_shutdown': 'destroy', 'memory_dynamic_max': '1073741824', 'user_version': '0', 'xenstore_data': {'vm-data/allowvssprovider': 'false'}, 'blocked_operations': {}, 'is_a_template': False, 'name_description': '', 'memory_dynamic_min': '1073741824', 'actions_after_crash': 'destroy', 'memory_target': '1073741824', 'PV_ramdisk': '', 'PV_bootloader_args': '', 'PCI_bus': '', 'other_config': {'nova_uuid': 'uuid123'}, 'name_label': 'name', 'actions_after_reboot': 'restart', 'VCPUs_at_startup': '1', 'HVM_boot_params': {'order': 'dc'}, 'platform': {'nx': 'true', 'pae': 'true', 'apic': 'true', 'timeoffset': '0', 'viridian': is_viridian_str, 'acpi': 'true', 'device_id': '0002'}, 'PV_legacy_args': '', 'PV_kernel': '', 'affinity': '', 'recommendations': '', 'ha_always_run': False} 
session.call_xenapi.assert_called_with('VM.create', expected_vm_rec) def test_list_vms(self): self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF)) self.fixture.config(disable_process_locking=True, group='oslo_concurrency') self.flags(instance_name_template='%d', firewall_driver='nova.virt.xenapi.firewall.' 'Dom0IptablesFirewallDriver') self.flags(connection_url='test_url', connection_password='test_pass', group='xenserver') fake.create_vm("foo1", "Halted") vm_ref = fake.create_vm("foo2", "Running") stubs.stubout_session(self.stubs, fake.SessionBase) driver = xenapi_conn.XenAPIDriver(False) result = list(vm_utils.list_vms(driver._session)) # Will have 3 VMs - but one is Dom0 and one is not running on the host self.assertEqual(len(driver._session.call_xenapi('VM.get_all')), 3) self.assertEqual(len(result), 1) result_keys = [key for (key, value) in result] self.assertIn(vm_ref, result_keys) class ChildVHDsTestCase(test.NoDBTestCase): all_vdis = [ ("my-vdi-ref", {"uuid": "my-uuid", "sm_config": {}, "is_a_snapshot": False, "other_config": {}}), ("non-parent", {"uuid": "uuid-1", "sm_config": {}, "is_a_snapshot": False, "other_config": {}}), ("diff-parent", {"uuid": "uuid-1", "sm_config": {"vhd-parent": "other-uuid"}, "is_a_snapshot": False, "other_config": {}}), ("child", {"uuid": "uuid-child", "sm_config": {"vhd-parent": "my-uuid"}, "is_a_snapshot": False, "other_config": {}}), ("child-snap", {"uuid": "uuid-child-snap", "sm_config": {"vhd-parent": "my-uuid"}, "is_a_snapshot": True, "other_config": {}}), ] @mock.patch.object(vm_utils, '_get_all_vdis_in_sr') def test_child_vhds_defaults(self, mock_get_all): mock_get_all.return_value = self.all_vdis result = vm_utils._child_vhds("session", "sr_ref", ["my-uuid"]) self.assertJsonEqual(['uuid-child', 'uuid-child-snap'], result) @mock.patch.object(vm_utils, '_get_all_vdis_in_sr') def test_child_vhds_only_snapshots(self, mock_get_all): mock_get_all.return_value = self.all_vdis result = 
vm_utils._child_vhds("session", "sr_ref", ["my-uuid"], old_snapshots_only=True) self.assertEqual(['uuid-child-snap'], result) @mock.patch.object(vm_utils, '_get_all_vdis_in_sr') def test_child_vhds_chain(self, mock_get_all): mock_get_all.return_value = self.all_vdis result = vm_utils._child_vhds("session", "sr_ref", ["my-uuid", "other-uuid"], old_snapshots_only=True) self.assertEqual(['uuid-child-snap'], result) def test_is_vdi_a_snapshot_works(self): vdi_rec = {"is_a_snapshot": True, "other_config": {}} self.assertTrue(vm_utils._is_vdi_a_snapshot(vdi_rec)) def test_is_vdi_a_snapshot_base_images_false(self): vdi_rec = {"is_a_snapshot": True, "other_config": {"image-id": "fake"}} self.assertFalse(vm_utils._is_vdi_a_snapshot(vdi_rec)) def test_is_vdi_a_snapshot_false_for_non_snapshot(self): vdi_rec = {"is_a_snapshot": False, "other_config": {}} self.assertFalse(vm_utils._is_vdi_a_snapshot(vdi_rec)) class RemoveOldSnapshotsTestCase(test.NoDBTestCase): @mock.patch.object(vm_utils, 'get_vdi_for_vm_safely') @mock.patch.object(vm_utils, '_walk_vdi_chain') @mock.patch.object(vm_utils, '_delete_snapshots_in_vdi_chain') def test_remove_old_snapshots(self, mock_delete, mock_walk, mock_get): instance = {"uuid": "fake"} mock_get.return_value = ("ref", {"uuid": "vdi", "SR": "sr_ref"}) mock_walk.return_value = [{"uuid": "uuid1"}, {"uuid": "uuid2"}] vm_utils.remove_old_snapshots("session", instance, "vm_ref") mock_delete.assert_called_once_with("session", instance, ["uuid1", "uuid2"], "sr_ref") mock_get.assert_called_once_with("session", "vm_ref") mock_walk.assert_called_once_with("session", "vdi") @mock.patch.object(vm_utils, '_child_vhds') def test_delete_snapshots_in_vdi_chain_no_chain(self, mock_child): instance = {"uuid": "fake"} vm_utils._delete_snapshots_in_vdi_chain("session", instance, ["uuid"], "sr") self.assertFalse(mock_child.called) @mock.patch.object(vm_utils, '_child_vhds') def test_delete_snapshots_in_vdi_chain_no_snapshots(self, mock_child): instance = {"uuid": 
"fake"} mock_child.return_value = [] vm_utils._delete_snapshots_in_vdi_chain("session", instance, ["uuid1", "uuid2"], "sr") mock_child.assert_called_once_with("session", "sr", ["uuid2"], old_snapshots_only=True) @mock.patch.object(vm_utils, '_scan_sr') @mock.patch.object(vm_utils, 'safe_destroy_vdis') @mock.patch.object(vm_utils, '_child_vhds') def test_delete_snapshots_in_vdi_chain_calls_destroy(self, mock_child, mock_destroy, mock_scan): instance = {"uuid": "fake"} mock_child.return_value = ["suuid1", "suuid2"] session = mock.Mock() session.VDI.get_by_uuid.side_effect = ["ref1", "ref2"] vm_utils._delete_snapshots_in_vdi_chain(session, instance, ["uuid1", "uuid2"], "sr") mock_child.assert_called_once_with(session, "sr", ["uuid2"], old_snapshots_only=True) session.VDI.get_by_uuid.assert_has_calls([ mock.call("suuid1"), mock.call("suuid2")]) mock_destroy.assert_called_once_with(session, ["ref1", "ref2"]) mock_scan.assert_called_once_with(session, "sr") class ResizeFunctionTestCase(test.NoDBTestCase): def _call_get_resize_func_name(self, brand, version): session = mock.Mock() session.product_brand = brand session.product_version = version return vm_utils._get_resize_func_name(session) def _test_is_resize(self, brand, version): result = self._call_get_resize_func_name(brand, version) self.assertEqual("VDI.resize", result) def _test_is_resize_online(self, brand, version): result = self._call_get_resize_func_name(brand, version) self.assertEqual("VDI.resize_online", result) def test_xenserver_5_5(self): self._test_is_resize_online("XenServer", (5, 5, 0)) def test_xenserver_6_0(self): self._test_is_resize("XenServer", (6, 0, 0)) def test_xcp_1_1(self): self._test_is_resize_online("XCP", (1, 1, 0)) def test_xcp_1_2(self): self._test_is_resize("XCP", (1, 2, 0)) def test_xcp_2_0(self): self._test_is_resize("XCP", (2, 0, 0)) def test_random_brand(self): self._test_is_resize("asfd", (1, 1, 0)) def test_default(self): self._test_is_resize(None, None) def test_empty(self): 
self._test_is_resize("", "") def test_bad_version(self): self._test_is_resize("XenServer", "asdf") class VMInfoTests(VMUtilsTestBase): def setUp(self): super(VMInfoTests, self).setUp() self.session = mock.Mock() def test_get_power_state_valid(self): # Save on test setup calls by having these simple tests in one method self.session.call_xenapi.return_value = "Running" self.assertEqual(vm_utils.get_power_state(self.session, "ref"), power_state.RUNNING) self.session.call_xenapi.return_value = "Halted" self.assertEqual(vm_utils.get_power_state(self.session, "ref"), power_state.SHUTDOWN) self.session.call_xenapi.return_value = "Paused" self.assertEqual(vm_utils.get_power_state(self.session, "ref"), power_state.PAUSED) self.session.call_xenapi.return_value = "Suspended" self.assertEqual(vm_utils.get_power_state(self.session, "ref"), power_state.SUSPENDED) self.session.call_xenapi.return_value = "Crashed" self.assertEqual(vm_utils.get_power_state(self.session, "ref"), power_state.CRASHED) def test_get_power_state_invalid(self): self.session.call_xenapi.return_value = "Invalid" self.assertRaises(KeyError, vm_utils.get_power_state, self.session, "ref") _XAPI_record = {'power_state': 'Running', 'memory_static_max': str(10 << 10), 'memory_dynamic_max': str(9 << 10), 'VCPUs_max': '5'} def test_compile_info(self): def call_xenapi(method, *args): if method.startswith('VM.get_') and args[0] == 'dummy': return self._XAPI_record[method[7:]] self.session.call_xenapi.side_effect = call_xenapi info = vm_utils.compile_info(self.session, "dummy") self.assertEqual(hardware.InstanceInfo(state=power_state.RUNNING, max_mem_kb=10L, mem_kb=9L, num_cpu='5', cpu_time_ns=0), info)
unknown
codeparrot/codeparrot-clean
export const runtime = 'edge'; export const dynamic = 'force-dynamic'; export const dynamicParams = false; export const fetchCache = 'force-no-store'; export const revalidate = 1;
javascript
github
https://github.com/vercel/next.js
crates/next-custom-transforms/tests/errors/react-server-components/server-graph/cache-components/output.js
//===----------------------------------------------------------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// #include "ClangTidyOptions.h" #include "ClangTidyModule.h" #include "clang/Basic/DiagnosticIDs.h" #include "clang/Basic/LLVM.h" #include "llvm/ADT/SmallString.h" #include "llvm/ADT/StringExtras.h" #include "llvm/Support/Debug.h" #include "llvm/Support/ErrorOr.h" #include "llvm/Support/MemoryBufferRef.h" #include "llvm/Support/Path.h" #include "llvm/Support/YAMLTraits.h" #include <algorithm> #include <optional> #include <utility> #define DEBUG_TYPE "clang-tidy-options" using clang::tidy::ClangTidyOptions; using clang::tidy::FileFilter; using OptionsSource = clang::tidy::ClangTidyOptionsProvider::OptionsSource; LLVM_YAML_IS_FLOW_SEQUENCE_VECTOR(FileFilter) LLVM_YAML_IS_FLOW_SEQUENCE_VECTOR(FileFilter::LineRange) namespace llvm::yaml { // Map std::pair<int, int> to a JSON array of size 2. template <> struct SequenceTraits<FileFilter::LineRange> { static size_t size(IO &IO, FileFilter::LineRange &Range) { return Range.first == 0 ? 0 : Range.second == 0 ? 1 : 2; } static unsigned &element(IO &IO, FileFilter::LineRange &Range, size_t Index) { if (Index > 1) IO.setError("Too many elements in line range."); return Index == 0 ? 
Range.first : Range.second; } }; template <> struct MappingTraits<FileFilter> { static void mapping(IO &IO, FileFilter &File) { IO.mapRequired("name", File.Name); IO.mapOptional("lines", File.LineRanges); } static std::string validate(IO &Io, FileFilter &File) { if (File.Name.empty()) return "No file name specified"; for (const FileFilter::LineRange &Range : File.LineRanges) if (Range.first <= 0 || Range.second <= 0) return "Invalid line range"; return ""; } }; template <> struct MappingTraits<ClangTidyOptions::StringPair> { static void mapping(IO &IO, ClangTidyOptions::StringPair &KeyValue) { IO.mapRequired("key", KeyValue.first); IO.mapRequired("value", KeyValue.second); } }; namespace { struct NOptionMap { NOptionMap(IO &) {} NOptionMap(IO &, const ClangTidyOptions::OptionMap &OptionMap) { Options.reserve(OptionMap.size()); for (const auto &KeyValue : OptionMap) Options.emplace_back(std::string(KeyValue.getKey()), KeyValue.getValue().Value); } ClangTidyOptions::OptionMap denormalize(IO &) { ClangTidyOptions::OptionMap Map; for (const auto &KeyValue : Options) Map[KeyValue.first] = ClangTidyOptions::ClangTidyValue(KeyValue.second); return Map; } std::vector<ClangTidyOptions::StringPair> Options; }; } // namespace template <> void yamlize(IO &IO, ClangTidyOptions::OptionMap &Val, bool, EmptyContext &Ctx) { if (IO.outputting()) { // Ensure check options are sorted std::vector<std::pair<StringRef, StringRef>> SortedOptions; SortedOptions.reserve(Val.size()); for (auto &Key : Val) SortedOptions.emplace_back(Key.getKey(), Key.getValue().Value); std::sort(SortedOptions.begin(), SortedOptions.end()); IO.beginMapping(); // Only output as a map for (auto &Option : SortedOptions) { bool UseDefault = false; void *SaveInfo = nullptr; // Requires 'llvm::yaml::IO' to accept 'StringRef' // NOLINTNEXTLINE(bugprone-suspicious-stringview-data-usage) IO.preflightKey(Option.first.data(), true, false, UseDefault, SaveInfo); IO.scalarString(Option.second, needsQuotes(Option.second)); 
IO.postflightKey(SaveInfo); } IO.endMapping(); } else { // We need custom logic here to support the old method of specifying check // options using a list of maps containing key and value keys. auto &I = reinterpret_cast<Input &>(IO); if (isa<SequenceNode>(I.getCurrentNode())) { MappingNormalization<NOptionMap, ClangTidyOptions::OptionMap> NOpts(IO, Val); EmptyContext Ctx; yamlize(IO, NOpts->Options, true, Ctx); } else if (isa<MappingNode>(I.getCurrentNode())) { IO.beginMapping(); for (const StringRef Key : IO.keys()) { // Requires 'llvm::yaml::IO' to accept 'StringRef' // NOLINTNEXTLINE(bugprone-suspicious-stringview-data-usage) IO.mapRequired(Key.data(), Val[Key].Value); } IO.endMapping(); } else { IO.setError("expected a sequence or map"); } } } namespace { struct MultiLineString { std::string &S; }; } // namespace template <> struct BlockScalarTraits<MultiLineString> { static void output(const MultiLineString &S, void *Ctxt, raw_ostream &OS) { OS << S.S; } static StringRef input(StringRef Str, void *Ctxt, MultiLineString &S) { S.S = Str; return ""; } }; template <> struct ScalarEnumerationTraits<clang::DiagnosticIDs::Level> { static void enumeration(IO &IO, clang::DiagnosticIDs::Level &Level) { IO.enumCase(Level, "Warning", clang::DiagnosticIDs::Level::Warning); IO.enumCase(Level, "Note", clang::DiagnosticIDs::Level::Note); } }; template <> struct SequenceElementTraits<ClangTidyOptions::CustomCheckDiag> { // NOLINTNEXTLINE(readability-identifier-naming) Defined by YAMLTraits.h static constexpr bool flow = false; }; template <> struct MappingTraits<ClangTidyOptions::CustomCheckDiag> { static void mapping(IO &IO, ClangTidyOptions::CustomCheckDiag &D) { IO.mapRequired("BindName", D.BindName); MultiLineString MLS{D.Message}; IO.mapRequired("Message", MLS); IO.mapOptional("Level", D.Level); } }; template <> struct SequenceElementTraits<ClangTidyOptions::CustomCheckValue> { // NOLINTNEXTLINE(readability-identifier-naming) Defined by YAMLTraits.h static constexpr bool 
flow = false; }; template <> struct MappingTraits<ClangTidyOptions::CustomCheckValue> { static void mapping(IO &IO, ClangTidyOptions::CustomCheckValue &V) { IO.mapRequired("Name", V.Name); MultiLineString MLS{V.Query}; IO.mapRequired("Query", MLS); IO.mapRequired("Diagnostic", V.Diags); } }; namespace { struct GlobListVariant { std::optional<std::string> AsString; std::optional<std::vector<std::string>> AsVector; }; } // namespace template <> void yamlize(IO &IO, GlobListVariant &Val, bool, EmptyContext &Ctx) { if (!IO.outputting()) { // Special case for reading from YAML // Must support reading from both a string or a list auto &I = reinterpret_cast<Input &>(IO); if (isa<ScalarNode, BlockScalarNode>(I.getCurrentNode())) { Val.AsString = std::string(); yamlize(IO, *Val.AsString, true, Ctx); } else if (isa<SequenceNode>(I.getCurrentNode())) { Val.AsVector = std::vector<std::string>(); yamlize(IO, *Val.AsVector, true, Ctx); } else { IO.setError("expected string or sequence"); } } } static void mapGlobList(IO &IO, std::optional<std::string> &GlobList, StringRef Key) { if (IO.outputting()) { // Output always a string IO.mapOptional(Key, GlobList); } else { // Input as either a string or a list GlobListVariant GlobListAsVariant; IO.mapOptional(Key, GlobListAsVariant); if (GlobListAsVariant.AsString) GlobList = GlobListAsVariant.AsString; else if (GlobListAsVariant.AsVector) GlobList = llvm::join(*GlobListAsVariant.AsVector, ","); } } template <> struct MappingTraits<ClangTidyOptions> { static void mapping(IO &IO, ClangTidyOptions &Options) { mapGlobList(IO, Options.Checks, "Checks"); mapGlobList(IO, Options.WarningsAsErrors, "WarningsAsErrors"); IO.mapOptional("HeaderFileExtensions", Options.HeaderFileExtensions); IO.mapOptional("ImplementationFileExtensions", Options.ImplementationFileExtensions); IO.mapOptional("HeaderFilterRegex", Options.HeaderFilterRegex); IO.mapOptional("ExcludeHeaderFilterRegex", Options.ExcludeHeaderFilterRegex); IO.mapOptional("FormatStyle", 
Options.FormatStyle); IO.mapOptional("User", Options.User); IO.mapOptional("CheckOptions", Options.CheckOptions); IO.mapOptional("ExtraArgs", Options.ExtraArgs); IO.mapOptional("ExtraArgsBefore", Options.ExtraArgsBefore); IO.mapOptional("RemovedArgs", Options.RemovedArgs); IO.mapOptional("InheritParentConfig", Options.InheritParentConfig); IO.mapOptional("UseColor", Options.UseColor); IO.mapOptional("SystemHeaders", Options.SystemHeaders); IO.mapOptional("CustomChecks", Options.CustomChecks); } }; } // namespace llvm::yaml namespace clang::tidy { ClangTidyOptions ClangTidyOptions::getDefaults() { ClangTidyOptions Options; Options.Checks = ""; Options.WarningsAsErrors = ""; Options.HeaderFileExtensions = {"", "h", "hh", "hpp", "hxx"}; Options.ImplementationFileExtensions = {"c", "cc", "cpp", "cxx"}; Options.HeaderFilterRegex = ".*"; Options.ExcludeHeaderFilterRegex = ""; Options.SystemHeaders = false; Options.FormatStyle = "none"; Options.User = std::nullopt; Options.RemovedArgs = std::nullopt; for (const ClangTidyModuleRegistry::entry &Module : ClangTidyModuleRegistry::entries()) Options.mergeWith(Module.instantiate()->getModuleOptions(), 0); return Options; } template <typename T> static void mergeVectors(std::optional<T> &Dest, const std::optional<T> &Src) { if (Src) { if (Dest) Dest->insert(Dest->end(), Src->begin(), Src->end()); else Dest = Src; } } static void mergeCommaSeparatedLists(std::optional<std::string> &Dest, const std::optional<std::string> &Src) { if (Src) Dest = (Dest && !Dest->empty() ? 
*Dest + "," : "") + *Src; } template <typename T> static void overrideValue(std::optional<T> &Dest, const std::optional<T> &Src) { if (Src) Dest = Src; } ClangTidyOptions &ClangTidyOptions::mergeWith(const ClangTidyOptions &Other, unsigned Order) { mergeCommaSeparatedLists(Checks, Other.Checks); mergeCommaSeparatedLists(WarningsAsErrors, Other.WarningsAsErrors); overrideValue(HeaderFileExtensions, Other.HeaderFileExtensions); overrideValue(ImplementationFileExtensions, Other.ImplementationFileExtensions); overrideValue(HeaderFilterRegex, Other.HeaderFilterRegex); overrideValue(ExcludeHeaderFilterRegex, Other.ExcludeHeaderFilterRegex); overrideValue(SystemHeaders, Other.SystemHeaders); overrideValue(FormatStyle, Other.FormatStyle); overrideValue(User, Other.User); overrideValue(UseColor, Other.UseColor); mergeVectors(ExtraArgs, Other.ExtraArgs); mergeVectors(ExtraArgsBefore, Other.ExtraArgsBefore); mergeVectors(RemovedArgs, Other.RemovedArgs); // FIXME: how to handle duplicate names check? 
mergeVectors(CustomChecks, Other.CustomChecks); for (const auto &KeyValue : Other.CheckOptions) { CheckOptions.insert_or_assign( KeyValue.getKey(), ClangTidyValue(KeyValue.getValue().Value, KeyValue.getValue().Priority + Order)); } return *this; } ClangTidyOptions ClangTidyOptions::merge(const ClangTidyOptions &Other, unsigned Order) const { ClangTidyOptions Result = *this; Result.mergeWith(Other, Order); return Result; } ClangTidyOptions ClangTidyOptionsProvider::getOptions(llvm::StringRef FileName) { ClangTidyOptions Result; unsigned Priority = 0; for (auto &Source : getRawOptions(FileName)) Result.mergeWith(Source.first, ++Priority); return Result; } std::vector<OptionsSource> DefaultOptionsProvider::getRawOptions(llvm::StringRef FileName) { std::vector<OptionsSource> Result; Result.emplace_back(DefaultOptions, OptionsSourceTypeDefaultBinary); return Result; } ConfigOptionsProvider::ConfigOptionsProvider( ClangTidyGlobalOptions GlobalOptions, ClangTidyOptions DefaultOptions, ClangTidyOptions ConfigOptions, ClangTidyOptions OverrideOptions, llvm::IntrusiveRefCntPtr<llvm::vfs::FileSystem> FS) : FileOptionsBaseProvider(std::move(GlobalOptions), std::move(DefaultOptions), std::move(OverrideOptions), std::move(FS)), ConfigOptions(std::move(ConfigOptions)) {} std::vector<OptionsSource> ConfigOptionsProvider::getRawOptions(llvm::StringRef FileName) { std::vector<OptionsSource> RawOptions = DefaultOptionsProvider::getRawOptions(FileName); if (ConfigOptions.InheritParentConfig.value_or(false)) { LLVM_DEBUG(llvm::dbgs() << "Getting options for file " << FileName << "...\n"); llvm::ErrorOr<llvm::SmallString<128>> AbsoluteFilePath = getNormalizedAbsolutePath(FileName); if (AbsoluteFilePath) addRawFileOptions(AbsoluteFilePath->str(), RawOptions); } RawOptions.emplace_back(ConfigOptions, OptionsSourceTypeConfigCommandLineOption); RawOptions.emplace_back(OverrideOptions, OptionsSourceTypeCheckCommandLineOption); return RawOptions; } 
FileOptionsBaseProvider::FileOptionsBaseProvider( ClangTidyGlobalOptions GlobalOptions, ClangTidyOptions DefaultOptions, ClangTidyOptions OverrideOptions, llvm::IntrusiveRefCntPtr<llvm::vfs::FileSystem> VFS) : DefaultOptionsProvider(std::move(GlobalOptions), std::move(DefaultOptions)), OverrideOptions(std::move(OverrideOptions)), FS(std::move(VFS)) { if (!FS) FS = llvm::vfs::getRealFileSystem(); ConfigHandlers.emplace_back(".clang-tidy", parseConfiguration); } FileOptionsBaseProvider::FileOptionsBaseProvider( ClangTidyGlobalOptions GlobalOptions, ClangTidyOptions DefaultOptions, ClangTidyOptions OverrideOptions, FileOptionsBaseProvider::ConfigFileHandlers ConfigHandlers) : DefaultOptionsProvider(std::move(GlobalOptions), std::move(DefaultOptions)), OverrideOptions(std::move(OverrideOptions)), ConfigHandlers(std::move(ConfigHandlers)) {} llvm::ErrorOr<llvm::SmallString<128>> FileOptionsBaseProvider::getNormalizedAbsolutePath(llvm::StringRef Path) { assert(FS && "FS must be set."); llvm::SmallString<128> NormalizedAbsolutePath = {Path}; const std::error_code Err = FS->makeAbsolute(NormalizedAbsolutePath); if (Err) return Err; llvm::sys::path::remove_dots(NormalizedAbsolutePath, /*remove_dot_dot=*/true); return NormalizedAbsolutePath; } void FileOptionsBaseProvider::addRawFileOptions( llvm::StringRef AbsolutePath, std::vector<OptionsSource> &CurOptions) { auto CurSize = CurOptions.size(); // Look for a suitable configuration file in all parent directories of the // file. Start with the immediate parent directory and move up. 
StringRef RootPath = llvm::sys::path::parent_path(AbsolutePath); auto MemorizedConfigFile = [this, &RootPath](StringRef CurrentPath) -> std::optional<OptionsSource> { const auto Iter = CachedOptions.Memorized.find(CurrentPath); if (Iter != CachedOptions.Memorized.end()) return CachedOptions.Storage[Iter->second]; std::optional<OptionsSource> OptionsSource = tryReadConfigFile(CurrentPath); if (OptionsSource) { const size_t Index = CachedOptions.Storage.size(); CachedOptions.Storage.emplace_back(OptionsSource.value()); while (RootPath != CurrentPath) { LLVM_DEBUG(llvm::dbgs() << "Caching configuration for path " << RootPath << ".\n"); CachedOptions.Memorized[RootPath] = Index; RootPath = llvm::sys::path::parent_path(RootPath); } CachedOptions.Memorized[CurrentPath] = Index; RootPath = llvm::sys::path::parent_path(CurrentPath); } return OptionsSource; }; for (StringRef CurrentPath = RootPath; !CurrentPath.empty(); CurrentPath = llvm::sys::path::parent_path(CurrentPath)) { if (std::optional<OptionsSource> Result = MemorizedConfigFile(CurrentPath)) { CurOptions.emplace_back(Result.value()); if (!Result->first.InheritParentConfig.value_or(false)) break; } } // Reverse order of file configs because closer configs should have higher // priority. 
std::reverse(CurOptions.begin() + CurSize, CurOptions.end()); } FileOptionsProvider::FileOptionsProvider( ClangTidyGlobalOptions GlobalOptions, ClangTidyOptions DefaultOptions, ClangTidyOptions OverrideOptions, llvm::IntrusiveRefCntPtr<llvm::vfs::FileSystem> VFS) : FileOptionsBaseProvider(std::move(GlobalOptions), std::move(DefaultOptions), std::move(OverrideOptions), std::move(VFS)) {} FileOptionsProvider::FileOptionsProvider( ClangTidyGlobalOptions GlobalOptions, ClangTidyOptions DefaultOptions, ClangTidyOptions OverrideOptions, FileOptionsBaseProvider::ConfigFileHandlers ConfigHandlers) : FileOptionsBaseProvider( std::move(GlobalOptions), std::move(DefaultOptions), std::move(OverrideOptions), std::move(ConfigHandlers)) {} // FIXME: This method has some common logic with clang::format::getStyle(). // Consider pulling out common bits to a findParentFileWithName function or // similar. std::vector<OptionsSource> FileOptionsProvider::getRawOptions(StringRef FileName) { LLVM_DEBUG(llvm::dbgs() << "Getting options for file " << FileName << "...\n"); const llvm::ErrorOr<llvm::SmallString<128>> AbsoluteFilePath = getNormalizedAbsolutePath(FileName); if (!AbsoluteFilePath) return {}; std::vector<OptionsSource> RawOptions = DefaultOptionsProvider::getRawOptions(AbsoluteFilePath->str()); addRawFileOptions(AbsoluteFilePath->str(), RawOptions); const OptionsSource CommandLineOptions( OverrideOptions, OptionsSourceTypeCheckCommandLineOption); RawOptions.push_back(CommandLineOptions); return RawOptions; } std::optional<OptionsSource> FileOptionsBaseProvider::tryReadConfigFile(StringRef Directory) { assert(!Directory.empty()); llvm::ErrorOr<llvm::vfs::Status> DirectoryStatus = FS->status(Directory); if (!DirectoryStatus || !DirectoryStatus->isDirectory()) { llvm::errs() << "Error reading configuration from " << Directory << ": directory doesn't exist.\n"; return std::nullopt; } for (const ConfigFileHandler &ConfigHandler : ConfigHandlers) { SmallString<128> 
ConfigFile(Directory); llvm::sys::path::append(ConfigFile, ConfigHandler.first); LLVM_DEBUG(llvm::dbgs() << "Trying " << ConfigFile << "...\n"); llvm::ErrorOr<llvm::vfs::Status> FileStatus = FS->status(ConfigFile); if (!FileStatus || !FileStatus->isRegularFile()) continue; llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> Text = FS->getBufferForFile(ConfigFile); if (const std::error_code EC = Text.getError()) { llvm::errs() << "Can't read " << ConfigFile << ": " << EC.message() << "\n"; continue; } // Skip empty files, e.g. files opened for writing via shell output // redirection. if ((*Text)->getBuffer().empty()) continue; llvm::ErrorOr<ClangTidyOptions> ParsedOptions = ConfigHandler.second({(*Text)->getBuffer(), ConfigFile}); if (!ParsedOptions) { if (ParsedOptions.getError()) llvm::errs() << "Error parsing " << ConfigFile << ": " << ParsedOptions.getError().message() << "\n"; continue; } return OptionsSource(*ParsedOptions, std::string(ConfigFile)); } return std::nullopt; } /// Parses -line-filter option and stores it to the \c Options. std::error_code parseLineFilter(StringRef LineFilter, clang::tidy::ClangTidyGlobalOptions &Options) { llvm::yaml::Input Input(LineFilter); Input >> Options.LineFilter; return Input.error(); } llvm::ErrorOr<ClangTidyOptions> parseConfiguration(llvm::MemoryBufferRef Config) { llvm::yaml::Input Input(Config); ClangTidyOptions Options; Input >> Options; if (Input.error()) return Input.error(); return Options; } static void diagHandlerImpl(const llvm::SMDiagnostic &Diag, void *Ctx) { (*reinterpret_cast<DiagCallback *>(Ctx))(Diag); } llvm::ErrorOr<ClangTidyOptions> parseConfigurationWithDiags(llvm::MemoryBufferRef Config, DiagCallback Handler) { llvm::yaml::Input Input(Config, nullptr, Handler ? 
diagHandlerImpl : nullptr, &Handler); ClangTidyOptions Options; Input >> Options; if (Input.error()) return Input.error(); return Options; } std::string configurationAsText(const ClangTidyOptions &Options) { std::string Text; llvm::raw_string_ostream Stream(Text); llvm::yaml::Output Output(Stream); // We use the same mapping method for input and output, so we need a non-const // reference here. ClangTidyOptions NonConstValue = Options; Output << NonConstValue; return Stream.str(); } } // namespace clang::tidy
cpp
github
https://github.com/llvm/llvm-project
clang-tools-extra/clang-tidy/ClangTidyOptions.cpp
use std::collections::BTreeMap; use anyhow::Result; use indoc::formatdoc; use turbo_rcstr::rcstr; use turbo_tasks::{ResolvedVc, Vc}; use turbo_tasks_fs::FileSystemPath; use turbopack_core::{ chunk::{AsyncModuleInfo, ChunkableModule, ChunkingContext, ModuleChunkItemIdExt}, ident::AssetIdent, module::{Module, ModuleSideEffects}, module_graph::ModuleGraph, reference::ModuleReferences, source::OptionSource, }; use turbopack_ecmascript::{ chunk::{ EcmascriptChunkItemContent, EcmascriptChunkPlaceable, EcmascriptExports, ecmascript_chunk_item, }, references::esm::{EsmExport, EsmExports}, runtime_functions::{TURBOPACK_EXPORT_NAMESPACE, TURBOPACK_IMPORT}, utils::StringifyJs, }; use super::server_utility_reference::NextServerUtilityModuleReference; #[turbo_tasks::value(shared)] pub struct NextServerUtilityModule { pub module: ResolvedVc<Box<dyn EcmascriptChunkPlaceable>>, } #[turbo_tasks::value_impl] impl NextServerUtilityModule { #[turbo_tasks::function] pub fn new(module: ResolvedVc<Box<dyn EcmascriptChunkPlaceable>>) -> Vc<Self> { NextServerUtilityModule { module }.cell() } #[turbo_tasks::function] pub fn server_path(&self) -> Vc<FileSystemPath> { self.module.ident().path() } } #[turbo_tasks::value_impl] impl Module for NextServerUtilityModule { #[turbo_tasks::function] fn ident(&self) -> Vc<AssetIdent> { self.module .ident() .with_modifier(rcstr!("Next.js server utility")) } #[turbo_tasks::function] fn source(&self) -> Vc<OptionSource> { Vc::cell(None) } #[turbo_tasks::function] async fn references(&self) -> Result<Vc<ModuleReferences>> { Ok(Vc::cell(vec![ResolvedVc::upcast( NextServerUtilityModuleReference::new(Vc::upcast(*self.module)) .to_resolved() .await?, )])) } #[turbo_tasks::function] fn side_effects(self: Vc<Self>) -> Vc<ModuleSideEffects> { // This just exports another import ModuleSideEffects::ModuleEvaluationIsSideEffectFree.cell() } } #[turbo_tasks::value_impl] impl ChunkableModule for NextServerUtilityModule { #[turbo_tasks::function] fn as_chunk_item( 
self: ResolvedVc<Self>, module_graph: ResolvedVc<ModuleGraph>, chunking_context: ResolvedVc<Box<dyn ChunkingContext>>, ) -> Vc<Box<dyn turbopack_core::chunk::ChunkItem>> { ecmascript_chunk_item(ResolvedVc::upcast(self), module_graph, chunking_context) } } #[turbo_tasks::value_impl] impl EcmascriptChunkPlaceable for NextServerUtilityModule { #[turbo_tasks::function] async fn get_exports(&self) -> Result<Vc<EcmascriptExports>> { let module_reference = ResolvedVc::upcast( NextServerUtilityModuleReference::new(Vc::upcast(*self.module)) .to_resolved() .await?, ); let mut exports = BTreeMap::new(); let default = rcstr!("default"); exports.insert( default.clone(), EsmExport::ImportedBinding(module_reference, default, false), ); Ok(EcmascriptExports::EsmExports( EsmExports { exports, star_exports: vec![module_reference], } .resolved_cell(), ) .cell()) } #[turbo_tasks::function] async fn chunk_item_content( &self, chunking_context: Vc<Box<dyn ChunkingContext>>, _module_graph: Vc<ModuleGraph>, _async_module_info: Option<Vc<AsyncModuleInfo>>, _estimated: bool, ) -> Result<Vc<EcmascriptChunkItemContent>> { let module_id = self.module.chunk_item_id(chunking_context).await?; Ok(EcmascriptChunkItemContent { inner_code: formatdoc!( r#" {TURBOPACK_EXPORT_NAMESPACE}({TURBOPACK_IMPORT}({})); "#, StringifyJs(&module_id), ) .into(), ..Default::default() } .cell()) } }
rust
github
https://github.com/vercel/next.js
crates/next-core/src/next_server_utility/server_utility_module.rs
#!/bin/sh test_description="recursive merge with directory renames" # includes checking of many corner cases, with a similar methodology to: # t6042: corner cases with renames but not criss-cross merges # t6036: corner cases with both renames and criss-cross merges # # The setup for all of them, pictorially, is: # # A # o # / \ # O o ? # \ / # o # B # # To help make it easier to follow the flow of tests, they have been # divided into sections and each test will start with a quick explanation # of what commits O, A, and B contain. # # Notation: # z/{b,c} means files z/b and z/c both exist # x/d_1 means file x/d exists with content d1. (Purpose of the # underscore notation is to differentiate different # files that might be renamed into each other's paths.) . ./test-lib.sh ########################################################################### # SECTION 1: Basic cases we should be able to handle ########################################################################### # Testcase 1a, Basic directory rename. 
# Commit O: z/{b,c} # Commit A: y/{b,c} # Commit B: z/{b,c,d,e/f} # Expected: y/{b,c,d,e/f} test_setup_1a () { git init 1a && ( cd 1a && mkdir z && echo b >z/b && echo c >z/c && git add z && test_tick && git commit -m "O" && git branch O && git branch A && git branch B && git checkout A && git mv z y && test_tick && git commit -m "A" && git checkout B && echo d >z/d && mkdir z/e && echo f >z/e/f && git add z/d z/e/f && test_tick && git commit -m "B" ) } test_expect_success '1a: Simple directory rename detection' ' test_setup_1a && ( cd 1a && git checkout A^0 && git -c merge.directoryRenames=true merge -s recursive B^0 >out && git ls-files -s >out && test_line_count = 4 out && git rev-parse >actual \ HEAD:y/b HEAD:y/c HEAD:y/d HEAD:y/e/f && git rev-parse >expect \ O:z/b O:z/c B:z/d B:z/e/f && test_cmp expect actual && git hash-object y/d >actual && git rev-parse B:z/d >expect && test_cmp expect actual && test_must_fail git rev-parse HEAD:z/d && test_must_fail git rev-parse HEAD:z/e/f && test_path_is_missing z/d && test_path_is_missing z/e/f ) ' # Testcase 1b, Merge a directory with another # Commit O: z/{b,c}, y/d # Commit A: z/{b,c,e}, y/d # Commit B: y/{b,c,d} # Expected: y/{b,c,d,e} test_setup_1b () { git init 1b && ( cd 1b && mkdir z && echo b >z/b && echo c >z/c && mkdir y && echo d >y/d && git add z y && test_tick && git commit -m "O" && git branch O && git branch A && git branch B && git checkout A && echo e >z/e && git add z/e && test_tick && git commit -m "A" && git checkout B && git mv z/b y && git mv z/c y && rmdir z && test_tick && git commit -m "B" ) } test_expect_success '1b: Merge a directory with another' ' test_setup_1b && ( cd 1b && git checkout A^0 && git -c merge.directoryRenames=true merge -s recursive B^0 && git ls-files -s >out && test_line_count = 4 out && git rev-parse >actual \ HEAD:y/b HEAD:y/c HEAD:y/d HEAD:y/e && git rev-parse >expect \ O:z/b O:z/c O:y/d A:z/e && test_cmp expect actual && test_must_fail git rev-parse HEAD:z/e ) ' # 
Testcase 1c, Transitive renaming # (Related to testcases 3a and 6d -- when should a transitive rename apply?) # (Related to testcases 9c and 9d -- can transitivity repeat?) # (Related to testcase 12b -- joint-transitivity?) # Commit O: z/{b,c}, x/d # Commit A: y/{b,c}, x/d # Commit B: z/{b,c,d} # Expected: y/{b,c,d} (because x/d -> z/d -> y/d) test_setup_1c () { git init 1c && ( cd 1c && mkdir z && echo b >z/b && echo c >z/c && mkdir x && echo d >x/d && git add z x && test_tick && git commit -m "O" && git branch O && git branch A && git branch B && git checkout A && git mv z y && test_tick && git commit -m "A" && git checkout B && git mv x/d z/d && test_tick && git commit -m "B" ) } test_expect_success '1c: Transitive renaming' ' test_setup_1c && ( cd 1c && git checkout A^0 && git -c merge.directoryRenames=true merge -s recursive B^0 >out && git ls-files -s >out && test_line_count = 3 out && git rev-parse >actual \ HEAD:y/b HEAD:y/c HEAD:y/d && git rev-parse >expect \ O:z/b O:z/c O:x/d && test_cmp expect actual && test_must_fail git rev-parse HEAD:x/d && test_must_fail git rev-parse HEAD:z/d && test_path_is_missing z/d ) ' # Testcase 1d, Directory renames (merging two directories into one new one) # cause a rename/rename(2to1) conflict # (Related to testcases 1c and 7b) # Commit O. z/{b,c}, y/{d,e} # Commit A. x/{b,c}, y/{d,e,m,wham_1} # Commit B. z/{b,c,n,wham_2}, x/{d,e} # Expected: x/{b,c,d,e,m,n}, CONFLICT:(y/wham_1 & z/wham_2 -> x/wham) # Note: y/m & z/n should definitely move into x. By the same token, both # y/wham_1 & z/wham_2 should too...giving us a conflict. 
test_setup_1d () { git init 1d && ( cd 1d && mkdir z && echo b >z/b && echo c >z/c && mkdir y && echo d >y/d && echo e >y/e && git add z y && test_tick && git commit -m "O" && git branch O && git branch A && git branch B && git checkout A && git mv z x && echo m >y/m && echo wham1 >y/wham && git add y && test_tick && git commit -m "A" && git checkout B && git mv y x && echo n >z/n && echo wham2 >z/wham && git add z && test_tick && git commit -m "B" ) } test_expect_success '1d: Directory renames cause a rename/rename(2to1) conflict' ' test_setup_1d && ( cd 1d && git checkout A^0 && test_must_fail git -c merge.directoryRenames=true merge -s recursive B^0 >out && test_grep "CONFLICT (\(.*\)/\1)" out && git ls-files -s >out && test_line_count = 8 out && git ls-files -u >out && test_line_count = 2 out && git ls-files -o >out && test_line_count = 1 out && git rev-parse >actual \ :0:x/b :0:x/c :0:x/d :0:x/e :0:x/m :0:x/n && git rev-parse >expect \ O:z/b O:z/c O:y/d O:y/e A:y/m B:z/n && test_cmp expect actual && test_must_fail git rev-parse :0:x/wham && git rev-parse >actual \ :2:x/wham :3:x/wham && git rev-parse >expect \ A:y/wham B:z/wham && test_cmp expect actual && # Test that the two-way merge in x/wham is as expected git cat-file -p :2:x/wham >expect && git cat-file -p :3:x/wham >other && >empty && test_must_fail git merge-file \ -L "HEAD:y/wham" \ -L "" \ -L "B^0:z/wham" \ expect empty other && test_cmp expect x/wham ) ' # Testcase 1e, Renamed directory, with all filenames being renamed too # (Related to testcases 9f & 9g) # Commit O: z/{oldb,oldc} # Commit A: y/{newb,newc} # Commit B: z/{oldb,oldc,d} # Expected: y/{newb,newc,d} test_setup_1e () { git init 1e && ( cd 1e && mkdir z && echo b >z/oldb && echo c >z/oldc && git add z && test_tick && git commit -m "O" && git branch O && git branch A && git branch B && git checkout A && mkdir y && git mv z/oldb y/newb && git mv z/oldc y/newc && test_tick && git commit -m "A" && git checkout B && echo d >z/d && git add z/d 
&& test_tick && git commit -m "B" ) } test_expect_success '1e: Renamed directory, with all files being renamed too' ' test_setup_1e && ( cd 1e && git checkout A^0 && git -c merge.directoryRenames=true merge -s recursive B^0 && git ls-files -s >out && test_line_count = 3 out && git rev-parse >actual \ HEAD:y/newb HEAD:y/newc HEAD:y/d && git rev-parse >expect \ O:z/oldb O:z/oldc B:z/d && test_cmp expect actual && test_must_fail git rev-parse HEAD:z/d ) ' # Testcase 1f, Split a directory into two other directories # (Related to testcases 3a, all of section 2, and all of section 4) # Commit O: z/{b,c,d,e,f} # Commit A: z/{b,c,d,e,f,g} # Commit B: y/{b,c}, x/{d,e,f} # Expected: y/{b,c}, x/{d,e,f,g} test_setup_1f () { git init 1f && ( cd 1f && mkdir z && echo b >z/b && echo c >z/c && echo d >z/d && echo e >z/e && echo f >z/f && git add z && test_tick && git commit -m "O" && git branch O && git branch A && git branch B && git checkout A && echo g >z/g && git add z/g && test_tick && git commit -m "A" && git checkout B && mkdir y && mkdir x && git mv z/b y/ && git mv z/c y/ && git mv z/d x/ && git mv z/e x/ && git mv z/f x/ && rmdir z && test_tick && git commit -m "B" ) } test_expect_success '1f: Split a directory into two other directories' ' test_setup_1f && ( cd 1f && git checkout A^0 && git -c merge.directoryRenames=true merge -s recursive B^0 && git ls-files -s >out && test_line_count = 6 out && git rev-parse >actual \ HEAD:y/b HEAD:y/c HEAD:x/d HEAD:x/e HEAD:x/f HEAD:x/g && git rev-parse >expect \ O:z/b O:z/c O:z/d O:z/e O:z/f A:z/g && test_cmp expect actual && test_path_is_missing z/g && test_must_fail git rev-parse HEAD:z/g ) ' ########################################################################### # Rules suggested by testcases in section 1: # # We should still detect the directory rename even if it wasn't just # the directory renamed, but the files within it. 
(see 1b) # # If renames split a directory into two or more others, the directory # with the most renames, "wins" (see 1f). However, see the testcases # in section 2, plus testcases 3a and 4a. ########################################################################### ########################################################################### # SECTION 2: Split into multiple directories, with equal number of paths # # Explore the splitting-a-directory rules a bit; what happens in the # edge cases? # # Note that there is a closely related case of a directory not being # split on either side of history, but being renamed differently on # each side. See testcase 8e for that. ########################################################################### # Testcase 2a, Directory split into two on one side, with equal numbers of paths # Commit O: z/{b,c} # Commit A: y/b, w/c # Commit B: z/{b,c,d} # Expected: y/b, w/c, z/d, with warning about z/ -> (y/ vs. w/) conflict test_setup_2a () { git init 2a && ( cd 2a && mkdir z && echo b >z/b && echo c >z/c && git add z && test_tick && git commit -m "O" && git branch O && git branch A && git branch B && git checkout A && mkdir y && mkdir w && git mv z/b y/ && git mv z/c w/ && test_tick && git commit -m "A" && git checkout B && echo d >z/d && git add z/d && test_tick && git commit -m "B" ) } test_expect_success '2a: Directory split into two on one side, with equal numbers of paths' ' test_setup_2a && ( cd 2a && git checkout A^0 && test_must_fail git -c merge.directoryRenames=true merge -s recursive B^0 >out && test_grep "CONFLICT.*directory rename split" out && git ls-files -s >out && test_line_count = 3 out && git ls-files -u >out && test_line_count = 0 out && git ls-files -o >out && test_line_count = 1 out && git rev-parse >actual \ :0:y/b :0:w/c :0:z/d && git rev-parse >expect \ O:z/b O:z/c B:z/d && test_cmp expect actual ) ' # Testcase 2b, Directory split into two on one side, with equal numbers of paths # Commit O: z/{b,c} # 
Commit A: y/b, w/c # Commit B: z/{b,c}, x/d # Expected: y/b, w/c, x/d; No warning about z/ -> (y/ vs. w/) conflict test_setup_2b () { git init 2b && ( cd 2b && mkdir z && echo b >z/b && echo c >z/c && git add z && test_tick && git commit -m "O" && git branch O && git branch A && git branch B && git checkout A && mkdir y && mkdir w && git mv z/b y/ && git mv z/c w/ && test_tick && git commit -m "A" && git checkout B && mkdir x && echo d >x/d && git add x/d && test_tick && git commit -m "B" ) } test_expect_success '2b: Directory split into two on one side, with equal numbers of paths' ' test_setup_2b && ( cd 2b && git checkout A^0 && git -c merge.directoryRenames=true merge -s recursive B^0 >out && git ls-files -s >out && test_line_count = 3 out && git ls-files -u >out && test_line_count = 0 out && git ls-files -o >out && test_line_count = 1 out && git rev-parse >actual \ :0:y/b :0:w/c :0:x/d && git rev-parse >expect \ O:z/b O:z/c B:x/d && test_cmp expect actual && test_grep ! "CONFLICT.*directory rename split" out ) ' ########################################################################### # Rules suggested by section 2: # # None; the rule was already covered in section 1. These testcases are # here just to make sure the conflict resolution and necessary warning # messages are handled correctly. ########################################################################### ########################################################################### # SECTION 3: Path in question is the source path for some rename already # # Combining cases from Section 1 and trying to handle them could lead to # directory renaming detection being over-applied. So, this section # provides some good testcases to check that the implementation doesn't go # too far. 
########################################################################### # Testcase 3a, Avoid implicit rename if involved as source on other side # (Related to testcases 1c, 1f, and 9h) # Commit O: z/{b,c,d} # Commit A: z/{b,c,d} (no change) # Commit B: y/{b,c}, x/d # Expected: y/{b,c}, x/d test_setup_3a () { git init 3a && ( cd 3a && mkdir z && echo b >z/b && echo c >z/c && echo d >z/d && git add z && test_tick && git commit -m "O" && git branch O && git branch A && git branch B && git checkout A && test_tick && git commit --allow-empty -m "A" && git checkout B && mkdir y && mkdir x && git mv z/b y/ && git mv z/c y/ && git mv z/d x/ && rmdir z && test_tick && git commit -m "B" ) } test_expect_success '3a: Avoid implicit rename if involved as source on other side' ' test_setup_3a && ( cd 3a && git checkout A^0 && git -c merge.directoryRenames=true merge -s recursive B^0 && git ls-files -s >out && test_line_count = 3 out && git rev-parse >actual \ HEAD:y/b HEAD:y/c HEAD:x/d && git rev-parse >expect \ O:z/b O:z/c O:z/d && test_cmp expect actual ) ' # Testcase 3b, Avoid implicit rename if involved as source on other side # (Related to testcases 5c and 7c, also kind of 1e and 1f) # Commit O: z/{b,c,d} # Commit A: y/{b,c}, x/d # Commit B: z/{b,c}, w/d # Expected: y/{b,c}, CONFLICT:(z/d -> x/d vs. w/d) # NOTE: We're particularly checking that since z/d is already involved as # a source in a file rename on the same side of history, that we don't # get it involved in directory rename detection. If it were, we might # end up with CONFLICT:(z/d -> y/d vs. x/d vs. w/d), i.e. a # rename/rename/rename(1to3) conflict, which is just weird. 
test_setup_3b () { git init 3b && ( cd 3b && mkdir z && echo b >z/b && echo c >z/c && echo d >z/d && git add z && test_tick && git commit -m "O" && git branch O && git branch A && git branch B && git checkout A && mkdir y && mkdir x && git mv z/b y/ && git mv z/c y/ && git mv z/d x/ && rmdir z && test_tick && git commit -m "A" && git checkout B && mkdir w && git mv z/d w/ && test_tick && git commit -m "B" ) } test_expect_success '3b: Avoid implicit rename if involved as source on current side' ' test_setup_3b && ( cd 3b && git checkout A^0 && test_must_fail git -c merge.directoryRenames=true merge -s recursive B^0 >out && test_grep CONFLICT.*rename/rename.*z/d.*x/d.*w/d out && test_grep ! CONFLICT.*rename/rename.*y/d out && git ls-files -s >out && test_line_count = 5 out && git ls-files -u >out && test_line_count = 3 out && git ls-files -o >out && test_line_count = 1 out && git rev-parse >actual \ :0:y/b :0:y/c :1:z/d :2:x/d :3:w/d && git rev-parse >expect \ O:z/b O:z/c O:z/d O:z/d O:z/d && test_cmp expect actual && test_path_is_missing z/d && git hash-object >actual \ x/d w/d && git rev-parse >expect \ O:z/d O:z/d && test_cmp expect actual ) ' ########################################################################### # Rules suggested by section 3: # # Avoid directory-rename-detection for a path, if that path is the source # of a rename on either side of a merge. ########################################################################### ########################################################################### # SECTION 4: Partially renamed directory; still exists on both sides of merge # # What if we were to attempt to do directory rename detection when someone # "mostly" moved a directory but still left some files around, or, # equivalently, fully renamed a directory in one commit and then recreated # that directory in a later commit adding some new files and then tried to # merge? 
# # It's hard to divine user intent in these cases, because you can make an # argument that, depending on the intermediate history of the side being # merged, that some users will want files in that directory to # automatically be detected and renamed, while users with a different # intermediate history wouldn't want that rename to happen. # # I think that it is best to simply not have directory rename detection # apply to such cases. My reasoning for this is four-fold: (1) it's # easiest for users in general to figure out what happened if we don't # apply directory rename detection in any such case, (2) it's an easy rule # to explain ["We don't do directory rename detection if the directory # still exists on both sides of the merge"], (3) we can get some hairy # edge/corner cases that would be really confusing and possibly not even # representable in the index if we were to even try, and [related to 3] (4) # attempting to resolve this issue of divining user intent by examining # intermediate history goes against the spirit of three-way merges and is a # path towards crazy corner cases that are far more complex than what we're # already dealing with. # # Note that the wording of the rule ("We don't do directory rename # detection if the directory still exists on both sides of the merge.") # also excludes "renaming" of a directory into a subdirectory of itself # (e.g. /some/dir/* -> /some/dir/subdir/*). It may be possible to carve # out an exception for "renaming"-beneath-itself cases without opening # weird edge/corner cases for other partial directory renames, but for now # we are keeping the rule simple. # # This section contains a test for a partially-renamed-directory case. 
########################################################################### # Testcase 4a, Directory split, with original directory still present # (Related to testcase 1f) # Commit O: z/{b,c,d,e} # Commit A: y/{b,c,d}, z/e # Commit B: z/{b,c,d,e,f} # Expected: y/{b,c,d}, z/{e,f} # NOTE: Even though most files from z moved to y, we don't want f to follow. test_setup_4a () { git init 4a && ( cd 4a && mkdir z && echo b >z/b && echo c >z/c && echo d >z/d && echo e >z/e && git add z && test_tick && git commit -m "O" && git branch O && git branch A && git branch B && git checkout A && mkdir y && git mv z/b y/ && git mv z/c y/ && git mv z/d y/ && test_tick && git commit -m "A" && git checkout B && echo f >z/f && git add z/f && test_tick && git commit -m "B" ) } test_expect_success '4a: Directory split, with original directory still present' ' test_setup_4a && ( cd 4a && git checkout A^0 && git -c merge.directoryRenames=true merge -s recursive B^0 && git ls-files -s >out && test_line_count = 5 out && git ls-files -u >out && test_line_count = 0 out && git ls-files -o >out && test_line_count = 1 out && git rev-parse >actual \ HEAD:y/b HEAD:y/c HEAD:y/d HEAD:z/e HEAD:z/f && git rev-parse >expect \ O:z/b O:z/c O:z/d O:z/e B:z/f && test_cmp expect actual ) ' ########################################################################### # Rules suggested by section 4: # # Directory-rename-detection should be turned off for any directories (as # a source for renames) that exist on both sides of the merge. (The "as # a source for renames" clarification is due to cases like 1c where # the target directory exists on both sides and we do want the rename # detection.) But, sadly, see testcase 8b. 
########################################################################### ########################################################################### # SECTION 5: Files/directories in the way of subset of to-be-renamed paths # # Implicitly renaming files due to a detected directory rename could run # into problems if there are files or directories in the way of the paths # we want to rename. Explore such cases in this section. ########################################################################### # Testcase 5a, Merge directories, other side adds files to original and target # Commit O: z/{b,c}, y/d # Commit A: z/{b,c,e_1,f}, y/{d,e_2} # Commit B: y/{b,c,d} # Expected: z/e_1, y/{b,c,d,e_2,f} + CONFLICT warning # NOTE: While directory rename detection is active here causing z/f to # become y/f, we did not apply this for z/e_1 because that would # give us an add/add conflict for y/e_1 vs y/e_2. This problem with # this add/add, is that both versions of y/e are from the same side # of history, giving us no way to represent this conflict in the # index. 
test_setup_5a () { git init 5a && ( cd 5a && mkdir z && echo b >z/b && echo c >z/c && mkdir y && echo d >y/d && git add z y && test_tick && git commit -m "O" && git branch O && git branch A && git branch B && git checkout A && echo e1 >z/e && echo f >z/f && echo e2 >y/e && git add z/e z/f y/e && test_tick && git commit -m "A" && git checkout B && git mv z/b y/ && git mv z/c y/ && rmdir z && test_tick && git commit -m "B" ) } test_expect_success '5a: Merge directories, other side adds files to original and target' ' test_setup_5a && ( cd 5a && git checkout A^0 && test_must_fail git -c merge.directoryRenames=true merge -s recursive B^0 >out && test_grep "CONFLICT.*implicit dir rename" out && git ls-files -s >out && test_line_count = 6 out && git ls-files -u >out && test_line_count = 0 out && git ls-files -o >out && test_line_count = 1 out && git rev-parse >actual \ :0:y/b :0:y/c :0:y/d :0:y/e :0:z/e :0:y/f && git rev-parse >expect \ O:z/b O:z/c O:y/d A:y/e A:z/e A:z/f && test_cmp expect actual ) ' # Testcase 5b, Rename/delete in order to get add/add/add conflict # (Related to testcase 8d; these may appear slightly inconsistent to users; # Also related to testcases 7d and 7e) # Commit O: z/{b,c,d_1} # Commit A: y/{b,c,d_2} # Commit B: z/{b,c,d_1,e}, y/d_3 # Expected: y/{b,c,e}, CONFLICT(add/add: y/d_2 vs. y/d_3) # NOTE: If z/d_1 in commit B were to be involved in dir rename detection, as # we normally would since z/ is being renamed to y/, then this would be # a rename/delete (z/d_1 -> y/d_1 vs. deleted) AND an add/add/add # conflict of y/d_1 vs. y/d_2 vs. y/d_3. Add/add/add is not # representable in the index, so the existence of y/d_3 needs to # cause us to bail on directory rename detection for that path, falling # back to git behavior without the directory rename detection. 
test_setup_5b () { git init 5b && ( cd 5b && mkdir z && echo b >z/b && echo c >z/c && echo d1 >z/d && git add z && test_tick && git commit -m "O" && git branch O && git branch A && git branch B && git checkout A && git rm z/d && git mv z y && echo d2 >y/d && git add y/d && test_tick && git commit -m "A" && git checkout B && mkdir y && echo d3 >y/d && echo e >z/e && git add y/d z/e && test_tick && git commit -m "B" ) } test_expect_success '5b: Rename/delete in order to get add/add/add conflict' ' test_setup_5b && ( cd 5b && git checkout A^0 && test_must_fail git -c merge.directoryRenames=true merge -s recursive B^0 >out && test_grep "CONFLICT (add/add).* y/d" out && git ls-files -s >out && test_line_count = 5 out && git ls-files -u >out && test_line_count = 2 out && git ls-files -o >out && test_line_count = 1 out && git rev-parse >actual \ :0:y/b :0:y/c :0:y/e :2:y/d :3:y/d && git rev-parse >expect \ O:z/b O:z/c B:z/e A:y/d B:y/d && test_cmp expect actual && test_must_fail git rev-parse :1:y/d && test_path_is_file y/d ) ' # Testcase 5c, Transitive rename would cause rename/rename/rename/add/add/add # (Directory rename detection would result in transitive rename vs. # rename/rename(1to2) and turn it into a rename/rename(1to3). Further, # rename paths conflict with separate adds on the other side) # (Related to testcases 3b and 7c) # Commit O: z/{b,c}, x/d_1 # Commit A: y/{b,c,d_2}, w/d_1 # Commit B: z/{b,c,d_1,e}, w/d_3, y/d_4 # Expected: A mess, but only a rename/rename(1to2)/add/add mess. Use the # presence of y/d_4 in B to avoid doing transitive rename of # x/d_1 -> z/d_1 -> y/d_1, so that the only paths we have at # y/d are y/d_2 and y/d_4. We still do the move from z/e to y/e, # though, because it doesn't have anything in the way. 
test_setup_5c () { git init 5c && ( cd 5c && mkdir z && echo b >z/b && echo c >z/c && mkdir x && echo d1 >x/d && git add z x && test_tick && git commit -m "O" && git branch O && git branch A && git branch B && git checkout A && git mv z y && echo d2 >y/d && git add y/d && git mv x w && test_tick && git commit -m "A" && git checkout B && git mv x/d z/ && mkdir w && mkdir y && echo d3 >w/d && echo d4 >y/d && echo e >z/e && git add w/ y/ z/e && test_tick && git commit -m "B" ) } test_expect_success '5c: Transitive rename would cause rename/rename/rename/add/add/add' ' test_setup_5c && ( cd 5c && git checkout A^0 && test_must_fail git -c merge.directoryRenames=true merge -s recursive B^0 >out && test_grep "CONFLICT (rename/rename).*x/d.*w/d.*z/d" out && test_grep "CONFLICT (add/add).* y/d" out && git ls-files -s >out && test_line_count = 9 out && git ls-files -u >out && test_line_count = 6 out && git ls-files -o >out && test_line_count = 1 out && git rev-parse >actual \ :0:y/b :0:y/c :0:y/e && git rev-parse >expect \ O:z/b O:z/c B:z/e && test_cmp expect actual && test_must_fail git rev-parse :1:y/d && git rev-parse >actual \ :2:w/d :3:w/d :1:x/d :2:y/d :3:y/d :3:z/d && git rev-parse >expect \ O:x/d B:w/d O:x/d A:y/d B:y/d O:x/d && test_cmp expect actual && git hash-object >actual \ z/d && git rev-parse >expect \ O:x/d && test_cmp expect actual && test_path_is_missing x/d && test_path_is_file y/d && grep -q "<<<<" y/d # conflict markers should be present ) ' # Testcase 5d, Directory/file/file conflict due to directory rename # Commit O: z/{b,c} # Commit A: y/{b,c,d_1} # Commit B: z/{b,c,d_2,f}, y/d/e # Expected: y/{b,c,d/e,f}, z/d_2, CONFLICT(file/directory), y/d_1~HEAD # Note: The fact that y/d/ exists in B makes us bail on directory rename # detection for z/d_2, but that doesn't prevent us from applying the # directory rename detection for z/f -> y/f. 
test_setup_5d () { git init 5d && ( cd 5d && mkdir z && echo b >z/b && echo c >z/c && git add z && test_tick && git commit -m "O" && git branch O && git branch A && git branch B && git checkout A && git mv z y && echo d1 >y/d && git add y/d && test_tick && git commit -m "A" && git checkout B && mkdir -p y/d && echo e >y/d/e && echo d2 >z/d && echo f >z/f && git add y/d/e z/d z/f && test_tick && git commit -m "B" ) } test_expect_success '5d: Directory/file/file conflict due to directory rename' ' test_setup_5d && ( cd 5d && git checkout A^0 && test_must_fail git -c merge.directoryRenames=true merge -s recursive B^0 >out && test_grep "CONFLICT (file/directory).*y/d" out && git ls-files -s >out && test_line_count = 6 out && git ls-files -u >out && test_line_count = 1 out && git ls-files -o >out && test_line_count = 1 out && git rev-parse >actual \ :0:y/b :0:y/c :0:z/d :0:y/f :2:y/d~HEAD :0:y/d/e && git rev-parse >expect \ O:z/b O:z/c B:z/d B:z/f A:y/d B:y/d/e && test_cmp expect actual && git hash-object y/d~HEAD >actual && git rev-parse A:y/d >expect && test_cmp expect actual ) ' ########################################################################### # Rules suggested by section 5: # # If a subset of to-be-renamed files have a file or directory in the way, # "turn off" the directory rename for those specific sub-paths, falling # back to old handling. But, sadly, see testcases 8a and 8b. ########################################################################### ########################################################################### # SECTION 6: Same side of the merge was the one that did the rename # # It may sound obvious that you only want to apply implicit directory # renames to directories if the _other_ side of history did the renaming. # If you did make an implementation that didn't explicitly enforce this # rule, the majority of cases that would fall under this section would # also be solved by following the rules from the above sections. 
But # there are still a few that stick out, so this section covers them just # to make sure we also get them right. ########################################################################### # Testcase 6a, Tricky rename/delete # Commit O: z/{b,c,d} # Commit A: z/b # Commit B: y/{b,c}, z/d # Expected: y/b, CONFLICT(rename/delete, z/c -> y/c vs. NULL) # Note: We're just checking here that the rename of z/b and z/c to put # them under y/ doesn't accidentally catch z/d and make it look like # it is also involved in a rename/delete conflict. test_setup_6a () { git init 6a && ( cd 6a && mkdir z && echo b >z/b && echo c >z/c && echo d >z/d && git add z && test_tick && git commit -m "O" && git branch O && git branch A && git branch B && git checkout A && git rm z/c && git rm z/d && test_tick && git commit -m "A" && git checkout B && mkdir y && git mv z/b y/ && git mv z/c y/ && test_tick && git commit -m "B" ) } test_expect_success '6a: Tricky rename/delete' ' test_setup_6a && ( cd 6a && git checkout A^0 && test_must_fail git -c merge.directoryRenames=true merge -s recursive B^0 >out && test_grep "CONFLICT (rename/delete).*z/c.*y/c" out && git ls-files -s >out && test_line_count = 3 out && git ls-files -u >out && test_line_count = 2 out && git ls-files -o >out && test_line_count = 1 out && git rev-parse >actual \ :0:y/b :1:y/c :3:y/c && git rev-parse >expect \ O:z/b O:z/c O:z/c && test_cmp expect actual ) ' # Testcase 6b1, Same rename done on both sides # (Related to testcase 6b2 and 8e) # Commit O: z/{b,c,d,e} # Commit A: y/{b,c,d}, x/e # Commit B: y/{b,c,d}, z/{e,f} # Expected: y/{b,c,d,f}, x/e # Note: Directory rename detection says A renamed z/ -> y/ (3 paths renamed # to y/ and only 1 renamed to x/), therefore the new file 'z/f' in B # should be moved to 'y/f'. # # This is a bit of an edge case where any behavior might surprise users, # whether that is treating A as renaming z/ -> y/, treating A as renaming # z/ -> x/, or treating A as not doing any directory rename. 
# However, I
# think this answer is the least confusing and most consistent with the
# rules elsewhere.
#
# A note about z/ -> x/, since it may not be clear how that could come
# about:  If we were to ignore files renamed by both sides
# (i.e. z/{b,c,d}), as directory rename detection did in git-2.18 thru
# at least git-2.28, then we would note there are no renames from z/ to
# y/ and one rename from z/ to x/ and thus come to the conclusion that
# A renamed z/ -> x/.  This seems more confusing for end users than a
# rename of z/ to y/, it makes directory rename detection behavior
# harder for them to predict.  As such, we modified the rule, changed
# the behavior on testcases 6b2 and 8e, and introduced this 6b1 testcase.

# Build O plus branches where both sides rename z/ -> y/ but each side
# relocates z/e differently, and B adds a new file z/f.
test_setup_6b1 () {
	git init 6b1 &&
	(
		cd 6b1 &&

		mkdir z &&
		echo b >z/b &&
		echo c >z/c &&
		echo d >z/d &&
		echo e >z/e &&
		git add z &&
		test_tick &&
		git commit -m "O" &&

		git branch O &&
		git branch A &&
		git branch B &&

		git checkout A &&
		git mv z y &&
		mkdir x &&
		git mv y/e x/e &&
		test_tick &&
		git commit -m "A" &&

		git checkout B &&
		git mv z y &&
		mkdir z &&
		git mv y/e z/e &&
		echo f >z/f &&
		git add z/f &&
		test_tick &&
		git commit -m "B"
	)
}

test_expect_success '6b1: Same renames done on both sides, plus another rename' '
	test_setup_6b1 &&
	(
		cd 6b1 &&

		git checkout A^0 &&

		git -c merge.directoryRenames=true merge -s recursive B^0 &&

		git ls-files -s >out &&
		test_line_count = 5 out &&
		git ls-files -u >out &&
		test_line_count = 0 out &&
		git ls-files -o >out &&
		test_line_count = 1 out &&

		# z/f should follow the z/ -> y/ rename and land at y/f
		git rev-parse >actual \
			HEAD:y/b HEAD:y/c HEAD:y/d HEAD:x/e HEAD:y/f &&
		git rev-parse >expect \
			O:z/b    O:z/c    O:z/d    O:z/e    B:z/f &&
		test_cmp expect actual
	)
'

# Testcase 6b2, Same rename done on both sides
#   (Related to testcases 6c and 8e)
#   Commit O: z/{b,c}
#   Commit A: y/{b,c}
#   Commit B: y/{b,c}, z/d
#   Expected: y/{b,c,d}
#   Alternate: y/{b,c}, z/d
#   Note: Directory rename detection says A renamed z/ -> y/, therefore the new
#         file 'z/d' in B should be moved to 'y/d'.
#
# We could potentially ignore the renames of z/{b,c} on side A since
# those were renamed on both sides.  However, it's a bit of a corner
# case because what if there was also a z/e that side A moved to x/e
# and side B left alone?  If we used the "ignore renames done on both
# sides" logic, then we'd compute that A renamed z/ -> x/, and move
# z/d to x/d.  That seems more surprising and uglier than allowing
# the z/ -> y/ rename.

# Build O plus branches where both A and B rename z/ -> y/, and B then
# recreates z/ containing only a new file d.
test_setup_6b2 () {
	git init 6b2 &&
	(
		cd 6b2 &&

		mkdir z &&
		echo b >z/b &&
		echo c >z/c &&
		git add z &&
		test_tick &&
		git commit -m "O" &&

		git branch O &&
		git branch A &&
		git branch B &&

		git checkout A &&
		git mv z y &&
		test_tick &&
		git commit -m "A" &&

		git checkout B &&
		git mv z y &&
		mkdir z &&
		echo d >z/d &&
		git add z/d &&
		test_tick &&
		git commit -m "B"
	)
}

test_expect_success '6b2: Same rename done on both sides' '
	test_setup_6b2 &&
	(
		cd 6b2 &&

		git checkout A^0 &&

		git -c merge.directoryRenames=true merge -s recursive B^0 &&

		git ls-files -s >out &&
		test_line_count = 3 out &&
		git ls-files -u >out &&
		test_line_count = 0 out &&
		git ls-files -o >out &&
		test_line_count = 1 out &&

		git rev-parse >actual \
			HEAD:y/b HEAD:y/c HEAD:y/d &&
		git rev-parse >expect \
			O:z/b    O:z/c    B:z/d &&
		test_cmp expect actual
	)
'

# Testcase 6c, Rename only done on same side
#   (Related to testcases 6b1, 6b2, and 8e)
#   Commit O: z/{b,c}
#   Commit A: z/{b,c} (no change)
#   Commit B: y/{b,c}, z/d
#   Expected: y/{b,c}, z/d
#   NOTE: Seems obvious, but just checking that the implementation doesn't
#         "accidentally detect a rename" and give us y/{b,c,d}.
# Build O plus an empty commit on A and, on B, a z/ -> y/ rename with a
# recreated z/ containing only the new file d.
test_setup_6c () {
	git init 6c &&
	(
		cd 6c &&

		mkdir z &&
		echo b >z/b &&
		echo c >z/c &&
		git add z &&
		test_tick &&
		git commit -m "O" &&

		git branch O &&
		git branch A &&
		git branch B &&

		git checkout A &&
		test_tick &&
		git commit --allow-empty -m "A" &&

		git checkout B &&
		git mv z y &&
		mkdir z &&
		echo d >z/d &&
		git add z/d &&
		test_tick &&
		git commit -m "B"
	)
}

test_expect_success '6c: Rename only done on same side' '
	test_setup_6c &&
	(
		cd 6c &&

		git checkout A^0 &&

		git -c merge.directoryRenames=true merge -s recursive B^0 &&

		git ls-files -s >out &&
		test_line_count = 3 out &&
		git ls-files -u >out &&
		test_line_count = 0 out &&
		git ls-files -o >out &&
		test_line_count = 1 out &&

		# z/d must stay put; only B renamed z/, so no implicit rename applies
		git rev-parse >actual \
			HEAD:y/b HEAD:y/c HEAD:z/d &&
		git rev-parse >expect \
			O:z/b    O:z/c    B:z/d &&
		test_cmp expect actual
	)
'

# Testcase 6d, We don't always want transitive renaming
#   (Related to testcase 1c)
#   Commit O: z/{b,c}, x/d
#   Commit A: z/{b,c}, x/d (no change)
#   Commit B: y/{b,c}, z/d
#   Expected: y/{b,c}, z/d
#   NOTE: Again, this seems obvious but just checking that the implementation
#         doesn't "accidentally detect a rename" and give us y/{b,c,d}.
# Build O plus an empty commit on A and, on B, renames z/ -> y/ and x/ -> z/.
test_setup_6d () {
	git init 6d &&
	(
		cd 6d &&

		mkdir z &&
		echo b >z/b &&
		echo c >z/c &&
		mkdir x &&
		echo d >x/d &&
		git add z x &&
		test_tick &&
		git commit -m "O" &&

		git branch O &&
		git branch A &&
		git branch B &&

		git checkout A &&
		test_tick &&
		git commit --allow-empty -m "A" &&

		git checkout B &&
		git mv z y &&
		git mv x z &&
		test_tick &&
		git commit -m "B"
	)
}

test_expect_success '6d: We do not always want transitive renaming' '
	test_setup_6d &&
	(
		cd 6d &&

		git checkout A^0 &&

		git -c merge.directoryRenames=true merge -s recursive B^0 &&

		git ls-files -s >out &&
		test_line_count = 3 out &&
		git ls-files -u >out &&
		test_line_count = 0 out &&
		git ls-files -o >out &&
		test_line_count = 1 out &&

		# x/d ends at z/d (B's rename), NOT transitively at y/d
		git rev-parse >actual \
			HEAD:y/b HEAD:y/c HEAD:z/d &&
		git rev-parse >expect \
			O:z/b    O:z/c    O:x/d &&
		test_cmp expect actual
	)
'

# Testcase 6e, Add/add from one-side
#   Commit O: z/{b,c}
#   Commit A: z/{b,c} (no change)
#   Commit B: y/{b,c,d_1}, z/d_2
#   Expected: y/{b,c,d_1}, z/d_2
#   NOTE: Again, this seems obvious but just checking that the implementation
#         doesn't "accidentally detect a rename" and give us y/{b,c} +
#         add/add conflict on y/d_1 vs y/d_2.
# Build O plus an empty commit on A and, on B, a z/ -> y/ rename with two
# distinct new files both named d (one in y/, one in a recreated z/).
test_setup_6e () {
	git init 6e &&
	(
		cd 6e &&

		mkdir z &&
		echo b >z/b &&
		echo c >z/c &&
		git add z &&
		test_tick &&
		git commit -m "O" &&

		git branch O &&
		git branch A &&
		git branch B &&

		git checkout A &&
		test_tick &&
		git commit --allow-empty -m "A" &&

		git checkout B &&
		git mv z y &&
		echo d1 > y/d &&
		mkdir z &&
		echo d2 > z/d &&
		git add y/d z/d &&
		test_tick &&
		git commit -m "B"
	)
}

test_expect_success '6e: Add/add from one side' '
	test_setup_6e &&
	(
		cd 6e &&

		git checkout A^0 &&

		git -c merge.directoryRenames=true merge -s recursive B^0 &&

		git ls-files -s >out &&
		test_line_count = 4 out &&
		git ls-files -u >out &&
		test_line_count = 0 out &&
		git ls-files -o >out &&
		test_line_count = 1 out &&

		# Both d files survive at their B locations; no add/add conflict
		git rev-parse >actual \
			HEAD:y/b HEAD:y/c HEAD:y/d HEAD:z/d &&
		git rev-parse >expect \
			O:z/b    O:z/c    B:y/d    B:z/d &&
		test_cmp expect actual
	)
'

###########################################################################
# Rules suggested by section 6:
#
#   Only apply implicit directory renames to directories if the other
#   side of history is the one doing the renaming.
###########################################################################


###########################################################################
# SECTION 7: More involved Edge/Corner cases
#
# The ruleset we have generated in the above sections seems to provide
# well-defined merges.  But can we find edge/corner cases that either (a)
# are harder for users to understand, or (b) have a resolution that is
# non-intuitive or suboptimal?
#
# The testcases in this section dive into cases that I've tried to craft in
# a way to find some that might be surprising to users or difficult for
# them to understand (the next section will look at non-intuitive or
# suboptimal merge results).  Some of the testcases are similar to ones
# from past sections, but have been simplified to try to highlight error
# messages using a "modified" path (due to the directory rename).  Are
# users okay with these?
#
# In my opinion, testcases that are difficult to understand from this
# section is due to difficulty in the testcase rather than the directory
# renaming (similar to how t6042 and t6036 have difficult resolutions due
# to the problem setup itself being complex).  And I don't think the
# error messages are a problem.
#
# On the other hand, the testcases in section 8 worry me slightly more...
###########################################################################

# Testcase 7a, rename-dir vs. rename-dir (NOT split evenly) PLUS add-other-file
#   Commit O: z/{b,c}
#   Commit A: y/{b,c}
#   Commit B: w/b, x/c, z/d
#   Expected: y/d, CONFLICT(rename/rename for both z/b and z/c)
#   NOTE: There's a rename of z/ here, y/ has more renames, so z/d -> y/d.

# Build O plus branch A (z/ -> y/) and branch B (z/b -> w/, z/c -> x/,
# new file z/d).
test_setup_7a () {
	git init 7a &&
	(
		cd 7a &&

		mkdir z &&
		echo b >z/b &&
		echo c >z/c &&
		git add z &&
		test_tick &&
		git commit -m "O" &&

		git branch O &&
		git branch A &&
		git branch B &&

		git checkout A &&
		git mv z y &&
		test_tick &&
		git commit -m "A" &&

		git checkout B &&
		mkdir w &&
		mkdir x &&
		git mv z/b w/ &&
		git mv z/c x/ &&
		echo d > z/d &&
		git add z/d &&
		test_tick &&
		git commit -m "B"
	)
}

test_expect_success '7a: rename-dir vs. rename-dir (NOT split evenly) PLUS add-other-file' '
	test_setup_7a &&
	(
		cd 7a &&

		git checkout A^0 &&

		test_must_fail git -c merge.directoryRenames=true merge -s recursive B^0 >out &&
		test_grep "CONFLICT (rename/rename).*z/b.*y/b.*w/b" out &&
		test_grep "CONFLICT (rename/rename).*z/c.*y/c.*x/c" out &&

		git ls-files -s >out &&
		test_line_count = 7 out &&
		git ls-files -u >out &&
		test_line_count = 6 out &&
		git ls-files -o >out &&
		test_line_count = 1 out &&

		git rev-parse >actual \
			:1:z/b :2:y/b :3:w/b :1:z/c :2:y/c :3:x/c :0:y/d &&
		git rev-parse >expect \
			O:z/b  O:z/b  O:z/b  O:z/c  O:z/c  O:z/c  B:z/d &&
		test_cmp expect actual &&

		git hash-object >actual \
			y/b w/b y/c x/c &&
		git rev-parse >expect \
			O:z/b O:z/b O:z/c O:z/c &&
		test_cmp expect actual
	)
'

# Testcase 7b, rename/rename(2to1), but only due to transitive rename
#   (Related to testcase 1d)
#   Commit O: z/{b,c},     x/d_1, w/d_2
#   Commit A: y/{b,c,d_2}, x/d_1
#   Commit B: z/{b,c,d_1},        w/d_2
#   Expected: y/{b,c}, CONFLICT(rename/rename(2to1): x/d_1, w/d_2 -> y_d)

# Build O plus branch A (z/ -> y/, w/d -> y/d) and branch B (x/d -> z/d).
test_setup_7b () {
	git init 7b &&
	(
		cd 7b &&

		mkdir z &&
		mkdir x &&
		mkdir w &&
		echo b >z/b &&
		echo c >z/c &&
		echo d1 > x/d &&
		echo d2 > w/d &&
		git add z x w &&
		test_tick &&
		git commit -m "O" &&

		git branch O &&
		git branch A &&
		git branch B &&

		git checkout A &&
		git mv z y &&
		git mv w/d y/ &&
		test_tick &&
		git commit -m "A" &&

		git checkout B &&
		git mv x/d z/ &&
		rmdir x &&
		test_tick &&
		git commit -m "B"
	)
}

test_expect_success '7b: rename/rename(2to1), but only due to transitive rename' '
	test_setup_7b &&
	(
		cd 7b &&

		git checkout A^0 &&

		test_must_fail git -c merge.directoryRenames=true merge -s recursive B^0 >out &&
		test_grep "CONFLICT (\(.*\)/\1)" out &&

		git ls-files -s >out &&
		test_line_count = 4 out &&
		git ls-files -u >out &&
		test_line_count = 2 out &&
		git ls-files -o >out &&
		test_line_count = 1 out &&

		git rev-parse >actual \
			:0:y/b :0:y/c :2:y/d :3:y/d &&
		git rev-parse >expect \
			O:z/b  O:z/c  O:w/d  O:x/d &&
		test_cmp expect actual &&

		# Test that the two-way merge in y/d is as expected
		git cat-file -p :2:y/d >expect &&
		git cat-file -p :3:y/d >other &&
		>empty &&
		test_must_fail git merge-file \
			-L "HEAD:y/d" \
			-L "" \
			-L "B^0:z/d" \
			expect empty other &&
		test_cmp expect y/d
	)
'

# Testcase 7c, rename/rename(1to...2or3); transitive rename may add complexity
#   (Related to testcases 3b and 5c)
#   Commit O: z/{b,c}, x/d
#   Commit A: y/{b,c}, w/d
#   Commit B: z/{b,c,d}
#   Expected: y/{b,c}, CONFLICT(x/d -> w/d vs. y/d)
#   NOTE: z/ was renamed to y/ so we do want to report
#         neither CONFLICT(x/d -> w/d vs. z/d)
#         nor CONFLiCT x/d -> w/d vs. y/d vs. z/d)

# Build O plus branch A (z/ -> y/, x/ -> w/) and branch B (x/d -> z/d).
test_setup_7c () {
	git init 7c &&
	(
		cd 7c &&

		mkdir z &&
		echo b >z/b &&
		echo c >z/c &&
		mkdir x &&
		echo d >x/d &&
		git add z x &&
		test_tick &&
		git commit -m "O" &&

		git branch O &&
		git branch A &&
		git branch B &&

		git checkout A &&
		git mv z y &&
		git mv x w &&
		test_tick &&
		git commit -m "A" &&

		git checkout B &&
		git mv x/d z/ &&
		rmdir x &&
		test_tick &&
		git commit -m "B"
	)
}

test_expect_success '7c: rename/rename(1to...2or3); transitive rename may add complexity' '
	test_setup_7c &&
	(
		cd 7c &&

		git checkout A^0 &&

		test_must_fail git -c merge.directoryRenames=true merge -s recursive B^0 >out &&
		test_grep "CONFLICT (rename/rename).*x/d.*w/d.*y/d" out &&

		git ls-files -s >out &&
		test_line_count = 5 out &&
		git ls-files -u >out &&
		test_line_count = 3 out &&
		git ls-files -o >out &&
		test_line_count = 1 out &&

		git rev-parse >actual \
			:0:y/b :0:y/c :1:x/d :2:w/d :3:y/d &&
		git rev-parse >expect \
			O:z/b  O:z/c  O:x/d  O:x/d  O:x/d &&
		test_cmp expect actual
	)
'

# Testcase 7d, transitive rename involved in rename/delete; how is it reported?
#   (Related somewhat to testcases 5b and 8d)
#   Commit O: z/{b,c}, x/d
#   Commit A: y/{b,c}
#   Commit B: z/{b,c,d}
#   Expected: y/{b,c}, CONFLICT(delete x/d vs rename to y/d)
#   NOTE: z->y so NOT CONFLICT(delete x/d vs rename to z/d)

# Build O plus branch A (z/ -> y/, delete x/) and branch B (x/d -> z/d).
test_setup_7d () {
	git init 7d &&
	(
		cd 7d &&

		mkdir z &&
		echo b >z/b &&
		echo c >z/c &&
		mkdir x &&
		echo d >x/d &&
		git add z x &&
		test_tick &&
		git commit -m "O" &&

		git branch O &&
		git branch A &&
		git branch B &&

		git checkout A &&
		git mv z y &&
		git rm -rf x &&
		test_tick &&
		git commit -m "A" &&

		git checkout B &&
		git mv x/d z/ &&
		rmdir x &&
		test_tick &&
		git commit -m "B"
	)
}

test_expect_success '7d: transitive rename involved in rename/delete; how is it reported?' '
	test_setup_7d &&
	(
		cd 7d &&

		git checkout A^0 &&

		test_must_fail git -c merge.directoryRenames=true merge -s recursive B^0 >out &&
		test_grep "CONFLICT (rename/delete).*x/d.*y/d" out &&

		git ls-files -s >out &&
		test_line_count = 4 out &&
		git ls-files -u >out &&
		test_line_count = 2 out &&
		git ls-files -o >out &&
		test_line_count = 1 out &&

		git rev-parse >actual \
			:0:y/b :0:y/c :1:y/d :3:y/d &&
		git rev-parse >expect \
			O:z/b  O:z/c  O:x/d  O:x/d &&
		test_cmp expect actual
	)
'

# Testcase 7e, transitive rename in rename/delete AND dirs in the way
#   (Very similar to 'both rename source and destination involved in D/F conflict' from t6022-merge-rename.sh)
#   (Also related to testcases 9c and 9d)
#   Commit O: z/{b,c},     x/d_1
#   Commit A: y/{b,c,d/g}, x/d/f
#   Commit B: z/{b,c,d_1}
#   Expected: rename/delete(x/d_1->y/d_1 vs. None) + D/F conflict on y/d
#             y/{b,c,d/g}, y/d_1~B^0, x/d/f

#   NOTE: The main path of interest here is d_1 and where it ends up, but
#         this is actually a case that has two potential directory renames
#         involved and D/F conflict(s), so it makes sense to walk through
#         each step.
#
#         Commit A renames z/ -> y/.  Thus everything that B adds to z/
#         should be instead moved to y/.  This gives us the D/F conflict on
#         y/d because x/d_1 -> z/d_1 -> y/d_1 conflicts with y/d/g.
#
#         Further, commit B renames x/ -> z/, thus everything A adds to x/
#         should instead be moved to z/...BUT we removed z/ and renamed it
#         to y/, so maybe everything should move not from x/ to z/, but
#         from x/ to z/ to y/.  Doing so might make sense from the logic so
#         far, but note that commit A had both an x/ and a y/; it did the
#         renaming of z/ to y/ and created x/d/f and it clearly made these
#         things separate, so it doesn't make much sense to push these
#         together.  Doing so is what I'd call a doubly transitive rename;
#         see testcases 9c and 9d for further discussion of this issue and
#         how it's resolved.

# Build O plus branch A (z/ -> y/, x/d replaced by dirs x/d/ and y/d/)
# and branch B (x/d -> z/d).
test_setup_7e () {
	git init 7e &&
	(
		cd 7e &&

		mkdir z &&
		echo b >z/b &&
		echo c >z/c &&
		mkdir x &&
		echo d1 >x/d &&
		git add z x &&
		test_tick &&
		git commit -m "O" &&

		git branch O &&
		git branch A &&
		git branch B &&

		git checkout A &&
		git mv z y &&
		git rm x/d &&
		mkdir -p x/d &&
		mkdir -p y/d &&
		echo f >x/d/f &&
		echo g >y/d/g &&
		git add x/d/f y/d/g &&
		test_tick &&
		git commit -m "A" &&

		git checkout B &&
		git mv x/d z/ &&
		rmdir x &&
		test_tick &&
		git commit -m "B"
	)
}

test_expect_success '7e: transitive rename in rename/delete AND dirs in the way' '
	test_setup_7e &&
	(
		cd 7e &&

		git checkout A^0 &&

		test_must_fail git -c merge.directoryRenames=true merge -s recursive B^0 >out &&
		test_grep "CONFLICT (rename/delete).*x/d.*y/d" out &&

		git ls-files -s >out &&
		test_line_count = 6 out &&
		git ls-files -u >out &&
		test_line_count = 2 out &&
		git ls-files -o >out &&
		test_line_count = 1 out &&

		# y/d is a directory, so the conflicted file lands at y/d~B^0
		git rev-parse >actual \
			:0:x/d/f :0:y/d/g :0:y/b :0:y/c :1:y/d~B^0 :3:y/d~B^0 &&
		git rev-parse >expect \
			A:x/d/f  A:y/d/g  O:z/b O:z/c  O:x/d      O:x/d &&
		test_cmp expect actual &&

		git hash-object y/d~B^0 >actual &&
		git rev-parse O:x/d >expect &&
		test_cmp expect actual
	)
'

###########################################################################
# SECTION 8: Suboptimal merges
#
# As alluded to in the last section, the ruleset we have built up for
# detecting directory renames unfortunately has some special cases where it
# results in slightly suboptimal or non-intuitive behavior.  This section
# explores these cases.
#
# To be fair, we already had non-intuitive or suboptimal behavior for most
# of these cases in git before introducing implicit directory rename
# detection, but it'd be nice if there was a modified ruleset out there
# that handled these cases a bit better.
###########################################################################

# Testcase 8a, Dual-directory rename, one into the others' way
#   Commit O. x/{a,b},   y/{c,d}
#   Commit A. x/{a,b,e}, y/{c,d,f}
#   Commit B. y/{a,b},   z/{c,d}
#
#   Possible Resolutions:
#     w/o dir-rename detection: y/{a,b,f},   z/{c,d},   x/e
#     Currently expected:       y/{a,b,e,f}, z/{c,d}
#     Optimal:                  y/{a,b,e},   z/{c,d,f}
#
#   Note: Both x and y got renamed and it'd be nice to detect both, and we do
#   better with directory rename detection than git did without, but the
#   simple rule from section 5 prevents me from handling this as optimally as
#   we potentially could.
# Build O plus branch A (adds x/e and y/f) and branch B (y/ -> z/, x/ -> y/).
test_setup_8a () {
	git init 8a &&
	(
		cd 8a &&

		mkdir x &&
		mkdir y &&
		echo a >x/a &&
		echo b >x/b &&
		echo c >y/c &&
		echo d >y/d &&
		git add x y &&
		test_tick &&
		git commit -m "O" &&

		git branch O &&
		git branch A &&
		git branch B &&

		git checkout A &&
		echo e >x/e &&
		echo f >y/f &&
		git add x/e y/f &&
		test_tick &&
		git commit -m "A" &&

		git checkout B &&
		git mv y z &&
		git mv x y &&
		test_tick &&
		git commit -m "B"
	)
}

test_expect_success '8a: Dual-directory rename, one into the others way' '
	test_setup_8a &&
	(
		cd 8a &&

		git checkout A^0 &&

		git -c merge.directoryRenames=true merge -s recursive B^0 &&

		git ls-files -s >out &&
		test_line_count = 6 out &&
		git ls-files -u >out &&
		test_line_count = 0 out &&
		git ls-files -o >out &&
		test_line_count = 1 out &&

		git rev-parse >actual \
			HEAD:y/a HEAD:y/b HEAD:y/e HEAD:y/f HEAD:z/c HEAD:z/d &&
		git rev-parse >expect \
			O:x/a    O:x/b    A:x/e    A:y/f    O:y/c    O:y/d &&
		test_cmp expect actual
	)
'

# Testcase 8b, Dual-directory rename, one into the others' way, with conflicting filenames
#   Commit O. x/{a_1,b_1},     y/{a_2,b_2}
#   Commit A. x/{a_1,b_1,e_1}, y/{a_2,b_2,e_2}
#   Commit B. y/{a_1,b_1},     z/{a_2,b_2}
#
#   w/o dir-rename detection: y/{a_1,b_1,e_2}, z/{a_2,b_2}, x/e_1
#   Currently expected:       <same>
#   Scary:                    y/{a_1,b_1},     z/{a_2,b_2}, CONFLICT(add/add, e_1 vs. e_2)
#   Optimal:                  y/{a_1,b_1,e_1}, z/{a_2,b_2,e_2}
#
#   Note: Very similar to 8a, except instead of 'e' and 'f' in directories x and
#   y, both are named 'e'.  Without directory rename detection, neither file
#   moves directories.  Implement directory rename detection suboptimally, and
#   you get an add/add conflict, but both files were added in commit A, so this
#   is an add/add conflict where one side of history added both files --
#   something we can't represent in the index.  Obviously, we'd prefer the last
#   resolution, but our previous rules are too coarse to allow it.  Using both
#   the rules from section 4 and section 5 save us from the Scary resolution,
#   making us fall back to pre-directory-rename-detection behavior for both
#   e_1 and e_2.

# Build O plus branch A (adds x/e and y/e) and branch B (y/ -> z/, x/ -> y/).
test_setup_8b () {
	git init 8b &&
	(
		cd 8b &&

		mkdir x &&
		mkdir y &&
		echo a1 >x/a &&
		echo b1 >x/b &&
		echo a2 >y/a &&
		echo b2 >y/b &&
		git add x y &&
		test_tick &&
		git commit -m "O" &&

		git branch O &&
		git branch A &&
		git branch B &&

		git checkout A &&
		echo e1 >x/e &&
		echo e2 >y/e &&
		git add x/e y/e &&
		test_tick &&
		git commit -m "A" &&

		git checkout B &&
		git mv y z &&
		git mv x y &&
		test_tick &&
		git commit -m "B"
	)
}

test_expect_success '8b: Dual-directory rename, one into the others way, with conflicting filenames' '
	test_setup_8b &&
	(
		cd 8b &&

		git checkout A^0 &&

		git -c merge.directoryRenames=true merge -s recursive B^0 &&

		git ls-files -s >out &&
		test_line_count = 6 out &&
		git ls-files -u >out &&
		test_line_count = 0 out &&
		git ls-files -o >out &&
		test_line_count = 1 out &&

		# Neither e file moves; implicit renames were suppressed here
		git rev-parse >actual \
			HEAD:y/a HEAD:y/b HEAD:z/a HEAD:z/b HEAD:x/e HEAD:y/e &&
		git rev-parse >expect \
			O:x/a    O:x/b    O:y/a    O:y/b    A:x/e    A:y/e &&
		test_cmp expect actual
	)
'

# Testcase 8c, modify/delete or rename+modify/delete?
#   (Related to testcases 5b, 8d, and 9h)
#   Commit O: z/{b,c,d}
#   Commit A: y/{b,c}
#   Commit B: z/{b,c,d_modified,e}
#   Expected: y/{b,c,e}, CONFLICT(modify/delete: on z/d)
#
#   Note: It could easily be argued that the correct resolution here is
#             y/{b,c,e}, CONFLICT(rename/delete: z/d -> y/d vs deleted)
#         and that the modified version of d should be present in y/ after
#         the merge, just marked as conflicted.  Indeed, I previously did
#         argue that.  But applying directory renames to the side of
#         history where a file is merely modified results in spurious
#         rename/rename(1to2) conflicts -- see testcase 9h.  See also
#         notes in 8d.
# Build O plus branch A (delete z/d, rename z/ -> y/) and branch B
# (modify z/d, make it executable, add z/e).
test_setup_8c () {
	git init 8c &&
	(
		cd 8c &&

		mkdir z &&
		echo b >z/b &&
		echo c >z/c &&
		test_seq 1 10 >z/d &&
		git add z &&
		test_tick &&
		git commit -m "O" &&

		git branch O &&
		git branch A &&
		git branch B &&

		git checkout A &&
		git rm z/d &&
		git mv z y &&
		test_tick &&
		git commit -m "A" &&

		git checkout B &&
		echo 11 >z/d &&
		test_chmod +x z/d &&
		echo e >z/e &&
		git add z/d z/e &&
		test_tick &&
		git commit -m "B"
	)
}

test_expect_success '8c: modify/delete or rename+modify/delete' '
	test_setup_8c &&
	(
		cd 8c &&

		git checkout A^0 &&

		test_must_fail git -c merge.directoryRenames=true merge -s recursive B^0 >out &&
		test_grep "CONFLICT (modify/delete).* z/d" out &&

		git ls-files -s >out &&
		test_line_count = 5 out &&
		git ls-files -u >out &&
		test_line_count = 2 out &&
		git ls-files -o >out &&
		test_line_count = 1 out &&

		git rev-parse >actual \
			:0:y/b :0:y/c :0:y/e :1:z/d :3:z/d &&
		git rev-parse >expect \
			O:z/b  O:z/c  B:z/e  O:z/d  B:z/d &&
		test_cmp expect actual &&

		# stage 2 (ours) must be absent: the d file was deleted on our side
		test_must_fail git rev-parse :2:z/d &&
		git ls-files -s z/d | grep ^100755 &&
		test_path_is_file z/d &&
		test_path_is_missing y/d
	)
'

# Testcase 8d, rename/delete...or not?
#   (Related to testcase 5b; these may appear slightly inconsistent to users;
#    Also related to testcases 7d and 7e)
#   Commit O: z/{b,c,d}
#   Commit A: y/{b,c}
#   Commit B: z/{b,c,d,e}
#   Expected: y/{b,c,e}
#
#   Note: It would also be somewhat reasonable to resolve this as
#             y/{b,c,e}, CONFLICT(rename/delete: x/d -> y/d or deleted)
#
#   In this case, I'm leaning towards: commit A was the one that deleted z/d
#   and it did the rename of z to y, so the two "conflicts" (rename vs.
#   delete) are both coming from commit A, which is illogical.  Conflicts
#   during merging are supposed to be about opposite sides doing things
#   differently.
# Build O plus branch A (delete z/d, rename z/ -> y/) and branch B (add z/e).
test_setup_8d () {
	git init 8d &&
	(
		cd 8d &&

		mkdir z &&
		echo b >z/b &&
		echo c >z/c &&
		test_seq 1 10 >z/d &&
		git add z &&
		test_tick &&
		git commit -m "O" &&

		git branch O &&
		git branch A &&
		git branch B &&

		git checkout A &&
		git rm z/d &&
		git mv z y &&
		test_tick &&
		git commit -m "A" &&

		git checkout B &&
		echo e >z/e &&
		git add z/e &&
		test_tick &&
		git commit -m "B"
	)
}

test_expect_success '8d: rename/delete...or not?' '
	test_setup_8d &&
	(
		cd 8d &&

		git checkout A^0 &&

		# Merge succeeds cleanly: no rename/delete conflict reported
		git -c merge.directoryRenames=true merge -s recursive B^0 &&

		git ls-files -s >out &&
		test_line_count = 3 out &&

		git rev-parse >actual \
			HEAD:y/b HEAD:y/c HEAD:y/e &&
		git rev-parse >expect \
			O:z/b    O:z/c    B:z/e &&
		test_cmp expect actual
	)
'

# Testcase 8e, Both sides rename, one side adds to original directory
#   Commit O: z/{b,c}
#   Commit A: y/{b,c}
#   Commit B: w/{b,c}, z/d
#
#   Possible Resolutions:
#     if z not considered renamed: z/d, CONFLICT(z/b -> y/b vs. w/b),
#                                       CONFLICT(z/c -> y/c vs. w/c)
#     if z->y rename considered:   y/d, CONFLICT(z/b -> y/b vs. w/b),
#                                       CONFLICT(z/c -> y/c vs. w/c)
#     Optimal:                     ??
#
#   Notes: In commit A, directory z got renamed to y.  In commit B, directory z
#          did NOT get renamed; the directory is still present; instead it is
#          considered to have just renamed a subset of paths in directory z
#          elsewhere.  This is much like testcase 6b2 (where commit B moves all
#          the original paths out of z/ but opted to keep d within z/).
#
#          It was not clear in the past what should be done with this testcase;
#          in fact, I noted that I "just picked one" previously.  However,
#          following the new logic for testcase 6b2, we should take the rename
#          and move z/d to y/d.
#
#          6b1, 6b2, and this case are definitely somewhat fuzzy in terms of
#          whether they are optimal for end users, but (a) the default for
#          directory rename detection is to mark these all as conflicts
#          anyway, (b) it feels like this is less prone to higher order corner
#          case confusion, and (c) the current algorithm requires less global
#          knowledge (i.e. less coupling in the algorithm between renames done
#          on both sides) which thus means users are better able to predict
#          the behavior, and predict it without computing as many details.

# Build O plus branch A (z/ -> y/) and branch B (z/ -> w/, recreated z/
# containing only the new file d).
test_setup_8e () {
	git init 8e &&
	(
		cd 8e &&

		mkdir z &&
		echo b >z/b &&
		echo c >z/c &&
		git add z &&
		test_tick &&
		git commit -m "O" &&

		git branch O &&
		git branch A &&
		git branch B &&

		git checkout A &&
		git mv z y &&
		test_tick &&
		git commit -m "A" &&

		git checkout B &&
		git mv z w &&
		mkdir z &&
		echo d >z/d &&
		git add z/d &&
		test_tick &&
		git commit -m "B"
	)
}

test_expect_success '8e: Both sides rename, one side adds to original directory' '
	test_setup_8e &&
	(
		cd 8e &&

		git checkout A^0 &&

		test_must_fail git -c merge.directoryRenames=true merge -s recursive B^0 >out 2>err &&
		test_grep CONFLICT.*rename/rename.*z/c.*y/c.*w/c out &&
		test_grep CONFLICT.*rename/rename.*z/b.*y/b.*w/b out &&

		git ls-files -s >out &&
		test_line_count = 7 out &&
		git ls-files -u >out &&
		test_line_count = 6 out &&
		git ls-files -o >out &&
		test_line_count = 2 out &&

		git rev-parse >actual \
			:1:z/b :2:y/b :3:w/b :1:z/c :2:y/c :3:w/c :0:y/d &&
		git rev-parse >expect \
			O:z/b  O:z/b  O:z/b  O:z/c  O:z/c  O:z/c  B:z/d &&
		test_cmp expect actual &&

		git hash-object >actual \
			y/b w/b y/c w/c &&
		git rev-parse >expect \
			O:z/b O:z/b O:z/c O:z/c &&
		test_cmp expect actual &&

		test_path_is_missing z/b &&
		test_path_is_missing z/c
	)
'

###########################################################################
# SECTION 9: Other testcases
#
# This section consists of miscellaneous testcases I thought of during
# the implementation which round out the testing.
###########################################################################

# Testcase 9a, Inner renamed directory within outer renamed directory
#   (Related to testcase 1f)
#   Commit O: z/{b,c,d/{e,f,g}}
#   Commit A: y/{b,c}, x/w/{e,f,g}
#   Commit B: z/{b,c,d/{e,f,g,h},i}
#   Expected: y/{b,c,i}, x/w/{e,f,g,h}
#   NOTE: The only reason this one is interesting is because when a directory
#         is split into multiple other directories, we determine by the weight
#         of which one had the most paths going to it.  A naive implementation
#         of that could take the new file in commit B at z/i to x/w/i or x/i.

# Build O plus branch A (z/d -> x/w, then z/ -> y/) and branch B (adds
# z/d/h and z/i).
test_setup_9a () {
	git init 9a &&
	(
		cd 9a &&

		mkdir -p z/d &&
		echo b >z/b &&
		echo c >z/c &&
		echo e >z/d/e &&
		echo f >z/d/f &&
		echo g >z/d/g &&
		git add z &&
		test_tick &&
		git commit -m "O" &&

		git branch O &&
		git branch A &&
		git branch B &&

		git checkout A &&
		mkdir x &&
		git mv z/d x/w &&
		git mv z y &&
		test_tick &&
		git commit -m "A" &&

		git checkout B &&
		echo h >z/d/h &&
		echo i >z/i &&
		git add z &&
		test_tick &&
		git commit -m "B"
	)
}

test_expect_success '9a: Inner renamed directory within outer renamed directory' '
	test_setup_9a &&
	(
		cd 9a &&

		git checkout A^0 &&

		git -c merge.directoryRenames=true merge -s recursive B^0 &&

		git ls-files -s >out &&
		test_line_count = 7 out &&
		git ls-files -u >out &&
		test_line_count = 0 out &&
		git ls-files -o >out &&
		test_line_count = 1 out &&

		git rev-parse >actual \
			HEAD:y/b HEAD:y/c HEAD:y/i &&
		git rev-parse >expect \
			O:z/b    O:z/c    B:z/i &&
		test_cmp expect actual &&

		git rev-parse >actual \
			HEAD:x/w/e HEAD:x/w/f HEAD:x/w/g HEAD:x/w/h &&
		git rev-parse >expect \
			O:z/d/e    O:z/d/f    O:z/d/g    B:z/d/h &&
		test_cmp expect actual
	)
'

# Testcase 9b, Transitive rename with content merge
#   (Related to testcase 1c)
#   Commit O: z/{b,c},   x/d_1
#   Commit A: y/{b,c},   x/d_2
#   Commit B: z/{b,c,d_3}
#   Expected: y/{b,c,d_merged}

# Build O plus branch A (z/ -> y/, modify x/d) and branch B (modify x/d,
# move it to z/d) so the merge must content-merge the transitively
# renamed file.
test_setup_9b () {
	git init 9b &&
	(
		cd 9b &&

		mkdir z &&
		echo b >z/b &&
		echo c >z/c &&
		mkdir x &&
		test_seq 1 10 >x/d &&
		git add z x &&
		test_tick &&
		git commit -m "O" &&

		git branch O &&
		git branch A &&
		git branch B &&

		git checkout A &&
		git mv z y &&
		test_seq 1 11 >x/d &&
		git add x/d &&
		test_tick &&
		git commit -m "A" &&

		git checkout B &&
		test_seq 0 10 >x/d &&
		git mv x/d z/d &&
		git add z/d &&
		test_tick &&
		git commit -m "B"
	)
}

test_expect_success '9b: Transitive rename with content merge' '
	test_setup_9b &&
	(
		cd 9b &&

		git checkout A^0 &&

		git -c merge.directoryRenames=true merge -s recursive B^0 &&

		git ls-files -s >out &&
		test_line_count = 3 out &&

		# Content merge of both edits: 0..10 merged with 1..11 gives 0..11
		test_seq 0 11 >expected &&
		test_cmp expected y/d &&
		git add expected &&
		git rev-parse >actual \
			HEAD:y/b HEAD:y/c HEAD:y/d &&
		git rev-parse >expect \
			O:z/b    O:z/c    :0:expected &&
		test_cmp expect actual &&
		test_must_fail git rev-parse HEAD:x/d &&
		test_must_fail git rev-parse HEAD:z/d &&
		test_path_is_missing z/d &&

		# y/d must be a true merge, not any of the three input versions
		test $(git rev-parse HEAD:y/d) != $(git rev-parse O:x/d) &&
		test $(git rev-parse HEAD:y/d) != $(git rev-parse A:x/d) &&
		test $(git rev-parse HEAD:y/d) != $(git rev-parse B:z/d)
	)
'

# Testcase 9c, Doubly transitive rename?
#   (Related to testcase 1c, 7e, and 9d)
#   Commit O: z/{b,c},     x/{d,e},    w/f
#   Commit A: y/{b,c},     x/{d,e,f,g}
#   Commit B: z/{b,c,d,e},             w/f
#   Expected: y/{b,c,d,e}, x/{f,g}
#
#   NOTE: x/f and x/g may be slightly confusing here.  The rename from w/f to
#         x/f is clear.  Let's look beyond that.  Here's the logic:
#            Commit B renamed x/ -> z/
#            Commit A renamed z/ -> y/
#         So, we could possibly further rename x/f to z/f to y/f, a doubly
#         transient rename.  However, where does it end?  We can chain these
#         indefinitely (see testcase 9d).  What if there is a D/F conflict
#         at z/f/ or y/f/?  Or just another file conflict at one of those
#         paths?  In the case of an N-long chain of transient renamings,
#         where do we "abort" the rename at?  Can the user make sense of
#         the resulting conflict and resolve it?
#
#         To avoid this confusion I use the simple rule that if the other side
#         of history did a directory rename to a path that your side renamed
#         away, then ignore that particular rename from the other side of
#         history for any implicit directory renames.

# Build O plus branch A (z/ -> y/, w/f -> x/, adds x/g) and branch B
# (x/d and x/e -> z/).
test_setup_9c () {
	git init 9c &&
	(
		cd 9c &&

		mkdir z &&
		echo b >z/b &&
		echo c >z/c &&
		mkdir x &&
		echo d >x/d &&
		echo e >x/e &&
		mkdir w &&
		echo f >w/f &&
		git add z x w &&
		test_tick &&
		git commit -m "O" &&

		git branch O &&
		git branch A &&
		git branch B &&

		git checkout A &&
		git mv z y &&
		git mv w/f x/ &&
		echo g >x/g &&
		git add x/g &&
		test_tick &&
		git commit -m "A" &&

		git checkout B &&
		git mv x/d z/d &&
		git mv x/e z/e &&
		test_tick &&
		git commit -m "B"
	)
}

test_expect_success '9c: Doubly transitive rename?' '
	test_setup_9c &&
	(
		cd 9c &&

		git checkout A^0 &&

		# Merge succeeds, but warns that the doubly transitive rename
		# of x/f was deliberately not applied
		git -c merge.directoryRenames=true merge -s recursive B^0 >out &&
		test_grep "WARNING: Avoiding applying x -> z rename to x/f" out &&

		git ls-files -s >out &&
		test_line_count = 6 out &&
		git ls-files -o >out &&
		test_line_count = 1 out &&

		git rev-parse >actual \
			HEAD:y/b HEAD:y/c HEAD:y/d HEAD:y/e HEAD:x/f HEAD:x/g &&
		git rev-parse >expect \
			O:z/b    O:z/c    O:x/d    O:x/e    O:w/f    A:x/g &&
		test_cmp expect actual
	)
'

# Testcase 9d, N-fold transitive rename?
#   (Related to testcase 9c...and 1c and 7e)
#   Commit O: z/a, y/b, x/c, w/d, v/e, u/f
#   Commit A:  y/{a,b},  w/{c,d},  u/{e,f}
#   Commit B: z/{a,t}, x/{b,c}, v/{d,e}, u/f
#   Expected: <see NOTE first>
#
#   NOTE: z/ -> y/ (in commit A)
#         y/ -> x/ (in commit B)
#         x/ -> w/ (in commit A)
#         w/ -> v/ (in commit B)
#         v/ -> u/ (in commit A)
#         So, if we add a file to z, say z/t, where should it end up?  In u?
#         What if there's another file or directory named 't' in one of the
#         intervening directories and/or in u itself?  Also, shouldn't the
#         same logic that places 't' in u/ also move ALL other files to u/?
#         What if there are file or directory conflicts in any of them?  If
#         we attempted to do N-way (N-fold? N-ary? N-uple?)
transitive renames # like this, would the user have any hope of understanding any # conflicts or how their working tree ended up? I think not, so I'm # ruling out N-ary transitive renames for N>1. # # Therefore our expected result is: # z/t, y/a, x/b, w/c, u/d, u/e, u/f # The reason that v/d DOES get transitively renamed to u/d is that u/ isn't # renamed somewhere. A slightly sub-optimal result, but it uses fairly # simple rules that are consistent with what we need for all the other # testcases and simplifies things for the user. test_setup_9d () { git init 9d && ( cd 9d && mkdir z y x w v u && echo a >z/a && echo b >y/b && echo c >x/c && echo d >w/d && echo e >v/e && echo f >u/f && git add z y x w v u && test_tick && git commit -m "O" && git branch O && git branch A && git branch B && git checkout A && git mv z/a y/ && git mv x/c w/ && git mv v/e u/ && test_tick && git commit -m "A" && git checkout B && echo t >z/t && git mv y/b x/ && git mv w/d v/ && git add z/t && test_tick && git commit -m "B" ) } test_expect_success '9d: N-way transitive rename?' 
' test_setup_9d && ( cd 9d && git checkout A^0 && git -c merge.directoryRenames=true merge -s recursive B^0 >out && test_grep "WARNING: Avoiding applying z -> y rename to z/t" out && test_grep "WARNING: Avoiding applying y -> x rename to y/a" out && test_grep "WARNING: Avoiding applying x -> w rename to x/b" out && test_grep "WARNING: Avoiding applying w -> v rename to w/c" out && git ls-files -s >out && test_line_count = 7 out && git ls-files -o >out && test_line_count = 1 out && git rev-parse >actual \ HEAD:z/t \ HEAD:y/a HEAD:x/b HEAD:w/c \ HEAD:u/d HEAD:u/e HEAD:u/f && git rev-parse >expect \ B:z/t \ O:z/a O:y/b O:x/c \ O:w/d O:v/e A:u/f && test_cmp expect actual ) ' # Testcase 9e, N-to-1 whammo # (Related to testcase 9c...and 1c and 7e) # Commit O: dir1/{a,b}, dir2/{d,e}, dir3/{g,h}, dirN/{j,k} # Commit A: dir1/{a,b,c,yo}, dir2/{d,e,f,yo}, dir3/{g,h,i,yo}, dirN/{j,k,l,yo} # Commit B: combined/{a,b,d,e,g,h,j,k} # Expected: combined/{a,b,c,d,e,f,g,h,i,j,k,l}, CONFLICT(Nto1) warnings, # dir1/yo, dir2/yo, dir3/yo, dirN/yo test_setup_9e () { git init 9e && ( cd 9e && mkdir dir1 dir2 dir3 dirN && echo a >dir1/a && echo b >dir1/b && echo d >dir2/d && echo e >dir2/e && echo g >dir3/g && echo h >dir3/h && echo j >dirN/j && echo k >dirN/k && git add dir* && test_tick && git commit -m "O" && git branch O && git branch A && git branch B && git checkout A && echo c >dir1/c && echo yo >dir1/yo && echo f >dir2/f && echo yo >dir2/yo && echo i >dir3/i && echo yo >dir3/yo && echo l >dirN/l && echo yo >dirN/yo && git add dir* && test_tick && git commit -m "A" && git checkout B && git mv dir1 combined && git mv dir2/* combined/ && git mv dir3/* combined/ && git mv dirN/* combined/ && test_tick && git commit -m "B" ) } test_expect_success '9e: N-to-1 whammo' ' test_setup_9e && ( cd 9e && git checkout A^0 && test_must_fail git -c merge.directoryRenames=true merge -s recursive B^0 >out && grep "CONFLICT (implicit dir rename): Cannot map more than one path to combined/yo" out 
>error_line && grep -q dir1/yo error_line && grep -q dir2/yo error_line && grep -q dir3/yo error_line && grep -q dirN/yo error_line && git ls-files -s >out && test_line_count = 16 out && git ls-files -u >out && test_line_count = 0 out && git ls-files -o >out && test_line_count = 2 out && git rev-parse >actual \ :0:combined/a :0:combined/b :0:combined/c \ :0:combined/d :0:combined/e :0:combined/f \ :0:combined/g :0:combined/h :0:combined/i \ :0:combined/j :0:combined/k :0:combined/l && git rev-parse >expect \ O:dir1/a O:dir1/b A:dir1/c \ O:dir2/d O:dir2/e A:dir2/f \ O:dir3/g O:dir3/h A:dir3/i \ O:dirN/j O:dirN/k A:dirN/l && test_cmp expect actual && git rev-parse >actual \ :0:dir1/yo :0:dir2/yo :0:dir3/yo :0:dirN/yo && git rev-parse >expect \ A:dir1/yo A:dir2/yo A:dir3/yo A:dirN/yo && test_cmp expect actual ) ' # Testcase 9f, Renamed directory that only contained immediate subdirs # (Related to testcases 1e & 9g) # Commit O: goal/{a,b}/$more_files # Commit A: priority/{a,b}/$more_files # Commit B: goal/{a,b}/$more_files, goal/c # Expected: priority/{a,b}/$more_files, priority/c test_setup_9f () { git init 9f && ( cd 9f && mkdir -p goal/a && mkdir -p goal/b && echo foo >goal/a/foo && echo bar >goal/b/bar && echo baz >goal/b/baz && git add goal && test_tick && git commit -m "O" && git branch O && git branch A && git branch B && git checkout A && git mv goal/ priority && test_tick && git commit -m "A" && git checkout B && echo c >goal/c && git add goal/c && test_tick && git commit -m "B" ) } test_expect_success '9f: Renamed directory that only contained immediate subdirs' ' test_setup_9f && ( cd 9f && git checkout A^0 && git -c merge.directoryRenames=true merge -s recursive B^0 && git ls-files -s >out && test_line_count = 4 out && git rev-parse >actual \ HEAD:priority/a/foo \ HEAD:priority/b/bar \ HEAD:priority/b/baz \ HEAD:priority/c && git rev-parse >expect \ O:goal/a/foo \ O:goal/b/bar \ O:goal/b/baz \ B:goal/c && test_cmp expect actual && test_must_fail git 
rev-parse HEAD:goal/c ) ' # Testcase 9g, Renamed directory that only contained immediate subdirs, immediate subdirs renamed # (Related to testcases 1e & 9f) # Commit O: goal/{a,b}/$more_files # Commit A: priority/{alpha,bravo}/$more_files # Commit B: goal/{a,b}/$more_files, goal/c # Expected: priority/{alpha,bravo}/$more_files, priority/c # We currently fail this test because the directory renames we detect are # goal/a/ -> priority/alpha/ # goal/b/ -> priority/bravo/ # We do not detect # goal/ -> priority/ # because of no files found within goal/, and the fact that "a" != "alpha" # and "b" != "bravo". But I'm not sure it's really a failure given that # viewpoint... test_setup_9g () { git init 9g && ( cd 9g && mkdir -p goal/a && mkdir -p goal/b && echo foo >goal/a/foo && echo bar >goal/b/bar && echo baz >goal/b/baz && git add goal && test_tick && git commit -m "O" && git branch O && git branch A && git branch B && git checkout A && mkdir priority && git mv goal/a/ priority/alpha && git mv goal/b/ priority/beta && rmdir goal/ && test_tick && git commit -m "A" && git checkout B && echo c >goal/c && git add goal/c && test_tick && git commit -m "B" ) } test_expect_failure '9g: Renamed directory that only contained immediate subdirs, immediate subdirs renamed' ' test_setup_9g && ( cd 9g && git checkout A^0 && git -c merge.directoryRenames=true merge -s recursive B^0 && git ls-files -s >out && test_line_count = 4 out && git rev-parse >actual \ HEAD:priority/alpha/foo \ HEAD:priority/beta/bar \ HEAD:priority/beta/baz \ HEAD:priority/c && git rev-parse >expect \ O:goal/a/foo \ O:goal/b/bar \ O:goal/b/baz \ B:goal/c && test_cmp expect actual && test_must_fail git rev-parse HEAD:goal/c ) ' # Testcase 9h, Avoid implicit rename if involved as source on other side # (Extremely closely related to testcase 3a) # Commit O: z/{b,c,d_1} # Commit A: z/{b,c,d_2} # Commit B: y/{b,c}, x/d_1 # Expected: y/{b,c}, x/d_2 # NOTE: If we applied the z/ -> y/ rename to z/d, then we'd end up 
with # a rename/rename(1to2) conflict (z/d -> y/d vs. x/d) test_setup_9h () { git init 9h && ( cd 9h && mkdir z && echo b >z/b && echo c >z/c && printf "1\n2\n3\n4\n5\n6\n7\n8\nd\n" >z/d && git add z && test_tick && git commit -m "O" && git branch O && git branch A && git branch B && git checkout A && test_tick && echo more >>z/d && git add z/d && git commit -m "A" && git checkout B && mkdir y && mkdir x && git mv z/b y/ && git mv z/c y/ && git mv z/d x/ && rmdir z && test_tick && git commit -m "B" ) } test_expect_success '9h: Avoid dir rename on merely modified path' ' test_setup_9h && ( cd 9h && git checkout A^0 && git -c merge.directoryRenames=true merge -s recursive B^0 && git ls-files -s >out && test_line_count = 3 out && git rev-parse >actual \ HEAD:y/b HEAD:y/c HEAD:x/d && git rev-parse >expect \ O:z/b O:z/c A:z/d && test_cmp expect actual ) ' ########################################################################### # Rules suggested by section 9: # # If the other side of history did a directory rename to a path that your # side renamed away, then ignore that particular rename from the other # side of history for any implicit directory renames. ########################################################################### ########################################################################### # SECTION 10: Handling untracked files # # unpack_trees(), upon which the recursive merge algorithm is based, aborts # the operation if untracked or dirty files would be deleted or overwritten # by the merge. Unfortunately, unpack_trees() does not understand renames, # and if it doesn't abort, then it muddies up the working directory before # we even get to the point of detecting renames, so we need some special # handling, at least in the case of directory renames. 
########################################################################### # Testcase 10a, Overwrite untracked: normal rename/delete # Commit O: z/{b,c_1} # Commit A: z/b + untracked z/c + untracked z/d # Commit B: z/{b,d_1} # Expected: Aborted Merge + # ERROR_MSG(untracked working tree files would be overwritten by merge) test_setup_10a () { git init 10a && ( cd 10a && mkdir z && echo b >z/b && echo c >z/c && git add z && test_tick && git commit -m "O" && git branch O && git branch A && git branch B && git checkout A && git rm z/c && test_tick && git commit -m "A" && git checkout B && git mv z/c z/d && test_tick && git commit -m "B" ) } test_expect_success '10a: Overwrite untracked with normal rename/delete' ' test_setup_10a && ( cd 10a && git checkout A^0 && echo very >z/c && echo important >z/d && test_must_fail git -c merge.directoryRenames=true merge -s recursive B^0 >out 2>err && test_path_is_missing .git/MERGE_HEAD && test_grep "The following untracked working tree files would be overwritten by merge" err && git ls-files -s >out && test_line_count = 1 out && git ls-files -o >out && test_line_count = 4 out && echo very >expect && test_cmp expect z/c && echo important >expect && test_cmp expect z/d && git rev-parse HEAD:z/b >actual && git rev-parse O:z/b >expect && test_cmp expect actual ) ' # Testcase 10b, Overwrite untracked: dir rename + delete # Commit O: z/{b,c_1} # Commit A: y/b + untracked y/{c,d,e} # Commit B: z/{b,d_1,e} # Expected: Failed Merge; y/b + untracked y/c + untracked y/d on disk + # z/c_1 -> z/d_1 rename recorded at stage 3 for y/d + # ERROR_MSG(refusing to lose untracked file at 'y/d') test_setup_10b () { git init 10b && ( cd 10b && mkdir z && echo b >z/b && echo c >z/c && git add z && test_tick && git commit -m "O" && git branch O && git branch A && git branch B && git checkout A && git rm z/c && git mv z/ y/ && test_tick && git commit -m "A" && git checkout B && git mv z/c z/d && echo e >z/e && git add z/e && test_tick && git commit -m 
"B" ) } test_expect_success '10b: Overwrite untracked with dir rename + delete' ' test_setup_10b && ( cd 10b && git checkout A^0 && echo very >y/c && echo important >y/d && echo contents >y/e && test_must_fail git -c merge.directoryRenames=true merge -s recursive B^0 >out 2>err && test_path_is_missing .git/MERGE_HEAD && test_grep "error: The following untracked working tree files would be overwritten by merge" err && git ls-files -s >out && test_line_count = 1 out && git ls-files -u >out && test_line_count = 0 out && git ls-files -o >out && test_line_count = 5 out && echo very >expect && test_cmp expect y/c && echo important >expect && test_cmp expect y/d && echo contents >expect && test_cmp expect y/e ) ' # Testcase 10c, Overwrite untracked: dir rename/rename(1to2) # Commit O: z/{a,b}, x/{c,d} # Commit A: y/{a,b}, w/c, x/d + different untracked y/c # Commit B: z/{a,b,c}, x/d # Expected: Failed Merge; y/{a,b} + x/d + untracked y/c + # CONFLICT(rename/rename) x/c -> w/c vs y/c + # y/c~B^0 + # ERROR_MSG(Refusing to lose untracked file at y/c) test_setup_10c () { git init 10c_$1 && ( cd 10c_$1 && mkdir z x && echo a >z/a && echo b >z/b && echo c >x/c && echo d >x/d && git add z x && test_tick && git commit -m "O" && git branch O && git branch A && git branch B && git checkout A && mkdir w && git mv x/c w/c && git mv z/ y/ && test_tick && git commit -m "A" && git checkout B && git mv x/c z/ && test_tick && git commit -m "B" ) } test_expect_success '10c1: Overwrite untracked with dir rename/rename(1to2)' ' test_setup_10c 1 && ( cd 10c_1 && git checkout A^0 && echo important >y/c && test_must_fail git -c merge.directoryRenames=true merge -s recursive B^0 >out 2>err && test_path_is_missing .git/MERGE_HEAD && test_grep "error: The following untracked working tree files would be overwritten by merge" err && git ls-files -s >out && test_line_count = 4 out && git ls-files -u >out && test_line_count = 0 out && git ls-files -o >out && test_line_count = 3 out && echo important 
>expect && test_cmp expect y/c ) ' test_expect_success '10c2: Overwrite untracked with dir rename/rename(1to2), other direction' ' test_setup_10c 2 && ( cd 10c_2 && git reset --hard && git clean -fdqx && git checkout B^0 && mkdir y && echo important >y/c && test_must_fail git -c merge.directoryRenames=true merge -s recursive A^0 >out 2>err && test_path_is_missing .git/MERGE_HEAD && test_grep "error: The following untracked working tree files would be overwritten by merge" err && git ls-files -s >out && test_line_count = 4 out && git ls-files -u >out && test_line_count = 0 out && git ls-files -o >out && test_line_count = 3 out && echo important >expect && test_cmp expect y/c ) ' # Testcase 10d, Delete untracked w/ dir rename/rename(2to1) # Commit O: z/{a,b,c_1}, x/{d,e,f_2} # Commit A: y/{a,b}, x/{d,e,f_2,wham_1} + untracked y/wham # Commit B: z/{a,b,c_1,wham_2}, y/{d,e} # Expected: Failed Merge; y/{a,b,d,e} + untracked y/{wham,wham~merged}+ # CONFLICT(rename/rename) z/c_1 vs x/f_2 -> y/wham # ERROR_MSG(Refusing to lose untracked file at y/wham) test_setup_10d () { git init 10d && ( cd 10d && mkdir z x && echo a >z/a && echo b >z/b && echo c >z/c && echo d >x/d && echo e >x/e && echo f >x/f && git add z x && test_tick && git commit -m "O" && git branch O && git branch A && git branch B && git checkout A && git mv z/c x/wham && git mv z/ y/ && test_tick && git commit -m "A" && git checkout B && git mv x/f z/wham && git mv x/ y/ && test_tick && git commit -m "B" ) } test_expect_success '10d: Delete untracked with dir rename/rename(2to1)' ' test_setup_10d && ( cd 10d && git checkout A^0 && echo important >y/wham && test_must_fail git -c merge.directoryRenames=true merge -s recursive B^0 >out 2>err && test_path_is_missing .git/MERGE_HEAD && test_grep "error: The following untracked working tree files would be overwritten by merge" err && git ls-files -s >out && test_line_count = 6 out && git ls-files -u >out && test_line_count = 0 out && git ls-files -o >out && 
test_line_count = 3 out && echo important >expect && test_cmp expect y/wham ) ' # Testcase 10e, Does git complain about untracked file that's not in the way? # Commit O: z/{a,b} # Commit A: y/{a,b} + untracked z/c # Commit B: z/{a,b,c} # Expected: y/{a,b,c} + untracked z/c test_setup_10e () { git init 10e && ( cd 10e && mkdir z && echo a >z/a && echo b >z/b && git add z && test_tick && git commit -m "O" && git branch O && git branch A && git branch B && git checkout A && git mv z/ y/ && test_tick && git commit -m "A" && git checkout B && echo c >z/c && git add z/c && test_tick && git commit -m "B" ) } test_expect_success '10e: Does git complain about untracked file that is not really in the way?' ' test_setup_10e && ( cd 10e && git checkout A^0 && mkdir z && echo random >z/c && git -c merge.directoryRenames=true merge -s recursive B^0 >out 2>err && test_grep ! "following untracked working tree files would be overwritten by merge" err && git ls-files -s >out && test_line_count = 3 out && git ls-files -u >out && test_line_count = 0 out && git ls-files -o >out && test_line_count = 3 out && git rev-parse >actual \ :0:y/a :0:y/b :0:y/c && git rev-parse >expect \ O:z/a O:z/b B:z/c && test_cmp expect actual && echo random >expect && test_cmp expect z/c ) ' ########################################################################### # SECTION 11: Handling dirty (not up-to-date) files # # unpack_trees(), upon which the recursive merge algorithm is based, aborts # the operation if untracked or dirty files would be deleted or overwritten # by the merge. Unfortunately, unpack_trees() does not understand renames, # and if it doesn't abort, then it muddies up the working directory before # we even get to the point of detecting renames, so we need some special # handling. This was true even of normal renames, but there are additional # codepaths that need special handling with directory renames. 
Add # testcases for both renamed-by-directory-rename-detection and standard # rename cases. ########################################################################### # Testcase 11a, Avoid losing dirty contents with simple rename # Commit O: z/{a,b_v1}, # Commit A: z/{a,c_v1}, and z/c_v1 has uncommitted mods # Commit B: z/{a,b_v2} # Expected: ERROR_MSG(Refusing to lose dirty file at z/c) + # z/a, staged version of z/c has sha1sum matching B:z/b_v2, # z/c~HEAD with contents of B:z/b_v2, # z/c with uncommitted mods on top of A:z/c_v1 test_setup_11a () { git init 11a && ( cd 11a && mkdir z && echo a >z/a && test_seq 1 10 >z/b && git add z && test_tick && git commit -m "O" && git branch O && git branch A && git branch B && git checkout A && git mv z/b z/c && test_tick && git commit -m "A" && git checkout B && echo 11 >>z/b && git add z/b && test_tick && git commit -m "B" ) } test_expect_success '11a: Avoid losing dirty contents with simple rename' ' test_setup_11a && ( cd 11a && git checkout A^0 && echo stuff >>z/c && test_must_fail git -c merge.directoryRenames=true merge -s recursive B^0 >out 2>err && test_path_is_missing .git/MERGE_HEAD && test_grep "error: Your local changes to the following files would be overwritten by merge" err && test_seq 1 10 >expected && echo stuff >>expected && test_cmp expected z/c ) ' # Testcase 11b, Avoid losing dirty file involved in directory rename # Commit O: z/a, x/{b,c_v1} # Commit A: z/{a,c_v1}, x/b, and z/c_v1 has uncommitted mods # Commit B: y/a, x/{b,c_v2} # Expected: y/{a,c_v2}, x/b, z/c_v1 with uncommitted mods untracked, # ERROR_MSG(Refusing to lose dirty file at z/c) test_setup_11b () { git init 11b && ( cd 11b && mkdir z x && echo a >z/a && echo b >x/b && test_seq 1 10 >x/c && git add z x && test_tick && git commit -m "O" && git branch O && git branch A && git branch B && git checkout A && git mv x/c z/c && test_tick && git commit -m "A" && git checkout B && git mv z y && echo 11 >>x/c && git add x/c && test_tick && git 
commit -m "B" ) } test_expect_success '11b: Avoid losing dirty file involved in directory rename' ' test_setup_11b && ( cd 11b && git checkout A^0 && echo stuff >>z/c && test_must_fail git -c merge.directoryRenames=true merge -s recursive B^0 >out 2>err && test_path_is_missing .git/MERGE_HEAD && test_grep "error: Your local changes to the following files would be overwritten by merge" err && grep -q stuff z/c && test_seq 1 10 >expected && echo stuff >>expected && test_cmp expected z/c ) ' # Testcase 11c, Avoid losing not-up-to-date with rename + D/F conflict # Commit O: y/a, x/{b,c_v1} # Commit A: y/{a,c_v1}, x/b, and y/c_v1 has uncommitted mods # Commit B: y/{a,c/d}, x/{b,c_v2} # Expected: Abort_msg("following files would be overwritten by merge") + # y/c left untouched (still has uncommitted mods) test_setup_11c () { git init 11c && ( cd 11c && mkdir y x && echo a >y/a && echo b >x/b && test_seq 1 10 >x/c && git add y x && test_tick && git commit -m "O" && git branch O && git branch A && git branch B && git checkout A && git mv x/c y/c && test_tick && git commit -m "A" && git checkout B && mkdir y/c && echo d >y/c/d && echo 11 >>x/c && git add x/c y/c/d && test_tick && git commit -m "B" ) } test_expect_success '11c: Avoid losing not-uptodate with rename + D/F conflict' ' test_setup_11c && ( cd 11c && git checkout A^0 && echo stuff >>y/c && test_must_fail git -c merge.directoryRenames=true merge -s recursive B^0 >out 2>err && test_path_is_missing .git/MERGE_HEAD && test_grep "error: Your local changes to the following files would be overwritten by merge" err && grep -q stuff y/c && test_seq 1 10 >expected && echo stuff >>expected && test_cmp expected y/c && git ls-files -s >out && test_line_count = 3 out && git ls-files -u >out && test_line_count = 0 out && git ls-files -m >out && test_line_count = 1 out && git ls-files -o >out && test_line_count = 3 out ) ' # Testcase 11d, Avoid losing not-up-to-date with rename + D/F conflict # Commit O: z/a, x/{b,c_v1} # Commit 
A: z/{a,c_v1}, x/b, and z/c_v1 has uncommitted mods # Commit B: y/{a,c/d}, x/{b,c_v2} # Expected: D/F: y/c_v2 vs y/c/d) + # Warning_Msg("Refusing to lose dirty file at z/c) + # y/{a,c~HEAD,c/d}, x/b, now-untracked z/c_v1 with uncommitted mods test_setup_11d () { git init 11d && ( cd 11d && mkdir z x && echo a >z/a && echo b >x/b && test_seq 1 10 >x/c && git add z x && test_tick && git commit -m "O" && git branch O && git branch A && git branch B && git checkout A && git mv x/c z/c && test_tick && git commit -m "A" && git checkout B && git mv z y && mkdir y/c && echo d >y/c/d && echo 11 >>x/c && git add x/c y/c/d && test_tick && git commit -m "B" ) } test_expect_success '11d: Avoid losing not-uptodate with rename + D/F conflict' ' test_setup_11d && ( cd 11d && git checkout A^0 && echo stuff >>z/c && test_must_fail git -c merge.directoryRenames=true merge -s recursive B^0 >out 2>err && test_path_is_missing .git/MERGE_HEAD && test_grep "error: Your local changes to the following files would be overwritten by merge" err && grep -q stuff z/c && test_seq 1 10 >expected && echo stuff >>expected && test_cmp expected z/c ) ' # Testcase 11e, Avoid deleting not-up-to-date with dir rename/rename(1to2)/add # Commit O: z/{a,b}, x/{c_1,d} # Commit A: y/{a,b,c_2}, x/d, w/c_1, and y/c_2 has uncommitted mods # Commit B: z/{a,b,c_1}, x/d # Expected: Failed Merge; y/{a,b} + x/d + # CONFLICT(rename/rename) x/c_1 -> w/c_1 vs y/c_1 + # ERROR_MSG(Refusing to lose dirty file at y/c) # y/c~B^0 has O:x/c_1 contents # y/c~HEAD has A:y/c_2 contents # y/c has dirty file from before merge test_setup_11e () { git init 11e && ( cd 11e && mkdir z x && echo a >z/a && echo b >z/b && echo c >x/c && echo d >x/d && git add z x && test_tick && git commit -m "O" && git branch O && git branch A && git branch B && git checkout A && git mv z/ y/ && echo different >y/c && mkdir w && git mv x/c w/ && git add y/c && test_tick && git commit -m "A" && git checkout B && git mv x/c z/ && test_tick && git commit -m 
"B" ) } test_expect_success '11e: Avoid deleting not-uptodate with dir rename/rename(1to2)/add' ' test_setup_11e && ( cd 11e && git checkout A^0 && echo mods >>y/c && test_must_fail git -c merge.directoryRenames=true merge -s recursive B^0 >out 2>err && test_path_is_missing .git/MERGE_HEAD && test_grep "error: Your local changes to the following files would be overwritten by merge" err && echo different >expected && echo mods >>expected && test_cmp expected y/c ) ' # Testcase 11f, Avoid deleting not-up-to-date w/ dir rename/rename(2to1) # Commit O: z/{a,b}, x/{c_1,d_2} # Commit A: y/{a,b,wham_1}, x/d_2, except y/wham has uncommitted mods # Commit B: z/{a,b,wham_2}, x/c_1 # Expected: Failed Merge; y/{a,b} + untracked y/{wham~merged} + # y/wham with dirty changes from before merge + # CONFLICT(rename/rename) x/c vs x/d -> y/wham # ERROR_MSG(Refusing to lose dirty file at y/wham) test_setup_11f () { git init 11f && ( cd 11f && mkdir z x && echo a >z/a && echo b >z/b && test_seq 1 10 >x/c && echo d >x/d && git add z x && test_tick && git commit -m "O" && git branch O && git branch A && git branch B && git checkout A && git mv z/ y/ && git mv x/c y/wham && test_tick && git commit -m "A" && git checkout B && git mv x/d z/wham && test_tick && git commit -m "B" ) } test_expect_success '11f: Avoid deleting not-uptodate with dir rename/rename(2to1)' ' test_setup_11f && ( cd 11f && git checkout A^0 && echo important >>y/wham && test_must_fail git -c merge.directoryRenames=true merge -s recursive B^0 >out 2>err && test_path_is_missing .git/MERGE_HEAD && test_grep "error: Your local changes to the following files would be overwritten by merge" err && test_seq 1 10 >expected && echo important >>expected && test_cmp expected y/wham ) ' ########################################################################### # SECTION 12: Everything else # # Tests suggested by others. Tests added after implementation completed # and submitted. Grab bag. 
########################################################################### # Testcase 12a, Moving one directory hierarchy into another # (Related to testcase 9a) # Commit O: node1/{leaf1,leaf2}, node2/{leaf3,leaf4} # Commit A: node1/{leaf1,leaf2,node2/{leaf3,leaf4}} # Commit B: node1/{leaf1,leaf2,leaf5}, node2/{leaf3,leaf4,leaf6} # Expected: node1/{leaf1,leaf2,leaf5,node2/{leaf3,leaf4,leaf6}} test_setup_12a () { git init 12a && ( cd 12a && mkdir -p node1 node2 && echo leaf1 >node1/leaf1 && echo leaf2 >node1/leaf2 && echo leaf3 >node2/leaf3 && echo leaf4 >node2/leaf4 && git add node1 node2 && test_tick && git commit -m "O" && git branch O && git branch A && git branch B && git checkout A && git mv node2/ node1/ && test_tick && git commit -m "A" && git checkout B && echo leaf5 >node1/leaf5 && echo leaf6 >node2/leaf6 && git add node1 node2 && test_tick && git commit -m "B" ) } test_expect_success '12a: Moving one directory hierarchy into another' ' test_setup_12a && ( cd 12a && git checkout A^0 && git -c merge.directoryRenames=true merge -s recursive B^0 && git ls-files -s >out && test_line_count = 6 out && git rev-parse >actual \ HEAD:node1/leaf1 HEAD:node1/leaf2 HEAD:node1/leaf5 \ HEAD:node1/node2/leaf3 \ HEAD:node1/node2/leaf4 \ HEAD:node1/node2/leaf6 && git rev-parse >expect \ O:node1/leaf1 O:node1/leaf2 B:node1/leaf5 \ O:node2/leaf3 \ O:node2/leaf4 \ B:node2/leaf6 && test_cmp expect actual ) ' # Testcase 12b1, Moving two directory hierarchies into each other # (Related to testcases 1c and 12c) # Commit O: node1/{leaf1, leaf2}, node2/{leaf3, leaf4} # Commit A: node1/{leaf1, leaf2, node2/{leaf3, leaf4}} # Commit B: node2/{leaf3, leaf4, node1/{leaf1, leaf2}} # Expected: node1/node2/{leaf3, leaf4} # node2/node1/{leaf1, leaf2} # NOTE: If there were new files added to the old node1/ or node2/ directories, # then we would need to detect renames for those directories and would # find that: # commit A renames node2/ -> node1/node2/ # commit B renames node1/ -> 
node2/node1/ # Applying those directory renames to the initial result (making all # four paths experience a transitive renaming), yields # node1/node2/node1/{leaf1, leaf2} # node2/node1/node2/{leaf3, leaf4} # as the result. It may be really weird to have two directories # rename each other, but simple rules give weird results when given # weird inputs. HOWEVER, the "If" at the beginning of those NOTE was # false; there were no new files added and thus there is no directory # rename detection to perform. As such, we just have simple renames # and the expected answer is: # node1/node2/{leaf3, leaf4} # node2/node1/{leaf1, leaf2} test_setup_12b1 () { git init 12b1 && ( cd 12b1 && mkdir -p node1 node2 && echo leaf1 >node1/leaf1 && echo leaf2 >node1/leaf2 && echo leaf3 >node2/leaf3 && echo leaf4 >node2/leaf4 && git add node1 node2 && test_tick && git commit -m "O" && git branch O && git branch A && git branch B && git checkout A && git mv node2/ node1/ && test_tick && git commit -m "A" && git checkout B && git mv node1/ node2/ && test_tick && git commit -m "B" ) } test_expect_success '12b1: Moving two directory hierarchies into each other' ' test_setup_12b1 && ( cd 12b1 && git checkout A^0 && git -c merge.directoryRenames=true merge -s recursive B^0 && git ls-files -s >out && test_line_count = 4 out && git rev-parse >actual \ HEAD:node2/node1/leaf1 \ HEAD:node2/node1/leaf2 \ HEAD:node1/node2/leaf3 \ HEAD:node1/node2/leaf4 && git rev-parse >expect \ O:node1/leaf1 \ O:node1/leaf2 \ O:node2/leaf3 \ O:node2/leaf4 && test_cmp expect actual ) ' # Testcase 12b2, Moving two directory hierarchies into each other # (Related to testcases 1c and 12c) # Commit O: node1/{leaf1, leaf2}, node2/{leaf3, leaf4} # Commit A: node1/{leaf1, leaf2, leaf5, node2/{leaf3, leaf4}} # Commit B: node2/{leaf3, leaf4, leaf6, node1/{leaf1, leaf2}} # Expected: node1/node2/{node1/{leaf1, leaf2}, leaf6} # node2/node1/{node2/{leaf3, leaf4}, leaf5} # NOTE: Without directory renames, we would expect # A: 
node2/leaf3 -> node1/node2/leaf3 # A: node2/leaf1 -> node1/node2/leaf4 # A: Adds node1/leaf5 # B: node1/leaf1 -> node2/node1/leaf1 # B: node1/leaf2 -> node2/node1/leaf2 # B: Adds node2/leaf6 # with directory rename detection, we note that # commit A renames node2/ -> node1/node2/ # commit B renames node1/ -> node2/node1/ # therefore, applying A's directory rename to the paths added in B gives: # B: node1/leaf1 -> node1/node2/node1/leaf1 # B: node1/leaf2 -> node1/node2/node1/leaf2 # B: Adds node1/node2/leaf6 # and applying B's directory rename to the paths added in A gives: # A: node2/leaf3 -> node2/node1/node2/leaf3 # A: node2/leaf1 -> node2/node1/node2/leaf4 # A: Adds node2/node1/leaf5 # resulting in the expected # node1/node2/{node1/{leaf1, leaf2}, leaf6} # node2/node1/{node2/{leaf3, leaf4}, leaf5} # # You may ask, is it weird to have two directories rename each other? # To which, I can do no more than shrug my shoulders and say that # even simple rules give weird results when given weird inputs. 
test_setup_12b2 () { git init 12b2 && ( cd 12b2 && mkdir -p node1 node2 && echo leaf1 >node1/leaf1 && echo leaf2 >node1/leaf2 && echo leaf3 >node2/leaf3 && echo leaf4 >node2/leaf4 && git add node1 node2 && test_tick && git commit -m "O" && git branch O && git branch A && git branch B && git checkout A && git mv node2/ node1/ && echo leaf5 >node1/leaf5 && git add node1/leaf5 && test_tick && git commit -m "A" && git checkout B && git mv node1/ node2/ && echo leaf6 >node2/leaf6 && git add node2/leaf6 && test_tick && git commit -m "B" ) } test_expect_success '12b2: Moving two directory hierarchies into each other' ' test_setup_12b2 && ( cd 12b2 && git checkout A^0 && git -c merge.directoryRenames=true merge -s recursive B^0 && git ls-files -s >out && test_line_count = 6 out && git rev-parse >actual \ HEAD:node1/node2/node1/leaf1 \ HEAD:node1/node2/node1/leaf2 \ HEAD:node2/node1/node2/leaf3 \ HEAD:node2/node1/node2/leaf4 \ HEAD:node2/node1/leaf5 \ HEAD:node1/node2/leaf6 && git rev-parse >expect \ O:node1/leaf1 \ O:node1/leaf2 \ O:node2/leaf3 \ O:node2/leaf4 \ A:node1/leaf5 \ B:node2/leaf6 && test_cmp expect actual ) ' # Testcase 12c1, Moving two directory hierarchies into each other w/ content merge # (Related to testcase 12b) # Commit O: node1/{ leaf1_1, leaf2_1}, node2/{leaf3_1, leaf4_1} # Commit A: node1/{ leaf1_2, leaf2_2, node2/{leaf3_2, leaf4_2}} # Commit B: node2/{node1/{leaf1_3, leaf2_3}, leaf3_3, leaf4_3} # Expected: Content merge conflicts for each of: # node1/node2/node1/{leaf1, leaf2}, # node2/node1/node2/{leaf3, leaf4} # NOTE: This is *exactly* like 12b1, except that every path is modified on # each side of the merge. 
test_setup_12c1 () {
	git init 12c1 &&
	(
		cd 12c1 &&

		mkdir -p node1 node2 &&
		printf "1\n2\n3\n4\n5\n6\n7\n8\nleaf1\n" >node1/leaf1 &&
		printf "1\n2\n3\n4\n5\n6\n7\n8\nleaf2\n" >node1/leaf2 &&
		printf "1\n2\n3\n4\n5\n6\n7\n8\nleaf3\n" >node2/leaf3 &&
		printf "1\n2\n3\n4\n5\n6\n7\n8\nleaf4\n" >node2/leaf4 &&
		git add node1 node2 &&
		test_tick &&
		git commit -m "O" &&

		git branch O &&
		git branch A &&
		git branch B &&

		git checkout A &&
		git mv node2/ node1/ &&
		# Touch every tracked path so both sides modify all files
		for i in $(git ls-files); do echo side A >>$i; done &&
		git add -u &&
		test_tick &&
		git commit -m "A" &&

		git checkout B &&
		git mv node1/ node2/ &&
		for i in $(git ls-files); do echo side B >>$i; done &&
		git add -u &&
		test_tick &&
		git commit -m "B"
	)
}

test_expect_success '12c1: Moving one directory hierarchy into another w/ content merge' '
	test_setup_12c1 &&
	(
		cd 12c1 &&

		git checkout A^0 &&

		test_must_fail git -c merge.directoryRenames=true merge -s recursive B^0 &&

		git ls-files -u >out &&
		test_line_count = 12 out &&

		git rev-parse >actual \
			:1:node2/node1/leaf1 \
			:1:node2/node1/leaf2 \
			:1:node1/node2/leaf3 \
			:1:node1/node2/leaf4 \
			:2:node2/node1/leaf1 \
			:2:node2/node1/leaf2 \
			:2:node1/node2/leaf3 \
			:2:node1/node2/leaf4 \
			:3:node2/node1/leaf1 \
			:3:node2/node1/leaf2 \
			:3:node1/node2/leaf3 \
			:3:node1/node2/leaf4 &&
		git rev-parse >expect \
			O:node1/leaf1 \
			O:node1/leaf2 \
			O:node2/leaf3 \
			O:node2/leaf4 \
			A:node1/leaf1 \
			A:node1/leaf2 \
			A:node1/node2/leaf3 \
			A:node1/node2/leaf4 \
			B:node2/node1/leaf1 \
			B:node2/node1/leaf2 \
			B:node2/leaf3 \
			B:node2/leaf4 &&
		test_cmp expect actual
	)
'

# Testcase 12c2, Moving two directory hierarchies into each other w/ content merge
#   (Related to testcase 12b)
#   Commit O: node1/{ leaf1_1, leaf2_1}, node2/{leaf3_1, leaf4_1}
#   Commit A: node1/{ leaf1_2, leaf2_2, node2/{leaf3_2, leaf4_2}, leaf5}
#   Commit B: node2/{node1/{leaf1_3, leaf2_3}, leaf3_3, leaf4_3, leaf6}
#   Expected: Content merge conflicts for each of:
#               node1/node2/node1/{leaf1, leaf2}
#               node2/node1/node2/{leaf3, leaf4}
#             plus
#               node2/node1/leaf5
#               node1/node2/leaf6
#   NOTE: This is *exactly* like 12b2, except that every path from O is modified
#         on each side of the merge.
test_setup_12c2 () {
	git init 12c2 &&
	(
		cd 12c2 &&

		mkdir -p node1 node2 &&
		printf "1\n2\n3\n4\n5\n6\n7\n8\nleaf1\n" >node1/leaf1 &&
		printf "1\n2\n3\n4\n5\n6\n7\n8\nleaf2\n" >node1/leaf2 &&
		printf "1\n2\n3\n4\n5\n6\n7\n8\nleaf3\n" >node2/leaf3 &&
		printf "1\n2\n3\n4\n5\n6\n7\n8\nleaf4\n" >node2/leaf4 &&
		git add node1 node2 &&
		test_tick &&
		git commit -m "O" &&

		git branch O &&
		git branch A &&
		git branch B &&

		git checkout A &&
		git mv node2/ node1/ &&
		for i in $(git ls-files); do echo side A >>$i; done &&
		git add -u &&
		echo leaf5 >node1/leaf5 &&
		git add node1/leaf5 &&
		test_tick &&
		git commit -m "A" &&

		git checkout B &&
		git mv node1/ node2/ &&
		for i in $(git ls-files); do echo side B >>$i; done &&
		git add -u &&
		echo leaf6 >node2/leaf6 &&
		git add node2/leaf6 &&
		test_tick &&
		git commit -m "B"
	)
}

test_expect_success '12c2: Moving one directory hierarchy into another w/ content merge' '
	test_setup_12c2 &&
	(
		cd 12c2 &&

		git checkout A^0 &&

		test_must_fail git -c merge.directoryRenames=true merge -s recursive B^0 &&

		git ls-files -s >out &&
		test_line_count = 14 out &&
		git ls-files -u >out &&
		test_line_count = 12 out &&

		git rev-parse >actual \
			:1:node1/node2/node1/leaf1 \
			:1:node1/node2/node1/leaf2 \
			:1:node2/node1/node2/leaf3 \
			:1:node2/node1/node2/leaf4 \
			:2:node1/node2/node1/leaf1 \
			:2:node1/node2/node1/leaf2 \
			:2:node2/node1/node2/leaf3 \
			:2:node2/node1/node2/leaf4 \
			:3:node1/node2/node1/leaf1 \
			:3:node1/node2/node1/leaf2 \
			:3:node2/node1/node2/leaf3 \
			:3:node2/node1/node2/leaf4 \
			:0:node2/node1/leaf5 \
			:0:node1/node2/leaf6 &&
		git rev-parse >expect \
			O:node1/leaf1 \
			O:node1/leaf2 \
			O:node2/leaf3 \
			O:node2/leaf4 \
			A:node1/leaf1 \
			A:node1/leaf2 \
			A:node1/node2/leaf3 \
			A:node1/node2/leaf4 \
			B:node2/node1/leaf1 \
			B:node2/node1/leaf2 \
			B:node2/leaf3 \
			B:node2/leaf4 \
			A:node1/leaf5 \
			B:node2/leaf6 &&
		test_cmp expect actual
	)
'

# Testcase 12d, Rename/merge of subdirectory into the root
#   Commit O: a/b/subdir/foo
#   Commit A: subdir/foo
#   Commit B: a/b/subdir/foo, a/b/bar
#   Expected: subdir/foo, bar
test_setup_12d () {
	git init 12d &&
	(
		cd 12d &&

		mkdir -p a/b/subdir &&
		test_commit a/b/subdir/foo &&

		git branch O &&
		git branch A &&
		git branch B &&

		git checkout A &&
		mkdir subdir &&
		git mv a/b/subdir/foo.t subdir/foo.t &&
		test_tick &&
		git commit -m "A" &&

		git checkout B &&
		test_commit a/b/bar
	)
}

test_expect_success '12d: Rename/merge subdir into the root, variant 1' '
	test_setup_12d &&
	(
		cd 12d &&

		git checkout A^0 &&

		git -c merge.directoryRenames=true merge -s recursive B^0 &&

		git ls-files -s >out &&
		test_line_count = 2 out &&

		git rev-parse >actual \
			HEAD:subdir/foo.t HEAD:bar.t &&
		git rev-parse >expect \
			O:a/b/subdir/foo.t B:a/b/bar.t &&
		test_cmp expect actual &&

		git hash-object bar.t >actual &&
		git rev-parse B:a/b/bar.t >expect &&
		test_cmp expect actual &&

		test_must_fail git rev-parse HEAD:a/b/subdir/foo.t &&
		test_must_fail git rev-parse HEAD:a/b/bar.t &&
		test_path_is_missing a/ &&
		test_path_is_file bar.t
	)
'

# Testcase 12e, Rename/merge of subdirectory into the root
#   Commit O: a/b/foo
#   Commit A: foo
#   Commit B: a/b/foo, a/b/bar
#   Expected: foo, bar
test_setup_12e () {
	git init 12e &&
	(
		cd 12e &&

		mkdir -p a/b &&
		test_commit a/b/foo &&

		git branch O &&
		git branch A &&
		git branch B &&

		git checkout A &&
		# NOTE(review): subdir is created but never used in this variant
		# (foo.t moves to the root); looks copied from 12d -- harmless.
		mkdir subdir &&
		git mv a/b/foo.t foo.t &&
		test_tick &&
		git commit -m "A" &&

		git checkout B &&
		test_commit a/b/bar
	)
}

test_expect_success '12e: Rename/merge subdir into the root, variant 2' '
	test_setup_12e &&
	(
		cd 12e &&

		git checkout A^0 &&

		git -c merge.directoryRenames=true merge -s recursive B^0 &&

		git ls-files -s >out &&
		test_line_count = 2 out &&

		git rev-parse >actual \
			HEAD:foo.t HEAD:bar.t &&
		git rev-parse >expect \
			O:a/b/foo.t B:a/b/bar.t &&
		test_cmp expect actual &&

		git hash-object bar.t >actual &&
		git rev-parse B:a/b/bar.t >expect &&
		test_cmp expect actual &&

		test_must_fail git rev-parse HEAD:a/b/foo.t &&
		test_must_fail git rev-parse HEAD:a/b/bar.t &&
		test_path_is_missing a/ &&
		test_path_is_file bar.t
	)
'

# Testcase 12f, Rebase of patches with big directory rename
#   Commit O:
#              dir/subdir/{a,b,c,d,e_O,Makefile_TOP_O}
#              dir/subdir/tweaked/{f,g,h,Makefile_SUB_O}
#              dir/unchanged/<LOTS OF FILES>
#   Commit A:
#     (Remove f & g, move e into newsubdir, rename dir/->folder/, modify files)
#              folder/subdir/{a,b,c,d,Makefile_TOP_A}
#              folder/subdir/newsubdir/e_A
#              folder/subdir/tweaked/{h,Makefile_SUB_A}
#              folder/unchanged/<LOTS OF FILES>
#   Commit B1:
#     (add newfile.{c,py}, modify underscored files)
#              dir/{a,b,c,d,e_B1,Makefile_TOP_B1,newfile.c}
#              dir/tweaked/{f,g,h,Makefile_SUB_B1,newfile.py}
#              dir/unchanged/<LOTS OF FILES>
#   Commit B2:
#     (Modify e further, add newfile.rs)
#              dir/{a,b,c,d,e_B2,Makefile_TOP_B1,newfile.c,newfile.rs}
#              dir/tweaked/{f,g,h,Makefile_SUB_B1,newfile.py}
#              dir/unchanged/<LOTS OF FILES>
#   Expected:
#     B1-picked:
#              folder/subdir/{a,b,c,d,Makefile_TOP_Merge1,newfile.c}
#              folder/subdir/newsubdir/e_Merge1
#              folder/subdir/tweaked/{h,Makefile_SUB_Merge1,newfile.py}
#              folder/unchanged/<LOTS OF FILES>
#     B2-picked:
#              folder/subdir/{a,b,c,d,Makefile_TOP_Merge1,newfile.c,newfile.rs}
#              folder/subdir/newsubdir/e_Merge2
#              folder/subdir/tweaked/{h,Makefile_SUB_Merge1,newfile.py}
#              folder/unchanged/<LOTS OF FILES>
#
# Things being checked here:
#   1. dir/subdir/newfile.c does not get pushed into folder/subdir/newsubdir/.
#      dir/subdir/{a,b,c,d} -> folder/subdir/{a,b,c,d} looks like
#          dir/ -> folder/,
#      whereas dir/subdir/e -> folder/subdir/newsubdir/e looks like
#          dir/subdir/ -> folder/subdir/newsubdir/
#      and if we note that newfile.c is found in dir/subdir/, we might overlook
#      the dir/ -> folder/ rule that has more weight.  Older git versions did
#      this.
#   2. The code to do trivial directory resolves.  Note that
#      dir/subdir/unchanged/ is unchanged and can be deleted, and files in the
#      new folder/subdir/unchanged/ are not needed as a target to any renames.
#      Thus, in the second collect_merge_info_callback() we can just resolve
#      these two directories trivially without recursing.)
#   3. Exercising the codepaths for caching renames and deletes from one cherry
#      pick and re-applying them in the subsequent one.
test_setup_12f () {
	git init 12f &&
	(
		cd 12f &&

		mkdir -p dir/unchanged &&
		mkdir -p dir/subdir/tweaked &&
		echo a >dir/subdir/a &&
		echo b >dir/subdir/b &&
		echo c >dir/subdir/c &&
		echo d >dir/subdir/d &&
		test_seq 1 10 >dir/subdir/e &&
		test_seq 10 20 >dir/subdir/Makefile &&
		echo f >dir/subdir/tweaked/f &&
		echo g >dir/subdir/tweaked/g &&
		echo h >dir/subdir/tweaked/h &&
		test_seq 20 30 >dir/subdir/tweaked/Makefile &&
		for i in $(test_seq 1 88); do
			echo content $i >dir/unchanged/file_$i
		done &&
		git add . &&
		git commit -m "O" &&

		git branch O &&
		git branch A &&
		git branch B &&

		git switch A &&
		git rm dir/subdir/tweaked/f dir/subdir/tweaked/g &&
		test_seq 2 10 >dir/subdir/e &&
		test_seq 11 20 >dir/subdir/Makefile &&
		test_seq 21 30 >dir/subdir/tweaked/Makefile &&
		mkdir dir/subdir/newsubdir &&
		git mv dir/subdir/e dir/subdir/newsubdir/ &&
		git mv dir folder &&
		git add . &&
		git commit -m "A" &&

		git switch B &&
		mkdir dir/subdir/newsubdir/ &&
		echo c code >dir/subdir/newfile.c &&
		echo python code >dir/subdir/newsubdir/newfile.py &&
		test_seq 1 11 >dir/subdir/e &&
		test_seq 10 21 >dir/subdir/Makefile &&
		test_seq 20 31 >dir/subdir/tweaked/Makefile &&
		git add . &&
		git commit -m "B1" &&

		echo rust code >dir/subdir/newfile.rs &&
		test_seq 1 12 >dir/subdir/e &&
		git add . &&
		git commit -m "B2"
	)
}

test_expect_success '12f: Trivial directory resolve, caching, all kinds of fun' '
	test_setup_12f &&
	(
		cd 12f &&
		git checkout A^0 &&
		git branch Bmod B &&

		GIT_TRACE2_PERF="$(pwd)/trace.output" git -c merge.directoryRenames=true rebase A Bmod &&

		echo Checking the pick of B1... &&

		test_must_fail git rev-parse Bmod~1:dir &&

		git ls-tree -r Bmod~1 >out &&
		test_line_count = 98 out &&

		git diff --name-status A Bmod~1 >actual &&
		q_to_tab >expect <<-\EOF &&
		MQfolder/subdir/Makefile
		AQfolder/subdir/newfile.c
		MQfolder/subdir/newsubdir/e
		AQfolder/subdir/newsubdir/newfile.py
		MQfolder/subdir/tweaked/Makefile
		EOF
		test_cmp expect actual &&

		# Three-way merged files
		test_seq 2 11 >e_Merge1 &&
		test_seq 11 21 >Makefile_TOP &&
		test_seq 21 31 >Makefile_SUB &&
		git hash-object >expect \
			e_Merge1 \
			Makefile_TOP \
			Makefile_SUB &&
		git rev-parse >actual \
			Bmod~1:folder/subdir/newsubdir/e \
			Bmod~1:folder/subdir/Makefile \
			Bmod~1:folder/subdir/tweaked/Makefile &&
		test_cmp expect actual &&

		# New files showed up at the right location with right contents
		git rev-parse >expect \
			B~1:dir/subdir/newfile.c \
			B~1:dir/subdir/newsubdir/newfile.py &&
		git rev-parse >actual \
			Bmod~1:folder/subdir/newfile.c \
			Bmod~1:folder/subdir/newsubdir/newfile.py &&
		test_cmp expect actual &&

		# Removed files
		test_path_is_missing folder/subdir/tweaked/f &&
		test_path_is_missing folder/subdir/tweaked/g &&

		# Unchanged files or directories
		git rev-parse >actual \
			Bmod~1:folder/subdir/a \
			Bmod~1:folder/subdir/b \
			Bmod~1:folder/subdir/c \
			Bmod~1:folder/subdir/d \
			Bmod~1:folder/unchanged \
			Bmod~1:folder/subdir/tweaked/h &&
		git rev-parse >expect \
			O:dir/subdir/a \
			O:dir/subdir/b \
			O:dir/subdir/c \
			O:dir/subdir/d \
			O:dir/unchanged \
			O:dir/subdir/tweaked/h &&
		test_cmp expect actual &&

		echo Checking the pick of B2... &&

		test_must_fail git rev-parse Bmod:dir &&

		git ls-tree -r Bmod >out &&
		test_line_count = 99 out &&

		git diff --name-status Bmod~1 Bmod >actual &&
		q_to_tab >expect <<-\EOF &&
		AQfolder/subdir/newfile.rs
		MQfolder/subdir/newsubdir/e
		EOF
		test_cmp expect actual &&

		# Three-way merged file
		test_seq 2 12 >e_Merge2 &&
		git hash-object e_Merge2 >expect &&
		git rev-parse Bmod:folder/subdir/newsubdir/e >actual &&
		test_cmp expect actual &&

		grep region_enter.*collect_merge_info trace.output >collect &&
		test_line_count = 4 collect &&
		grep region_enter.*process_entries$ trace.output >process &&
		test_line_count = 2 process
	)
'

# Testcase 12g, Testcase with two kinds of "relevant" renames
#   Commit O: somefile_O, subdir/{a_O,b_O}
#   Commit A: somefile_A, subdir/{a_O,b_O,c_A}
#   Commit B: newfile_B, newdir/{a_B,b_B}
#   Expected: newfile_{merged}, newdir/{a_B,b_B,c_A}
test_setup_12g () {
	git init 12g &&
	(
		cd 12g &&

		mkdir -p subdir &&
		test_write_lines upon a time there was a >somefile &&
		test_write_lines 1 2 3 4 5 6 7 8 9 10 >subdir/a &&
		test_write_lines one two three four five six >subdir/b &&
		git add . &&
		test_tick &&
		git commit -m "O" &&

		git branch O &&
		git branch A &&
		git branch B &&

		git switch A &&
		test_write_lines once upon a time there was a >somefile &&
		> subdir/c &&
		git add somefile subdir/c &&
		test_tick &&
		git commit -m "A" &&

		git checkout B &&
		git mv somefile newfile &&
		git mv subdir newdir &&
		echo repo >>newfile &&
		test_write_lines 1 2 3 4 5 6 7 8 9 10 11 >newdir/a &&
		test_write_lines one two three four five six seven >newdir/b &&
		git add newfile newdir &&
		test_tick &&
		git commit -m "B"
	)
}

test_expect_success '12g: Testcase with two kinds of "relevant" renames' '
	test_setup_12g &&
	(
		cd 12g &&

		git checkout A^0 &&

		git -c merge.directoryRenames=true merge -s recursive B^0 &&

		test_write_lines once upon a time there was a repo >expect &&
		test_cmp expect newfile &&

		git ls-files -s >out &&
		test_line_count = 4 out &&

		git rev-parse >actual \
			HEAD:newdir/a HEAD:newdir/b HEAD:newdir/c &&
		git rev-parse >expect \
			B:newdir/a B:newdir/b A:subdir/c &&
		test_cmp expect actual &&

		test_must_fail git rev-parse HEAD:subdir/a &&
		test_must_fail git rev-parse HEAD:subdir/b &&
		test_must_fail git rev-parse HEAD:subdir/c &&
		test_path_is_missing subdir/ &&
		test_path_is_file newdir/c
	)
'

# Testcase 12h, Testcase with two kinds of "relevant" renames
#   Commit O: olddir/{a_1, b}
#   Commit A: newdir/{a_2, b}
#   Commit B: olddir/{alpha_1, b}
#   Expected: newdir/{alpha_2, b}
test_setup_12h () {
	git init 12h &&
	(
		cd 12h &&

		mkdir olddir &&
		test_seq 3 8 >olddir/a &&
		>olddir/b &&
		git add olddir &&
		git commit -m orig &&

		git branch O &&
		git branch A &&
		git branch B &&

		git switch A &&
		test_seq 3 10 >olddir/a &&
		git add olddir/a &&
		git mv olddir newdir &&
		git commit -m A &&

		git switch B &&
		git mv olddir/a olddir/alpha &&
		git commit -m B
	)
}

test_expect_failure '12h: renaming a file within a renamed directory' '
	test_setup_12h &&
	(
		cd 12h &&

		git checkout A^0 &&
		test_might_fail git -c merge.directoryRenames=true merge -s recursive B^0 &&

		git ls-files >tracked &&
		test_line_count = 2 tracked &&

		test_path_is_missing olddir/a &&
		test_path_is_file newdir/alpha &&
		test_path_is_file newdir/b &&

		git rev-parse >actual \
			HEAD:newdir/alpha HEAD:newdir/b &&
		# The setup only ever creates olddir/b, so the original
		# "O:oldir/b" here was a typo that could never resolve.
		git rev-parse >expect \
			A:newdir/a O:olddir/b &&
		test_cmp expect actual
	)
'

# Testcase 12i, Directory rename causes rename-to-self
#   Commit O: source/{subdir/foo, bar, baz_1}
#   Commit A: source/{foo, bar, baz_1}
#   Commit B: source/{subdir/{foo, bar}, baz_2}
#   Expected: source/{foo, bar, baz_2}, with conflicts on
#             source/bar vs. source/subdir/bar
test_setup_12i () {
	git init 12i &&
	(
		cd 12i &&

		mkdir -p source/subdir &&
		echo foo >source/subdir/foo &&
		printf "%d\n" 1 2 3 4 5 6 7 >source/bar &&
		echo baz >source/baz &&
		git add source &&
		git commit -m orig &&

		git branch O &&
		git branch A &&
		git branch B &&

		git switch A &&
		git mv source/subdir/foo source/foo &&
		git commit -m A &&

		git switch B &&
		git mv source/bar source/subdir/bar &&
		echo more baz >>source/baz &&
		git add source/baz &&
		git commit -m B
	)
}

test_expect_success '12i: Directory rename causes rename-to-self' '
	test_setup_12i &&
	(
		cd 12i &&

		git checkout A^0 &&

		# NOTE: A potentially better resolution would be for
		#    source/bar -> source/subdir/bar
		# to use the directory rename to become
		#    source/bar -> source/bar
		# (a rename to self), and thus we end up with bar with
		# a path conflict (given merge.directoryRenames=conflict).
		# However, since the relevant renames optimization
		# prevents us from noticing
		#    source/bar -> source/subdir/bar
		# as a rename and looking at it just as
		#    delete source/bar
		#    add source/subdir/bar
		# the directory rename of source/subdir/bar -> source/bar does
		# not look like a rename-to-self situation but a
		# rename-on-top-of-other-file situation.  We do not want
		# stage 1 entries from an unrelated file, so we expect an
		# error about there being a file in the way.
		test_must_fail git -c merge.directoryRenames=conflict merge -s recursive B^0 >out &&
		grep "CONFLICT (implicit dir rename).*source/bar in the way" out &&

		test_path_is_missing source/bar &&
		test_path_is_file source/subdir/bar &&
		test_path_is_file source/baz &&

		git ls-files >actual &&
		uniq <actual >tracked &&
		test_line_count = 3 tracked &&

		git status --porcelain -uno >actual &&
		cat >expect <<-\EOF &&
		M  source/baz
		R  source/bar -> source/subdir/bar
		EOF
		test_cmp expect actual
	)
'

# Testcase 12i2, Identical to 12i except that source/subdir/bar modified on unrenamed side
#   Commit O: source/{subdir/foo, bar, baz_1}
#   Commit A: source/{foo, bar_2, baz_1}
#   Commit B: source/{subdir/{foo, bar}, baz_2}
#   Expected: source/{foo, bar, baz_2}, with conflicts on
#             source/bar vs. source/subdir/bar
test_setup_12i2 () {
	git init 12i2 &&
	(
		cd 12i2 &&

		mkdir -p source/subdir &&
		echo foo >source/subdir/foo &&
		printf "%d\n" 1 2 3 4 5 6 7 >source/bar &&
		echo baz >source/baz &&
		git add source &&
		git commit -m orig &&

		git branch O &&
		git branch A &&
		git branch B &&

		git switch A &&
		git mv source/subdir/foo source/foo &&
		echo 8 >> source/bar &&
		git add source/bar &&
		git commit -m A &&

		git switch B &&
		git mv source/bar source/subdir/bar &&
		echo more baz >>source/baz &&
		git add source/baz &&
		git commit -m B
	)
}

test_expect_success '12i2: Directory rename causes rename-to-self' '
	test_setup_12i2 &&
	(
		cd 12i2 &&

		git checkout A^0 &&

		test_must_fail git -c merge.directoryRenames=conflict merge -s recursive B^0 &&

		test_path_is_missing source/subdir &&
		test_path_is_file source/bar &&
		test_path_is_file source/baz &&

		git ls-files >actual &&
		uniq <actual >tracked &&
		test_line_count = 3 tracked &&

		git status --porcelain -uno >actual &&
		cat >expect <<-\EOF &&
		UU source/bar
		M  source/baz
		EOF
		test_cmp expect actual
	)
'

# Testcase 12j, Directory rename to root causes rename-to-self
#   Commit O: {subdir/foo, bar, baz_1}
#   Commit A: {foo, bar, baz_1}
#   Commit B: {subdir/{foo, bar}, baz_2}
#   Expected: {foo, bar, baz_2}, with conflicts on bar vs. subdir/bar
test_setup_12j () {
	git init 12j &&
	(
		cd 12j &&

		mkdir -p subdir &&
		echo foo >subdir/foo &&
		echo bar >bar &&
		echo baz >baz &&
		git add . &&
		git commit -m orig &&

		git branch O &&
		git branch A &&
		git branch B &&

		git switch A &&
		git mv subdir/foo foo &&
		git commit -m A &&

		git switch B &&
		git mv bar subdir/bar &&
		echo more baz >>baz &&
		git add baz &&
		git commit -m B
	)
}

test_expect_success '12j: Directory rename to root causes rename-to-self' '
	test_setup_12j &&
	(
		cd 12j &&

		git checkout A^0 &&

		# NOTE: A potentially better resolution would be for
		#    bar -> subdir/bar
		# to use the directory rename to become
		#    bar -> bar
		# (a rename to self), and thus we end up with bar with
		# a path conflict (given merge.directoryRenames=conflict).
		# However, since the relevant renames optimization
		# prevents us from noticing
		#    bar -> subdir/bar
		# as a rename and looking at it just as
		#    delete bar
		#    add subdir/bar
		# the directory rename of subdir/bar -> bar does not look
		# like a rename-to-self situation but a
		# rename-on-top-of-other-file situation.  We do not want
		# stage 1 entries from an unrelated file, so we expect an
		# error about there being a file in the way.
		test_must_fail git -c merge.directoryRenames=conflict merge -s recursive B^0 >out &&
		grep "CONFLICT (implicit dir rename).*bar in the way" out &&

		test_path_is_missing bar &&
		test_path_is_file subdir/bar &&
		test_path_is_file baz &&

		git ls-files >actual &&
		uniq <actual >tracked &&
		test_line_count = 3 tracked &&

		git status --porcelain -uno >actual &&
		cat >expect <<-\EOF &&
		M  baz
		R  bar -> subdir/bar
		EOF
		test_cmp expect actual
	)
'

# Testcase 12k, Directory rename with sibling causes rename-to-self
#   Commit O: dirB/foo, dirA/{bar, baz_1}
#   Commit A: dirA/{foo, bar, baz_1}
#   Commit B: dirB/{foo, bar}, dirA/baz_2
#   Expected: dirA/{foo, bar, baz_2}, with conflicts on dirA/bar vs. dirB/bar
test_setup_12k () {
	git init 12k &&
	(
		cd 12k &&

		mkdir dirA dirB &&
		echo foo >dirB/foo &&
		echo bar >dirA/bar &&
		echo baz >dirA/baz &&
		git add . &&
		git commit -m orig &&

		git branch O &&
		git branch A &&
		git branch B &&

		git switch A &&
		git mv dirB/* dirA/ &&
		git commit -m A &&

		git switch B &&
		git mv dirA/bar dirB/bar &&
		echo more baz >>dirA/baz &&
		git add dirA/baz &&
		git commit -m B
	)
}

test_expect_success '12k: Directory rename with sibling causes rename-to-self' '
	test_setup_12k &&
	(
		cd 12k &&

		git checkout A^0 &&

		# NOTE: A potentially better resolution would be for
		#    dirA/bar -> dirB/bar
		# to use the directory rename (dirB/ -> dirA/) to become
		#    dirA/bar -> dirA/bar
		# (a rename to self), and thus we end up with bar with
		# a path conflict (given merge.directoryRenames=conflict).
		# However, since the relevant renames optimization
		# prevents us from noticing
		#    dirA/bar -> dirB/bar
		# as a rename and looking at it just as
		#    delete dirA/bar
		#    add dirB/bar
		# the directory rename of dirA/bar -> dirB/bar does
		# not look like a rename-to-self situation but a
		# rename-on-top-of-other-file situation.  We do not want
		# stage 1 entries from an unrelated file, so we expect an
		# error about there being a file in the way.
		test_must_fail git -c merge.directoryRenames=conflict merge -s recursive B^0 >out &&
		grep "CONFLICT (implicit dir rename).*dirA/bar in the way" out &&

		test_path_is_missing dirA/bar &&
		test_path_is_file dirB/bar &&
		test_path_is_file dirA/baz &&

		git ls-files >actual &&
		uniq <actual >tracked &&
		test_line_count = 3 tracked &&

		git status --porcelain -uno >actual &&
		cat >expect <<-\EOF &&
		M  dirA/baz
		R  dirA/bar -> dirB/bar
		EOF
		test_cmp expect actual
	)
'

# Testcase 12l, Both sides rename a directory into the other side, both add
#   a file which after directory renames are the same filename
#   Commit O: sub1/file,            sub2/other
#   Commit A: sub3/file,            sub2/{other, new_add_add_file_1}
#   Commit B: sub1/{file, newfile}, sub1/sub2/{other, new_add_add_file_2}
#
#   In words:
#     A: sub1/ -> sub3/, add sub2/new_add_add_file_1
#     B: sub2/ -> sub1/sub2, add sub1/newfile, add sub1/sub2/new_add_add_file_2
#
#   Expected: sub3/{file, newfile, sub2/other}
#             CONFLICT (add/add): sub1/sub2/new_add_add_file
#
#   Note that sub1/newfile is not extraneous.  Directory renames are only
#   detected if they are needed, and they are only needed if the old directory
#   had a new file added on the opposite side of history.  So sub1/newfile
#   is needed for there to be a sub1/ -> sub3/ rename.
test_setup_12l () {
	git init 12l_$1 &&
	(
		cd 12l_$1 &&

		# Fixed broken &&-chain: mkdir previously lacked the
		# trailing &&, so its failure would have gone undetected.
		mkdir sub1 sub2 &&
		echo file >sub1/file &&
		echo other >sub2/other &&
		git add sub1 sub2 &&
		git commit -m "O" &&

		git branch O &&
		git branch A &&
		git branch B &&

		git checkout A &&
		git mv sub1 sub3 &&
		echo conflicting >sub2/new_add_add_file &&
		git add sub2 &&
		test_tick &&
		git add -u &&
		git commit -m "A" &&

		git checkout B &&
		echo dissimilar >sub2/new_add_add_file &&
		echo brand >sub1/newfile &&
		git add sub1 sub2 &&
		git mv sub2 sub1 &&
		test_tick &&
		git commit -m "B"
	)
}

test_expect_success '12l (B into A): Rename into each other + add/add conflict' '
	test_setup_12l BintoA &&
	(
		cd 12l_BintoA &&

		git checkout -q A^0 &&

		test_must_fail git -c merge.directoryRenames=true merge -s recursive B^0 &&

		test_stdout_line_count = 5 git ls-files -s &&

		git rev-parse >actual \
			:0:sub3/file :0:sub3/newfile :0:sub3/sub2/other \
			:2:sub1/sub2/new_add_add_file \
			:3:sub1/sub2/new_add_add_file &&
		git rev-parse >expect \
			O:sub1/file B:sub1/newfile O:sub2/other \
			A:sub2/new_add_add_file \
			B:sub1/sub2/new_add_add_file &&
		test_cmp expect actual &&

		git ls-files -o >actual &&
		test_write_lines actual expect >expect &&
		test_cmp expect actual
	)
'

test_expect_success '12l (A into B): Rename into each other + add/add conflict' '
	test_setup_12l AintoB &&
	(
		cd 12l_AintoB &&

		git checkout -q B^0 &&

		test_must_fail git -c merge.directoryRenames=true merge -s recursive A^0 &&

		test_stdout_line_count = 5 git ls-files -s &&

		git rev-parse >actual \
			:0:sub3/file :0:sub3/newfile :0:sub3/sub2/other \
			:2:sub1/sub2/new_add_add_file \
			:3:sub1/sub2/new_add_add_file &&
		git rev-parse >expect \
			O:sub1/file B:sub1/newfile O:sub2/other \
			B:sub1/sub2/new_add_add_file \
			A:sub2/new_add_add_file &&
		test_cmp expect actual &&

		git ls-files -o >actual &&
		test_write_lines actual expect >expect &&
		test_cmp expect actual
	)
'

# Testcase 12m, Directory rename, plus change of parent dir to symlink
#   Commit O: dir/subdir/file
#   Commit A: renamed-dir/subdir/file
#   Commit B: dir/subdir
#   In words:
#     A: dir/subdir/ -> renamed-dir/subdir
#     B: delete dir/subdir/file, add dir/subdir as symlink
#
#   Expected: CONFLICT (rename/delete): renamed-dir/subdir/file,
#             CONFLICT (file location): renamed-dir/subdir vs. dir/subdir
#             CONFLICT (directory/file): renamed-dir/subdir symlink has
#                                        renamed-dir/subdir in the way
test_setup_12m () {
	git init 12m &&
	(
		cd 12m &&

		mkdir -p dir/subdir &&
		echo 1 >dir/subdir/file &&
		git add . &&
		git commit -m "O" &&

		git branch O &&
		git branch A &&
		git branch B &&

		git switch A &&
		git mv dir/ renamed-dir/ &&
		git add . &&
		git commit -m "A" &&

		git switch B &&
		git rm dir/subdir/file &&
		mkdir dir &&
		if test_have_prereq MINGW
		then
			cmd //c 'mklink dir\subdir NUL'
		else
			ln -s /dev/null dir/subdir
		fi &&
		git add . &&
		git commit -m "B"
	)
}

test_expect_success SYMLINKS '12m: Change parent of renamed-dir to symlink on other side' '
	test_setup_12m &&
	(
		cd 12m &&

		git checkout -q A^0 &&

		test_must_fail git -c merge.directoryRenames=conflict merge -s recursive B^0 &&

		test_stdout_line_count = 3 git ls-files -s &&
		test_stdout_line_count = 2 ls -1 renamed-dir &&
		test_path_is_missing dir
	)
'

# Testcase 12n, Directory rename transitively makes rename back to self
#
#   (Since this is a cherry-pick instead of merge, the labels are a bit weird.
#    O, the original commit, is A~1 rather than what branch O points to.)
#
#   Commit O: tools/hello
#             world
#   Commit A: tools/hello
#             tools/world
#   Commit B: hello
#   In words:
#     A: world -> tools/world
#     B: tools/ -> /, i.e. rename all of tools to toplevel directory
#        delete world
#
#   Expected:
#     CONFLICT (file location): tools/world vs. world
#
test_setup_12n () {
	git init 12n &&
	(
		cd 12n &&

		mkdir tools &&
		echo hello >tools/hello &&
		git add tools/hello &&
		git commit -m "O" &&

		git branch O &&
		git branch A &&
		git branch B &&

		git switch A &&
		echo world >world &&
		git add world &&
		git commit -q world -m 'Add world' &&

		git mv world tools/world &&
		git commit -m "Move world into tools/" &&

		git switch B &&
		git mv tools/hello hello &&
		git commit -m "Move hello from tools/ to toplevel"
	)
}

test_expect_success '12n: Directory rename transitively makes rename back to self' '
	test_setup_12n &&
	(
		cd 12n &&

		git checkout -q B^0 &&

		test_must_fail git cherry-pick A^0 >out &&
		test_grep "CONFLICT (file location).*should perhaps be moved" out &&

		# Should have 1 entry for hello, and 2 for world
		test_stdout_line_count = 3 git ls-files -s &&
		test_stdout_line_count = 1 git ls-files -s hello &&
		test_stdout_line_count = 2 git ls-files -s world
	)
'

# Testcase 12n2, Directory rename transitively makes rename back to self
#
#   Commit O: tools/hello
#             world
#   Commit A: tools/hello
#             tools/world
#   Commit B: hello
#   In words:
#     A: world -> tools/world
#     B: tools/ -> /, i.e. rename all of tools to toplevel directory
#        delete world
#
#   Expected:
#     CONFLICT (file location): tools/world vs. world
#
test_setup_12n2 () {
	git init 12n2 &&
	(
		cd 12n2 &&

		mkdir tools &&
		echo hello >tools/hello &&
		git add tools/hello &&
		echo world >world &&
		git add world &&
		git commit -m "O" &&

		git branch O &&
		git branch A &&
		git branch B &&

		git switch A &&
		git mv world tools/world &&
		git commit -m "Move world into tools/" &&

		git switch B &&
		git mv tools/hello hello &&
		git rm world &&
		git commit -m "Move hello from tools/ to toplevel"
	)
}

test_expect_success '12n2: Directory rename transitively makes rename back to self' '
	test_setup_12n2 &&
	(
		cd 12n2 &&

		git checkout -q B^0 &&

		test_might_fail git -c merge.directoryRenames=true merge A^0 >out &&

		# Should have 1 entry for hello, and either 0 or 2 for world
		#
		# NOTE: Since merge.directoryRenames=true, there is no path
		# conflict for world vs. tools/world; it should end up at
		# world.  The fact that world was unmodified on side A, means
		# there was no content conflict; we should just take the
		# content from side B -- i.e. delete the file.  So merging
		# could just delete world.
		#
		# However, rename-to-self-via-directory-rename is a bit more
		# challenging.  Relax this test to allow world to be treated
		# as a modify/delete conflict as well, meaning it will have
		# two higher order stages, that just so happen to match.
		#
		test_stdout_line_count = 1 git ls-files -s hello &&
		test_stdout_line_count = 2 git ls-files -s world &&
		test_grep "CONFLICT (modify/delete).*world deleted in HEAD" out
	)
'

# Testcase 12o, Directory rename hits other rename source; file still in way on same side
#   Commit O: A/file1_1
#             A/stuff
#             B/file1_2
#             B/stuff
#             C/other
#   Commit A: A/file1_1
#             A/stuff
#             B/stuff
#             C/file1_2
#             C/other
#   Commit B: D/file2_1
#             A/stuff
#             B/file1_2
#             B/stuff
#             A/other
#   In words:
#     A: rename B/file1_2 -> C/file1_2
#     B: rename C/ -> A/
#        rename A/file1_1 -> D/file2_1
#   Rationale:
#     A/stuff is unmodified, it shows up in final output
#     B/stuff is unmodified, it shows up in final output
#     C/other touched on one side (rename to A), so A/other shows up in output
#     A/file1 is renamed to D/file2
#     B/file1 -> C/file1 and even though C/ -> A/, A/file1 is
#       "in the way" so we don't do the directory rename
#   Expected: A/stuff
#             B/stuff
#             A/other
#             D/file2
#             C/file1
#             + CONFLICT (implicit dir rename): A/file1 in way of C/file1
#
test_setup_12o () {
	git init 12o &&
	(
		cd 12o &&

		mkdir -p A B C &&
		echo 1 >A/file1 &&
		echo 2 >B/file1 &&
		echo other >C/other &&
		echo Astuff >A/stuff &&
		echo Bstuff >B/stuff &&
		git add . &&
		git commit -m "O" &&

		git branch O &&
		git branch A &&
		git branch B &&

		git switch A &&
		git mv B/file1 C/ &&
		git add . &&
		git commit -m "A" &&

		git switch B &&
		mkdir -p D &&
		git mv A/file1 D/file2 &&
		git mv C/other A/other &&
		git add . &&
		git commit -m "B"
	)
}

test_expect_success '12o: Directory rename hits other rename source; file still in way on same side' '
	test_setup_12o &&
	(
		cd 12o &&

		git checkout -q A^0 &&

		test_must_fail git -c merge.directoryRenames=conflict merge -s recursive B^0 >out &&

		test_stdout_line_count = 5 git ls-files -s &&
		test_stdout_line_count = 1 git ls-files -s A/other &&
		test_stdout_line_count = 1 git ls-files -s A/stuff &&
		test_stdout_line_count = 1 git ls-files -s B/stuff &&
		test_stdout_line_count = 1 git ls-files -s D/file2 &&

		grep "CONFLICT (implicit dir rename).*Existing file/dir at A/file1 in the way" out &&
		test_stdout_line_count = 1 git ls-files -s C/file1
	)
'

# Testcase 12p, Directory rename hits other rename source; file still in way on other side
#   Commit O: A/file1_1
#             A/stuff
#             B/file1_2
#             B/stuff
#             C/other
#   Commit A: D/file2_1
#             A/stuff
#             B/stuff
#             C/file1_2
#             C/other
#   Commit B: A/file1_1
#             A/stuff
#             B/file1_2
#             B/stuff
#             A/other
#   Short version:
#     A: rename A/file1_1 -> D/file2_1
#        rename B/file1_2 -> C/file1_2
#     B: Rename C/ -> A/
#   Rationale:
#     A/stuff is unmodified, it shows up in final output
#     B/stuff is unmodified, it shows up in final output
#     C/other touched on one side (rename to A), so A/other shows up in output
#     A/file1 is renamed to D/file2
#     B/file1 -> C/file1 and even though C/ -> A/, A/file1 is
#       "in the way" so we don't do the directory rename
#   Expected: A/stuff
#             B/stuff
#             A/other
#             D/file2
#             C/file1
#             + CONFLICT (implicit dir rename): A/file1 in way of C/file1
#
test_setup_12p () {
	git init 12p &&
	(
		cd 12p &&

		mkdir -p A B C &&
		echo 1 >A/file1 &&
		echo 2 >B/file1 &&
		echo other >C/other &&
		echo Astuff >A/stuff &&
		echo Bstuff >B/stuff &&
		git add . &&
		git commit -m "O" &&

		git branch O &&
		git branch A &&
		git branch B &&

		git switch A &&
		mkdir -p D &&
		git mv A/file1 D/file2 &&
		git mv B/file1 C/ &&
		git add . &&
		git commit -m "A" &&

		git switch B &&
		git mv C/other A/other &&
		git add . &&
		git commit -m "B"
	)
}

test_expect_success '12p: Directory rename hits other rename source; file still in way on other side' '
	test_setup_12p &&
	(
		cd 12p &&

		git checkout -q A^0 &&

		test_must_fail git -c merge.directoryRenames=conflict merge -s recursive B^0 >out &&

		test_stdout_line_count = 5 git ls-files -s &&
		test_stdout_line_count = 1 git ls-files -s A/other &&
		test_stdout_line_count = 1 git ls-files -s A/stuff &&
		test_stdout_line_count = 1 git ls-files -s B/stuff &&
		test_stdout_line_count = 1 git ls-files -s D/file2 &&

		grep "CONFLICT (implicit dir rename).*Existing file/dir at A/file1 in the way" out &&
		test_stdout_line_count = 1 git ls-files -s C/file1
	)
'

# Testcase 12q, Directory rename hits other rename source; file removed though
#   Commit O: A/file1_1
#             A/stuff
#             B/file1_2
#             B/stuff
#             C/other
#   Commit A: A/stuff
#             B/stuff
#             C/file1_2
#             C/other
#   Commit B: D/file2_1
#             A/stuff
#             B/file1_2
#             B/stuff
#             A/other
#   In words:
#     A: delete A/file1_1, rename B/file1_2 -> C/file1_2
#     B: Rename C/ -> A/, rename A/file1_1 -> D/file2_1
#   Rationale:
#     A/stuff is unmodified, it shows up in final output
#     B/stuff is unmodified, it shows up in final output
#     C/other touched on one side (rename to A), so A/other shows up in output
#     A/file1 is rename/delete to D/file2, so two stages for D/file2
#     B/file1 -> C/file1 and even though C/ -> A/, A/file1 as a source was
#       "in the way" (ish) so we don't do the directory rename
#   Expected: A/stuff
#             B/stuff
#             A/other
#             D/file2 (two stages)
#             C/file1
#             + CONFLICT (implicit dir rename): A/file1 in way of C/file1
#             + CONFLICT (rename/delete): D/file2
#
test_setup_12q () {
	git init 12q &&
	(
		cd 12q &&

		mkdir -p A B C &&
		echo 1 >A/file1 &&
		echo 2 >B/file1 &&
		echo other >C/other &&
		echo Astuff >A/stuff &&
		echo Bstuff >B/stuff &&
		git add . &&
		git commit -m "O" &&

		git branch O &&
		git branch A &&
		git branch B &&

		git switch A &&
		git rm A/file1 &&
		git mv B/file1 C/ &&
		git add . &&
		git commit -m "A" &&

		git switch B &&
		mkdir -p D &&
		git mv A/file1 D/file2 &&
		git mv C/other A/other &&
		git add . &&
		git commit -m "B"
	)
}

test_expect_success '12q: Directory rename hits other rename source; file removed though' '
	test_setup_12q &&
	(
		cd 12q &&

		git checkout -q A^0 &&

		test_must_fail git -c merge.directoryRenames=conflict merge -s recursive B^0 >out &&

		grep "CONFLICT (rename/delete).*A/file1.*D/file2" out &&
		grep "CONFLICT (implicit dir rename).*Existing file/dir at A/file1 in the way" out &&

		test_stdout_line_count = 6 git ls-files -s &&
		test_stdout_line_count = 1 git ls-files -s A/other &&
		test_stdout_line_count = 1 git ls-files -s A/stuff &&
		test_stdout_line_count = 1 git ls-files -s B/stuff &&
		test_stdout_line_count = 2 git ls-files -s D/file2 &&

		# This is a slightly suboptimal resolution; allowing the
		# rename of C/ -> A/ to affect C/file1 and further rename
		# it to A/file1 would probably be preferable, but since
		# A/file1 existed as the source of another rename, allowing
		# the dir rename of C/file1 -> A/file1 would mean modifying
		# the code so that renames do not adjust both their source
		# and target paths in all cases.
		! grep "CONFLICT (file location)" out &&
		test_stdout_line_count = 1 git ls-files -s C/file1
	)
'

###########################################################################
# SECTION 13: Checking informational and conflict messages
#
# A year after directory rename detection became the default, it was
# instead decided to report conflicts on the pathname on the basis that
# some users may expect the new files added or moved into a directory to
# be unrelated to all the other files in that directory, and thus that
# directory rename detection is unexpected.  Test that the messages printed
# match our expectation.
###########################################################################

# Testcase 13a, Basic directory rename with newly added files
#   Commit O: z/{b,c}
#   Commit A: y/{b,c}
#   Commit B: z/{b,c,d,e/f}
#   Expected: y/{b,c,d,e/f}, with notices/conflicts for both y/d and y/e/f

# $1 selects the variant ("conflict" or "info") so the same history can be
# merged under both merge.directoryRenames settings below.
test_setup_13a () {
	git init 13a_$1 &&
	(
		cd 13a_$1 &&

		mkdir z &&
		echo b >z/b &&
		echo c >z/c &&
		git add z &&
		test_tick &&
		git commit -m "O" &&

		git branch O &&
		git branch A &&
		git branch B &&

		git checkout A &&
		git mv z y &&
		test_tick &&
		git commit -m "A" &&

		git checkout B &&
		echo d >z/d &&
		mkdir z/e &&
		echo f >z/e/f &&
		git add z/d z/e/f &&
		test_tick &&
		git commit -m "B"
	)
}

test_expect_success '13a(conflict): messages for newly added files' '
	test_setup_13a conflict &&
	(
		cd 13a_conflict &&

		git checkout A^0 &&

		test_must_fail git merge -s recursive B^0 >out 2>err &&

		test_grep CONFLICT..file.location.*z/e/f.added.in.B^0.*y/e/f out &&
		test_grep CONFLICT..file.location.*z/d.added.in.B^0.*y/d out &&

		git ls-files >paths &&
		! grep z/ paths &&
		grep "y/[de]" paths &&

		test_path_is_missing z/d &&
		test_path_is_file y/d &&
		test_path_is_missing z/e/f &&
		test_path_is_file y/e/f
	)
'

test_expect_success '13a(info): messages for newly added files' '
	test_setup_13a info &&
	(
		cd 13a_info &&

		git reset --hard &&
		git checkout A^0 &&

		git -c merge.directoryRenames=true merge -s recursive B^0 >out 2>err &&

		test_grep Path.updated:.*z/e/f.added.in.B^0.*y/e/f out &&
		test_grep Path.updated:.*z/d.added.in.B^0.*y/d out &&

		git ls-files >paths &&
		! grep z/ paths &&
		grep "y/[de]" paths &&

		test_path_is_missing z/d &&
		test_path_is_file y/d &&
		test_path_is_missing z/e/f &&
		test_path_is_file y/e/f
	)
'

# Testcase 13b, Transitive rename with conflicted content merge and default
#               "conflict" setting
#   (Related to testcase 1c, 9b)
#   Commit O: z/{b,c}, x/d_1
#   Commit A: y/{b,c}, x/d_2
#   Commit B: z/{b,c,d_3}
#   Expected: y/{b,c,d_merged}, with two conflict messages for y/d,
#             one about content, and one about file location

test_setup_13b () {
	git init 13b_$1 &&
	(
		cd 13b_$1 &&

		mkdir x &&
		mkdir z &&
		test_seq 1 10 >x/d &&
		echo b >z/b &&
		echo c >z/c &&
		git add x z &&
		test_tick &&
		git commit -m "O" &&

		git branch O &&
		git branch A &&
		git branch B &&

		git checkout A &&
		git mv z y &&
		echo 11 >>x/d &&
		git add x/d &&
		test_tick &&
		git commit -m "A" &&

		git checkout B &&
		echo eleven >>x/d &&
		git mv x/d z/d &&
		git add z/d &&
		test_tick &&
		git commit -m "B"
	)
}

test_expect_success '13b(conflict): messages for transitive rename with conflicted content' '
	test_setup_13b conflict &&
	(
		cd 13b_conflict &&

		git checkout A^0 &&

		test_must_fail git merge -s recursive B^0 >out 2>err &&

		test_grep CONFLICT.*content.*Merge.conflict.in.y/d out &&
		test_grep CONFLICT..file.location.*x/d.renamed.to.z/d.*moved.to.y/d out &&

		git ls-files >paths &&
		! grep z/ paths &&
		grep "y/d" paths &&

		test_path_is_missing z/d &&
		test_path_is_file y/d
	)
'

test_expect_success '13b(info): messages for transitive rename with conflicted content' '
	test_setup_13b info &&
	(
		cd 13b_info &&

		git reset --hard &&
		git checkout A^0 &&

		test_must_fail git -c merge.directoryRenames=true merge -s recursive B^0 >out 2>err &&

		test_grep CONFLICT.*content.*Merge.conflict.in.y/d out &&
		test_grep Path.updated:.*x/d.renamed.to.z/d.in.B^0.*moving.it.to.y/d out &&

		git ls-files >paths &&
		! grep z/ paths &&
		grep "y/d" paths &&

		test_path_is_missing z/d &&
		test_path_is_file y/d
	)
'

# Testcase 13c, Rename/rename(1to1) due to directory rename
#   Commit O: z/{b,c},   x/{d,e}
#   Commit A: y/{b,c,d}, x/e
#   Commit B: z/{b,c,d}, x/e
#   Expected: y/{b,c,d}, x/e, with info or conflict messages for d
#             B: renamed x/d -> z/d; A: renamed z/ -> y/ AND renamed x/d to y/d
#             One could argue B had partial knowledge of what was done with
#             d and A had full knowledge, but that's a slippery slope as
#             shown in testcase 13d.

test_setup_13c () {
	git init 13c_$1 &&
	(
		cd 13c_$1 &&

		mkdir x &&
		mkdir z &&
		test_seq 1 10 >x/d &&
		echo e >x/e &&
		echo b >z/b &&
		echo c >z/c &&
		git add x z &&
		test_tick &&
		git commit -m "O" &&

		git branch O &&
		git branch A &&
		git branch B &&

		git checkout A &&
		git mv z y &&
		git mv x/d y/ &&
		test_tick &&
		git commit -m "A" &&

		git checkout B &&
		git mv x/d z/d &&
		git add z/d &&
		test_tick &&
		git commit -m "B"
	)
}

test_expect_success '13c(conflict): messages for rename/rename(1to1) via transitive rename' '
	test_setup_13c conflict &&
	(
		cd 13c_conflict &&

		git checkout A^0 &&

		test_must_fail git merge -s recursive B^0 >out 2>err &&

		test_grep CONFLICT..file.location.*x/d.renamed.to.z/d.*moved.to.y/d out &&

		git ls-files >paths &&
		! grep z/ paths &&
		grep "y/d" paths &&

		test_path_is_missing z/d &&
		test_path_is_file y/d
	)
'

test_expect_success '13c(info): messages for rename/rename(1to1) via transitive rename' '
	test_setup_13c info &&
	(
		cd 13c_info &&

		git reset --hard &&
		git checkout A^0 &&

		git -c merge.directoryRenames=true merge -s recursive B^0 >out 2>err &&

		test_grep Path.updated:.*x/d.renamed.to.z/d.in.B^0.*moving.it.to.y/d out &&

		git ls-files >paths &&
		! grep z/ paths &&
		grep "y/d" paths &&

		test_path_is_missing z/d &&
		test_path_is_file y/d
	)
'

# Testcase 13d, Rename/rename(1to1) due to directory rename on both sides
#   Commit O: a/{z,y}, b/x, c/w
#   Commit A: a/z, b/{y,x}, d/w
#   Commit B: a/z, d/x, c/{y,w}
#   Expected: a/z, d/{y,x,w} with no file location conflict for x
#             Easy cases:
#               * z is always in a; so it stays in a.
#               * x starts in b, only modified on one side to move into d/
#               * w starts in c, only modified on one side to move into d/
#             Hard case:
#               * A renames a/y to b/y, and B renames b/->d/ => a/y -> d/y
#               * B renames a/y to c/y, and A renames c/->d/ => a/y -> d/y
#               No conflict in where a/y ends up, so put it in d/y.

test_setup_13d () {
	git init 13d_$1 &&
	(
		cd 13d_$1 &&

		mkdir a &&
		mkdir b &&
		mkdir c &&
		echo z >a/z &&
		echo y >a/y &&
		echo x >b/x &&
		echo w >c/w &&
		git add a b c &&
		test_tick &&
		git commit -m "O" &&

		git branch O &&
		git branch A &&
		git branch B &&

		git checkout A &&
		git mv a/y b/ &&
		git mv c/ d/ &&
		test_tick &&
		git commit -m "A" &&

		git checkout B &&
		git mv a/y c/ &&
		git mv b/ d/ &&
		test_tick &&
		git commit -m "B"
	)
}

test_expect_success '13d(conflict): messages for rename/rename(1to1) via dual transitive rename' '
	test_setup_13d conflict &&
	(
		cd 13d_conflict &&

		git checkout A^0 &&

		test_must_fail git merge -s recursive B^0 >out 2>err &&

		test_grep CONFLICT..file.location.*a/y.renamed.to.b/y.*moved.to.d/y out &&
		test_grep CONFLICT..file.location.*a/y.renamed.to.c/y.*moved.to.d/y out &&

		git ls-files >paths &&
		! grep b/ paths &&
		! grep c/ paths &&
		grep "d/y" paths &&

		test_path_is_missing b/y &&
		test_path_is_missing c/y &&
		test_path_is_file d/y
	)
'

test_expect_success '13d(info): messages for rename/rename(1to1) via dual transitive rename' '
	test_setup_13d info &&
	(
		cd 13d_info &&

		git reset --hard &&
		git checkout A^0 &&

		git -c merge.directoryRenames=true merge -s recursive B^0 >out 2>err &&

		test_grep Path.updated.*a/y.renamed.to.b/y.*moving.it.to.d/y out &&
		test_grep Path.updated.*a/y.renamed.to.c/y.*moving.it.to.d/y out &&

		git ls-files >paths &&
		! grep b/ paths &&
		! grep c/ paths &&
		grep "d/y" paths &&

		test_path_is_missing b/y &&
		test_path_is_missing c/y &&
		test_path_is_file d/y
	)
'

# Testcase 13e, directory rename in virtual merge base
#
# This testcase has a slightly different setup than all the above cases, in
# order to include a recursive case:
#
#      A   C
#      o - o
#     / \ / \
#  O o   X   ?
#     \ / \ /
#      o   o
#      B   D
#
# Commit O: a/{z,y}
# Commit A: b/{z,y}
# Commit B: a/{z,y,x}
# Commit C: b/{z,y,x}
# Commit D: b/{z,y}, a/x
# Expected: b/{z,y,x}  (sort of; see below for why this might not be expected)
#
#   NOTES: 'X' represents a virtual merge base.  With the default of
#          directory rename detection yielding conflicts, merging A and B
#          results in a conflict complaining about whether 'x' should be
#          under 'a/' or 'b/'.  However, when creating the virtual merge
#          base 'X', since virtual merge bases need to be written out as a
#          tree, we cannot have a conflict, so some resolution has to be
#          picked.
#
#          In choosing the right resolution, it's worth noting here that
#          commits C & D are merges of A & B that choose different
#          locations for 'x' (i.e. they resolve the conflict differently),
#          and so it would be nice when merging C & D if git could detect
#          this difference of opinion and report a conflict.  But the only
#          way to do so that I can think of would be to have the virtual
#          merge base place 'x' in some directory other than either 'a/' or
#          'b/', which seems a little weird -- especially since it'd result
#          in a rename/rename(1to2) conflict with a source path that never
#          existed in any version.
#
#          So, for now, when directory rename detection is set to
#          'conflict' just avoid doing directory rename detection at all in
#          the recursive case.  This will not allow us to detect a conflict
#          in the outer merge for this special kind of setup, but it at
#          least avoids hitting a BUG().
#
test_setup_13e () {
	git init 13e &&
	(
		cd 13e &&

		mkdir a &&
		echo z >a/z &&
		echo y >a/y &&
		git add a &&
		test_tick &&
		git commit -m "O" &&

		git branch O &&
		git branch A &&
		git branch B &&

		git checkout A &&
		git mv a/ b/ &&
		test_tick &&
		git commit -m "A" &&

		git checkout B &&
		echo x >a/x &&
		git add a &&
		test_tick &&
		git commit -m "B" &&

		git branch C A &&
		git branch D B &&

		git checkout C &&
		test_must_fail git -c merge.directoryRenames=conflict merge B &&
		git add b/x &&
		test_tick &&
		git commit -m "C" &&

		git checkout D &&
		test_must_fail git -c merge.directoryRenames=conflict merge A &&
		git add b/x &&
		mkdir a &&
		git mv b/x a/x &&
		test_tick &&
		git commit -m "D"
	)
}

test_expect_success '13e: directory rename detection in recursive case' '
	test_setup_13e &&
	(
		cd 13e &&

		git checkout --quiet D^0 &&

		git -c merge.directoryRenames=conflict merge -s recursive C^0 >out 2>err &&

		# the merge must succeed cleanly and must not crash
		test_grep ! CONFLICT out &&
		test_grep ! BUG: err &&
		test_grep ! core.dumped err &&
		test_must_be_empty err &&

		git ls-files >paths &&
		! grep a/x paths &&
		grep b/x paths
	)
'

test_done
unknown
github
https://github.com/git/git
t/t6423-merge-rename-directories.sh
--- title: "@react-router/{adapter}" --- # Server Adapters ## Official Adapters Idiomatic React Router apps can generally be deployed anywhere because React Router adapts the server's request/response to the [Web Fetch API][web-fetch-api]. It does this through adapters. We maintain a few adapters: - `@react-router/architect` - `@react-router/cloudflare` - `@react-router/express` These adapters are imported into your server's entry and are not used inside your React Router app itself. If you initialized your app with `npx create-react-router@latest` with something other than the built-in [React Router App Server][rr-serve] (`@react-router/serve`), you will note a `server/index.js` file that imports and uses one of these adapters. <docs-info>If you're using the built-in React Router App Server, you don't interact with this API</docs-info> Each adapter has the same API. In the future, we may have helpers specific to the platform you're deploying to. ## `@react-router/express` [Reference Documentation ↗](https://api.reactrouter.com/v7/modules/_react-router_express.html) Here's an example with [Express][express]: ```ts lines=[1-3,11-22] const { createRequestHandler, } = require("@react-router/express"); const express = require("express"); const app = express(); // needs to handle all verbs (GET, POST, etc.) app.all( "*", createRequestHandler({ // `react-router build` and `react-router dev` output files to a build directory, // you need to pass that build to the request handler build: require("./build"), // Return anything you want here to be available as `context` in your // loaders and actions. 
This is where you can bridge the gap between your
    // server and React Router
    getLoadContext(req, res) {
      return {};
    },
  }),
);
```

### Migrating from the React Router App Server

If you started an app with the [React Router App Server][rr-serve] but find that you want to take control over the Express server and customize it, it should be fairly straightforward to migrate away from `@react-router/serve`.

You can refer to the [Express template][express-template] as a reference, but here are the main changes you will need to make:

**1. Update deps**

```shellscript nonumber
npm uninstall @react-router/serve
npm install @react-router/express compression express morgan cross-env
npm install --save-dev @types/express @types/express-serve-static-core @types/morgan
```

**2. Add a server**

Create your React Router Express server in `server/app.ts`:

```ts filename=server/app.ts
import "react-router";

import { createRequestHandler } from "@react-router/express";
import express from "express";

export const app = express();

app.use(
  createRequestHandler({
    build: () => import("virtual:react-router/server-build"),
  }),
);
```

Copy the [`server.js`][express-template-server-js] into your app. This is the boilerplate setup we recommend to allow the same server code to run both the development and production builds of your app.

Two separate files are used here so that the main Express server code can be written in TypeScript (`server/app.ts`) and compiled into your server build by React Router, and then executed via `node server.js`.

**3. Update `vite.config.ts` to compile the server**

```tsx filename=vite.config.ts lines=[6-10]
import { reactRouter } from "@react-router/dev/vite";
import { defineConfig } from "vite";
import tsconfigPaths from "vite-tsconfig-paths";

export default defineConfig(({ isSsrBuild }) => ({
  build: {
    rollupOptions: isSsrBuild
      ? { input: "./server/app.ts" }
      : undefined,
  },
  plugins: [reactRouter(), tsconfigPaths()],
}));
```

**4.
Update `package.json` scripts** Update the `dev` and `start` scripts to use your new Express server: ```json filename=package.json { // ... "scripts": { "dev": "cross-env NODE_ENV=development node server.js", "start": "node server.js" // ... } // ... } ``` ## `@react-router/cloudflare` [Reference Documentation ↗](https://api.reactrouter.com/v7/modules/_react-router_cloudflare.html) Here's an example with Cloudflare: ```ts import { createRequestHandler } from "react-router"; declare module "react-router" { export interface AppLoadContext { cloudflare: { env: Env; ctx: ExecutionContext; }; } } const requestHandler = createRequestHandler( () => import("virtual:react-router/server-build"), import.meta.env.MODE, ); export default { async fetch(request, env, ctx) { return requestHandler(request, { cloudflare: { env, ctx }, }); }, } satisfies ExportedHandler<Env>; ``` ## `@react-router/node` While not a direct "adapter" like the above, this package contains utilities for working with Node-based adapters. [Reference Documentation ↗](https://api.reactrouter.com/v7/modules/_react-router_node.html) ### Node Version Support React Router officially supports **Active** and **Maintenance** [Node LTS versions][node-releases] at any given point in time. Dropped support for End of Life Node versions is done in a React Router Minor release. [express]: https://expressjs.com [node-releases]: https://nodejs.org/en/about/previous-releases [web-fetch-api]: https://developer.mozilla.org/en-US/docs/Web/API/Fetch_API [rr-serve]: ./serve [express-template]: https://github.com/remix-run/react-router-templates/tree/main/node-custom-server [express-template-server-js]: https://github.com/remix-run/react-router-templates/blob/main/node-custom-server/server.js
unknown
github
https://github.com/remix-run/react-router
docs/api/other-api/adapter.md
# -*- coding: utf-8 -*- from __future__ import division, print_function, absolute_import from numpy import (abs, sum, sin, cos, sqrt, log, prod, where, pi, exp, arange, floor, log10, atleast_2d, zeros) from .go_benchmark import Benchmark class Parsopoulos(Benchmark): r""" Parsopoulos objective function. This class defines the Parsopoulos [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\text{Parsopoulos}}(x) = \cos(x_1)^2 + \sin(x_2)^2 with :math:`x_i \in [-5, 5]` for :math:`i = 1, 2`. *Global optimum*: This function has infinite number of global minima in R2, at points :math:`\left(k\frac{\pi}{2}, \lambda \pi \right)`, where :math:`k = \pm1, \pm3, ...` and :math:`\lambda = 0, \pm1, \pm2, ...` In the given domain problem, function has 12 global minima all equal to zero. .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194. """ def __init__(self, dimensions=2): Benchmark.__init__(self, dimensions) self._bounds = zip([-5.0] * self.N, [5.0] * self.N) self.global_optimum = [[pi / 2.0, pi]] self.fglob = 0 def fun(self, x, *args): self.nfev += 1 return cos(x[0]) ** 2.0 + sin(x[1]) ** 2.0 class Pathological(Benchmark): r""" Pathological objective function. This class defines the Pathological [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\text{Pathological}}(x) = \sum_{i=1}^{n -1} \frac{\sin^{2}\left( \sqrt{100 x_{i+1}^{2} + x_{i}^{2}}\right) -0.5}{0.001 \left(x_{i}^{2} - 2x_{i}x_{i+1} + x_{i+1}^{2}\right)^{2} + 0.50} Here, :math:`n` represents the number of dimensions and :math:`x_i \in [-100, 100]` for :math:`i = 1, 2`. *Global optimum*: :math:`f(x) = 0.` for :math:`x = [0, 0]` for :math:`i = 1, 2` .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. 
Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194. """ def __init__(self, dimensions=2): Benchmark.__init__(self, dimensions) self._bounds = zip([-100.0] * self.N, [100.0] * self.N) self.global_optimum = [[0 for _ in range(self.N)]] self.fglob = 0. def fun(self, x, *args): self.nfev += 1 vec = (0.5 + (sin(sqrt(100 * x[: -1] ** 2 + x[1:] ** 2)) ** 2 - 0.5) / (1. + 0.001 * (x[: -1] ** 2 - 2 * x[: -1] * x[1:] + x[1:] ** 2) ** 2)) return sum(vec) class Paviani(Benchmark): r""" Paviani objective function. This class defines the Paviani [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\text{Paviani}}(x) = \sum_{i=1}^{10} \left[\log^{2}\left(10 - x_i\right) + \log^{2}\left(x_i -2\right)\right] - \left(\prod_{i=1}^{10} x_i^{10} \right)^{0.2} with :math:`x_i \in [2.001, 9.999]` for :math:`i = 1, ... , 10`. *Global optimum*: :math:`f(x_i) = -45.7784684040686` for :math:`x_i = 9.350266` for :math:`i = 1, ..., 10` .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194. TODO: think Gavana web/code definition is wrong because final product term shouldn't raise x to power 10. """ def __init__(self, dimensions=10): Benchmark.__init__(self, dimensions) self._bounds = zip([2.001] * self.N, [9.999] * self.N) self.global_optimum = [[9.350266 for _ in range(self.N)]] self.fglob = -45.7784684040686 def fun(self, x, *args): self.nfev += 1 return sum(log(x - 2) ** 2.0 + log(10.0 - x) ** 2.0) - prod(x) ** 0.2 class Penalty01(Benchmark): r""" Penalty 1 objective function. This class defines the Penalty 1 [1]_ global optimization problem. This is a imultimodal minimization problem defined as follows: .. 
math:: f_{\text{Penalty01}}(x) = \frac{\pi}{30} \left\{10 \sin^2(\pi y_1) + \sum_{i=1}^{n-1} (y_i - 1)^2 \left[1 + 10 \sin^2(\pi y_{i+1}) \right] + (y_n - 1)^2 \right \} + \sum_{i=1}^n u(x_i, 10, 100, 4) Where, in this exercise: .. math:: y_i = 1 + \frac{1}{4}(x_i + 1) And: .. math:: u(x_i, a, k, m) = \begin{cases} k(x_i - a)^m & \textrm{if} \hspace{5pt} x_i > a \\ 0 & \textrm{if} \hspace{5pt} -a \leq x_i \leq a \\ k(-x_i - a)^m & \textrm{if} \hspace{5pt} x_i < -a \end{cases} Here, :math:`n` represents the number of dimensions and :math:`x_i \in [-50, 50]` for :math:`i= 1, ..., n`. *Global optimum*: :math:`f(x) = 0` for :math:`x_i = -1` for :math:`i = 1, ..., n` .. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015 """ def __init__(self, dimensions=2): Benchmark.__init__(self, dimensions) self._bounds = zip([-50.0] * self.N, [50.0] * self.N) self.custom_bounds = ([-5.0, 5.0], [-5.0, 5.0]) self.global_optimum = [[-1.0 for _ in range(self.N)]] self.fglob = 0.0 self.change_dimensionality = True def fun(self, x, *args): self.nfev += 1 a, b, c = 10.0, 100.0, 4.0 xx = abs(x) u = where(xx > a, b * (xx - a) ** c, 0.0) y = 1.0 + (x + 1.0) / 4.0 return (sum(u) + (pi / 30.0) * (10.0 * sin(pi * y[0]) ** 2.0 + sum((y[: -1] - 1.0) ** 2.0 * (1.0 + 10.0 * sin(pi * y[1:]) ** 2.0)) + (y[-1] - 1) ** 2.0)) class Penalty02(Benchmark): r""" Penalty 2 objective function. This class defines the Penalty 2 [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\text{Penalty02}}(x) = 0.1 \left\{\sin^2(3\pi x_1) + \sum_{i=1}^{n-1} (x_i - 1)^2 \left[1 + \sin^2(3\pi x_{i+1}) \right ] + (x_n - 1)^2 \left [1 + \sin^2(2 \pi x_n) \right ]\right \} + \sum_{i=1}^n u(x_i, 5, 100, 4) Where, in this exercise: .. 
math:: u(x_i, a, k, m) = \begin{cases} k(x_i - a)^m & \textrm{if} \hspace{5pt} x_i > a \\ 0 & \textrm{if} \hspace{5pt} -a \leq x_i \leq a \\ k(-x_i - a)^m & \textrm{if} \hspace{5pt} x_i < -a \\ \end{cases} Here, :math:`n` represents the number of dimensions and :math:`x_i \in [-50, 50]` for :math:`i = 1, ..., n`. *Global optimum*: :math:`f(x) = 0` for :math:`x_i = 1` for :math:`i = 1, ..., n` .. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015 """ def __init__(self, dimensions=2): Benchmark.__init__(self, dimensions) self._bounds = zip([-50.0] * self.N, [50.0] * self.N) self.custom_bounds = ([-4.0, 4.0], [-4.0, 4.0]) self.global_optimum = [[1.0 for _ in range(self.N)]] self.fglob = 0.0 self.change_dimensionality = True def fun(self, x, *args): self.nfev += 1 a, b, c = 5.0, 100.0, 4.0 xx = abs(x) u = where(xx > a, b * (xx - a) ** c, 0.0) return (sum(u) + 0.1 * (10 * sin(3.0 * pi * x[0]) ** 2.0 + sum((x[:-1] - 1.0) ** 2.0 * (1.0 + sin(3 * pi * x[1:]) ** 2.0)) + (x[-1] - 1) ** 2.0 * (1 + sin(2 * pi * x[-1]) ** 2.0))) class PenHolder(Benchmark): r""" PenHolder objective function. This class defines the PenHolder [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\text{PenHolder}}(x) = -e^{\left|{e^{-\left|{- \frac{\sqrt{x_{1}^{2} + x_{2}^{2}}}{\pi} + 1}\right|} \cos\left(x_{1}\right) \cos\left(x_{2}\right)}\right|^{-1}} with :math:`x_i \in [-11, 11]` for :math:`i = 1, 2`. *Global optimum*: :math:`f(x_i) = -0.9635348327265058` for :math:`x_i = \pm 9.646167671043401` for :math:`i = 1, 2` .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194. 
""" def __init__(self, dimensions=2): Benchmark.__init__(self, dimensions) self._bounds = zip([-11.0] * self.N, [11.0] * self.N) self.global_optimum = [[-9.646167708023526, 9.646167671043401]] self.fglob = -0.9635348327265058 def fun(self, x, *args): self.nfev += 1 a = abs(1. - (sqrt(x[0] ** 2 + x[1] ** 2) / pi)) b = cos(x[0]) * cos(x[1]) * exp(a) return -exp(-abs(b) ** -1) class PermFunction01(Benchmark): r""" PermFunction 1 objective function. This class defines the PermFunction1 [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\text{PermFunction01}}(x) = \sum_{k=1}^n \left\{ \sum_{j=1}^n (j^k + \beta) \left[ \left(\frac{x_j}{j}\right)^k - 1 \right] \right\}^2 Here, :math:`n` represents the number of dimensions and :math:`x_i \in [-n, n + 1]` for :math:`i = 1, ..., n`. *Global optimum*: :math:`f(x) = 0` for :math:`x_i = i` for :math:`i = 1, ..., n` .. [1] Mishra, S. Global Optimization by Differential Evolution and Particle Swarm Methods: Evaluation on Some Benchmark Functions. Munich Personal RePEc Archive, 2006, 1005 TODO: line 560 """ def __init__(self, dimensions=2): Benchmark.__init__(self, dimensions) self._bounds = zip([-self.N] * self.N, [self.N + 1] * self.N) self.global_optimum = [range(1, self.N + 1)] self.fglob = 0.0 self.change_dimensionality = True def fun(self, x, *args): self.nfev += 1 b = 0.5 k = atleast_2d(arange(self.N) + 1).T j = atleast_2d(arange(self.N) + 1) s = (j ** k + b) * ((x / j) ** k - 1) return sum((sum(s, axis=1) ** 2)) class PermFunction02(Benchmark): r""" PermFunction 2 objective function. This class defines the Perm Function 2 [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. 
math:: f_{\text{PermFunction02}}(x) = \sum_{k=1}^n \left\{ \sum_{j=1}^n (j + \beta) \left[ \left(x_j^k - {\frac{1}{j}}^{k} \right ) \right] \right\}^2 Here, :math:`n` represents the number of dimensions and :math:`x_i \in [-n, n+1]` for :math:`i = 1, ..., n`. *Global optimum*: :math:`f(x) = 0` for :math:`x_i = \frac{1}{i}` for :math:`i = 1, ..., n` .. [1] Mishra, S. Global Optimization by Differential Evolution and Particle Swarm Methods: Evaluation on Some Benchmark Functions. Munich Personal RePEc Archive, 2006, 1005 TODO: line 582 """ def __init__(self, dimensions=2): Benchmark.__init__(self, dimensions) self._bounds = zip([-self.N] * self.N, [self.N + 1] * self.N) self.custom_bounds = ([0, 1.5], [0, 1.0]) self.global_optimum = [1. / arange(1, self.N + 1)] self.fglob = 0.0 self.change_dimensionality = True def fun(self, x, *args): self.nfev += 1 b = 10 k = atleast_2d(arange(self.N) + 1).T j = atleast_2d(arange(self.N) + 1) s = (j + b) * (x ** k - (1. / j) ** k) return sum((sum(s, axis=1) ** 2)) class Pinter(Benchmark): r""" Pinter objective function. This class defines the Pinter [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\text{Pinter}}(x) = \sum_{i=1}^n ix_i^2 + \sum_{i=1}^n 20i \sin^2 A + \sum_{i=1}^n i \log_{10} (1 + iB^2) Where, in this exercise: .. math:: \begin{cases} A = x_{i-1} \sin x_i + \sin x_{i+1} \\ B = x_{i-1}^2 - 2x_i + 3x_{i + 1} - \cos x_i + 1\\ \end{cases} Where :math:`x_0 = x_n` and :math:`x_{n + 1} = x_1`. Here, :math:`n` represents the number of dimensions and :math:`x_i \in [-10, 10]` for :math:`i = 1, ..., n`. *Global optimum*: :math:`f(x) = 0` for :math:`x_i = 0` for :math:`i = 1, ..., n` .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194. 
""" def __init__(self, dimensions=2): Benchmark.__init__(self, dimensions) self._bounds = zip([-10.0] * self.N, [10.0] * self.N) self.global_optimum = [[0.0 for _ in range(self.N)]] self.fglob = 0.0 self.change_dimensionality = True def fun(self, x, *args): self.nfev += 1 i = arange(self.N) + 1 xx = zeros(self.N + 2) xx[1: - 1] = x xx[0] = x[-1] xx[-1] = x[0] A = xx[0: -2] * sin(xx[1: - 1]) + sin(xx[2:]) B = xx[0: -2] ** 2 - 2 * xx[1: - 1] + 3 * xx[2:] - cos(xx[1: - 1]) + 1 return (sum(i * x ** 2) + sum(20 * i * sin(A) ** 2) + sum(i * log10(1 + i * B ** 2))) class Plateau(Benchmark): r""" Plateau objective function. This class defines the Plateau [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\text{Plateau}}(x) = 30 + \sum_{i=1}^n \lfloor \lvert x_i \rvert\rfloor Here, :math:`n` represents the number of dimensions and :math:`x_i \in [-5.12, 5.12]` for :math:`i = 1, ..., n`. *Global optimum*: :math:`f(x) = 30` for :math:`x_i = 0` for :math:`i = 1, ..., n` .. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015 """ def __init__(self, dimensions=2): Benchmark.__init__(self, dimensions) self._bounds = zip([-5.12] * self.N, [5.12] * self.N) self.global_optimum = [[0.0 for _ in range(self.N)]] self.fglob = 30.0 self.change_dimensionality = True def fun(self, x, *args): self.nfev += 1 return 30.0 + sum(floor(abs(x))) class Powell(Benchmark): r""" Powell objective function. This class defines the Powell [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\text{Powell}}(x) = (x_3+10x_1)^2 + 5(x_2-x_4)^2 + (x_1-2x_2)^4 + 10(x_3-x_4)^4 Here, :math:`n` represents the number of dimensions and :math:`x_i \in [-4, 5]` for :math:`i = 1, ..., 4`. *Global optimum*: :math:`f(x) = 0` for :math:`x_i = 0` for :math:`i = 1, ..., 4` ..[1] Powell, M. 
An iterative method for finding stationary values of a function of
    several variables Computer Journal, 1962, 5, 147-151
    """

    def __init__(self, dimensions=4):
        Benchmark.__init__(self, dimensions)

        # NOTE(review): zip() is lazy on Python 3 -- assumes Benchmark
        # materializes _bounds, or that this file targets Python 2; confirm.
        self._bounds = zip([-4.0] * self.N, [5.0] * self.N)

        self.global_optimum = [[0, 0, 0, 0]]
        self.fglob = 0

    def fun(self, x, *args):
        # Count every objective evaluation, as for all Benchmark subclasses.
        self.nfev += 1

        return ((x[0] + 10 * x[1]) ** 2 + 5 * (x[2] - x[3]) ** 2
                + (x[1] - 2 * x[2]) ** 4 + 10 * (x[0] - x[3]) ** 4)


class PowerSum(Benchmark):

    r"""
    Power sum objective function.

    This class defines the Power Sum global optimization problem. This is a
    multimodal minimization problem defined as follows:

    .. math::

        f_{\text{PowerSum}}(x) = \sum_{k=1}^n\left[\left(\sum_{i=1}^n x_i^k
        \right) - b_k \right]^2

    Where, in this exercise, :math:`b = [8, 18, 44, 114]`

    Here, :math:`x_i \in [0, 4]` for :math:`i = 1, ..., 4`.

    *Global optimum*: :math:`f(x) = 0` for :math:`x = [1, 2, 2, 3]`

    .. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015
    """

    def __init__(self, dimensions=4):
        Benchmark.__init__(self, dimensions)

        self._bounds = zip([0.0] * self.N, [4.0] * self.N)

        self.global_optimum = [[1.0, 2.0, 2.0, 3.0]]
        self.fglob = 0.0

    def fun(self, x, *args):
        self.nfev += 1

        b = [8.0, 18.0, 44.0, 114.0]
        # k is the column vector [[1], [2], ..., [N]], so x ** k broadcasts
        # to an (N, N) matrix of powers; sum(..., axis=1) gives the inner
        # sums over i for each exponent k.
        k = atleast_2d(arange(self.N) + 1).T
        return sum((sum(x ** k, axis=1) - b) ** 2)


class Price01(Benchmark):

    r"""
    Price 1 objective function.

    This class defines the Price 1 [1]_ global optimization problem. This is a
    multimodal minimization problem defined as follows:

    .. math::

        f_{\text{Price01}}(x) = (\lvert x_1 \rvert - 5)^2 +
        (\lvert x_2 \rvert - 5)^2

    with :math:`x_i \in [-500, 500]` for :math:`i = 1, 2`.

    *Global optimum*: :math:`f(x_i) = 0.0` for :math:`x = [5, 5]` or
    :math:`x = [5, -5]` or :math:`x = [-5, 5]` or :math:`x = [-5, -5]`.

    .. [1] Price, W. A controlled random search procedure for global
    optimisation Computer Journal, 1977, 20, 367-370
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)

        self._bounds = zip([-500.0] * self.N, [500.0] * self.N)
        self.custom_bounds = ([-10.0, 10.0], [-10.0, 10.0])

        self.global_optimum = [[5.0, 5.0]]
        self.fglob = 0.0

    def fun(self, x, *args):
        self.nfev += 1

        return (abs(x[0]) - 5.0) ** 2.0 + (abs(x[1]) - 5.0) ** 2.0


class Price02(Benchmark):

    r"""
    Price 2 objective function.

    This class defines the Price 2 [1]_ global optimization problem. This is a
    multimodal minimization problem defined as follows:

    .. math::

        f_{\text{Price02}}(x) = 1 + \sin^2(x_1) + \sin^2(x_2)
        - 0.1e^{(-x_1^2 - x_2^2)}

    with :math:`x_i \in [-10, 10]` for :math:`i = 1, 2`.

    *Global optimum*: :math:`f(x) = 0.9` for :math:`x_i = [0, 0]`

    .. [1] Price, W. A controlled random search procedure for global
    optimisation Computer Journal, 1977, 20, 367-370
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)

        self._bounds = zip([-10.0] * self.N, [10.0] * self.N)

        self.global_optimum = [[0.0, 0.0]]
        self.fglob = 0.9

    def fun(self, x, *args):
        self.nfev += 1

        return 1.0 + sum(sin(x) ** 2) - 0.1 * exp(-x[0] ** 2.0 - x[1] ** 2.0)


class Price03(Benchmark):

    r"""
    Price 3 objective function.

    This class defines the Price 3 [1]_ global optimization problem. This is a
    multimodal minimization problem defined as follows:

    .. math::

        f_{\text{Price03}}(x) = 100(x_2 - x_1^2)^2 +
        \left[6.4(x_2 - 0.5)^2 - x_1 - 0.6 \right]^2

    with :math:`x_i \in [-50, 50]` for :math:`i = 1, 2`.

    *Global optimum*: :math:`f(x) = 0` for :math:`x = [-5, -5]`,
    :math:`x = [-5, 5]`, :math:`x = [5, -5]`, :math:`x = [5, 5]`.

    .. [1] Price, W. A controlled random search procedure for global
    optimisation Computer Journal, 1977, 20, 367-370

    TODO Jamil #96 has an erroneous factor of 6 in front of the square
    brackets
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)

        # NOTE(review): the docstring says x_i in [-50, 50] but the bounds
        # used here are [-5, 5] -- confirm which is intended.
        self._bounds = zip([-5.0] * self.N, [5.0] * self.N)
        self.custom_bounds = ([0, 2], [0, 2])

        self.global_optimum = [[1.0, 1.0]]
        self.fglob = 0.0

    def fun(self, x, *args):
        self.nfev += 1

        return (100 * (x[1] - x[0] ** 2) ** 2
                + (6.4 * (x[1] - 0.5) ** 2 - x[0] - 0.6) ** 2)


class Price04(Benchmark):

    r"""
    Price 4 objective function.

    This class defines the Price 4 [1]_ global optimization problem. This is a
    multimodal minimization problem defined as follows:

    .. math::

        f_{\text{Price04}}(x) = (2 x_1^3 x_2 - x_2^3)^2 +
        (6 x_1 - x_2^2 + x_2)^2

    with :math:`x_i \in [-50, 50]` for :math:`i = 1, 2`.

    *Global optimum*: :math:`f(x) = 0` for :math:`x = [0, 0]`,
    :math:`x = [2, 4]` and :math:`x = [1.464, -2.506]`

    .. [1] Price, W. A controlled random search procedure for global
    optimisation Computer Journal, 1977, 20, 367-370
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)

        self._bounds = zip([-50.0] * self.N, [50.0] * self.N)
        self.custom_bounds = ([0, 2], [0, 2])

        self.global_optimum = [[2.0, 4.0]]
        self.fglob = 0.0

    def fun(self, x, *args):
        self.nfev += 1

        return ((2.0 * x[1] * x[0] ** 3.0 - x[1] ** 3.0) ** 2.0
                + (6.0 * x[0] - x[1] ** 2.0 + x[1]) ** 2.0)
unknown
codeparrot/codeparrot-clean
/*-------------------------------------------------------------------------
 *
 * tupdesc.h
 *	  POSTGRES tuple descriptor definitions.
 *
 *
 * Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * src/include/access/tupdesc.h
 *
 *-------------------------------------------------------------------------
 */
#ifndef TUPDESC_H
#define TUPDESC_H

#include "access/attnum.h"
#include "catalog/pg_attribute.h"
#include "nodes/pg_list.h"

/* One column-default entry: attribute number plus serialized expression */
typedef struct AttrDefault
{
	AttrNumber	adnum;
	char	   *adbin;			/* nodeToString representation of expr */
} AttrDefault;

/* One CHECK-constraint entry: name, expression, and validity flags */
typedef struct ConstrCheck
{
	char	   *ccname;
	char	   *ccbin;			/* nodeToString representation of expr */
	bool		ccenforced;
	bool		ccvalid;
	bool		ccnoinherit;	/* this is a non-inheritable constraint */
} ConstrCheck;

/* This structure contains constraints of a tuple */
typedef struct TupleConstr
{
	AttrDefault *defval;		/* array */
	ConstrCheck *check;			/* array */
	struct AttrMissing *missing;	/* missing attributes values, NULL if none */
	uint16		num_defval;
	uint16		num_check;
	bool		has_not_null;	/* any not-null, including not valid ones */
	bool		has_generated_stored;
	bool		has_generated_virtual;
} TupleConstr;

/*
 * CompactAttribute
 *		Cut-down version of FormData_pg_attribute for faster access for tasks
 *		such as tuple deformation.  The fields of this struct are populated
 *		using the populate_compact_attribute() function, which must be called
 *		directly after the FormData_pg_attribute struct is populated or
 *		altered in any way.
 *
 * Currently, this struct is 16 bytes.  Any code changes which enlarge this
 * struct should be considered very carefully.
 *
 * Code which must access a TupleDesc's attribute data should always make use
 * the fields of this struct when required fields are available here.  It's
 * more efficient to access the memory in CompactAttribute due to it being a
 * more compact representation of FormData_pg_attribute and also because
 * accessing the FormData_pg_attribute requires an additional calculations to
 * obtain the base address of the array within the TupleDesc.
 */
typedef struct CompactAttribute
{
	int32		attcacheoff;	/* fixed offset into tuple, if known, or -1 */
	int16		attlen;			/* attr len in bytes or -1 = varlen, -2 =
								 * cstring */
	bool		attbyval;		/* as FormData_pg_attribute.attbyval */
	bool		attispackable;	/* FormData_pg_attribute.attstorage !=
								 * TYPSTORAGE_PLAIN */
	bool		atthasmissing;	/* as FormData_pg_attribute.atthasmissing */
	bool		attisdropped;	/* as FormData_pg_attribute.attisdropped */
	bool		attgenerated;	/* FormData_pg_attribute.attgenerated != '\0' */
	char		attnullability; /* status of not-null constraint, see below */
	uint8		attalignby;		/* alignment requirement in bytes */
} CompactAttribute;

/* Valid values for CompactAttribute->attnullability */
#define ATTNULLABLE_UNRESTRICTED 'f'	/* No constraint exists */
#define ATTNULLABLE_UNKNOWN		'u' /* constraint exists, validity unknown */
#define ATTNULLABLE_VALID		'v' /* valid constraint exists */
#define ATTNULLABLE_INVALID		'i' /* constraint exists, marked invalid */

/*
 * This struct is passed around within the backend to describe the structure
 * of tuples.  For tuples coming from on-disk relations, the information is
 * collected from the pg_attribute, pg_attrdef, and pg_constraint catalogs.
 * Transient row types (such as the result of a join query) have anonymous
 * TupleDesc structs that generally omit any constraint info; therefore the
 * structure is designed to let the constraints be omitted efficiently.
 *
 * Note that only user attributes, not system attributes, are mentioned in
 * TupleDesc.
 *
 * If the tupdesc is known to correspond to a named rowtype (such as a table's
 * rowtype) then tdtypeid identifies that type and tdtypmod is -1.  Otherwise
 * tdtypeid is RECORDOID, and tdtypmod can be either -1 for a fully anonymous
 * row type, or a value >= 0 to allow the rowtype to be looked up in the
 * typcache.c type cache.
 *
 * Note that tdtypeid is never the OID of a domain over composite, even if
 * we are dealing with values that are known (at some higher level) to be of
 * a domain-over-composite type.  This is because tdtypeid/tdtypmod need to
 * match up with the type labeling of composite Datums, and those are never
 * explicitly marked as being of a domain type, either.
 *
 * Tuple descriptors that live in caches (relcache or typcache, at present)
 * are reference-counted: they can be deleted when their reference count goes
 * to zero.  Tuple descriptors created by the executor need no reference
 * counting, however: they are simply created in the appropriate memory
 * context and go away when the context is freed.  We set the tdrefcount
 * field of such a descriptor to -1, while reference-counted descriptors
 * always have tdrefcount >= 0.
 *
 * Beyond the compact_attrs variable length array, the TupleDesc stores an
 * array of FormData_pg_attribute.  The TupleDescAttr() function, as defined
 * below, takes care of calculating the address of the elements of the
 * FormData_pg_attribute array.
 *
 * The array of CompactAttribute is effectively an abbreviated version of the
 * array of FormData_pg_attribute.  Because CompactAttribute is significantly
 * smaller than FormData_pg_attribute, code, especially performance-critical
 * code, should prioritize using the fields from the CompactAttribute over the
 * equivalent fields in FormData_pg_attribute.
 *
 * Any code making changes manually to and fields in the FormData_pg_attribute
 * array must subsequently call populate_compact_attribute() to flush the
 * changes out to the corresponding 'compact_attrs' element.
 */
typedef struct TupleDescData
{
	int			natts;			/* number of attributes in the tuple */
	Oid			tdtypeid;		/* composite type ID for tuple type */
	int32		tdtypmod;		/* typmod for tuple type */
	int			tdrefcount;		/* reference count, or -1 if not counting */
	TupleConstr *constr;		/* constraints, or NULL if none */
	/* compact_attrs[N] is the compact metadata of Attribute Number N+1 */
	CompactAttribute compact_attrs[FLEXIBLE_ARRAY_MEMBER];
} TupleDescData;
typedef struct TupleDescData *TupleDesc;

extern void populate_compact_attribute(TupleDesc tupdesc, int attnum);

/*
 * Calculates the base address of the Form_pg_attribute at the end of the
 * TupleDescData struct.
 */
#define TupleDescAttrAddress(desc) \
	(Form_pg_attribute) ((char *) (desc) + \
	 (offsetof(struct TupleDescData, compact_attrs) + \
	 (desc)->natts * sizeof(CompactAttribute)))

/* Accessor for the i'th FormData_pg_attribute element of tupdesc. */
static inline FormData_pg_attribute *
TupleDescAttr(TupleDesc tupdesc, int i)
{
	FormData_pg_attribute *attrs = TupleDescAttrAddress(tupdesc);

	return &attrs[i];
}

/* macro is only needed by TupleDescAttr() above; keep it private */
#undef TupleDescAttrAddress

extern void verify_compact_attribute(TupleDesc, int attnum);

/*
 * Accessor for the i'th CompactAttribute element of tupdesc.
 */
static inline CompactAttribute *
TupleDescCompactAttr(TupleDesc tupdesc, int i)
{
	CompactAttribute *cattr = &tupdesc->compact_attrs[i];

#ifdef USE_ASSERT_CHECKING

	/* Check that the CompactAttribute is correctly populated */
	verify_compact_attribute(tupdesc, i);
#endif

	return cattr;
}

extern TupleDesc CreateTemplateTupleDesc(int natts);
extern TupleDesc CreateTupleDesc(int natts, Form_pg_attribute *attrs);
extern TupleDesc CreateTupleDescCopy(TupleDesc tupdesc);
extern TupleDesc CreateTupleDescTruncatedCopy(TupleDesc tupdesc, int natts);
extern TupleDesc CreateTupleDescCopyConstr(TupleDesc tupdesc);

/* total allocation size of a TupleDesc with (src)->natts attributes */
#define TupleDescSize(src) \
	(offsetof(struct TupleDescData, compact_attrs) + \
	 (src)->natts * sizeof(CompactAttribute) + \
	 (src)->natts * sizeof(FormData_pg_attribute))

extern void TupleDescCopy(TupleDesc dst, TupleDesc src);

extern void TupleDescCopyEntry(TupleDesc dst, AttrNumber dstAttno,
							   TupleDesc src, AttrNumber srcAttno);

extern void FreeTupleDesc(TupleDesc tupdesc);

extern void IncrTupleDescRefCount(TupleDesc tupdesc);
extern void DecrTupleDescRefCount(TupleDesc tupdesc);

/* Pin/release are no-ops for non-refcounted (tdrefcount == -1) descriptors */
#define PinTupleDesc(tupdesc) \
	do { \
		if ((tupdesc)->tdrefcount >= 0) \
			IncrTupleDescRefCount(tupdesc); \
	} while (0)

#define ReleaseTupleDesc(tupdesc) \
	do { \
		if ((tupdesc)->tdrefcount >= 0) \
			DecrTupleDescRefCount(tupdesc); \
	} while (0)

extern bool equalTupleDescs(TupleDesc tupdesc1, TupleDesc tupdesc2);
extern bool equalRowTypes(TupleDesc tupdesc1, TupleDesc tupdesc2);
extern uint32 hashRowType(TupleDesc desc);

extern void TupleDescInitEntry(TupleDesc desc,
							   AttrNumber attributeNumber,
							   const char *attributeName,
							   Oid oidtypeid,
							   int32 typmod,
							   int attdim);

extern void TupleDescInitBuiltinEntry(TupleDesc desc,
									  AttrNumber attributeNumber,
									  const char *attributeName,
									  Oid oidtypeid,
									  int32 typmod,
									  int attdim);

extern void TupleDescInitEntryCollation(TupleDesc desc,
										AttrNumber attributeNumber,
										Oid collationid);

extern TupleDesc BuildDescFromLists(const List *names, const List *types,
									const List *typmods, const List *collations);

extern Node *TupleDescGetDefault(TupleDesc tupdesc, AttrNumber attnum);

#endif							/* TUPDESC_H */
c
github
https://github.com/postgres/postgres
src/include/access/tupdesc.h
"""Tests for distutils.command.check."""
import unittest
from test.support import run_unittest

from distutils.command.check import check, HAS_DOCUTILS
from distutils.tests import support
from distutils.errors import DistutilsSetupError


class CheckTestCase(support.LoggingSilencer,
                    support.TempdirManager,
                    unittest.TestCase):
    """Exercise the ``check`` command's metadata and reST validation."""

    def _run(self, metadata=None, **options):
        """Create a dist from *metadata*, run ``check`` with *options*,
        and return the finished command object for inspection."""
        if metadata is None:
            metadata = {}
        pkg_info, dist = self.create_dist(**metadata)
        cmd = check(dist)
        cmd.initialize_options()
        for name, value in options.items():
            setattr(cmd, name, value)
        cmd.ensure_finalized()
        cmd.run()
        return cmd

    def test_check_metadata(self):
        # let's run the command with no metadata at all
        # by default, check is checking the metadata
        # should have some warnings
        cmd = self._run()
        self.assertEqual(cmd._warnings, 2)

        # now let's add the required fields
        # and run it again, to make sure we don't get
        # any warning anymore
        metadata = {'url': 'xxx', 'author': 'xxx',
                    'author_email': 'xxx',
                    'name': 'xxx', 'version': 'xxx'}
        cmd = self._run(metadata)
        self.assertEqual(cmd._warnings, 0)

        # now with the strict mode, we should
        # get an error if there are missing metadata
        self.assertRaises(DistutilsSetupError, self._run, {}, **{'strict': 1})

        # and of course, no error when all metadata are present
        cmd = self._run(metadata, strict=1)
        self.assertEqual(cmd._warnings, 0)

    def test_check_document(self):
        if not HAS_DOCUTILS:  # won't test without docutils
            return
        pkg_info, dist = self.create_dist()
        cmd = check(dist)

        # let's see if it detects broken rest
        broken_rest = 'title\n===\n\ntest'
        msgs = cmd._check_rst_data(broken_rest)
        self.assertEqual(len(msgs), 1)

        # and non-broken rest
        rest = 'title\n=====\n\ntest'
        msgs = cmd._check_rst_data(rest)
        self.assertEqual(len(msgs), 0)

    def test_check_restructuredtext(self):
        if not HAS_DOCUTILS:  # won't test without docutils
            return
        # let's see if it detects broken rest in long_description
        broken_rest = 'title\n===\n\ntest'
        pkg_info, dist = self.create_dist(long_description=broken_rest)
        cmd = check(dist)
        cmd.check_restructuredtext()
        self.assertEqual(cmd._warnings, 1)

        # let's see if we have an error with strict=1
        metadata = {'url': 'xxx', 'author': 'xxx',
                    'author_email': 'xxx',
                    'name': 'xxx', 'version': 'xxx',
                    'long_description': broken_rest}
        self.assertRaises(DistutilsSetupError, self._run, metadata,
                          **{'strict': 1, 'restructuredtext': 1})

        # and non-broken rest
        metadata['long_description'] = 'title\n=====\n\ntest'
        cmd = self._run(metadata, strict=1, restructuredtext=1)
        self.assertEqual(cmd._warnings, 0)

    def test_check_all(self):
        # Running with both strict and restructuredtext on an empty dist
        # must raise.  (A dead, never-used `metadata` dict that the
        # original built here has been removed.)
        self.assertRaises(DistutilsSetupError, self._run,
                          {}, **{'strict': 1,
                                 'restructuredtext': 1})


def test_suite():
    # makeSuite() is deprecated; loadTestsFromTestCase is the equivalent,
    # long-supported spelling.
    return unittest.TestLoader().loadTestsFromTestCase(CheckTestCase)


if __name__ == "__main__":
    run_unittest(test_suite())
unknown
codeparrot/codeparrot-clean
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // //go:build !windows package main import ( "fmt" "net/http" "os" "os/exec" "testing" "time" "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/util/testutil" ) // As soon as prometheus starts responding to http request it should be able to // accept Interrupt signals for a graceful shutdown. func TestStartupInterrupt(t *testing.T) { if testing.Short() { t.Skip("skipping test in short mode.") } t.Parallel() port := fmt.Sprintf(":%d", testutil.RandomUnprivilegedPort(t)) prom := exec.Command(promPath, "-test.main", "--config.file="+promConfig, "--storage.tsdb.path="+t.TempDir(), "--web.listen-address=0.0.0.0"+port) err := prom.Start() require.NoError(t, err) done := make(chan error, 1) go func() { done <- prom.Wait() }() var startedOk bool var stoppedErr error url := "http://localhost" + port + "/graph" Loop: for range 10 { // error=nil means prometheus has started, so we can send the interrupt // signal and wait for the graceful shutdown. 
if _, err := http.Get(url); err == nil { startedOk = true prom.Process.Signal(os.Interrupt) select { case stoppedErr = <-done: break Loop case <-time.After(10 * time.Second): } break Loop } time.Sleep(500 * time.Millisecond) } require.True(t, startedOk, "prometheus didn't start in the specified timeout") err = prom.Process.Kill() require.Error(t, err, "prometheus didn't shutdown gracefully after sending the Interrupt signal") // TODO - find a better way to detect when the process didn't exit as expected! if stoppedErr != nil { require.EqualError(t, stoppedErr, "signal: interrupt", "prometheus exit") } }
go
github
https://github.com/prometheus/prometheus
cmd/prometheus/main_unix_test.go
// Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: BUSL-1.1 package command import ( "fmt" "os" "path/filepath" "runtime" "strings" "testing" "github.com/hashicorp/cli" "github.com/hashicorp/terraform/internal/addrs" "github.com/hashicorp/terraform/internal/depsfile" "github.com/hashicorp/terraform/internal/getproviders" ) func TestProvidersLock(t *testing.T) { t.Run("noop", func(t *testing.T) { // in the most basic case, running providers lock in a directory with no configuration at all should succeed. // create an empty working directory td := t.TempDir() os.MkdirAll(td, 0755) t.Chdir(td) ui := new(cli.MockUi) c := &ProvidersLockCommand{ Meta: Meta{ Ui: ui, }, } code := c.Run([]string{}) if code != 0 { t.Fatalf("wrong exit code; expected 0, got %d", code) } }) // This test depends on the -fs-mirror argument, so we always know what results to expect t.Run("basic", func(t *testing.T) { testDirectory := "providers-lock/basic" expected := `# This file is maintained automatically by "terraform init". # Manual edits may be lost in future updates. provider "registry.terraform.io/hashicorp/test" { version = "1.0.0" hashes = [ "h1:7MjN4eFisdTv4tlhXH5hL4QQd39Jy4baPhFxwAd/EFE=", ] } ` runProviderLockGenericTest(t, testDirectory, expected, false) }) // This test depends on the -fs-mirror argument, so we always know what results to expect t.Run("append", func(t *testing.T) { testDirectory := "providers-lock/append" expected := `# This file is maintained automatically by "terraform init". # Manual edits may be lost in future updates. 
provider "registry.terraform.io/hashicorp/test" { version = "1.0.0" hashes = [ "h1:7MjN4eFisdTv4tlhXH5hL4QQd39Jy4baPhFxwAd/EFE=", "h1:invalid", ] } ` runProviderLockGenericTest(t, testDirectory, expected, false) }) // This test depends on the -fs-mirror argument, so we always know what results to expect t.Run("tests", func(t *testing.T) { testDirectory := "providers-lock/with-tests" expected := `# This file is maintained automatically by "terraform init". # Manual edits may be lost in future updates. provider "registry.terraform.io/hashicorp/test" { version = "1.0.0" hashes = [ "h1:7MjN4eFisdTv4tlhXH5hL4QQd39Jy4baPhFxwAd/EFE=", ] } ` runProviderLockGenericTest(t, testDirectory, expected, true) }) } func runProviderLockGenericTest(t *testing.T, testDirectory, expected string, init bool) { td := t.TempDir() testCopyDir(t, testFixturePath(testDirectory), td) t.Chdir(td) // Our fixture dir has a generic os_arch dir, which we need to customize // to the actual OS/arch where this test is running in order to get the // desired result. 
fixtMachineDir := filepath.Join(td, "fs-mirror/registry.terraform.io/hashicorp/test/1.0.0/os_arch") wantMachineDir := filepath.Join(td, "fs-mirror/registry.terraform.io/hashicorp/test/1.0.0/", fmt.Sprintf("%s_%s", runtime.GOOS, runtime.GOARCH)) err := os.Rename(fixtMachineDir, wantMachineDir) if err != nil { t.Fatalf("unexpected error: %s", err) } if init { // optionally execute the get command to fetch local modules if the // test case needs them c := &GetCommand{ Meta: Meta{ Ui: new(cli.MockUi), }, } code := c.Run(nil) if code != 0 { t.Fatal("failed get command") } } p := testProvider() ui := new(cli.MockUi) c := &ProvidersLockCommand{ Meta: Meta{ Ui: ui, testingOverrides: metaOverridesForProvider(p), }, } args := []string{"-fs-mirror=fs-mirror"} code := c.Run(args) if code != 0 { t.Fatalf("wrong exit code; expected 0, got %d", code) } lockfile, err := os.ReadFile(".terraform.lock.hcl") if err != nil { t.Fatal("error reading lockfile") } if string(lockfile) != expected { t.Fatalf("wrong lockfile content") } } func TestProvidersLock_args(t *testing.T) { t.Run("mirror collision", func(t *testing.T) { ui := new(cli.MockUi) c := &ProvidersLockCommand{ Meta: Meta{ Ui: ui, }, } // only one of these arguments can be used at a time args := []string{ "-fs-mirror=/foo/", "-net-mirror=www.foo.com", } code := c.Run(args) if code != 1 { t.Fatalf("wrong exit code; expected 1, got %d", code) } output := ui.ErrorWriter.String() if !strings.Contains(output, "The -fs-mirror and -net-mirror command line options are mutually-exclusive.") { t.Fatalf("missing expected error message: %s", output) } }) t.Run("invalid platform", func(t *testing.T) { ui := new(cli.MockUi) c := &ProvidersLockCommand{ Meta: Meta{ Ui: ui, }, } // not a valid platform args := []string{"-platform=arbitrary_nonsense_that_isnt_valid"} code := c.Run(args) if code != 1 { t.Fatalf("wrong exit code; expected 1, got %d", code) } output := ui.ErrorWriter.String() if !strings.Contains(output, "must be two words 
separated by an underscore.") { t.Fatalf("missing expected error message: %s", output) } }) t.Run("invalid provider argument", func(t *testing.T) { ui := new(cli.MockUi) c := &ProvidersLockCommand{ Meta: Meta{ Ui: ui, }, } // There is no configuration, so it's not valid to use any provider argument args := []string{"hashicorp/random"} code := c.Run(args) if code != 1 { t.Fatalf("wrong exit code; expected 1, got %d", code) } output := ui.ErrorWriter.String() if !strings.Contains(output, "The provider registry.terraform.io/hashicorp/random is not required by the\ncurrent configuration.") { t.Fatalf("missing expected error message: %s", output) } }) } func TestProvidersLockCalculateChangeType(t *testing.T) { provider := addrs.NewDefaultProvider("provider") v2 := getproviders.MustParseVersion("2.0.0") v2EqConstraints := getproviders.MustParseVersionConstraints("2.0.0") t.Run("oldLock == nil", func(t *testing.T) { platformLock := depsfile.NewProviderLock(provider, v2, v2EqConstraints, []getproviders.Hash{ "9r3i9a9QmASqMnQM", "K43RHM2klOoywtyW", "swJPXfuCNhJsTM5c", }) if ct := providersLockCalculateChangeType(nil, platformLock); ct != providersLockChangeTypeNewProvider { t.Fatalf("output was %s but should be %s", ct, providersLockChangeTypeNewProvider) } }) t.Run("oldLock == platformLock", func(t *testing.T) { platformLock := depsfile.NewProviderLock(provider, v2, v2EqConstraints, []getproviders.Hash{ "9r3i9a9QmASqMnQM", "K43RHM2klOoywtyW", "swJPXfuCNhJsTM5c", }) oldLock := depsfile.NewProviderLock(provider, v2, v2EqConstraints, []getproviders.Hash{ "9r3i9a9QmASqMnQM", "K43RHM2klOoywtyW", "swJPXfuCNhJsTM5c", }) if ct := providersLockCalculateChangeType(oldLock, platformLock); ct != providersLockChangeTypeNoChange { t.Fatalf("output was %s but should be %s", ct, providersLockChangeTypeNoChange) } }) t.Run("oldLock > platformLock", func(t *testing.T) { platformLock := depsfile.NewProviderLock(provider, v2, v2EqConstraints, []getproviders.Hash{ "9r3i9a9QmASqMnQM", 
"K43RHM2klOoywtyW", "swJPXfuCNhJsTM5c", }) oldLock := depsfile.NewProviderLock(provider, v2, v2EqConstraints, []getproviders.Hash{ "9r3i9a9QmASqMnQM", "1ZAChGWUMWn4zmIk", "K43RHM2klOoywtyW", "HWjRvIuWZ1LVatnc", "swJPXfuCNhJsTM5c", "KwhJK4p/U2dqbKhI", }) if ct := providersLockCalculateChangeType(oldLock, platformLock); ct != providersLockChangeTypeNoChange { t.Fatalf("output was %s but should be %s", ct, providersLockChangeTypeNoChange) } }) t.Run("oldLock < platformLock", func(t *testing.T) { platformLock := depsfile.NewProviderLock(provider, v2, v2EqConstraints, []getproviders.Hash{ "9r3i9a9QmASqMnQM", "1ZAChGWUMWn4zmIk", "K43RHM2klOoywtyW", "HWjRvIuWZ1LVatnc", "swJPXfuCNhJsTM5c", "KwhJK4p/U2dqbKhI", }) oldLock := depsfile.NewProviderLock(provider, v2, v2EqConstraints, []getproviders.Hash{ "9r3i9a9QmASqMnQM", "K43RHM2klOoywtyW", "swJPXfuCNhJsTM5c", }) if ct := providersLockCalculateChangeType(oldLock, platformLock); ct != providersLockChangeTypeNewHashes { t.Fatalf("output was %s but should be %s", ct, providersLockChangeTypeNoChange) } }) }
go
github
https://github.com/hashicorp/terraform
internal/command/providers_lock_test.go
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models


class Migration(SchemaMigration):
    """Drop the ``window`` FK column from SoftwareSecurePhotoVerification.

    The ``models`` dict below is South's auto-generated frozen ORM snapshot
    and must not be edited by hand.
    """

    def forwards(self, orm):
        # Deleting field 'SoftwareSecurePhotoVerification.window'
        db.delete_column('verify_student_softwaresecurephotoverification', 'window_id')

    def backwards(self, orm):
        # Add field 'SoftwareSecurePhotoVerification.window'. Setting its default value to None
        # Raw SQL on MySQL; presumably to avoid a table rebuild -- TODO confirm.
        if db.backend_name == 'mysql':
            db.execute('ALTER TABLE verify_student_softwaresecurephotoverification ADD `window_id` int(11) DEFAULT NULL;')
        else:
            db.add_column('verify_student_softwaresecurephotoverification', 'window',
                          self.gf('django.db.models.fields.related.ForeignKey')(to=orm['reverification.MidcourseReverificationWindow'], null=True),
                          keep_default=False)

    # Frozen ORM state (auto-generated by South; do not edit manually).
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'verify_student.incoursereverificationconfiguration': {
            'Meta': {'ordering': "('-change_date',)", 'object_name': 'InCourseReverificationConfiguration'},
            'change_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
            'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        'verify_student.skippedreverification': {
            'Meta': {'unique_together': "(('user', 'course_id'),)", 'object_name': 'SkippedReverification'},
            'checkpoint': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'skipped_checkpoint'", 'to': "orm['verify_student.VerificationCheckpoint']"}),
            'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'verify_student.softwaresecurephotoverification': {
            'Meta': {'ordering': "['-created_at']", 'object_name': 'SoftwareSecurePhotoVerification'},
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
            'display': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
            'error_code': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
            'error_msg': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'face_image_url': ('django.db.models.fields.URLField', [], {'max_length': '255', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'photo_id_image_url': ('django.db.models.fields.URLField', [], {'max_length': '255', 'blank': 'True'}),
            'photo_id_key': ('django.db.models.fields.TextField', [], {'max_length': '1024'}),
            'receipt_id': ('django.db.models.fields.CharField', [], {'default': "'c6b63663-5694-49b2-ae71-494b9afee0cf'", 'max_length': '255', 'db_index': 'True'}),
            'reviewing_service': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'reviewing_user': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'photo_verifications_reviewed'", 'null': 'True', 'to': "orm['auth.User']"}),
            'status': ('model_utils.fields.StatusField', [], {'default': "'created'", 'max_length': '100', u'no_check_for_status': 'True'}),
            'status_changed': ('model_utils.fields.MonitorField', [], {'default': 'datetime.datetime.now', u'monitor': "u'status'"}),
            'submitted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'verify_student.verificationcheckpoint': {
            'Meta': {'unique_together': "(('course_id', 'checkpoint_location'),)", 'object_name': 'VerificationCheckpoint'},
            'checkpoint_location': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'photo_verification': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['verify_student.SoftwareSecurePhotoVerification']", 'symmetrical': 'False'})
        },
        'verify_student.verificationstatus': {
            'Meta': {'object_name': 'VerificationStatus'},
            'checkpoint': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'checkpoint_status'", 'to': "orm['verify_student.VerificationCheckpoint']"}),
            'error': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'response': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'status': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
            'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        }
    }

    complete_apps = ['verify_student']
unknown
codeparrot/codeparrot-clean