code stringlengths 3 1.05M | repo_name stringlengths 4 116 | path stringlengths 4 991 | language stringclasses 9
values | license stringclasses 15
values | size int32 3 1.05M |
|---|---|---|---|---|---|
using System.Collections.Generic;
using System.ComponentModel.DataAnnotations;
using Microsoft.AspNet.Identity;
using Microsoft.Owin.Security;
namespace canadaPostResearchAPI.Models
{
/// <summary>
/// View model for the account-management index page; summarizes the
/// user's current security-related settings.
/// </summary>
public class IndexViewModel
{
    /// <summary>True if the user has a local password set.</summary>
    public bool HasPassword { get; set; }

    /// <summary>Login providers currently associated with the account.</summary>
    public IList<UserLoginInfo> Logins { get; set; }

    /// <summary>The user's registered phone number, if any.</summary>
    public string PhoneNumber { get; set; }

    /// <summary>True if two-factor authentication is enabled.</summary>
    public bool TwoFactor { get; set; }

    /// <summary>True if the current browser has been remembered (bypassing two-factor prompts).</summary>
    public bool BrowserRemembered { get; set; }
}
/// <summary>
/// View model for the manage-logins page.
/// </summary>
public class ManageLoginsViewModel
{
    /// <summary>External logins already linked to the user.</summary>
    public IList<UserLoginInfo> CurrentLogins { get; set; }

    /// <summary>Authentication providers not yet linked to the user.</summary>
    public IList<AuthenticationDescription> OtherLogins { get; set; }
}
/// <summary>
/// View model carrying the purpose identifier of a two-factor provider.
/// </summary>
public class FactorViewModel
{
    /// <summary>The purpose string identifying the two-factor provider.</summary>
    public string Purpose { get; set; }
}
/// <summary>
/// View model for creating a local password on an account that does not
/// have one yet.
/// </summary>
public class SetPasswordViewModel
{
    /// <summary>The new password; 6 to 100 characters, per the StringLength attribute.</summary>
    [Required]
    [StringLength(100, ErrorMessage = "The {0} must be at least {2} characters long.", MinimumLength = 6)]
    [DataType(DataType.Password)]
    [Display(Name = "New password")]
    public string NewPassword { get; set; }

    /// <summary>Confirmation entry; must match <see cref="NewPassword"/>.</summary>
    [DataType(DataType.Password)]
    [Display(Name = "Confirm new password")]
    [Compare("NewPassword", ErrorMessage = "The new password and confirmation password do not match.")]
    public string ConfirmPassword { get; set; }
}
/// <summary>
/// View model for changing an existing local password.
/// </summary>
public class ChangePasswordViewModel
{
    /// <summary>The user's current password.</summary>
    [Required]
    [DataType(DataType.Password)]
    [Display(Name = "Current password")]
    public string OldPassword { get; set; }

    /// <summary>The new password; 6 to 100 characters, per the StringLength attribute.</summary>
    [Required]
    [StringLength(100, ErrorMessage = "The {0} must be at least {2} characters long.", MinimumLength = 6)]
    [DataType(DataType.Password)]
    [Display(Name = "New password")]
    public string NewPassword { get; set; }

    /// <summary>Confirmation entry; must match <see cref="NewPassword"/>.</summary>
    [DataType(DataType.Password)]
    [Display(Name = "Confirm new password")]
    [Compare("NewPassword", ErrorMessage = "The new password and confirmation password do not match.")]
    public string ConfirmPassword { get; set; }
}
/// <summary>
/// View model for adding a phone number to the account.
/// </summary>
public class AddPhoneNumberViewModel
{
    /// <summary>The phone number to add; validated by the Phone attribute.</summary>
    [Required]
    [Phone]
    [Display(Name = "Phone Number")]
    public string Number { get; set; }
}
/// <summary>
/// View model for verifying a phone number with a one-time code.
/// </summary>
public class VerifyPhoneNumberViewModel
{
    /// <summary>The verification code entered by the user.</summary>
    [Required]
    [Display(Name = "Code")]
    public string Code { get; set; }

    /// <summary>The phone number being verified.</summary>
    [Required]
    [Phone]
    [Display(Name = "Phone Number")]
    public string PhoneNumber { get; set; }
}
/// <summary>
/// View model for choosing a two-factor authentication provider.
/// </summary>
public class ConfigureTwoFactorViewModel
{
    /// <summary>The provider the user selected.</summary>
    public string SelectedProvider { get; set; }

    /// <summary>Available providers, rendered as select-list items.</summary>
    public ICollection<System.Web.Mvc.SelectListItem> Providers { get; set; }
}
} | Kiandr/MS | Languages/CSharp/poc/canadaPostResearchApi/canadaPostResearchAPI/canadaPostResearchAPI/Models/ManageViewModels.cs | C# | apache-2.0 | 2,667 |
#!/usr/bin/env ruby
# Encoding: utf-8
#
# Copyright:: Copyright 2011, Google Inc. All Rights Reserved.
#
# License:: Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This example updates the name of a single company, selected by its ID, by
# appending " LLC." to it. To determine which companies exist, run
# get_all_companies.rb.
require 'dfp_api'
API_VERSION = :v201608
# Fetches one company (selected by ID), appends ' LLC.' to its name and
# writes the change back, printing what was updated.
def update_companies()
  # Get DfpApi instance and load configuration from ~/dfp_api.yml.
  api = DfpApi::Api.new

  # To enable logging of SOAP requests, set the log_level value to 'DEBUG' in
  # the configuration file or provide your own logger:
  # api.logger = Logger.new('dfp_xml.log')

  # Obtain the CompanyService for the API version in use.
  service = api.service(:CompanyService, API_VERSION)

  # ID of the single company to fetch.
  target_id = 'INSERT_COMPANY_ID_HERE'

  # Statement selecting exactly one company by its ID.
  query = DfpApi::FilterStatement.new(
      'WHERE id = :company_id ORDER BY id ASC',
      [
        {:key => 'company_id',
         :value => {:value => target_id, :xsi_type => 'NumberValue'}
        }
      ],
      1
  )

  # Retrieve the matching page of companies.
  page = service.get_companies_by_statement(query.toStatement())

  unless page[:results]
    puts 'No companies found to update.'
    return
  end

  companies = page[:results]

  # Modify each local company object by appending ' LLC.' to its name.
  companies.each do |company|
    company[:name] += ' LLC.'
    # Workaround for issue #94: nil optional fields must be sent as empty
    # strings.
    [:address, :email, :fax_phone, :primary_phone,
     :external_id, :comment].each do |field|
      company[field] = "" if company[field].nil?
    end
  end

  # Push the modified companies back to the server.
  updated = service.update_companies(companies)
  raise 'No companies were updated.' unless updated

  updated.each do |company|
    puts "A company with ID [%d], name: %s and type %s was updated." %
        [company[:id], company[:name], company[:type]]
  end
end
# Entry point: run the update and report transport- or API-level failures.
if __FILE__ == $0
  begin
    update_companies()

  # HTTP errors (transport-level failures).
  rescue AdsCommon::Errors::HttpError => e
    puts "HTTP Error: %s" % e

  # API errors: print the message, then each error's fields, indented.
  rescue DfpApi::Errors::ApiException => e
    puts "Message: %s" % e.message
    puts 'Errors:'
    e.errors.each_with_index do |error, index|
      puts "\tError [%d]:" % (index + 1)
      error.each do |field, value|
        puts "\t\t%s: %s" % [field, value]
      end
    end
  end
end
| kjvarga/google-api-ads-ruby | dfp_api/examples/v201608/company_service/update_companies.rb | Ruby | apache-2.0 | 3,117 |
package com.evolveum.midpoint.prism;
import com.evolveum.midpoint.prism.xnode.RootXNode;
import com.evolveum.midpoint.prism.xnode.XNode;
import com.evolveum.midpoint.util.exception.SchemaException;
import org.jetbrains.annotations.NotNull;
/**
 * A {@link SerializerTarget} whose target form is the XNode tree itself,
 * so "writing" is the identity operation.
 *
 * @author mederly
 */
public class SerializerXNodeTarget extends SerializerTarget<RootXNode> {

    public SerializerXNodeTarget(@NotNull PrismContextImpl prismContext) {
        super(prismContext);
    }

    /**
     * Returns the given root XNode unchanged; no conversion is needed
     * because the target representation is already an XNode tree.
     * The serialization context is not consulted here.
     */
    @NotNull
    @Override
    public RootXNode write(@NotNull RootXNode xroot, SerializationContext context) throws SchemaException {
        return xroot;
    }
}
| PetrGasparik/midpoint | infra/prism/src/main/java/com/evolveum/midpoint/prism/SerializerXNodeTarget.java | Java | apache-2.0 | 619 |
/*
Copyright 2011-2021 Frederic Langlet
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
you may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kanzi.io;
import java.io.OutputStream;
/**
 * An {@link OutputStream} that silently discards everything written to it
 * (the equivalent of writing to /dev/null).
 */
public class NullOutputStream extends OutputStream
{
   /**
    * Discards the given byte.
    *
    * @param b the byte to discard
    */
   @Override
   public void write(int b)
   {
   }

   /**
    * Discards the given range of bytes. Overridden so that bulk writes are a
    * single no-op instead of the inherited per-byte loop that calls
    * {@link #write(int)} once per byte.
    *
    * @param data the buffer containing bytes to discard
    * @param off  the start offset in the buffer
    * @param len  the number of bytes to discard
    * @throws NullPointerException      if {@code data} is null
    * @throws IndexOutOfBoundsException if the range is invalid
    */
   @Override
   public void write(byte[] data, int off, int len)
   {
      // Validate arguments to honor the OutputStream contract even though
      // the data itself is ignored.
      if (data == null)
         throw new NullPointerException();

      if ((off < 0) || (len < 0) || (len > data.length - off))
         throw new IndexOutOfBoundsException();
   }
}
| flanglet/kanzi | java/src/main/java/kanzi/io/NullOutputStream.java | Java | apache-2.0 | 736 |
/*
* Copyright 2017 João Pedro Sacheti
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License athttp://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
//
// This file was generated by the JavaTM Architecture for XML Binding (JAXB) Reference Implementation, v2.2.8-b130911.1802
// See <a href="http://java.sun.com/xml/jaxb">http://java.sun.com/xml/jaxb</a>
// Any modifications to this file will be lost upon recompilation of the source schema.
// Generated on: 2017.07.16 at 05:41:14 PM BRT
//
package br.edu.fema.nfe.xml;
import javax.xml.bind.annotation.*;
/**
 * Vehicle Data type.
 *
 * <p>Java class for the TVeiculo complex type.
 *
 * <p>The following schema fragment specifies the expected content contained within this class.
 *
* <pre>
* <complexType name="TVeiculo">
* <complexContent>
* <restriction base="{http://www.w3.org/2001/XMLSchema}anyType">
* <sequence>
* <element name="placa">
* <simpleType>
* <restriction base="{http://www.w3.org/2001/XMLSchema}string">
* <whiteSpace value="preserve"/>
* <pattern value="[A-Z]{2,3}[0-9]{4}|[A-Z]{3,4}[0-9]{3}"/>
* </restriction>
* </simpleType>
* </element>
* <element name="UF" type="{http://www.portalfiscal.inf.br/nfe}TUf"/>
* <element name="RNTC" minOccurs="0">
* <simpleType>
* <restriction base="{http://www.portalfiscal.inf.br/nfe}TString">
* <minLength value="1"/>
* <maxLength value="20"/>
* </restriction>
* </simpleType>
* </element>
* </sequence>
* </restriction>
* </complexContent>
* </complexType>
* </pre>
*
*
*/
@XmlAccessorType(XmlAccessType.FIELD)
@XmlType(name = "TVeiculo", propOrder = {
    "placa",
    "uf",
    "rntc"
})
public class TVeiculo {

    // Vehicle license plate; the schema restricts it to the patterns
    // [A-Z]{2,3}[0-9]{4} or [A-Z]{3,4}[0-9]{3}.
    @XmlElement(required = true)
    protected String placa;
    // Federation unit (state) code of the vehicle, as a TUf enum value.
    @XmlElement(name = "UF", required = true)
    @XmlSchemaType(name = "string")
    protected TUf uf;
    // Optional RNTC code; 1-20 characters per the schema.
    @XmlElement(name = "RNTC")
    protected String rntc;

    /**
     * Gets the value of the placa property.
     *
     * @return
     *     possible object is
     *     {@link String }
     *
     */
    public String getPlaca() {
        return placa;
    }

    /**
     * Sets the value of the placa property.
     *
     * @param value
     *     allowed object is
     *     {@link String }
     *
     */
    public void setPlaca(String value) {
        this.placa = value;
    }

    /**
     * Gets the value of the uf property.
     *
     * @return
     *     possible object is
     *     {@link TUf }
     *
     */
    public TUf getUF() {
        return uf;
    }

    /**
     * Sets the value of the uf property.
     *
     * @param value
     *     allowed object is
     *     {@link TUf }
     *
     */
    public void setUF(TUf value) {
        this.uf = value;
    }

    /**
     * Gets the value of the rntc property.
     *
     * @return
     *     possible object is
     *     {@link String }
     *
     */
    public String getRNTC() {
        return rntc;
    }

    /**
     * Sets the value of the rntc property.
     *
     * @param value
     *     allowed object is
     *     {@link String }
     *
     */
    public void setRNTC(String value) {
        this.rntc = value;
    }

}
| jpsacheti/nfe-fema | src/main/java/br/edu/fema/nfe/xml/TVeiculo.java | Java | apache-2.0 | 3,913 |
package org.ovirt.engine.ui.webadmin.section.main.view.popup.vm;
import org.ovirt.engine.ui.common.view.popup.AbstractModelBoundWidgetPopupView;
import org.ovirt.engine.ui.common.widget.uicommon.popup.vm.VmChangeCDPopupWidget;
import org.ovirt.engine.ui.uicommonweb.models.userportal.AttachCdModel;
import org.ovirt.engine.ui.webadmin.ApplicationResources;
import org.ovirt.engine.ui.webadmin.section.main.presenter.popup.vm.VmChangeCDPopupPresenterWidget;
import com.google.gwt.event.shared.EventBus;
import com.google.inject.Inject;
/**
 * Popup view for changing a VM's attached CD image. Wires the
 * {@link VmChangeCDPopupWidget} into the generic model-bound popup at a
 * fixed 400x170 px size.
 */
public class VmChangeCDPopupView extends AbstractModelBoundWidgetPopupView<AttachCdModel> implements VmChangeCDPopupPresenterWidget.ViewDef {

    @Inject
    public VmChangeCDPopupView(EventBus eventBus, ApplicationResources resources) {
        // Width and height are literal CSS sizes passed to the base popup.
        super(eventBus, resources, new VmChangeCDPopupWidget(), "400px", "170px"); //$NON-NLS-1$ //$NON-NLS-2$
    }
}
| derekhiggins/ovirt-engine | frontend/webadmin/modules/webadmin/src/main/java/org/ovirt/engine/ui/webadmin/section/main/view/popup/vm/VmChangeCDPopupView.java | Java | apache-2.0 | 897 |
# coding=utf-8
# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import unittest
from contextlib import closing
from pants.engine.exp.fs import Path
from pants.engine.exp.scheduler import StepRequest, StepResult
from pants.engine.exp.storage import Cache, InvalidKeyError, Key, Lmdb, Storage
class StorageTest(unittest.TestCase):
  """Tests for the Storage facade and its underlying Lmdb key-value store."""

  # Raw key/value bytes used against the low-level store.
  TEST_KEY = b'hello'
  TEST_VALUE = b'world'

  # Serializable sample objects to round-trip through Storage.
  TEST_PATH = Path('/foo')
  TEST_PATH2 = Path('/bar')

  # Placeholder exception type; not referenced by the visible tests.
  class SomeException(Exception): pass

  def setUp(self):
    """Create an in-memory storage plus a sample request and result."""
    self.storage = Storage.create(in_memory=True)
    self.result = StepResult(state='something')
    self.request = StepRequest(step_id=123, node='some node',
                               dependencies={'some dep': 'some state',
                                             'another dep': 'another state'},
                               project_tree='some project tree')

  def test_lmdb_key_value_store(self):
    """Exercise get/put directly against the Lmdb backend."""
    lmdb = Lmdb.create()[0]
    with closing(lmdb) as kvs:
      # Initially key does not exist.
      self.assertFalse(kvs.get(self.TEST_KEY))
      # Now write a key value pair and read back.
      written = kvs.put(self.TEST_KEY, self.TEST_VALUE)
      self.assertTrue(written)
      self.assertEquals(self.TEST_VALUE, kvs.get(self.TEST_KEY).getvalue())
      # Write the same key again will not overwrite.
      self.assertFalse(kvs.put(self.TEST_KEY, self.TEST_VALUE))

  def test_storage(self):
    """Round-trip an object through Storage and check key validation."""
    with closing(self.storage) as storage:
      key = storage.put(self.TEST_PATH)
      self.assertEquals(self.TEST_PATH, storage.get(key))
      # The deserialized blob is equal by not the same as the input data.
      self.assertFalse(storage.get(key) is self.TEST_PATH)

      # Any other keys won't exist in the subjects.
      self.assertNotEqual(self.TEST_KEY, key)

      with self.assertRaises(InvalidKeyError):
        self.assertFalse(storage.get(self.TEST_KEY))

      # Verify key and value's types must match.
      key._type = str
      with self.assertRaises(ValueError):
        storage.get(key)

  def test_storage_key_mappings(self):
    """Mappings between keys are one-way: key1 -> key2 only."""
    with closing(self.storage) as storage:
      key1 = storage.put(self.TEST_PATH)
      key2 = storage.put(self.TEST_PATH2)
      storage.add_mapping(key1, key2)
      self.assertEquals(key2, storage.get_mapping(key1))

      # key2 isn't mapped to any other key.
      self.assertIsNone(storage.get_mapping(key2))

  def test_key_for_request(self):
    """Keying a request converts dependencies to Keys but leaves node and
    project_tree as the original objects; keying is idempotent."""
    with closing(self.storage) as storage:
      keyed_request = storage.key_for_request(self.request)
      for dep, dep_state in keyed_request.dependencies.items():
        self.assertEquals(Key, type(dep))
        self.assertEquals(Key, type(dep_state))
      self.assertIs(self.request.node, keyed_request.node)
      self.assertIs(self.request.project_tree, keyed_request.project_tree)

      self.assertEquals(keyed_request, storage.key_for_request(keyed_request))

  def test_resolve_request(self):
    """Resolving a keyed request yields an equal (but distinct) request;
    resolving is idempotent."""
    with closing(self.storage) as storage:
      keyed_request = storage.key_for_request(self.request)
      resolved_request = storage.resolve_request(keyed_request)
      self.assertEquals(self.request, resolved_request)
      self.assertIsNot(self.request, resolved_request)

      self.assertEquals(resolved_request, self.storage.resolve_request(resolved_request))

  def test_key_for_result(self):
    """Keying a result converts its state to a Key; keying is idempotent."""
    with closing(self.storage) as storage:
      keyed_result = storage.key_for_result(self.result)
      self.assertEquals(Key, type(keyed_result.state))

      self.assertEquals(keyed_result, storage.key_for_result(keyed_result))

  def test_resolve_result(self):
    """Resolving a keyed result yields an equal (but distinct) result;
    resolving is idempotent."""
    with closing(self.storage) as storage:
      keyed_result = storage.key_for_result(self.result)
      resolved_result = storage.resolve_result(keyed_result)
      self.assertEquals(self.result, resolved_result)
      self.assertIsNot(self.result, resolved_result)

      self.assertEquals(resolved_result, self.storage.resolve_result(resolved_result))
class CacheTest(unittest.TestCase):
  """Tests for the Cache layered over Storage, including hit/miss stats."""

  def setUp(self):
    """Setup cache as well as request and result."""
    self.storage = Storage.create(in_memory=True)
    self.cache = Cache.create(storage=self.storage)

    request = StepRequest(step_id=123, node='some node',
                          dependencies={'some dep': 'some state',
                                        'another dep': 'another state'},
                          project_tree='some project tree')
    self.result = StepResult(state='something')
    self.keyed_request = self.storage.key_for_request(request)

  def test_cache(self):
    """Verify get and put."""
    with closing(self.cache):
      # First lookup misses.
      self.assertIsNone(self.cache.get(self.keyed_request))
      self._assert_hits_misses(hits=0, misses=1)

      # After put, lookups hit and return an equal-but-distinct copy.
      self.cache.put(self.keyed_request, self.result)
      self.assertEquals(self.result, self.cache.get(self.keyed_request))
      self.assertIsNot(self.result, self.cache.get(self.keyed_request))
      self._assert_hits_misses(hits=2, misses=1)

  def test_failure_to_update_mapping(self):
    """Verify we can access cached result only if we save both result and the key mapping."""
    with closing(self.cache):
      # This places result to the main storage without saving to key mapping. This
      # simulates error might happen for saving key mapping after successfully saving the result.
      self.cache._storage.put(self.result)

      self.assertIsNone(self.cache.get(self.keyed_request))
      self._assert_hits_misses(hits=0, misses=1)

  def _assert_hits_misses(self, hits, misses):
    # Helper: assert the cache's hit/miss/total counters exactly.
    self.assertEquals(hits, self.cache.get_stats().hits)
    self.assertEquals(misses, self.cache.get_stats().misses)
    self.assertEquals(hits+misses, self.cache.get_stats().total)
| dbentley/pants | tests/python/pants_test/engine/exp/test_storage.py | Python | apache-2.0 | 5,956 |
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.22.0
// protoc v3.12.3
// source: google/monitoring/v3/notification_service.proto
package monitoring
import (
context "context"
reflect "reflect"
sync "sync"
proto "github.com/golang/protobuf/proto"
empty "github.com/golang/protobuf/ptypes/empty"
_ "github.com/golang/protobuf/ptypes/struct"
timestamp "github.com/golang/protobuf/ptypes/timestamp"
_ "google.golang.org/genproto/googleapis/api/annotations"
field_mask "google.golang.org/genproto/protobuf/field_mask"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
)
// Compile-time guards: the build fails if the generated code and the
// protoimpl runtime disagree on the supported generator version range.
const (
	// Verify that this generated code is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
	// Verify that runtime/protoimpl is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)

// This is a compile-time assertion that a sufficiently up-to-date version
// of the legacy proto package is being used.
const _ = proto.ProtoPackageIsVersion4
// The `ListNotificationChannelDescriptors` request.
type ListNotificationChannelDescriptorsRequest struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	// Required. The REST resource name of the parent from which to retrieve
	// the notification channel descriptors. The expected syntax is:
	//
	//     projects/[PROJECT_ID_OR_NUMBER]
	//
	// Note that this names the parent container in which to look for the
	// descriptors; to retrieve a single descriptor by name, use the
	// [GetNotificationChannelDescriptor][google.monitoring.v3.NotificationChannelService.GetNotificationChannelDescriptor]
	// operation, instead.
	Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"`
	// The maximum number of results to return in a single response. If
	// not set to a positive number, a reasonable value will be chosen by the
	// service.
	PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
	// If non-empty, `page_token` must contain a value returned as the
	// `next_page_token` in a previous response to request the next set
	// of results.
	PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
}

// Reset restores the message to its empty state, re-registering its message
// info when the unsafe fast path is enabled.
func (x *ListNotificationChannelDescriptorsRequest) Reset() {
	*x = ListNotificationChannelDescriptorsRequest{}
	if protoimpl.UnsafeEnabled {
		mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[0]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String renders the message using the protobuf text format.
func (x *ListNotificationChannelDescriptorsRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks this struct as a protobuf message.
func (*ListNotificationChannelDescriptorsRequest) ProtoMessage() {}

// ProtoReflect returns a reflective view of the message.
func (x *ListNotificationChannelDescriptorsRequest) ProtoReflect() protoreflect.Message {
	mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[0]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ListNotificationChannelDescriptorsRequest.ProtoReflect.Descriptor instead.
func (*ListNotificationChannelDescriptorsRequest) Descriptor() ([]byte, []int) {
	return file_google_monitoring_v3_notification_service_proto_rawDescGZIP(), []int{0}
}

// GetName returns the Name field; safe to call on a nil receiver.
func (x *ListNotificationChannelDescriptorsRequest) GetName() string {
	if x != nil {
		return x.Name
	}
	return ""
}

// GetPageSize returns the PageSize field; safe to call on a nil receiver.
func (x *ListNotificationChannelDescriptorsRequest) GetPageSize() int32 {
	if x != nil {
		return x.PageSize
	}
	return 0
}

// GetPageToken returns the PageToken field; safe to call on a nil receiver.
func (x *ListNotificationChannelDescriptorsRequest) GetPageToken() string {
	if x != nil {
		return x.PageToken
	}
	return ""
}
// The `ListNotificationChannelDescriptors` response.
type ListNotificationChannelDescriptorsResponse struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	// The monitored resource descriptors supported for the specified
	// project, optionally filtered.
	ChannelDescriptors []*NotificationChannelDescriptor `protobuf:"bytes,1,rep,name=channel_descriptors,json=channelDescriptors,proto3" json:"channel_descriptors,omitempty"`
	// If not empty, indicates that there may be more results that match
	// the request. Use the value in the `page_token` field in a
	// subsequent request to fetch the next set of results. If empty,
	// all results have been returned.
	NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
}

// Reset restores the message to its empty state, re-registering its message
// info when the unsafe fast path is enabled.
func (x *ListNotificationChannelDescriptorsResponse) Reset() {
	*x = ListNotificationChannelDescriptorsResponse{}
	if protoimpl.UnsafeEnabled {
		mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[1]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String renders the message using the protobuf text format.
func (x *ListNotificationChannelDescriptorsResponse) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks this struct as a protobuf message.
func (*ListNotificationChannelDescriptorsResponse) ProtoMessage() {}

// ProtoReflect returns a reflective view of the message.
func (x *ListNotificationChannelDescriptorsResponse) ProtoReflect() protoreflect.Message {
	mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[1]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ListNotificationChannelDescriptorsResponse.ProtoReflect.Descriptor instead.
func (*ListNotificationChannelDescriptorsResponse) Descriptor() ([]byte, []int) {
	return file_google_monitoring_v3_notification_service_proto_rawDescGZIP(), []int{1}
}

// GetChannelDescriptors returns the ChannelDescriptors field; safe to call on
// a nil receiver.
func (x *ListNotificationChannelDescriptorsResponse) GetChannelDescriptors() []*NotificationChannelDescriptor {
	if x != nil {
		return x.ChannelDescriptors
	}
	return nil
}

// GetNextPageToken returns the NextPageToken field; safe to call on a nil
// receiver.
func (x *ListNotificationChannelDescriptorsResponse) GetNextPageToken() string {
	if x != nil {
		return x.NextPageToken
	}
	return ""
}
// The `GetNotificationChannelDescriptor` response.
type GetNotificationChannelDescriptorRequest struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	// Required. The channel type for which to execute the request. The format is:
	//
	//     projects/[PROJECT_ID_OR_NUMBER]/notificationChannelDescriptors/[CHANNEL_TYPE]
	Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
}

// Reset restores the message to its empty state, re-registering its message
// info when the unsafe fast path is enabled.
func (x *GetNotificationChannelDescriptorRequest) Reset() {
	*x = GetNotificationChannelDescriptorRequest{}
	if protoimpl.UnsafeEnabled {
		mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[2]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String renders the message using the protobuf text format.
func (x *GetNotificationChannelDescriptorRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks this struct as a protobuf message.
func (*GetNotificationChannelDescriptorRequest) ProtoMessage() {}

// ProtoReflect returns a reflective view of the message.
func (x *GetNotificationChannelDescriptorRequest) ProtoReflect() protoreflect.Message {
	mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[2]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use GetNotificationChannelDescriptorRequest.ProtoReflect.Descriptor instead.
func (*GetNotificationChannelDescriptorRequest) Descriptor() ([]byte, []int) {
	return file_google_monitoring_v3_notification_service_proto_rawDescGZIP(), []int{2}
}

// GetName returns the Name field; safe to call on a nil receiver.
func (x *GetNotificationChannelDescriptorRequest) GetName() string {
	if x != nil {
		return x.Name
	}
	return ""
}
// The `CreateNotificationChannel` request.
type CreateNotificationChannelRequest struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	// Required. The project on which to execute the request. The format is:
	//
	//     projects/[PROJECT_ID_OR_NUMBER]
	//
	// This names the container into which the channel will be
	// written, this does not name the newly created channel. The resulting
	// channel's name will have a normalized version of this field as a prefix,
	// but will add `/notificationChannels/[CHANNEL_ID]` to identify the channel.
	Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
	// Required. The definition of the `NotificationChannel` to create.
	NotificationChannel *NotificationChannel `protobuf:"bytes,2,opt,name=notification_channel,json=notificationChannel,proto3" json:"notification_channel,omitempty"`
}

// Reset restores the message to its empty state, re-registering its message
// info when the unsafe fast path is enabled.
func (x *CreateNotificationChannelRequest) Reset() {
	*x = CreateNotificationChannelRequest{}
	if protoimpl.UnsafeEnabled {
		mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[3]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String renders the message using the protobuf text format.
func (x *CreateNotificationChannelRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks this struct as a protobuf message.
func (*CreateNotificationChannelRequest) ProtoMessage() {}

// ProtoReflect returns a reflective view of the message.
func (x *CreateNotificationChannelRequest) ProtoReflect() protoreflect.Message {
	mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[3]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use CreateNotificationChannelRequest.ProtoReflect.Descriptor instead.
func (*CreateNotificationChannelRequest) Descriptor() ([]byte, []int) {
	return file_google_monitoring_v3_notification_service_proto_rawDescGZIP(), []int{3}
}

// GetName returns the Name field; safe to call on a nil receiver.
func (x *CreateNotificationChannelRequest) GetName() string {
	if x != nil {
		return x.Name
	}
	return ""
}

// GetNotificationChannel returns the NotificationChannel field; safe to call
// on a nil receiver.
func (x *CreateNotificationChannelRequest) GetNotificationChannel() *NotificationChannel {
	if x != nil {
		return x.NotificationChannel
	}
	return nil
}
// The `ListNotificationChannels` request.
type ListNotificationChannelsRequest struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	// Required. The project on which to execute the request. The format is:
	//
	//     projects/[PROJECT_ID_OR_NUMBER]
	//
	// This names the container
	// in which to look for the notification channels; it does not name a
	// specific channel. To query a specific channel by REST resource name, use
	// the
	// [`GetNotificationChannel`][google.monitoring.v3.NotificationChannelService.GetNotificationChannel]
	// operation.
	Name string `protobuf:"bytes,5,opt,name=name,proto3" json:"name,omitempty"`
	// If provided, this field specifies the criteria that must be met by
	// notification channels to be included in the response.
	//
	// For more details, see [sorting and
	// filtering](https://cloud.google.com/monitoring/api/v3/sorting-and-filtering).
	Filter string `protobuf:"bytes,6,opt,name=filter,proto3" json:"filter,omitempty"`
	// A comma-separated list of fields by which to sort the result. Supports
	// the same set of fields as in `filter`. Entries can be prefixed with
	// a minus sign to sort in descending rather than ascending order.
	//
	// For more details, see [sorting and
	// filtering](https://cloud.google.com/monitoring/api/v3/sorting-and-filtering).
	OrderBy string `protobuf:"bytes,7,opt,name=order_by,json=orderBy,proto3" json:"order_by,omitempty"`
	// The maximum number of results to return in a single response. If
	// not set to a positive number, a reasonable value will be chosen by the
	// service.
	PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
	// If non-empty, `page_token` must contain a value returned as the
	// `next_page_token` in a previous response to request the next set
	// of results.
	PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
}

// Reset restores the message to its empty state, re-registering its message
// info when the unsafe fast path is enabled.
func (x *ListNotificationChannelsRequest) Reset() {
	*x = ListNotificationChannelsRequest{}
	if protoimpl.UnsafeEnabled {
		mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[4]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String renders the message using the protobuf text format.
func (x *ListNotificationChannelsRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks this struct as a protobuf message.
func (*ListNotificationChannelsRequest) ProtoMessage() {}

// ProtoReflect returns a reflective view of the message.
func (x *ListNotificationChannelsRequest) ProtoReflect() protoreflect.Message {
	mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[4]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ListNotificationChannelsRequest.ProtoReflect.Descriptor instead.
func (*ListNotificationChannelsRequest) Descriptor() ([]byte, []int) {
	return file_google_monitoring_v3_notification_service_proto_rawDescGZIP(), []int{4}
}

// GetName returns the Name field; safe to call on a nil receiver.
func (x *ListNotificationChannelsRequest) GetName() string {
	if x != nil {
		return x.Name
	}
	return ""
}

// GetFilter returns the Filter field; safe to call on a nil receiver.
func (x *ListNotificationChannelsRequest) GetFilter() string {
	if x != nil {
		return x.Filter
	}
	return ""
}

// GetOrderBy returns the OrderBy field; safe to call on a nil receiver.
func (x *ListNotificationChannelsRequest) GetOrderBy() string {
	if x != nil {
		return x.OrderBy
	}
	return ""
}

// GetPageSize returns the PageSize field; safe to call on a nil receiver.
func (x *ListNotificationChannelsRequest) GetPageSize() int32 {
	if x != nil {
		return x.PageSize
	}
	return 0
}

// GetPageToken returns the PageToken field; safe to call on a nil receiver.
func (x *ListNotificationChannelsRequest) GetPageToken() string {
	if x != nil {
		return x.PageToken
	}
	return ""
}
// The `ListNotificationChannels` response.
type ListNotificationChannelsResponse struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	// The notification channels defined for the specified project.
	NotificationChannels []*NotificationChannel `protobuf:"bytes,3,rep,name=notification_channels,json=notificationChannels,proto3" json:"notification_channels,omitempty"`
	// If not empty, indicates that there may be more results that match
	// the request. Use the value in the `page_token` field in a
	// subsequent request to fetch the next set of results. If empty,
	// all results have been returned.
	NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
}

// Reset restores the message to its empty state, re-registering its message
// info when the unsafe fast path is enabled.
func (x *ListNotificationChannelsResponse) Reset() {
	*x = ListNotificationChannelsResponse{}
	if protoimpl.UnsafeEnabled {
		mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[5]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String renders the message using the protobuf text format.
func (x *ListNotificationChannelsResponse) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks this struct as a protobuf message.
func (*ListNotificationChannelsResponse) ProtoMessage() {}

// ProtoReflect returns a reflective view of the message.
func (x *ListNotificationChannelsResponse) ProtoReflect() protoreflect.Message {
	mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[5]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ListNotificationChannelsResponse.ProtoReflect.Descriptor instead.
func (*ListNotificationChannelsResponse) Descriptor() ([]byte, []int) {
	return file_google_monitoring_v3_notification_service_proto_rawDescGZIP(), []int{5}
}

// GetNotificationChannels returns the NotificationChannels field; safe to
// call on a nil receiver.
func (x *ListNotificationChannelsResponse) GetNotificationChannels() []*NotificationChannel {
	if x != nil {
		return x.NotificationChannels
	}
	return nil
}

// GetNextPageToken returns the NextPageToken field; safe to call on a nil
// receiver.
func (x *ListNotificationChannelsResponse) GetNextPageToken() string {
	if x != nil {
		return x.NextPageToken
	}
	return ""
}
// The `GetNotificationChannel` request.
type GetNotificationChannelRequest struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	// Required. The channel for which to execute the request. The format is:
	//
	//     projects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID]
	Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
}

// Reset zeroes the message; when the protoimpl unsafe fast path is enabled it
// also re-stores the message-info pointer for this message type.
func (x *GetNotificationChannelRequest) Reset() {
	*x = GetNotificationChannelRequest{}
	if protoimpl.UnsafeEnabled {
		mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[6]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String returns a string representation of the message via the protoimpl runtime.
func (x *GetNotificationChannelRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks this type as a protobuf message.
func (*GetNotificationChannelRequest) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily caching the
// message-info when the unsafe fast path is enabled.
func (x *GetNotificationChannelRequest) ProtoReflect() protoreflect.Message {
	mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[6]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use GetNotificationChannelRequest.ProtoReflect.Descriptor instead.
func (*GetNotificationChannelRequest) Descriptor() ([]byte, []int) {
	return file_google_monitoring_v3_notification_service_proto_rawDescGZIP(), []int{6}
}

// GetName returns x.Name, or "" if the receiver is nil.
func (x *GetNotificationChannelRequest) GetName() string {
	if x != nil {
		return x.Name
	}
	return ""
}
// The `UpdateNotificationChannel` request.
type UpdateNotificationChannelRequest struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	// The fields to update.
	UpdateMask *field_mask.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"`
	// Required. A description of the changes to be applied to the specified
	// notification channel. The description must provide a definition for
	// fields to be updated; the names of these fields should also be
	// included in the `update_mask`.
	NotificationChannel *NotificationChannel `protobuf:"bytes,3,opt,name=notification_channel,json=notificationChannel,proto3" json:"notification_channel,omitempty"`
}

// Reset zeroes the message; when the protoimpl unsafe fast path is enabled it
// also re-stores the message-info pointer for this message type.
func (x *UpdateNotificationChannelRequest) Reset() {
	*x = UpdateNotificationChannelRequest{}
	if protoimpl.UnsafeEnabled {
		mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[7]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String returns a string representation of the message via the protoimpl runtime.
func (x *UpdateNotificationChannelRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks this type as a protobuf message.
func (*UpdateNotificationChannelRequest) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily caching the
// message-info when the unsafe fast path is enabled.
func (x *UpdateNotificationChannelRequest) ProtoReflect() protoreflect.Message {
	mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[7]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use UpdateNotificationChannelRequest.ProtoReflect.Descriptor instead.
func (*UpdateNotificationChannelRequest) Descriptor() ([]byte, []int) {
	return file_google_monitoring_v3_notification_service_proto_rawDescGZIP(), []int{7}
}

// GetUpdateMask returns x.UpdateMask, or nil if the receiver is nil.
func (x *UpdateNotificationChannelRequest) GetUpdateMask() *field_mask.FieldMask {
	if x != nil {
		return x.UpdateMask
	}
	return nil
}

// GetNotificationChannel returns x.NotificationChannel, or nil if the receiver is nil.
func (x *UpdateNotificationChannelRequest) GetNotificationChannel() *NotificationChannel {
	if x != nil {
		return x.NotificationChannel
	}
	return nil
}
// The `DeleteNotificationChannel` request.
type DeleteNotificationChannelRequest struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	// Required. The channel for which to execute the request. The format is:
	//
	//     projects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID]
	Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
	// If true, the notification channel will be deleted regardless of its
	// use in alert policies (the policies will be updated to remove the
	// channel). If false, channels that are still referenced by an existing
	// alerting policy will fail to be deleted in a delete operation.
	Force bool `protobuf:"varint,5,opt,name=force,proto3" json:"force,omitempty"`
}

// Reset zeroes the message; when the protoimpl unsafe fast path is enabled it
// also re-stores the message-info pointer for this message type.
func (x *DeleteNotificationChannelRequest) Reset() {
	*x = DeleteNotificationChannelRequest{}
	if protoimpl.UnsafeEnabled {
		mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[8]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String returns a string representation of the message via the protoimpl runtime.
func (x *DeleteNotificationChannelRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks this type as a protobuf message.
func (*DeleteNotificationChannelRequest) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily caching the
// message-info when the unsafe fast path is enabled.
func (x *DeleteNotificationChannelRequest) ProtoReflect() protoreflect.Message {
	mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[8]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use DeleteNotificationChannelRequest.ProtoReflect.Descriptor instead.
func (*DeleteNotificationChannelRequest) Descriptor() ([]byte, []int) {
	return file_google_monitoring_v3_notification_service_proto_rawDescGZIP(), []int{8}
}

// GetName returns x.Name, or "" if the receiver is nil.
func (x *DeleteNotificationChannelRequest) GetName() string {
	if x != nil {
		return x.Name
	}
	return ""
}

// GetForce returns x.Force, or false if the receiver is nil.
func (x *DeleteNotificationChannelRequest) GetForce() bool {
	if x != nil {
		return x.Force
	}
	return false
}
// The `SendNotificationChannelVerificationCode` request.
type SendNotificationChannelVerificationCodeRequest struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	// Required. The notification channel to which to send a verification code.
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
}

// Reset zeroes the message; when the protoimpl unsafe fast path is enabled it
// also re-stores the message-info pointer for this message type.
func (x *SendNotificationChannelVerificationCodeRequest) Reset() {
	*x = SendNotificationChannelVerificationCodeRequest{}
	if protoimpl.UnsafeEnabled {
		mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[9]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String returns a string representation of the message via the protoimpl runtime.
func (x *SendNotificationChannelVerificationCodeRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks this type as a protobuf message.
func (*SendNotificationChannelVerificationCodeRequest) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily caching the
// message-info when the unsafe fast path is enabled.
func (x *SendNotificationChannelVerificationCodeRequest) ProtoReflect() protoreflect.Message {
	mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[9]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use SendNotificationChannelVerificationCodeRequest.ProtoReflect.Descriptor instead.
func (*SendNotificationChannelVerificationCodeRequest) Descriptor() ([]byte, []int) {
	return file_google_monitoring_v3_notification_service_proto_rawDescGZIP(), []int{9}
}

// GetName returns x.Name, or "" if the receiver is nil.
func (x *SendNotificationChannelVerificationCodeRequest) GetName() string {
	if x != nil {
		return x.Name
	}
	return ""
}
// The `GetNotificationChannelVerificationCode` request.
type GetNotificationChannelVerificationCodeRequest struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	// Required. The notification channel for which a verification code is to be generated
	// and retrieved. This must name a channel that is already verified; if
	// the specified channel is not verified, the request will fail.
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	// The desired expiration time. If specified, the API will guarantee that
	// the returned code will not be valid after the specified timestamp;
	// however, the API cannot guarantee that the returned code will be
	// valid for at least as long as the requested time (the API puts an upper
	// bound on the amount of time for which a code may be valid). If omitted,
	// a default expiration will be used, which may be less than the max
	// permissible expiration (so specifying an expiration may extend the
	// code's lifetime over omitting an expiration, even though the API does
	// impose an upper limit on the maximum expiration that is permitted).
	ExpireTime *timestamp.Timestamp `protobuf:"bytes,2,opt,name=expire_time,json=expireTime,proto3" json:"expire_time,omitempty"`
}

// Reset zeroes the message; when the protoimpl unsafe fast path is enabled it
// also re-stores the message-info pointer for this message type.
func (x *GetNotificationChannelVerificationCodeRequest) Reset() {
	*x = GetNotificationChannelVerificationCodeRequest{}
	if protoimpl.UnsafeEnabled {
		mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[10]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String returns a string representation of the message via the protoimpl runtime.
func (x *GetNotificationChannelVerificationCodeRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks this type as a protobuf message.
func (*GetNotificationChannelVerificationCodeRequest) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily caching the
// message-info when the unsafe fast path is enabled.
func (x *GetNotificationChannelVerificationCodeRequest) ProtoReflect() protoreflect.Message {
	mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[10]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use GetNotificationChannelVerificationCodeRequest.ProtoReflect.Descriptor instead.
func (*GetNotificationChannelVerificationCodeRequest) Descriptor() ([]byte, []int) {
	return file_google_monitoring_v3_notification_service_proto_rawDescGZIP(), []int{10}
}

// GetName returns x.Name, or "" if the receiver is nil.
func (x *GetNotificationChannelVerificationCodeRequest) GetName() string {
	if x != nil {
		return x.Name
	}
	return ""
}

// GetExpireTime returns x.ExpireTime, or nil if the receiver is nil.
func (x *GetNotificationChannelVerificationCodeRequest) GetExpireTime() *timestamp.Timestamp {
	if x != nil {
		return x.ExpireTime
	}
	return nil
}
// The `GetNotificationChannelVerificationCode` response.
// (NOTE(review): the generated comment said "request", but this type is the
// response message for that RPC.)
type GetNotificationChannelVerificationCodeResponse struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	// The verification code, which may be used to verify other channels
	// that have an equivalent identity (i.e. other channels of the same
	// type with the same fingerprint such as other email channels with
	// the same email address or other sms channels with the same number).
	Code string `protobuf:"bytes,1,opt,name=code,proto3" json:"code,omitempty"`
	// The expiration time associated with the code that was returned. If
	// an expiration was provided in the request, this is the minimum of the
	// requested expiration in the request and the max permitted expiration.
	ExpireTime *timestamp.Timestamp `protobuf:"bytes,2,opt,name=expire_time,json=expireTime,proto3" json:"expire_time,omitempty"`
}

// Reset zeroes the message; when the protoimpl unsafe fast path is enabled it
// also re-stores the message-info pointer for this message type.
func (x *GetNotificationChannelVerificationCodeResponse) Reset() {
	*x = GetNotificationChannelVerificationCodeResponse{}
	if protoimpl.UnsafeEnabled {
		mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[11]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String returns a string representation of the message via the protoimpl runtime.
func (x *GetNotificationChannelVerificationCodeResponse) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks this type as a protobuf message.
func (*GetNotificationChannelVerificationCodeResponse) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily caching the
// message-info when the unsafe fast path is enabled.
func (x *GetNotificationChannelVerificationCodeResponse) ProtoReflect() protoreflect.Message {
	mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[11]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use GetNotificationChannelVerificationCodeResponse.ProtoReflect.Descriptor instead.
func (*GetNotificationChannelVerificationCodeResponse) Descriptor() ([]byte, []int) {
	return file_google_monitoring_v3_notification_service_proto_rawDescGZIP(), []int{11}
}

// GetCode returns x.Code, or "" if the receiver is nil.
func (x *GetNotificationChannelVerificationCodeResponse) GetCode() string {
	if x != nil {
		return x.Code
	}
	return ""
}

// GetExpireTime returns x.ExpireTime, or nil if the receiver is nil.
func (x *GetNotificationChannelVerificationCodeResponse) GetExpireTime() *timestamp.Timestamp {
	if x != nil {
		return x.ExpireTime
	}
	return nil
}
// The `VerifyNotificationChannel` request.
type VerifyNotificationChannelRequest struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	// Required. The notification channel to verify.
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	// Required. The verification code that was delivered to the channel as
	// a result of invoking the `SendNotificationChannelVerificationCode` API
	// method or that was retrieved from a verified channel via
	// `GetNotificationChannelVerificationCode`. For example, one might have
	// "G-123456" or "TKNZGhhd2EyN3I1MnRnMjRv" (in general, one is only
	// guaranteed that the code is valid UTF-8; one should not
	// make any assumptions regarding the structure or format of the code).
	Code string `protobuf:"bytes,2,opt,name=code,proto3" json:"code,omitempty"`
}

// Reset zeroes the message; when the protoimpl unsafe fast path is enabled it
// also re-stores the message-info pointer for this message type.
func (x *VerifyNotificationChannelRequest) Reset() {
	*x = VerifyNotificationChannelRequest{}
	if protoimpl.UnsafeEnabled {
		mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[12]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String returns a string representation of the message via the protoimpl runtime.
func (x *VerifyNotificationChannelRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks this type as a protobuf message.
func (*VerifyNotificationChannelRequest) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily caching the
// message-info when the unsafe fast path is enabled.
func (x *VerifyNotificationChannelRequest) ProtoReflect() protoreflect.Message {
	mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[12]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use VerifyNotificationChannelRequest.ProtoReflect.Descriptor instead.
func (*VerifyNotificationChannelRequest) Descriptor() ([]byte, []int) {
	return file_google_monitoring_v3_notification_service_proto_rawDescGZIP(), []int{12}
}

// GetName returns x.Name, or "" if the receiver is nil.
func (x *VerifyNotificationChannelRequest) GetName() string {
	if x != nil {
		return x.Name
	}
	return ""
}

// GetCode returns x.Code, or "" if the receiver is nil.
func (x *VerifyNotificationChannelRequest) GetCode() string {
	if x != nil {
		return x.Code
	}
	return ""
}
// File_google_monitoring_v3_notification_service_proto is the compiled
// FileDescriptor for this proto file (populated elsewhere in this generated
// file — presumably by the package's init/TypeBuilder code; not visible here).
var File_google_monitoring_v3_notification_service_proto protoreflect.FileDescriptor
var file_google_monitoring_v3_notification_service_proto_rawDesc = []byte{
0x0a, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74,
0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f,
0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f,
0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70,
0x69, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f,
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64,
0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a,
0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f,
0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x27, 0x67, 0x6f, 0x6f, 0x67,
0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33,
0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f,
0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x22, 0xbc, 0x01, 0x0a, 0x29, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69,
0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73,
0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
0x53, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x3f, 0xe0,
0x41, 0x02, 0xfa, 0x41, 0x39, 0x12, 0x37, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e,
0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d,
0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61,
0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x04,
0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a,
0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a,
0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18,
0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e,
0x22, 0xba, 0x01, 0x0a, 0x2a, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63,
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63,
0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12,
0x64, 0x0a, 0x13, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72,
0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67,
0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f,
0x72, 0x52, 0x12, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69,
0x70, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61,
0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d,
0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x7e, 0x0a,
0x27, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f,
0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x53, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65,
0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x3f, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x39, 0x0a, 0x37,
0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69,
0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73,
0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xd0, 0x01,
0x0a, 0x20, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61,
0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65,
0x73, 0x74, 0x12, 0x49, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09,
0x42, 0x35, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2f, 0x12, 0x2d, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f,
0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e,
0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x61, 0x0a,
0x14, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x68,
0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f,
0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e,
0x76, 0x33, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43,
0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x13, 0x6e, 0x6f, 0x74,
0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c,
0x22, 0xdb, 0x01, 0x0a, 0x1f, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63,
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x52, 0x65, 0x71,
0x75, 0x65, 0x73, 0x74, 0x12, 0x49, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01,
0x28, 0x09, 0x42, 0x35, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2f, 0x12, 0x2d, 0x6d, 0x6f, 0x6e, 0x69,
0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69,
0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69,
0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12,
0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52,
0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x19, 0x0a, 0x08, 0x6f, 0x72, 0x64, 0x65, 0x72,
0x5f, 0x62, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6f, 0x72, 0x64, 0x65, 0x72,
0x42, 0x79, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18,
0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12,
0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x04, 0x20,
0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xaa,
0x01, 0x0a, 0x20, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74,
0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f,
0x6e, 0x73, 0x65, 0x12, 0x5e, 0x0a, 0x15, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74,
0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03,
0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69,
0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69,
0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52, 0x14, 0x6e,
0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e,
0x65, 0x6c, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65,
0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65,
0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x6a, 0x0a, 0x1d, 0x47,
0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68,
0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x49, 0x0a, 0x04,
0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x35, 0xe0, 0x41, 0x02, 0xfa,
0x41, 0x2f, 0x0a, 0x2d, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f,
0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65,
0x6c, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xc2, 0x01, 0x0a, 0x20, 0x55, 0x70, 0x64, 0x61,
0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68,
0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3b, 0x0a, 0x0b,
0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28,
0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x52, 0x0a, 0x75,
0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x12, 0x61, 0x0a, 0x14, 0x6e, 0x6f, 0x74,
0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65,
0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4e,
0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e,
0x65, 0x6c, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x13, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63,
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x22, 0x83, 0x01, 0x0a,
0x20, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74,
0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
0x74, 0x12, 0x49, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42,
0x35, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2f, 0x0a, 0x2d, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63,
0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43,
0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05,
0x66, 0x6f, 0x72, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x72,
0x63, 0x65, 0x22, 0x7b, 0x0a, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69,
0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x56, 0x65, 0x72,
0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71,
0x75, 0x65, 0x73, 0x74, 0x12, 0x49, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01,
0x28, 0x09, 0x42, 0x35, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2f, 0x0a, 0x2d, 0x6d, 0x6f, 0x6e, 0x69,
0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69,
0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69,
0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22,
0xb7, 0x01, 0x0a, 0x2d, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74,
0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69,
0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
0x74, 0x12, 0x49, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42,
0x35, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2f, 0x0a, 0x2d, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63,
0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43,
0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x0b,
0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x65,
0x78, 0x70, 0x69, 0x72, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x22, 0x81, 0x01, 0x0a, 0x2e, 0x47, 0x65,
0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61,
0x6e, 0x6e, 0x65, 0x6c, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
0x43, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04,
0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65,
0x12, 0x3b, 0x0a, 0x0b, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18,
0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d,
0x70, 0x52, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x22, 0x86, 0x01,
0x0a, 0x20, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61,
0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65,
0x73, 0x74, 0x12, 0x49, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
0x42, 0x35, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2f, 0x0a, 0x2d, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f,
0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e,
0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x17, 0x0a,
0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02,
0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x32, 0xea, 0x12, 0x0a, 0x1a, 0x4e, 0x6f, 0x74, 0x69, 0x66,
0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x53, 0x65,
0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0xec, 0x01, 0x0a, 0x22, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f,
0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65,
0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x3f, 0x2e, 0x67,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67,
0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61,
0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72,
0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x40, 0x2e,
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e,
0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63,
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63,
0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
0x43, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x36, 0x12, 0x34, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61,
0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x6e,
0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e,
0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0xda, 0x41, 0x04,
0x6e, 0x61, 0x6d, 0x65, 0x12, 0xdd, 0x01, 0x0a, 0x20, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69,
0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44,
0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x12, 0x3d, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33,
0x2e, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f,
0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e,
0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e,
0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x22, 0x45, 0x82,
0xd3, 0xe4, 0x93, 0x02, 0x38, 0x12, 0x36, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65,
0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6e, 0x6f, 0x74, 0x69,
0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44,
0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x2f, 0x2a, 0x7d, 0xda, 0x41, 0x04,
0x6e, 0x61, 0x6d, 0x65, 0x12, 0xc4, 0x01, 0x0a, 0x18, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74,
0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c,
0x73, 0x12, 0x35, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74,
0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74,
0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c,
0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e,
0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
0x22, 0x39, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2c, 0x12, 0x2a, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e,
0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f,
0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e,
0x6e, 0x65, 0x6c, 0x73, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0xb5, 0x01, 0x0a, 0x16,
0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43,
0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x12, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x65,
0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61,
0x6e, 0x6e, 0x65, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x67, 0x6f,
0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e,
0x76, 0x33, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43,
0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x22, 0x3b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2e, 0x12, 0x2c,
0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63,
0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f,
0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x2f, 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e,
0x61, 0x6d, 0x65, 0x12, 0xe4, 0x01, 0x0a, 0x19, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f,
0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65,
0x6c, 0x12, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74,
0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e,
0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e,
0x65, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33,
0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61,
0x6e, 0x6e, 0x65, 0x6c, 0x22, 0x64, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x42, 0x22, 0x2a, 0x2f, 0x76,
0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73,
0x2f, 0x2a, 0x7d, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x3a, 0x14, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69,
0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0xda, 0x41,
0x19, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69,
0x6f, 0x6e, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x12, 0x83, 0x02, 0x0a, 0x19, 0x55,
0x70, 0x64, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f,
0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x12, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e,
0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69,
0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
0x1a, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f,
0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61,
0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x22, 0x82, 0x01, 0x82, 0xd3,
0xe4, 0x93, 0x02, 0x59, 0x32, 0x41, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x6f, 0x74, 0x69, 0x66,
0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x2e,
0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f,
0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e,
0x6e, 0x65, 0x6c, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x14, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63,
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0xda, 0x41, 0x20,
0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2c, 0x6e, 0x6f, 0x74, 0x69,
0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c,
0x12, 0xae, 0x01, 0x0a, 0x19, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66,
0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x12, 0x36,
0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69,
0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69,
0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52,
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x41,
0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2e, 0x2a, 0x2c, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d,
0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6e, 0x6f, 0x74,
0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c,
0x73, 0x2f, 0x2a, 0x7d, 0xda, 0x41, 0x0a, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x66, 0x6f, 0x72, 0x63,
0x65, 0x12, 0xdc, 0x01, 0x0a, 0x27, 0x53, 0x65, 0x6e, 0x64, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69,
0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x56, 0x65, 0x72,
0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x44, 0x2e,
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e,
0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63,
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x56, 0x65, 0x72, 0x69,
0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75,
0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x53, 0x82, 0xd3, 0xe4,
0x93, 0x02, 0x46, 0x22, 0x41, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70,
0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69,
0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x2f, 0x2a,
0x7d, 0x3a, 0x73, 0x65, 0x6e, 0x64, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69,
0x6f, 0x6e, 0x43, 0x6f, 0x64, 0x65, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65,
0x12, 0x87, 0x02, 0x0a, 0x26, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61,
0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x56, 0x65, 0x72, 0x69, 0x66,
0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x43, 0x2e, 0x67, 0x6f,
0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e,
0x76, 0x33, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69,
0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63,
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
0x1a, 0x44, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f,
0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66,
0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x56, 0x65,
0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x65,
0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x52, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x45, 0x22, 0x40,
0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63,
0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f,
0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x67, 0x65, 0x74,
0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x64, 0x65,
0x3a, 0x01, 0x2a, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0xca, 0x01, 0x0a, 0x19, 0x56,
0x65, 0x72, 0x69, 0x66, 0x79, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f,
0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x12, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e,
0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69,
0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
0x1a, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f,
0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61,
0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x22, 0x4a, 0x82, 0xd3, 0xe4,
0x93, 0x02, 0x38, 0x22, 0x33, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70,
0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69,
0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x2f, 0x2a,
0x7d, 0x3a, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x09, 0x6e, 0x61,
0x6d, 0x65, 0x2c, 0x63, 0x6f, 0x64, 0x65, 0x1a, 0xa9, 0x01, 0xca, 0x41, 0x19, 0x6d, 0x6f, 0x6e,
0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70,
0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x89, 0x01, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a,
0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73,
0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d,
0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f,
0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e,
0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
0x69, 0x6e, 0x67, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e,
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61,
0x75, 0x74, 0x68, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x72,
0x65, 0x61, 0x64, 0x42, 0xd0, 0x01, 0x0a, 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33,
0x42, 0x18, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65,
0x72, 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3e, 0x67, 0x6f,
0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f,
0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61,
0x70, 0x69, 0x73, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76,
0x33, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0xaa, 0x02, 0x1a, 0x47,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69,
0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67,
0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
0x69, 0x6e, 0x67, 0x5c, 0x56, 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a,
0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69,
0x6e, 0x67, 0x3a, 0x3a, 0x56, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
// rawDescOnce guards the one-time GZIP compression of the raw descriptor;
// rawDescData starts as the uncompressed bytes and is swapped for the
// compressed form on first use (see rawDescGZIP below).
var (
file_google_monitoring_v3_notification_service_proto_rawDescOnce sync.Once
file_google_monitoring_v3_notification_service_proto_rawDescData = file_google_monitoring_v3_notification_service_proto_rawDesc
)
// file_google_monitoring_v3_notification_service_proto_rawDescGZIP returns
// the GZIP-compressed raw file descriptor. Compression happens at most once
// (guarded by rawDescOnce) and the result is cached in rawDescData.
func file_google_monitoring_v3_notification_service_proto_rawDescGZIP() []byte {
file_google_monitoring_v3_notification_service_proto_rawDescOnce.Do(func() {
file_google_monitoring_v3_notification_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_notification_service_proto_rawDescData)
})
return file_google_monitoring_v3_notification_service_proto_rawDescData
}
var file_google_monitoring_v3_notification_service_proto_msgTypes = make([]protoimpl.MessageInfo, 13)
// goTypes maps descriptor type indices to Go types: entries 0-12 are this
// file's messages, 13+ are imported dependency types. The order must match
// the indices used in depIdxs below — do not reorder.
var file_google_monitoring_v3_notification_service_proto_goTypes = []interface{}{
(*ListNotificationChannelDescriptorsRequest)(nil), // 0: google.monitoring.v3.ListNotificationChannelDescriptorsRequest
(*ListNotificationChannelDescriptorsResponse)(nil), // 1: google.monitoring.v3.ListNotificationChannelDescriptorsResponse
(*GetNotificationChannelDescriptorRequest)(nil), // 2: google.monitoring.v3.GetNotificationChannelDescriptorRequest
(*CreateNotificationChannelRequest)(nil), // 3: google.monitoring.v3.CreateNotificationChannelRequest
(*ListNotificationChannelsRequest)(nil), // 4: google.monitoring.v3.ListNotificationChannelsRequest
(*ListNotificationChannelsResponse)(nil), // 5: google.monitoring.v3.ListNotificationChannelsResponse
(*GetNotificationChannelRequest)(nil), // 6: google.monitoring.v3.GetNotificationChannelRequest
(*UpdateNotificationChannelRequest)(nil), // 7: google.monitoring.v3.UpdateNotificationChannelRequest
(*DeleteNotificationChannelRequest)(nil), // 8: google.monitoring.v3.DeleteNotificationChannelRequest
(*SendNotificationChannelVerificationCodeRequest)(nil), // 9: google.monitoring.v3.SendNotificationChannelVerificationCodeRequest
(*GetNotificationChannelVerificationCodeRequest)(nil), // 10: google.monitoring.v3.GetNotificationChannelVerificationCodeRequest
(*GetNotificationChannelVerificationCodeResponse)(nil), // 11: google.monitoring.v3.GetNotificationChannelVerificationCodeResponse
(*VerifyNotificationChannelRequest)(nil), // 12: google.monitoring.v3.VerifyNotificationChannelRequest
(*NotificationChannelDescriptor)(nil), // 13: google.monitoring.v3.NotificationChannelDescriptor
(*NotificationChannel)(nil), // 14: google.monitoring.v3.NotificationChannel
(*field_mask.FieldMask)(nil), // 15: google.protobuf.FieldMask
(*timestamp.Timestamp)(nil), // 16: google.protobuf.Timestamp
(*empty.Empty)(nil), // 17: google.protobuf.Empty
}
// depIdxs records, as indices into goTypes above, the type of every message
// field, RPC input, and RPC output; the trailing bracketed entries delimit the
// sub-lists. Order and values are tied to the raw descriptor — do not edit.
var file_google_monitoring_v3_notification_service_proto_depIdxs = []int32{
13, // 0: google.monitoring.v3.ListNotificationChannelDescriptorsResponse.channel_descriptors:type_name -> google.monitoring.v3.NotificationChannelDescriptor
14, // 1: google.monitoring.v3.CreateNotificationChannelRequest.notification_channel:type_name -> google.monitoring.v3.NotificationChannel
14, // 2: google.monitoring.v3.ListNotificationChannelsResponse.notification_channels:type_name -> google.monitoring.v3.NotificationChannel
15, // 3: google.monitoring.v3.UpdateNotificationChannelRequest.update_mask:type_name -> google.protobuf.FieldMask
14, // 4: google.monitoring.v3.UpdateNotificationChannelRequest.notification_channel:type_name -> google.monitoring.v3.NotificationChannel
16, // 5: google.monitoring.v3.GetNotificationChannelVerificationCodeRequest.expire_time:type_name -> google.protobuf.Timestamp
16, // 6: google.monitoring.v3.GetNotificationChannelVerificationCodeResponse.expire_time:type_name -> google.protobuf.Timestamp
0, // 7: google.monitoring.v3.NotificationChannelService.ListNotificationChannelDescriptors:input_type -> google.monitoring.v3.ListNotificationChannelDescriptorsRequest
2, // 8: google.monitoring.v3.NotificationChannelService.GetNotificationChannelDescriptor:input_type -> google.monitoring.v3.GetNotificationChannelDescriptorRequest
4, // 9: google.monitoring.v3.NotificationChannelService.ListNotificationChannels:input_type -> google.monitoring.v3.ListNotificationChannelsRequest
6, // 10: google.monitoring.v3.NotificationChannelService.GetNotificationChannel:input_type -> google.monitoring.v3.GetNotificationChannelRequest
3, // 11: google.monitoring.v3.NotificationChannelService.CreateNotificationChannel:input_type -> google.monitoring.v3.CreateNotificationChannelRequest
7, // 12: google.monitoring.v3.NotificationChannelService.UpdateNotificationChannel:input_type -> google.monitoring.v3.UpdateNotificationChannelRequest
8, // 13: google.monitoring.v3.NotificationChannelService.DeleteNotificationChannel:input_type -> google.monitoring.v3.DeleteNotificationChannelRequest
9, // 14: google.monitoring.v3.NotificationChannelService.SendNotificationChannelVerificationCode:input_type -> google.monitoring.v3.SendNotificationChannelVerificationCodeRequest
10, // 15: google.monitoring.v3.NotificationChannelService.GetNotificationChannelVerificationCode:input_type -> google.monitoring.v3.GetNotificationChannelVerificationCodeRequest
12, // 16: google.monitoring.v3.NotificationChannelService.VerifyNotificationChannel:input_type -> google.monitoring.v3.VerifyNotificationChannelRequest
1, // 17: google.monitoring.v3.NotificationChannelService.ListNotificationChannelDescriptors:output_type -> google.monitoring.v3.ListNotificationChannelDescriptorsResponse
13, // 18: google.monitoring.v3.NotificationChannelService.GetNotificationChannelDescriptor:output_type -> google.monitoring.v3.NotificationChannelDescriptor
5, // 19: google.monitoring.v3.NotificationChannelService.ListNotificationChannels:output_type -> google.monitoring.v3.ListNotificationChannelsResponse
14, // 20: google.monitoring.v3.NotificationChannelService.GetNotificationChannel:output_type -> google.monitoring.v3.NotificationChannel
14, // 21: google.monitoring.v3.NotificationChannelService.CreateNotificationChannel:output_type -> google.monitoring.v3.NotificationChannel
14, // 22: google.monitoring.v3.NotificationChannelService.UpdateNotificationChannel:output_type -> google.monitoring.v3.NotificationChannel
17, // 23: google.monitoring.v3.NotificationChannelService.DeleteNotificationChannel:output_type -> google.protobuf.Empty
17, // 24: google.monitoring.v3.NotificationChannelService.SendNotificationChannelVerificationCode:output_type -> google.protobuf.Empty
11, // 25: google.monitoring.v3.NotificationChannelService.GetNotificationChannelVerificationCode:output_type -> google.monitoring.v3.GetNotificationChannelVerificationCodeResponse
14, // 26: google.monitoring.v3.NotificationChannelService.VerifyNotificationChannel:output_type -> google.monitoring.v3.NotificationChannel
17, // [17:27] is the sub-list for method output_type
7, // [7:17] is the sub-list for method input_type
7, // [7:7] is the sub-list for extension type_name
7, // [7:7] is the sub-list for extension extendee
0, // [0:7] is the sub-list for field type_name
}
func init() { file_google_monitoring_v3_notification_service_proto_init() }
// file_google_monitoring_v3_notification_service_proto_init builds and
// registers the type information for this file with the protobuf runtime.
// It is idempotent: once File_..._proto is set, further calls return
// immediately.
func file_google_monitoring_v3_notification_service_proto_init() {
if File_google_monitoring_v3_notification_service_proto != nil {
return
}
// Initialize the dependency file (notification.proto) first so its types
// are registered before this file's descriptor is built.
file_google_monitoring_v3_notification_proto_init()
if !protoimpl.UnsafeEnabled {
// Without package unsafe, the runtime needs exporter closures to reach
// each message's unexported state/sizeCache/unknownFields fields.
file_google_monitoring_v3_notification_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ListNotificationChannelDescriptorsRequest); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_google_monitoring_v3_notification_service_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ListNotificationChannelDescriptorsResponse); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_google_monitoring_v3_notification_service_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*GetNotificationChannelDescriptorRequest); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_google_monitoring_v3_notification_service_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*CreateNotificationChannelRequest); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_google_monitoring_v3_notification_service_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ListNotificationChannelsRequest); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_google_monitoring_v3_notification_service_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ListNotificationChannelsResponse); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_google_monitoring_v3_notification_service_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*GetNotificationChannelRequest); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_google_monitoring_v3_notification_service_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*UpdateNotificationChannelRequest); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_google_monitoring_v3_notification_service_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*DeleteNotificationChannelRequest); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_google_monitoring_v3_notification_service_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*SendNotificationChannelVerificationCodeRequest); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_google_monitoring_v3_notification_service_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*GetNotificationChannelVerificationCodeRequest); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_google_monitoring_v3_notification_service_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*GetNotificationChannelVerificationCodeResponse); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_google_monitoring_v3_notification_service_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*VerifyNotificationChannelRequest); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_google_monitoring_v3_notification_service_proto_rawDesc,
NumEnums: 0,
NumMessages: 13,
NumExtensions: 0,
NumServices: 1,
},
GoTypes: file_google_monitoring_v3_notification_service_proto_goTypes,
DependencyIndexes: file_google_monitoring_v3_notification_service_proto_depIdxs,
MessageInfos: file_google_monitoring_v3_notification_service_proto_msgTypes,
}.Build()
File_google_monitoring_v3_notification_service_proto = out.File
// Drop references to the construction-time tables so they can be GC'd;
// the runtime keeps its own copies after Build().
file_google_monitoring_v3_notification_service_proto_rawDesc = nil
file_google_monitoring_v3_notification_service_proto_goTypes = nil
file_google_monitoring_v3_notification_service_proto_depIdxs = nil
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConnInterface
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion6
// NotificationChannelServiceClient is the client API for NotificationChannelService service.
// All methods issue unary RPCs over the underlying connection.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type NotificationChannelServiceClient interface {
// Lists the descriptors for supported channel types. The use of descriptors
// makes it possible for new channel types to be dynamically added.
ListNotificationChannelDescriptors(ctx context.Context, in *ListNotificationChannelDescriptorsRequest, opts ...grpc.CallOption) (*ListNotificationChannelDescriptorsResponse, error)
// Gets a single channel descriptor. The descriptor indicates which fields
// are expected / permitted for a notification channel of the given type.
GetNotificationChannelDescriptor(ctx context.Context, in *GetNotificationChannelDescriptorRequest, opts ...grpc.CallOption) (*NotificationChannelDescriptor, error)
// Lists the notification channels that have been created for the project.
ListNotificationChannels(ctx context.Context, in *ListNotificationChannelsRequest, opts ...grpc.CallOption) (*ListNotificationChannelsResponse, error)
// Gets a single notification channel. The channel includes the relevant
// configuration details with which the channel was created. However, the
// response may truncate or omit passwords, API keys, or other private key
// matter and thus the response may not be 100% identical to the information
// that was supplied in the call to the create method.
GetNotificationChannel(ctx context.Context, in *GetNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error)
// Creates a new notification channel, representing a single notification
// endpoint such as an email address, SMS number, or PagerDuty service.
CreateNotificationChannel(ctx context.Context, in *CreateNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error)
// Updates a notification channel. Fields not specified in the field mask
// remain unchanged.
UpdateNotificationChannel(ctx context.Context, in *UpdateNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error)
// Deletes a notification channel.
DeleteNotificationChannel(ctx context.Context, in *DeleteNotificationChannelRequest, opts ...grpc.CallOption) (*empty.Empty, error)
// Causes a verification code to be delivered to the channel. The code
// can then be supplied in `VerifyNotificationChannel` to verify the channel.
SendNotificationChannelVerificationCode(ctx context.Context, in *SendNotificationChannelVerificationCodeRequest, opts ...grpc.CallOption) (*empty.Empty, error)
// Requests a verification code for an already verified channel that can then
// be used in a call to VerifyNotificationChannel() on a different channel
// with an equivalent identity in the same or in a different project. This
// makes it possible to copy a channel between projects without requiring
// manual reverification of the channel. If the channel is not in the
// verified state, this method will fail (in other words, this may only be
// used if the SendNotificationChannelVerificationCode and
// VerifyNotificationChannel paths have already been used to put the given
// channel into the verified state).
//
// There is no guarantee that the verification codes returned by this method
// will be of a similar structure or form as the ones that are delivered
// to the channel via SendNotificationChannelVerificationCode; while
// VerifyNotificationChannel() will recognize both the codes delivered via
// SendNotificationChannelVerificationCode() and returned from
// GetNotificationChannelVerificationCode(), it is typically the case that
// the verification codes delivered via
// SendNotificationChannelVerificationCode() will be shorter and also
// have a shorter expiration (e.g. codes such as "G-123456") whereas
// GetVerificationCode() will typically return a much longer, websafe base
// 64 encoded string that has a longer expiration time.
GetNotificationChannelVerificationCode(ctx context.Context, in *GetNotificationChannelVerificationCodeRequest, opts ...grpc.CallOption) (*GetNotificationChannelVerificationCodeResponse, error)
// Verifies a `NotificationChannel` by proving receipt of the code
// delivered to the channel as a result of calling
// `SendNotificationChannelVerificationCode`.
VerifyNotificationChannel(ctx context.Context, in *VerifyNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error)
}
// notificationChannelServiceClient is the concrete, connection-backed
// implementation of NotificationChannelServiceClient.
type notificationChannelServiceClient struct {
cc grpc.ClientConnInterface
}
func NewNotificationChannelServiceClient(cc grpc.ClientConnInterface) NotificationChannelServiceClient {
return ¬ificationChannelServiceClient{cc}
}
// ListNotificationChannelDescriptors issues the unary RPC of the same name
// and returns the decoded response, or the transport/server error unchanged.
func (c *notificationChannelServiceClient) ListNotificationChannelDescriptors(ctx context.Context, in *ListNotificationChannelDescriptorsRequest, opts ...grpc.CallOption) (*ListNotificationChannelDescriptorsResponse, error) {
	resp := new(ListNotificationChannelDescriptorsResponse)
	if err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/ListNotificationChannelDescriptors", in, resp, opts...); err != nil {
		return nil, err
	}
	return resp, nil
}
// GetNotificationChannelDescriptor issues the unary RPC of the same name
// and returns the decoded descriptor, or the transport/server error unchanged.
func (c *notificationChannelServiceClient) GetNotificationChannelDescriptor(ctx context.Context, in *GetNotificationChannelDescriptorRequest, opts ...grpc.CallOption) (*NotificationChannelDescriptor, error) {
	resp := new(NotificationChannelDescriptor)
	if err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/GetNotificationChannelDescriptor", in, resp, opts...); err != nil {
		return nil, err
	}
	return resp, nil
}
// ListNotificationChannels issues the unary RPC of the same name and returns
// the decoded response, or the transport/server error unchanged.
func (c *notificationChannelServiceClient) ListNotificationChannels(ctx context.Context, in *ListNotificationChannelsRequest, opts ...grpc.CallOption) (*ListNotificationChannelsResponse, error) {
	resp := new(ListNotificationChannelsResponse)
	if err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/ListNotificationChannels", in, resp, opts...); err != nil {
		return nil, err
	}
	return resp, nil
}
func (c *notificationChannelServiceClient) GetNotificationChannel(ctx context.Context, in *GetNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error) {
out := new(NotificationChannel)
err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/GetNotificationChannel", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *notificationChannelServiceClient) CreateNotificationChannel(ctx context.Context, in *CreateNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error) {
out := new(NotificationChannel)
err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/CreateNotificationChannel", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *notificationChannelServiceClient) UpdateNotificationChannel(ctx context.Context, in *UpdateNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error) {
out := new(NotificationChannel)
err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/UpdateNotificationChannel", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *notificationChannelServiceClient) DeleteNotificationChannel(ctx context.Context, in *DeleteNotificationChannelRequest, opts ...grpc.CallOption) (*empty.Empty, error) {
out := new(empty.Empty)
err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/DeleteNotificationChannel", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *notificationChannelServiceClient) SendNotificationChannelVerificationCode(ctx context.Context, in *SendNotificationChannelVerificationCodeRequest, opts ...grpc.CallOption) (*empty.Empty, error) {
out := new(empty.Empty)
err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/SendNotificationChannelVerificationCode", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *notificationChannelServiceClient) GetNotificationChannelVerificationCode(ctx context.Context, in *GetNotificationChannelVerificationCodeRequest, opts ...grpc.CallOption) (*GetNotificationChannelVerificationCodeResponse, error) {
out := new(GetNotificationChannelVerificationCodeResponse)
err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/GetNotificationChannelVerificationCode", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *notificationChannelServiceClient) VerifyNotificationChannel(ctx context.Context, in *VerifyNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error) {
out := new(NotificationChannel)
err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/VerifyNotificationChannel", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// NotificationChannelServiceServer is the server API for NotificationChannelService service.
// Implementations should embed UnimplementedNotificationChannelServiceServer for
// forward compatibility with methods added to the service in later versions.
type NotificationChannelServiceServer interface {
// Lists the descriptors for supported channel types. The use of descriptors
// makes it possible for new channel types to be dynamically added.
ListNotificationChannelDescriptors(context.Context, *ListNotificationChannelDescriptorsRequest) (*ListNotificationChannelDescriptorsResponse, error)
// Gets a single channel descriptor. The descriptor indicates which fields
// are expected / permitted for a notification channel of the given type.
GetNotificationChannelDescriptor(context.Context, *GetNotificationChannelDescriptorRequest) (*NotificationChannelDescriptor, error)
// Lists the notification channels that have been created for the project.
ListNotificationChannels(context.Context, *ListNotificationChannelsRequest) (*ListNotificationChannelsResponse, error)
// Gets a single notification channel. The channel includes the relevant
// configuration details with which the channel was created. However, the
// response may truncate or omit passwords, API keys, or other private key
// matter and thus the response may not be 100% identical to the information
// that was supplied in the call to the create method.
GetNotificationChannel(context.Context, *GetNotificationChannelRequest) (*NotificationChannel, error)
// Creates a new notification channel, representing a single notification
// endpoint such as an email address, SMS number, or PagerDuty service.
CreateNotificationChannel(context.Context, *CreateNotificationChannelRequest) (*NotificationChannel, error)
// Updates a notification channel. Fields not specified in the field mask
// remain unchanged.
UpdateNotificationChannel(context.Context, *UpdateNotificationChannelRequest) (*NotificationChannel, error)
// Deletes a notification channel.
DeleteNotificationChannel(context.Context, *DeleteNotificationChannelRequest) (*empty.Empty, error)
// Causes a verification code to be delivered to the channel. The code
// can then be supplied in `VerifyNotificationChannel` to verify the channel.
SendNotificationChannelVerificationCode(context.Context, *SendNotificationChannelVerificationCodeRequest) (*empty.Empty, error)
// Requests a verification code for an already verified channel that can then
// be used in a call to VerifyNotificationChannel() on a different channel
// with an equivalent identity in the same or in a different project. This
// makes it possible to copy a channel between projects without requiring
// manual reverification of the channel. If the channel is not in the
// verified state, this method will fail (in other words, this may only be
// used if the SendNotificationChannelVerificationCode and
// VerifyNotificationChannel paths have already been used to put the given
// channel into the verified state).
//
// There is no guarantee that the verification codes returned by this method
// will be of a similar structure or form as the ones that are delivered
// to the channel via SendNotificationChannelVerificationCode; while
// VerifyNotificationChannel() will recognize both the codes delivered via
// SendNotificationChannelVerificationCode() and returned from
// GetNotificationChannelVerificationCode(), it is typically the case that
// the verification codes delivered via
// SendNotificationChannelVerificationCode() will be shorter and also
// have a shorter expiration (e.g. codes such as "G-123456") whereas
// GetVerificationCode() will typically return a much longer, websafe base
// 64 encoded string that has a longer expiration time.
GetNotificationChannelVerificationCode(context.Context, *GetNotificationChannelVerificationCodeRequest) (*GetNotificationChannelVerificationCodeResponse, error)
// Verifies a `NotificationChannel` by proving receipt of the code
// delivered to the channel as a result of calling
// `SendNotificationChannelVerificationCode`.
VerifyNotificationChannel(context.Context, *VerifyNotificationChannelRequest) (*NotificationChannel, error)
}
// UnimplementedNotificationChannelServiceServer can be embedded to have forward compatible implementations.
// Every method returns a codes.Unimplemented gRPC status, so embedders only need
// to override the RPCs they actually support.
type UnimplementedNotificationChannelServiceServer struct {
}
func (*UnimplementedNotificationChannelServiceServer) ListNotificationChannelDescriptors(context.Context, *ListNotificationChannelDescriptorsRequest) (*ListNotificationChannelDescriptorsResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method ListNotificationChannelDescriptors not implemented")
}
func (*UnimplementedNotificationChannelServiceServer) GetNotificationChannelDescriptor(context.Context, *GetNotificationChannelDescriptorRequest) (*NotificationChannelDescriptor, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetNotificationChannelDescriptor not implemented")
}
func (*UnimplementedNotificationChannelServiceServer) ListNotificationChannels(context.Context, *ListNotificationChannelsRequest) (*ListNotificationChannelsResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method ListNotificationChannels not implemented")
}
func (*UnimplementedNotificationChannelServiceServer) GetNotificationChannel(context.Context, *GetNotificationChannelRequest) (*NotificationChannel, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetNotificationChannel not implemented")
}
func (*UnimplementedNotificationChannelServiceServer) CreateNotificationChannel(context.Context, *CreateNotificationChannelRequest) (*NotificationChannel, error) {
return nil, status.Errorf(codes.Unimplemented, "method CreateNotificationChannel not implemented")
}
func (*UnimplementedNotificationChannelServiceServer) UpdateNotificationChannel(context.Context, *UpdateNotificationChannelRequest) (*NotificationChannel, error) {
return nil, status.Errorf(codes.Unimplemented, "method UpdateNotificationChannel not implemented")
}
func (*UnimplementedNotificationChannelServiceServer) DeleteNotificationChannel(context.Context, *DeleteNotificationChannelRequest) (*empty.Empty, error) {
return nil, status.Errorf(codes.Unimplemented, "method DeleteNotificationChannel not implemented")
}
func (*UnimplementedNotificationChannelServiceServer) SendNotificationChannelVerificationCode(context.Context, *SendNotificationChannelVerificationCodeRequest) (*empty.Empty, error) {
return nil, status.Errorf(codes.Unimplemented, "method SendNotificationChannelVerificationCode not implemented")
}
func (*UnimplementedNotificationChannelServiceServer) GetNotificationChannelVerificationCode(context.Context, *GetNotificationChannelVerificationCodeRequest) (*GetNotificationChannelVerificationCodeResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetNotificationChannelVerificationCode not implemented")
}
func (*UnimplementedNotificationChannelServiceServer) VerifyNotificationChannel(context.Context, *VerifyNotificationChannelRequest) (*NotificationChannel, error) {
return nil, status.Errorf(codes.Unimplemented, "method VerifyNotificationChannel not implemented")
}
// RegisterNotificationChannelServiceServer registers the given service
// implementation with the gRPC server; call it before the server starts serving.
func RegisterNotificationChannelServiceServer(s *grpc.Server, srv NotificationChannelServiceServer) {
s.RegisterService(&_NotificationChannelService_serviceDesc, srv)
}
// The generated handlers below adapt each server method to grpc.MethodDesc form:
// decode the request into a typed message, then either call the implementation
// directly or route the call through the configured unary server interceptor.
func _NotificationChannelService_ListNotificationChannelDescriptors_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ListNotificationChannelDescriptorsRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(NotificationChannelServiceServer).ListNotificationChannelDescriptors(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.monitoring.v3.NotificationChannelService/ListNotificationChannelDescriptors",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(NotificationChannelServiceServer).ListNotificationChannelDescriptors(ctx, req.(*ListNotificationChannelDescriptorsRequest))
}
return interceptor(ctx, in, info, handler)
}
func _NotificationChannelService_GetNotificationChannelDescriptor_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(GetNotificationChannelDescriptorRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(NotificationChannelServiceServer).GetNotificationChannelDescriptor(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.monitoring.v3.NotificationChannelService/GetNotificationChannelDescriptor",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(NotificationChannelServiceServer).GetNotificationChannelDescriptor(ctx, req.(*GetNotificationChannelDescriptorRequest))
}
return interceptor(ctx, in, info, handler)
}
func _NotificationChannelService_ListNotificationChannels_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ListNotificationChannelsRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(NotificationChannelServiceServer).ListNotificationChannels(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.monitoring.v3.NotificationChannelService/ListNotificationChannels",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(NotificationChannelServiceServer).ListNotificationChannels(ctx, req.(*ListNotificationChannelsRequest))
}
return interceptor(ctx, in, info, handler)
}
func _NotificationChannelService_GetNotificationChannel_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(GetNotificationChannelRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(NotificationChannelServiceServer).GetNotificationChannel(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.monitoring.v3.NotificationChannelService/GetNotificationChannel",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(NotificationChannelServiceServer).GetNotificationChannel(ctx, req.(*GetNotificationChannelRequest))
}
return interceptor(ctx, in, info, handler)
}
func _NotificationChannelService_CreateNotificationChannel_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(CreateNotificationChannelRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(NotificationChannelServiceServer).CreateNotificationChannel(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.monitoring.v3.NotificationChannelService/CreateNotificationChannel",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(NotificationChannelServiceServer).CreateNotificationChannel(ctx, req.(*CreateNotificationChannelRequest))
}
return interceptor(ctx, in, info, handler)
}
func _NotificationChannelService_UpdateNotificationChannel_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(UpdateNotificationChannelRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(NotificationChannelServiceServer).UpdateNotificationChannel(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.monitoring.v3.NotificationChannelService/UpdateNotificationChannel",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(NotificationChannelServiceServer).UpdateNotificationChannel(ctx, req.(*UpdateNotificationChannelRequest))
}
return interceptor(ctx, in, info, handler)
}
func _NotificationChannelService_DeleteNotificationChannel_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(DeleteNotificationChannelRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(NotificationChannelServiceServer).DeleteNotificationChannel(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.monitoring.v3.NotificationChannelService/DeleteNotificationChannel",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(NotificationChannelServiceServer).DeleteNotificationChannel(ctx, req.(*DeleteNotificationChannelRequest))
}
return interceptor(ctx, in, info, handler)
}
func _NotificationChannelService_SendNotificationChannelVerificationCode_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(SendNotificationChannelVerificationCodeRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(NotificationChannelServiceServer).SendNotificationChannelVerificationCode(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.monitoring.v3.NotificationChannelService/SendNotificationChannelVerificationCode",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(NotificationChannelServiceServer).SendNotificationChannelVerificationCode(ctx, req.(*SendNotificationChannelVerificationCodeRequest))
}
return interceptor(ctx, in, info, handler)
}
func _NotificationChannelService_GetNotificationChannelVerificationCode_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(GetNotificationChannelVerificationCodeRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(NotificationChannelServiceServer).GetNotificationChannelVerificationCode(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.monitoring.v3.NotificationChannelService/GetNotificationChannelVerificationCode",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(NotificationChannelServiceServer).GetNotificationChannelVerificationCode(ctx, req.(*GetNotificationChannelVerificationCodeRequest))
}
return interceptor(ctx, in, info, handler)
}
func _NotificationChannelService_VerifyNotificationChannel_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(VerifyNotificationChannelRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(NotificationChannelServiceServer).VerifyNotificationChannel(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.monitoring.v3.NotificationChannelService/VerifyNotificationChannel",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(NotificationChannelServiceServer).VerifyNotificationChannel(ctx, req.(*VerifyNotificationChannelRequest))
}
return interceptor(ctx, in, info, handler)
}
// _NotificationChannelService_serviceDesc wires each RPC method name to its
// generated handler; it is the descriptor passed to grpc.Server.RegisterService.
var _NotificationChannelService_serviceDesc = grpc.ServiceDesc{
ServiceName: "google.monitoring.v3.NotificationChannelService",
HandlerType: (*NotificationChannelServiceServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "ListNotificationChannelDescriptors",
Handler: _NotificationChannelService_ListNotificationChannelDescriptors_Handler,
},
{
MethodName: "GetNotificationChannelDescriptor",
Handler: _NotificationChannelService_GetNotificationChannelDescriptor_Handler,
},
{
MethodName: "ListNotificationChannels",
Handler: _NotificationChannelService_ListNotificationChannels_Handler,
},
{
MethodName: "GetNotificationChannel",
Handler: _NotificationChannelService_GetNotificationChannel_Handler,
},
{
MethodName: "CreateNotificationChannel",
Handler: _NotificationChannelService_CreateNotificationChannel_Handler,
},
{
MethodName: "UpdateNotificationChannel",
Handler: _NotificationChannelService_UpdateNotificationChannel_Handler,
},
{
MethodName: "DeleteNotificationChannel",
Handler: _NotificationChannelService_DeleteNotificationChannel_Handler,
},
{
MethodName: "SendNotificationChannelVerificationCode",
Handler: _NotificationChannelService_SendNotificationChannelVerificationCode_Handler,
},
{
MethodName: "GetNotificationChannelVerificationCode",
Handler: _NotificationChannelService_GetNotificationChannelVerificationCode_Handler,
},
{
MethodName: "VerifyNotificationChannel",
Handler: _NotificationChannelService_VerifyNotificationChannel_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "google/monitoring/v3/notification_service.proto",
}
| googleinterns/knative-source-mongodb | vendor/google.golang.org/genproto/googleapis/monitoring/v3/notification_service.pb.go | GO | apache-2.0 | 101,661 |
/**
* This file is part of the Iritgo/Aktario Framework.
*
* Copyright (C) 2005-2011 Iritgo Technologies.
* Copyright (C) 2003-2005 BueroByte GbR.
*
* Iritgo licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package de.iritgo.aktario.framework.base.action;
import de.iritgo.aktario.core.network.ClientTransceiver;
import de.iritgo.aktario.framework.action.ActionTools;
import de.iritgo.aktario.framework.appcontext.AppContext;
import java.io.IOException;
/**
 * Action that answers a ping. It carries the originating ping timestamp so the
 * receiving side can record the measured round-trip time, and then sends a
 * {@link PingServerAction} back over the same channel.
 */
public class PingAction extends FrameworkAction
{
    // Wall-clock time (ms) at which this instance was constructed locally.
    // NOTE(review): only the PingAction(long) constructor sets this; after network
    // deserialization (readObject) it stays 0 — confirm the framework's intent.
    private long createTimeFromThis;

    // Timestamp (ms) carried by the ping this action responds to.
    private long pingTime;

    /**
     * No-arg constructor, used when the action is reconstructed from a stream.
     */
    public PingAction()
    {
        super(-1);
    }

    /**
     * Creates a ping action for the given ping timestamp and records the
     * current time as the creation time.
     *
     * @param pingTime the timestamp (ms) of the originating ping
     */
    public PingAction(long pingTime)
    {
        createTimeFromThis = System.currentTimeMillis();
        this.pingTime = pingTime;
    }

    /**
     * Reads the ping timestamp from the given stream.
     *
     * @param stream the stream to read from
     * @throws IOException if the stream cannot be read
     * @throws ClassNotFoundException if a serialized class cannot be resolved
     */
    @Override
    public void readObject(FrameworkInputStream stream) throws IOException, ClassNotFoundException
    {
        pingTime = stream.readLong();
    }

    /**
     * Writes the ping timestamp to the given stream.
     *
     * @param stream the stream to write to
     * @throws IOException if the stream cannot be written
     */
    @Override
    public void writeObject(FrameworkOutputStream stream) throws IOException
    {
        stream.writeLong(pingTime);
    }

    /**
     * Records the measured ping time on the current user and replies to the
     * server with a fresh {@link PingServerAction}.
     */
    @Override
    public void perform()
    {
        AppContext.instance().getUser().addPingTime(createTimeFromThis - pingTime);

        double channel = AppContext.instance().getChannelNumber();
        ClientTransceiver transceiver = new ClientTransceiver(channel);
        transceiver.addReceiver(channel);

        PingServerAction reply = new PingServerAction(System.currentTimeMillis());
        reply.setTransceiver(transceiver);
        ActionTools.sendToServer(reply);
    }
}
| iritgo/iritgo-aktario | aktario-framework/src/main/java/de/iritgo/aktario/framework/base/action/PingAction.java | Java | apache-2.0 | 2,259 |
package com.qlm.similitude.lsh;
import com.google.common.hash.HashFunction;
import com.google.common.hash.Hashing;
import com.google.common.io.BaseEncoding;
import java.io.Serializable;
import java.nio.charset.Charset;
import java.util.*;
/**
 * Takes an array of Strings and produces LSH (locality-sensitive hashing)
 * blocking keys, based on the number of hash functions and the number of
 * rows per band.
 * <p/>
 * NOTE: Must be Serializable for Spark.
 */
public class LshBlocking64Bit implements Serializable, LshBlockAsString {

  // Base hash used to turn input strings into 64-bit values; chosen in the constructor.
  private HashFunction hf;
  private static final HashFunction md5 = Hashing.md5();
  private static final BaseEncoding encoder = BaseEncoding.base64().omitPadding();
  private static final Charset UTF8 = Charset.defaultCharset();
  private final int numHashFunctions;
  private final int numRowsPerBand;
  // XOR masks forming the derived hash family; hashFunctions[i-1] backs function i.
  private final int[] hashFunctions;
  private final boolean shiftKey;
  private final boolean compressKey;

  /**
   * @param numHashFunctions number of minhash functions to derive
   * @param numRowsPerBand   number of minhash rows grouped into each band
   * @param shiftKey         when true, bands are built from a sliding window over
   *                         the minhash instead of disjoint groups
   * @param compressKey      when true, band strings are MD5-hashed and base64-encoded
   * @param hashAlgorithm    "SHA256" or "murmur3" (case-insensitive)
   * @throws IllegalArgumentException if {@code hashAlgorithm} is not supported
   */
  public LshBlocking64Bit(int numHashFunctions, int numRowsPerBand, boolean shiftKey, boolean compressKey, String hashAlgorithm) {
    this.compressKey = compressKey;
    this.shiftKey = shiftKey;
    this.numHashFunctions = numHashFunctions;
    this.numRowsPerBand = numRowsPerBand;
    hashFunctions = new int[numHashFunctions - 1];
    // Fixed seed so every JVM/executor derives the identical hash family.
    final Random random = new Random(63689);
    for (int i = 0; i < numHashFunctions - 1; i++) {
      hashFunctions[i] = random.nextInt() + 1;
    }
    if (hashAlgorithm.equalsIgnoreCase("SHA256")) {
      hf = Hashing.sha256();
    } else if (hashAlgorithm.equalsIgnoreCase("murmur3")) {
      hf = Hashing.murmur3_128();
    } else {
      // Previously an unrecognized algorithm silently left hf null, deferring the
      // failure to a NullPointerException on the first hash call; fail fast instead.
      throw new IllegalArgumentException("Unsupported hash algorithm: " + hashAlgorithm);
    }
  }

  /**
   * Produces the set of LSH blocking keys (as strings) for the given values.
   */
  @Override
  public Set<String> lsh(String...values) {
    return new HashSet<>(bandsToStrings(lsh(hashValues(values))));
  }

  /**
   * Generates a two dimensional array where the first dimension is the band and
   * the second dimension is the minhash rows for that band.
   *
   * @param values the hashed values to LSH
   * @return band-major array of minhash rows
   */
  public long[][] lsh(long[] values) {
    long[] minHash = minHash(values);
    int numBands;
    if (shiftKey) {
      // Sliding window: one band per window position over the minhash signature.
      numBands = numHashFunctions - numRowsPerBand + 1;
    } else {
      numBands = numHashFunctions / numRowsPerBand;
    }
    long[][] lsh = new long[numBands][numRowsPerBand];
    if (numBands == 1) {
      lsh[0] = minHash;
    } else if (shiftKey) {
      for (int i = 0; i <= numHashFunctions - numRowsPerBand; i++) {
        lsh[i] = Arrays.copyOfRange(minHash, i, numRowsPerBand + i);
      }
    } else {
      // Disjoint banding: consecutive groups of numRowsPerBand rows.
      long[] tmpHash = new long[numRowsPerBand];
      int row = 0;
      for (int i = 0; i < numHashFunctions; i++) {
        tmpHash[i % numRowsPerBand] = minHash[i];
        if (i % numRowsPerBand == numRowsPerBand - 1) {
          lsh[row++] = tmpHash;
          tmpHash = new long[numRowsPerBand];
        }
      }
    }
    return lsh;
  }

  /**
   * Generates a minHash signature for a given set of values.
   *
   * @param values the hashed values to minHash on
   * @return an array of longs where element i is the minimizing value under hash
   *         function i (element 0 is the minimum of the raw values themselves)
   */
  public long[] minHash(long[] values) {
    long[] minHash = new long[numHashFunctions];
    for (int i = 0; i < numHashFunctions; i++) {
      minHash[i] = minHashN(values, i);
    }
    return minHash;
  }

  /**
   * Hashes each input string to a 64-bit value with the configured base hash.
   *
   * @param values the String values to hash
   * @return an array of 64-bit hash values, parallel to the input
   */
  public long[] hashValues(String...values) {
    long[] valueHashes = new long[values.length];
    for (int i = 0; i < values.length; i++) {
      valueHashes[i] = hashString(values[i]);
    }
    return valueHashes;
  }

  private long hashString(String str) {
    return hf.hashString(str, UTF8).asLong();
  }

  /**
   * Gets the value achieving the minimum hash under the hash function designated
   * by {@code hashFunction}.
   *
   * @param values       the hashed values to minimize over
   * @param hashFunction the hash function number; {@code 0} minimizes the raw values
   * @return the value (not the hash) whose hash is minimal
   */
  protected long minHashN(long[] values, int hashFunction) {
    long min = Long.MAX_VALUE;
    long minVal = 0;
    long tmpVal;
    for (long value : values) {
      tmpVal = getHash(value, hashFunction);
      if (tmpVal < min) {
        min = tmpVal;
        minVal = value;
      }
    }
    return minVal;
  }

  /**
   * Gets a hash value given a certain hash function number.
   *
   * @param value        the 64-bit value to hash
   * @param hashFunction the hash function number to use
   * @return the derived hash of the given value
   */
  protected long getHash(long value, int hashFunction) {
    // For the first hash function, simply return the value.
    if (hashFunction == 0) {
      return value;
    }
    // BUG FIX: the original combined (value >>> n) | (value << (Integer.SIZE - n)),
    // i.e. it rotated a 64-bit long with a 32-bit word width, which is not a
    // rotation and loses/duplicates bits. Use a true 64-bit rotation.
    // NOTE: this changes generated keys; previously persisted blocking keys must
    // be regenerated if compared against new ones.
    return Long.rotateRight(value, hashFunction) ^ hashFunctions[hashFunction - 1];
  }

  /**
   * Converts each band of the LSH result into its string form.
   */
  List<String> bandsToStrings(long[][] lsh) {
    List<String> vals = new ArrayList<>();
    if (lsh != null) {
      for (long[] band : lsh) {
        vals.add(bandToString(band));
      }
    }
    return vals;
  }

  /**
   * Joins a band's hash codes as hex separated by '-'; when key compression is
   * enabled, the joined string is MD5-hashed and base64-encoded instead.
   */
  String bandToString(long[] hashCodes) {
    StringBuilder builder = new StringBuilder();
    if (hashCodes != null) {
      for (long i : hashCodes) {
        if (builder.length() > 0) {
          builder.append("-");
        }
        builder.append(Long.toHexString(i));
      }
    }
    String s = builder.toString();
    if (compressKey && s.length() > 0) {
      s = encoder.encode(md5.hashString(s, UTF8).asBytes());
    }
    return s;
  }
}
| engrean/similitude | lsh/src/main/java/com/qlm/similitude/lsh/LshBlocking64Bit.java | Java | apache-2.0 | 5,864 |
/*
* Copyright 2013-2014 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.nebhale.buildmonitor.web.resource;
import org.junit.Before;
import org.springframework.mock.web.MockHttpServletRequest;
import org.springframework.mock.web.MockHttpServletResponse;
import org.springframework.web.context.request.RequestContextHolder;
import org.springframework.web.context.request.ServletWebRequest;
/**
 * Base class for resource-assembler tests: before each test it binds a fresh
 * mock HTTP request/response pair to the current thread via
 * {@link RequestContextHolder}, so code under test can resolve a request context.
 */
public abstract class AbstractResourceAssemblerTest {

    @Before
    public final void requestContext() {
        ServletWebRequest webRequest =
            new ServletWebRequest(new MockHttpServletRequest(), new MockHttpServletResponse());
        RequestContextHolder.setRequestAttributes(webRequest);
    }

}
| nebhale/build-monitor | src/test/java/com/nebhale/buildmonitor/web/resource/AbstractResourceAssemblerTest.java | Java | apache-2.0 | 1,307 |
<?php
namespace php\gdx;
/**
* Class Clipboard
* @package php\gdx
*/
class Clipboard {
    /**
     * Private constructor: this stub class is not meant to be instantiated
     * directly (SDK documentation stub; the real implementation is provided
     * by the extension at runtime).
     */
    private function __construct() { }
    /**
     * Gets the current content of the clipboard if it contains text.
     *
     * @return string|null the clipboard content, or null when the clipboard
     *                     does not contain text
     */
    public function getContent() { }
    /**
     * Sets the content of the system clipboard.
     *
     * @param string $content the text to place on the clipboard
     */
    public function setContent($content) { }
}
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for ListContexts
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-dialogflow
# [START dialogflow_generated_dialogflow_v2_Contexts_ListContexts_async]
from google.cloud import dialogflow_v2
async def sample_list_contexts():
    """List all contexts for a session and print each one.

    Demonstrates ``ContextsAsyncClient.list_contexts``; replace
    ``"parent_value"`` with a real session resource name before running.
    """
    # Create a client
    client = dialogflow_v2.ContextsAsyncClient()

    # Initialize request argument(s)
    request = dialogflow_v2.ListContextsRequest(
        parent="parent_value",
    )

    # Make the request.
    # BUG FIX: on the async client, list_contexts is a coroutine method; it must
    # be awaited to obtain the async pager before iterating with `async for`.
    page_result = await client.list_contexts(request=request)

    # Handle the response
    async for response in page_result:
        print(response)
# [END dialogflow_generated_dialogflow_v2_Contexts_ListContexts_async]
| googleapis/python-dialogflow | samples/generated_samples/dialogflow_generated_dialogflow_v2_contexts_list_contexts_async.py | Python | apache-2.0 | 1,522 |
/*
* #%L
* GwtMaterial
* %%
* Copyright (C) 2015 - 2017 GwtMaterialDesign
* %%
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* #L%
*/
package gwt.material.design.client.ui;
import com.google.gwt.dom.client.Element;
import com.google.gwt.user.client.Timer;
import gwt.material.design.client.ui.base.MaterialWidgetTest;
import static gwt.material.design.jquery.client.api.JQuery.$;
/**
* Test case for Toasts
*
* @author kevzlou7979
*/
public class MaterialToastTest extends MaterialWidgetTest {
/**
 * Entry point that runs every toast check in sequence.
 */
public void init() {
checkToastStructure();
checkToastWithWidget();
checkToastWithStyling();
checkToastWithCallback();
checkMultipleToasts();
}
private void checkMultipleToasts() {
for (int i = 1; i <= 5; i++) {
MaterialToast.fireToast("test" + i);
}
Element toastContainer = $("body").find("#toast-container").asElement();
assertNotNull(toastContainer);
assertEquals(toastContainer.getChildCount(), 5);
// Check each toasts
for (int i = 0; i < 5; i++) {
Element toastElement = (Element) toastContainer.getChild(i);
assertEquals(toastElement.getInnerHTML(), "test" + (i + 1));
}
toastContainer.setInnerHTML("");
assertEquals(toastContainer.getChildCount(), 0);
}
private void checkToastWithCallback() {
final boolean[] isCallbackFired = new boolean[1];
new MaterialToast(() -> {
isCallbackFired[0] = true;
}).toast("callback", 1000);
Timer t = new Timer() {
@Override
public void run() {
assertTrue(isCallbackFired[0]);
}
};
t.schedule(1000);
Element toastContainer = $("body").find("#toast-container").asElement();
assertNotNull(toastContainer);
toastContainer.setInnerHTML("");
}
private void checkToastWithStyling() {
MaterialToast.fireToast("test", "rounded");
Element toastContainer = $("body").find("#toast-container").asElement();
assertNotNull(toastContainer);
assertEquals(toastContainer.getChildCount(), 1);
assertNotNull(toastContainer.getChild(0));
assertTrue(toastContainer.getChild(0) instanceof Element);
Element toastElement = (Element) toastContainer.getChild(0);
assertTrue(toastElement.hasClassName("rounded"));
toastContainer.setInnerHTML("");
}
private void checkToastWithWidget() {
MaterialLink link = new MaterialLink();
new MaterialToast(link).toast("test");
Element toastContainer = $("body").find("#toast-container").asElement();
assertNotNull(toastContainer);
assertEquals(toastContainer.getChildCount(), 1);
assertNotNull(toastContainer.getChild(0));
assertTrue(toastContainer.getChild(0) instanceof Element);
Element toastElement = (Element) toastContainer.getChild(0);
// Check the span text
assertEquals($(toastElement.getChild(0)).text(), "test");
// Check the added link to toast component
assertEquals(link.getElement(), toastElement.getChild(1));
toastContainer.setInnerHTML("");
}
private void checkToastStructure() {
MaterialToast.fireToast("test");
Element toastContainer = $("body").find("#toast-container").asElement();
assertNotNull(toastContainer);
assertEquals(toastContainer.getChildCount(), 1);
assertNotNull(toastContainer.getChild(0));
assertTrue(toastContainer.getChild(0) instanceof Element);
Element toastElement = (Element) toastContainer.getChild(0);
assertEquals(toastElement.getInnerHTML(), "test");
toastContainer.setInnerHTML("");
}
}
| guibertjulien/gwt-material | gwt-material/src/test/java/gwt/material/design/client/ui/MaterialToastTest.java | Java | apache-2.0 | 4,305 |
using System;
using System.Linq;
using System.Net;
using System.Net.Http;
using System.Runtime.InteropServices.ComTypes;
using System.Threading.Tasks;
using System.Web;
namespace WebMinder.Core
{
/// <summary>
/// Helpers for inspecting the current HTTP request: client IP resolution
/// (X_FORWARDED_FOR aware), private-range detection, URL reachability
/// checks, and IPv4 range membership.
/// </summary>
public static class RequestUtility
{
    /// <summary>Returns the client IP of the current HttpContext request.</summary>
    public static string GetCurrentIpAddress()
    {
        return GetClientIpAddress(GetRequest());
    }

    /// <summary>
    /// Resolves the originating client IP, preferring the last public address
    /// listed in the X_FORWARDED_FOR server variable over the immediate host
    /// address. Returns "0.0.0.0" on any failure (callers rely on this sentinel).
    /// </summary>
    public static string GetClientIpAddress(HttpRequestBase request)
    {
        try
        {
            var userHostAddress = request.UserHostAddress;

            // Attempt to parse. If it fails, we catch below and return "0.0.0.0".
            // Could use TryParse instead, but all exceptions are caught anyway.
            IPAddress.Parse(userHostAddress);

            var xForwardedFor = request.ServerVariables["X_FORWARDED_FOR"];
            if (string.IsNullOrEmpty(xForwardedFor))
                return userHostAddress;

            // Get a list of public ip addresses in the X_FORWARDED_FOR variable
            var publicForwardingIps = xForwardedFor.Split(',').Where(ip => !IsPrivateIpAddress(ip)).ToList();

            // If we found any, return the last one, otherwise return the user host address
            return publicForwardingIps.Any() ? publicForwardingIps.Last() : userHostAddress;
        }
        catch (Exception)
        {
            // Always return all zeroes for any failure (calling code expects it)
            return "0.0.0.0";
        }
    }

    /// <summary>
    /// Returns true when <paramref name="ipAddress"/> falls in an RFC 1918
    /// private block or the 169.254/16 link-local block.
    /// Throws (to the caller) if the string is not a valid IP.
    /// </summary>
    private static bool IsPrivateIpAddress(string ipAddress)
    {
        // http://en.wikipedia.org/wiki/Private_network
        // Private IP Addresses are:
        //  24-bit block: 10.0.0.0 through 10.255.255.255
        //  20-bit block: 172.16.0.0 through 172.31.255.255
        //  16-bit block: 192.168.0.0 through 192.168.255.255
        //  Link-local addresses: 169.254.0.0 through 169.254.255.255 (http://en.wikipedia.org/wiki/Link-local_address)
        var ip = IPAddress.Parse(ipAddress);
        var octets = ip.GetAddressBytes();

        var is24BitBlock = octets[0] == 10;
        if (is24BitBlock) return true; // Return to prevent further processing

        var is20BitBlock = octets[0] == 172 && octets[1] >= 16 && octets[1] <= 31;
        if (is20BitBlock) return true; // Return to prevent further processing

        var is16BitBlock = octets[0] == 192 && octets[1] == 168;
        if (is16BitBlock) return true; // Return to prevent further processing

        var isLinkLocalAddress = octets[0] == 169 && octets[1] == 254;
        return isLinkLocalAddress;
    }

    /// <summary>
    /// Issues a GET to <paramref name="url"/> and returns true on a success
    /// status code; failures are logged through <paramref name="logger"/>
    /// ("WARN", message) and reported as false.
    /// </summary>
    public static async Task<bool> UrlIsValid(string url, Action<string, string> logger)
    {
        try
        {
            // FIX: the original blocked on requestTask.Result inside a
            // ContinueWith; awaiting the request directly avoids the sync
            // block and the AggregateException wrapping. The client is now
            // also disposed.
            using (var client = new HttpClient())
            {
                var response = await client.GetAsync(url);
                response.EnsureSuccessStatusCode();
            }
            return true;
        }
        catch (Exception ex)
        {
            logger("WARN", "URL invalid :" + url + " exception: " + ex.ToString());
            return false;
        }
    }

    /// <summary>
    /// Wraps the current HttpContext request, or returns null when there is
    /// no ambient HTTP context (e.g. background threads).
    /// </summary>
    public static HttpRequestWrapper GetRequest()
    {
        if (HttpContext.Current == null) return null;
        var request = new HttpRequestWrapper(HttpContext.Current.Request);
        return request;
    }

    /// <summary>
    /// http://stackoverflow.com/questions/2138706/how-to-check-a-input-ip-fall-in-a-specific-ip-range
    /// Returns true when <paramref name="address"/> lies within the inclusive
    /// IPv4 range [<paramref name="startIpAddr"/>, <paramref name="endIpAddr"/>].
    /// </summary>
    /// <param name="startIpAddr">inclusive lower bound, dotted quad</param>
    /// <param name="endIpAddr">inclusive upper bound, dotted quad</param>
    /// <param name="address">address to test, dotted quad</param>
    /// <returns>true when the address is in range</returns>
    public static bool IsInRange(string startIpAddr, string endIpAddr, string address)
    {
        // BUG FIX: the original used BitConverter.ToInt32, so addresses at or
        // above 128.0.0.0 became negative and ranges spanning that boundary
        // compared incorrectly. ToUInt32 keeps the ordering unsigned.
        uint ipStart = BitConverter.ToUInt32(IPAddress.Parse(startIpAddr).GetAddressBytes().Reverse().ToArray(), 0);
        uint ipEnd = BitConverter.ToUInt32(IPAddress.Parse(endIpAddr).GetAddressBytes().Reverse().ToArray(), 0);
        uint ip = BitConverter.ToUInt32(IPAddress.Parse(address).GetAddressBytes().Reverse().ToArray(), 0);

        return ip >= ipStart && ip <= ipEnd;
    }
}
} | chrismckelt/WebMinder | Core/RequestUtility.cs | C# | apache-2.0 | 4,461 |
package com.funnyhatsoftware.spacedock.activity;
import android.app.Activity;
import android.app.ProgressDialog;
import android.content.Intent;
import android.content.SharedPreferences;
import android.os.AsyncTask;
import android.os.Bundle;
import android.preference.PreferenceManager;
import android.support.v4.app.Fragment;
import android.support.v4.app.FragmentPagerAdapter;
import android.view.Menu;
import android.view.MenuItem;
import android.widget.Toast;
import com.funnyhatsoftware.spacedock.DataHelper;
import com.funnyhatsoftware.spacedock.R;
import com.funnyhatsoftware.spacedock.SpaceDockApplication;
import com.funnyhatsoftware.spacedock.data.DataLoader;
import com.funnyhatsoftware.spacedock.data.Universe;
import com.funnyhatsoftware.spacedock.fragment.BrowseListFragment;
import com.funnyhatsoftware.spacedock.fragment.BrowseTwoPaneFragment;
import com.funnyhatsoftware.spacedock.fragment.ManageSquadsFragment;
import org.xml.sax.SAXException;
import java.io.BufferedInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.net.MalformedURLException;
import java.net.URL;
import java.net.URLConnection;
import java.net.UnknownHostException;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import javax.xml.parsers.ParserConfigurationException;
public class RootTabActivity extends FragmentTabActivity implements
ManageSquadsFragment.SquadSelectListener {
private boolean mTwoPane;
private boolean checkedVersion = false;
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
mTwoPane = getResources().getBoolean(R.bool.use_two_pane);
if (getIntent().getData() != null) {
DataHelper.loadUniverseDataFromUri(this, getIntent().getData());
}
SpaceDockApplication app = (SpaceDockApplication)getApplication();
if (savedInstanceState == null) {
checkForUpdates();
}
}
@Override
public void onSaveInstanceState(Bundle savedInstanceState) {
super.onSaveInstanceState(savedInstanceState);
// Save UI state changes to the savedInstanceState.
// This bundle will be passed to onCreate if the process is
// killed and restarted.
savedInstanceState.putBoolean("checkedVersion", checkedVersion);
}
@Override
public void onRestoreInstanceState(Bundle savedInstanceState) {
super.onRestoreInstanceState(savedInstanceState);
// Save UI state changes to the savedInstanceState.
// This bundle will be passed to onCreate if the process is
// killed and restarted.
checkedVersion = savedInstanceState.getBoolean("checkedVersion");
}
@Override
protected void onPause() {
super.onPause();
DataHelper.saveUniverseData(this);
}
@Override
public boolean onCreateOptionsMenu(Menu menu) {
super.onCreateOptionsMenu(menu);
getMenuInflater().inflate(R.menu.menu_root, menu);
return true;
}
@Override
public boolean onPrepareOptionsMenu(Menu menu) {
if (Universe.getUniverse().updateAvailable) {
menu.findItem(R.id.menu_update).setVisible(false);
menu.findItem(R.id.menu_loadupdate).setVisible(true);
} else {
menu.findItem(R.id.menu_update).setVisible(true);
menu.findItem(R.id.menu_loadupdate).setVisible(false);
}
return super.onPrepareOptionsMenu(menu);
}
@Override
public boolean onOptionsItemSelected(MenuItem item) {
final int itemId = item.getItemId();
if (itemId == R.id.menu_settings) {
startActivity(new Intent(this, SettingsActivity.class));
return true;
}
if (itemId == R.id.menu_loadupdate) {
Universe.getUniverse().installUpdate(getApplicationContext(), this);
return true;
}
if (itemId == R.id.menu_update) {
checkForUpdates(true,false);
return true;
}
return super.onOptionsItemSelected(item);
}
@Override
protected FragmentPagerAdapter createPagerAdapter() {
return new FragmentPagerAdapter(getSupportFragmentManager()) {
String[] mTitles = getResources().getStringArray(R.array.root_tab_labels);
@Override
public CharSequence getPageTitle(int position) {
return mTitles[position];
}
@Override
public Fragment getItem(int i) {
if (i == 0) {
return new ManageSquadsFragment();
} else {
return mTwoPane ? new BrowseTwoPaneFragment() : new BrowseListFragment();
}
}
@Override
public int getCount() {
return 2;
}
};
}
@Override
public void onSquadSelected(String squadUuid) {
startActivity(SquadTabActivity.getIntent(this, squadUuid));
}
public void checkForUpdates() {
checkForUpdates(false,true);
}
public void checkForUpdates(boolean force) {
checkForUpdates(force, true);
}
public void checkForUpdates(boolean force, boolean hidden) {
SharedPreferences sharedPrefs = PreferenceManager.getDefaultSharedPreferences(getApplicationContext());
SpaceDockApplication.loadSetPreferences(getApplicationContext());
if (sharedPrefs.getBoolean("pref_key_check_updates",true)) {
if (!checkedVersion) {
VersionCheck versionCheck = new VersionCheck();
versionCheck.setActivity(this);
versionCheck.hidden = hidden;
versionCheck.execute();
}
} else if (force) {
checkedVersion = false;
VersionCheck versionCheck = new VersionCheck();
versionCheck.setActivity(this);
versionCheck.hidden = hidden;
versionCheck.execute();
}
}
public void updateAvailable() {
if (!checkedVersion && Universe.getUniverse().updateAvailable) {
Toast.makeText(getApplicationContext(),"Game Data Update Available",Toast.LENGTH_LONG).show();
}
checkedVersion = true;
}
private class VersionCheck extends AsyncTask<String,String,String> {
private RootTabActivity _activity = null;
public boolean hidden = true;
ProgressDialog pd;
public void setActivity(RootTabActivity activity) {
_activity = activity;
}
@Override
protected void onCancelled() {
if (pd != null) {
pd.dismiss();
}
}
@Override
protected void onPreExecute() {
if (_activity != null) {
if (!hidden) {
pd = new ProgressDialog(_activity);
pd.setMessage("Checking for Updated Game Data");
pd.show();
}
}
super.onPreExecute();
}
@Override
protected String doInBackground(String... strings) {
Universe universe = Universe.getUniverse();
String newVersion = null;
try {
URL url = new URL("http://spacedockapp.org/DataVersion.php");
URLConnection urlConnection = url.openConnection();
InputStream in = new BufferedInputStream(urlConnection.getInputStream());
DataLoader loader = new DataLoader(universe, in);
loader.versionOnly = true;
loader.load();
newVersion = loader.dataVersion;
} catch (MalformedURLException e) {
universe.updateAvailable = false;
} catch (UnknownHostException e) {
return null;
} catch (IOException e) {
universe.updateAvailable = false;
e.printStackTrace();
} catch (SAXException e) {
universe.updateAvailable = false;
e.printStackTrace();
} catch (ParserConfigurationException e) {
universe.updateAvailable = false;
e.printStackTrace();
}
return newVersion;
}
@Override
protected void onPostExecute(String result) {
Universe universe = Universe.getUniverse();
if (universe.getVersion() != null && result != null && result.compareTo(universe.getVersion()) > 0) {
universe.updateAvailable = true;
_activity.updateAvailable();
} else {
universe.updateAvailable = false;
}
if (pd != null) {
pd.dismiss();
if (_activity != null && result != null) {
Toast.makeText(_activity.getApplicationContext(), "Game Data is Up to Date", Toast.LENGTH_LONG).show();
} else if (_activity != null && result == null) {
Toast.makeText(_activity.getApplicationContext(), "Unable to connect to Space Dock sever at this time. Please try again later.", Toast.LENGTH_LONG).show();
}
}
}
}
}
| spacedockapp/spacedock | android/app/src/main/java/com/funnyhatsoftware/spacedock/activity/RootTabActivity.java | Java | apache-2.0 | 9,376 |
<?php
/**
* ShopEx licence
*
* @copyright Copyright (c) 2005-2010 ShopEx Technologies Inc. (http://www.shopex.cn)
* @license http://ecos.shopex.cn/ ShopEx License
*
*
*/
class wap_errorpage_get
{
    /**
     * Fetch a configuration value from the wap app.
     *
     * @param string $key configuration key; falsy keys are rejected
     * @return mixed the configuration value, or false when no key is given
     */
    public function getConf($key='') {
        if (!$key) {
            return false;
        }
        return app::get('wap')->getConf($key);
    }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.myfaces.trinidadinternal.renderkit.core.xhtml;
import org.apache.myfaces.trinidad.bean.FacesBean;
import org.apache.myfaces.trinidad.component.core.input.CoreSelectOneChoice;
/**
 * Renderer for the selectOneChoice component: wraps the simple choice
 * renderer with label-and-message decoration.
 */
public class SelectOneChoiceRenderer extends InputLabelAndMessageRenderer
{
  /** Delegate that renders the actual choice control markup. */
  private SimpleSelectOneChoiceRenderer _simpleRenderer;

  /**
   * Creates a renderer bound to the standard CoreSelectOneChoice type.
   */
  public SelectOneChoiceRenderer()
  {
    this(CoreSelectOneChoice.TYPE);
  }

  /**
   * Creates a renderer bound to a custom component type.
   */
  protected SelectOneChoiceRenderer(FacesBean.Type type)
  {
    super(type);
  }

  @Override
  protected void findTypeConstants(FacesBean.Type type)
  {
    super.findTypeConstants(type);
    this._simpleRenderer = new SimpleSelectOneChoiceRenderer(type);
  }

  @Override
  protected String getRootStyleClass(FacesBean bean)
  {
    return "af|selectOneChoice";
  }

  @Override
  protected FormInputRenderer getFormInputRenderer()
  {
    return this._simpleRenderer;
  }
}
| adamrduffy/trinidad-1.0.x | trinidad-impl/src/main/java/org/apache/myfaces/trinidadinternal/renderkit/core/xhtml/SelectOneChoiceRenderer.java | Java | apache-2.0 | 1,736 |
package mq
import "github.com/NeowayLabs/wabbit"
// internalProducer describes a producer for internal usage.
// It summarizes the lifecycle shared by the sync and async producers:
// init starts the producer, setChannel swaps in a new channel
// (presumably after reconnect — confirm against callers), and Stop
// shuts the producer down.
type internalProducer interface {
	init()
	setChannel(channel wabbit.Channel)
	Stop()
}
// newInternalProducer picks the concrete producer implementation from the
// configuration: a synchronous producer when config.Sync is set, otherwise
// an asynchronous one.
func newInternalProducer(channel wabbit.Channel, errorChannel chan<- error, config ProducerConfig) internalProducer {
	if !config.Sync {
		return newAsyncProducer(channel, errorChannel, config)
	}

	return newSyncProducer(channel, errorChannel, config)
}
| cheshir/go-mq | internal_producer.go | GO | apache-2.0 | 501 |
/**
* Copyright 2011 Vecna Technologies, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You may
* obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package com.vecna.dbDiff.builder;
import java.sql.DatabaseMetaData;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.ForkJoinPool;
import java.util.concurrent.Future;
import com.google.common.base.Function;
import com.google.common.collect.ArrayListMultimap;
import com.google.common.collect.Collections2;
import com.google.common.collect.Lists;
import com.google.common.collect.Multimap;
import com.vecna.dbDiff.jdbc.MetadataFactory;
import com.vecna.dbDiff.model.CatalogSchema;
import com.vecna.dbDiff.model.ColumnType;
import com.vecna.dbDiff.model.TableType;
import com.vecna.dbDiff.model.db.Column;
import com.vecna.dbDiff.model.db.ForeignKey;
import com.vecna.dbDiff.model.relationalDb.InconsistentSchemaException;
import com.vecna.dbDiff.model.relationalDb.RelationalDatabase;
import com.vecna.dbDiff.model.relationalDb.RelationalIndex;
import com.vecna.dbDiff.model.relationalDb.RelationalTable;
/**
* Builds a {@link RelationalDatabase} representation of a live database schema.
*
* @author dlopuch@vecna.com
* @author ogolberg@vecna.com
*/
public class RelationalDatabaseBuilderImpl implements RelationalDatabaseBuilder {
  private final MetadataFactory m_metadataFactory;

  // NOTE(review): this pool is never shut down; fine for a short-lived
  // process, but confirm if this class is ever used from a long-lived service.
  private ExecutorService m_executor = new ForkJoinPool();

  /**
   * Execute multiple tasks in parallel (scaling to the number of available cores). If an exception is thrown by one of the tasks, it is converted as specified below.
   * @param <T> task return type.
   * @param tasks tasks to execute.
   * @throws RelationalDatabaseReadException if one of the tasks throws a {@link SQLException} or a {@link RelationalDatabaseReadException}.
   * @throws InconsistentSchemaException if one of the tasks throws an {@link InconsistentSchemaException}.
   * @throws RuntimeException if one of the tasks throws any other exception.
   */
  private <T> void runInParallel(Collection<? extends Callable<T>> tasks) throws RelationalDatabaseReadException, InconsistentSchemaException, RuntimeException {
    Collection<Future<T>> futures;
    try {
      futures = m_executor.invokeAll(tasks);
    } catch (InterruptedException e) {
      throw new IllegalStateException(e);
    }
    // Unwrap each task's outcome, translating checked SQL failures.
    for (Future<T> future : futures) {
      try {
        future.get();
      } catch (ExecutionException e) {
        Throwable cause = e.getCause();
        // RuntimeExceptions (including RelationalDatabaseReadException and
        // InconsistentSchemaException, per the javadoc) are rethrown as-is.
        if (cause instanceof RuntimeException) {
          throw (RuntimeException) cause;
        } else if (cause instanceof SQLException) {
          throw new RelationalDatabaseReadException(cause);
        } else {
          throw new RuntimeException(e);
        }
      } catch (InterruptedException e) {
        throw new IllegalStateException(e);
      }
    }
  }

  /**
   * Constructor that sets metadata based on a JDBC connection
   * @param metadataFactory a {@link MetadataFactory}.
   */
  public RelationalDatabaseBuilderImpl(MetadataFactory metadataFactory) {
    m_metadataFactory = metadataFactory;
  }

  /**
   * Retrieve all tables from a schema.
   * @param catalogSchema catalog/schema.
   * @return the tables.
   * @throws SQLException if thrown by the jdbc driver.
   */
  private List<RelationalTable> getTables(final CatalogSchema catalogSchema) throws SQLException {
    // Get the ResultSet of tables (ordinary tables only, no views etc.)
    String[] tableTypes = {TableType.TABLE.name()};
    ResultSet rs = doGetTablesQuery(catalogSchema, tableTypes);

    // Build a set of Tables.
    // getTables() columns: 1=TABLE_CAT, 2=TABLE_SCHEM, 3=TABLE_NAME, 4=TABLE_TYPE.
    // NOTE(review): column 5 of getTables is REMARKS per the JDBC spec —
    // confirm that feeding it to setTypeName is intentional.
    List<RelationalTable> tables = new ArrayList<RelationalTable>();
    while (rs.next()) {
      RelationalTable table = new RelationalTable(new CatalogSchema(rs.getString(1), rs.getString(2)), rs.getString(3));
      table.setType(rs.getString(4));
      table.setTypeName(rs.getString(5));
      tables.add(table);
    }
    return tables;
  }

  /**
   * Performs a metaData.getTables() query.
   * @param catalogSchema the desired catalog and schema names.
   * @param tableTypes the desired table types, specific for the particular implementation.
   * @return The ResultSet of the getTables() call.
   * @throws SQLException if thrown by the jdbc driver.
   */
  protected ResultSet doGetTablesQuery(CatalogSchema catalogSchema, String[] tableTypes) throws SQLException {
    return m_metadataFactory.getMetadata().getTables(catalogSchema.getCatalog(), catalogSchema.getSchema(), null, tableTypes);
  }

  /**
   * Retrieve column information for a table.
   * @param table the table.
   * @return ordered list of columns.
   * @throws SQLException if thrown by the jdbc driver.
   */
  private List<Column> getColumns(RelationalTable table) throws SQLException {
    ResultSet columnResultSet = m_metadataFactory.getMetadata().getColumns(table.getCatalogSchema().getCatalog(), table.getCatalogSchema().getSchema(), table.getName(), null);
    List<Column> columns = new LinkedList<Column>();
    // getColumns() columns used here (JDBC spec): 1=TABLE_CAT, 2=TABLE_SCHEM,
    // 3=TABLE_NAME, 4=COLUMN_NAME, 5=DATA_TYPE, 6=TYPE_NAME, 7=COLUMN_SIZE,
    // 11=NULLABLE, 13=COLUMN_DEF, 17=ORDINAL_POSITION.
    while (columnResultSet.next()) {
      Column column = new Column(columnResultSet.getString(1), columnResultSet.getString(2),
          columnResultSet.getString(4), columnResultSet.getString(3));
      column.setColumnType(new ColumnType(columnResultSet.getInt(5), columnResultSet.getString(6)));
      column.setColumnSize(columnResultSet.getInt(7));
      // Nullability: tri-state — true/false when the driver knows, null when
      // it reports columnNullableUnknown.
      int nullable = columnResultSet.getInt(11);
      column.setIsNullable((DatabaseMetaData.columnNullable == nullable ? true
                            : (DatabaseMetaData.columnNoNulls == nullable ? false : null)));
      column.setDefault(columnResultSet.getString(13));
      column.setOrdinal(columnResultSet.getInt(17));
      columns.add(column);
    }
    return columns;
  }

  /**
   * Retrieve foreign keys for a table.
   * @param table table.
   * @return ordered list of foreign keys.
   * @throws SQLException if thrown by the jdbc driver.
   */
  private List<ForeignKey> getForeignKeys(RelationalTable table) throws SQLException {
    ResultSet fkResultSet = m_metadataFactory.getMetadata().getImportedKeys(table.getCatalogSchema().getCatalog(), table.getCatalogSchema().getSchema(), table.getName());
    List<ForeignKey> fks = new LinkedList<ForeignKey>();
    // getImportedKeys() columns (JDBC spec): 1-4 = PK side cat/schem/table/column,
    // 5-8 = FK side cat/schem/table/column, 9=KEY_SEQ, 12=FK_NAME.
    while (fkResultSet.next()) {
      ForeignKey fk = new ForeignKey();
      fk.setFkName(fkResultSet.getString(12));
      fk.setFkCatalogSchema(new CatalogSchema(fkResultSet.getString(5), fkResultSet.getString(6)));
      fk.setFkTable(fkResultSet.getString(7));
      fk.setFkColumn(fkResultSet.getString(8));
      fk.setPkCatalogSchema(new CatalogSchema(fkResultSet.getString(1), fkResultSet.getString(2)));
      fk.setPkTable(fkResultSet.getString(3));
      fk.setPkColumn(fkResultSet.getString(4));
      fk.setKeySeq(fkResultSet.getString(9));
      fks.add(fk);
    }
    return fks;
  }

  /**
   * Retrieve index information for a table.
   * @param table the table.
   * @return list of indices.
   * @throws SQLException if thrown by the jdbc driver.
   */
  private List<RelationalIndex> getIndices(RelationalTable table) throws SQLException {
    List<RelationalIndex> indices = new ArrayList<>();

    // maps index name to column names
    Multimap<String, String> idxColumns = ArrayListMultimap.create();

    // one row per index-column pair
    ResultSet rs = m_metadataFactory.getMetadata().getIndexInfo(table.getCatalogSchema().getCatalog(),
                                                                table.getCatalogSchema().getSchema(),
                                                                table.getName(), false, false);

    while (rs.next()) {
      String idxName = rs.getString(6);
      // Multimap.get returns a live view: adding to `columns` below inserts
      // into idxColumns, so emptiness marks the first row of a new index.
      Collection<String> columns = idxColumns.get(idxName);
      if (columns.isEmpty()) {
        // build a new index
        RelationalIndex index = new RelationalIndex(table.getCatalogSchema(), rs.getString(6));
        indices.add(index);
      }
      columns.add(rs.getString(9));
    }

    for (RelationalIndex index : indices) {
      // NOTE(review): idxColumns.size() is the total number of
      // (index, column) pairs, not this index's column count — harmless
      // overallocation, but possibly not what was intended.
      List<Column> columns = new ArrayList<>(idxColumns.size());

      for (String idxColumnName : idxColumns.get(index.getName())) {
        // Some db preserved names are double-quoted
        String columnName = idxColumnName.replaceAll("^\"|\"$", "");
        Column column = table.getColumnByName(columnName);
        if (column == null) {
          throw new InconsistentSchemaException("cannot find column " + columnName + " referenced by index " + index.getName() + " in table " + table.getName());
        }
        columns.add(column);
      }
      index.setColumns(columns);
    }

    return indices;
  }

  /**
   * Retrieve primary key information for a table.
   * @param table the table.
   * @return ordered list of primary key column names.
   * @throws SQLException if thrown by the jdbc driver.
   */
  private List<String> getPrimaryKeyColumns(RelationalTable table) throws SQLException {
    // TreeMap keyed by KEY_SEQ (column 5) orders the PK columns; column 4 is
    // COLUMN_NAME per the JDBC getPrimaryKeys() spec.
    Map<Short, String> primaryKeys = new TreeMap<>();
    ResultSet rs = m_metadataFactory.getMetadata().getPrimaryKeys(table.getCatalogSchema().getCatalog(), table.getCatalogSchema().getSchema(), table.getName());
    while (rs.next()) {
      primaryKeys.put(rs.getShort(5), rs.getString(4));
    }
    return Lists.newArrayList(primaryKeys.values());
  }

  @Override
  public RelationalDatabase createRelationalDatabase(CatalogSchema catalogSchema) {
    // Grab all the tables first (serially), then enrich each table in parallel.
    List<RelationalTable> tables;
    try {
      tables = getTables(catalogSchema);
    } catch (SQLException e) {
      throw new RelationalDatabaseReadException("could not read table information", e);
    }

    // build columns, foreign and primary keys in parallel — one task per table;
    // each task touches only its own table, so no cross-task synchronization
    // is needed beyond invokeAll/get.
    runInParallel(Collections2.transform(tables, new Function<RelationalTable, Callable<Void>>() {
      @Override
      public Callable<Void> apply(final RelationalTable table) {
        return new Callable<Void>() {
          @Override
          public Void call() throws Exception {
            table.setColumns(getColumns(table));
            table.setFks(new HashSet<>(getForeignKeys(table)));
            table.setPkColumns(getPrimaryKeyColumns(table));
            // Indices must come after columns: getIndices resolves column
            // names through table.getColumnByName.
            table.setIndices(getIndices(table));
            return null;
          }
        };
      }
    }));

    return new RelationalDatabase(tables);
  }
}
} | vecnatechnologies/dbDiff | core/src/main/java/com/vecna/dbDiff/builder/RelationalDatabaseBuilderImpl.java | Java | apache-2.0 | 11,089 |
"""
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 1.10.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'e9p$k7urj^zl)s-!nmprv1#8z-@m@d6a76j=m9z03#gb!%lf6='
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'haystack',
'blog',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'mysite/templates'),
os.path.join(BASE_DIR, 'blog/templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'zh-hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (BASE_DIR,"static")
LOGIN_REDIRECT_URL = '/'
HAYSTACK_CONNECTIONS={
'default': {
'ENGINE': 'blog.whoosh_cn_backend.WhooshEngine',
'PATH': os.path.join(BASE_DIR, 'whoosh_index'),
}
}
HAYSTACK_SIGNAL_PROCESSOR = 'haystack.signals.RealtimeSignalProcessor' | kleinzh/PythonBlog | mysite/mysite/settings.py | Python | apache-2.0 | 3,558 |
using System;
using System.Collections.Generic;
using System.Linq;
using System.Net;
using System.Net.Http;
using System.Web.Http;
namespace SampleServer.Controllers
{
/// <summary>
/// Scaffolded Web API sample controller exposing the standard CRUD verb
/// stubs at api/values. Only the two GET actions return data; POST, PUT and
/// DELETE are empty placeholders.
/// </summary>
public class ValuesController : ApiController
{
    // GET api/values
    /// <summary>Returns a fixed pair of placeholder values.</summary>
    public IEnumerable<string> Get()
    {
        return new string[] { "value1", "value2" };
    }

    // GET api/values/5
    /// <summary>Returns a placeholder value; the id is ignored.</summary>
    public string Get(int id)
    {
        return "value";
    }

    // POST api/values
    /// <summary>Placeholder: accepts a value from the request body and does nothing.</summary>
    public void Post([FromBody]string value)
    {
    }

    // PUT api/values/5
    /// <summary>Placeholder: accepts an id and a body value and does nothing.</summary>
    public void Put(int id, [FromBody]string value)
    {
    }

    // DELETE api/values/5
    /// <summary>Placeholder: accepts an id and does nothing.</summary>
    public void Delete(int id)
    {
    }
}
| onovotny/dynamodb-geo-csharp | SampleServer/Controllers/ValuesController.cs | C# | apache-2.0 | 779 |
/*
* Copyright 2014 CELAR.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package gr.ntua.cslab.orchestrator.cache;
/**
* This file represent the basic interface that each cache mechanism should
* have. Basically each implementing subclass must have a mechanism to flush
* their data into the disk (a flat file or a database).
* @author Giannis Giannakopoulos
*/
public interface AbstractCache {

    /**
     * This method serializes the cache object in the disk. This method is implemented
     * differently by each subclass, according to the data that they hold
     * (e.g. a flat file or a database).
     * <br/>
     * The objective of this method is to free the RAM from the in-memory data.
     */
    public void flush();
}
| CELAR/app-orchestrator | orchestrator-daemon/src/main/java/gr/ntua/cslab/orchestrator/cache/AbstractCache.java | Java | apache-2.0 | 1,233 |
(function () {
    'use strict';

    /**
     * Configures ui-bootstrap's pager defaults: page size comes from the
     * app-wide pagination constants, and the nav labels use guillemets.
     */
    function pagerConfig(uibPagerConfig, paginationConstants) {
        uibPagerConfig.itemsPerPage = paginationConstants.itemsPerPage;
        uibPagerConfig.previousText = '«';
        uibPagerConfig.nextText = '»';
    }

    // Explicit DI annotation so the config survives minification.
    pagerConfig.$inject = ['uibPagerConfig', 'paginationConstants'];

    angular
        .module('wordsearchApp')
        .config(pagerConfig);
})();
| hillwater/wordsearch | src/main/webapp/app/blocks/config/uib-pager.config.js | JavaScript | apache-2.0 | 412 |
package com.nilhcem.devfestnantes.data.database;
import android.os.Build;
import com.nilhcem.devfestnantes.BuildConfig;
import com.nilhcem.devfestnantes.core.moshi.LocalDateTimeAdapter;
import com.nilhcem.devfestnantes.data.app.AppMapper;
import com.nilhcem.devfestnantes.data.app.model.Room;
import com.nilhcem.devfestnantes.data.app.model.Session;
import com.nilhcem.devfestnantes.data.app.model.Speaker;
import com.squareup.moshi.Moshi;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Mock;
import org.mockito.MockitoAnnotations;
import org.robolectric.RobolectricTestRunner;
import org.robolectric.annotation.Config;
import org.threeten.bp.LocalDateTime;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import static com.google.common.truth.Truth.assertThat;
import static java.util.Collections.singletonList;
import static org.mockito.Mockito.when;
@RunWith(RobolectricTestRunner.class)
@Config(constants = BuildConfig.class, sdk = Build.VERSION_CODES.LOLLIPOP)
public class DbMapperTest {

    private DbMapper dbMapper;

    private final Moshi moshi = new Moshi.Builder().build();
    private final AppMapper appMapper = new AppMapper();
    private final LocalDateTime now = LocalDateTime.now();

    @Mock LocalDateTimeAdapter adapter;

    @Before
    public void setup() {
        MockitoAnnotations.initMocks(this);
        dbMapper = new DbMapper(moshi, appMapper, adapter);
    }

    @Test
    public void should_convert_db_sessions_to_app_sessions() {
        // Given: a DB session whose "now" text the adapter resolves, plus a
        // speakers lookup containing the referenced speaker id 1.
        when(adapter.fromText("now")).thenReturn(now);
        com.nilhcem.devfestnantes.data.database.model.Session dbSession =
                new com.nilhcem.devfestnantes.data.database.model.Session(2, "now", 10, 3, "[1]", "title", "description");
        Speaker expectedSpeaker = new Speaker(1, "name", null, null, null, null, null, null);
        Map<Integer, Speaker> speakersById = new HashMap<>();
        speakersById.put(1, expectedSpeaker);

        // When
        List<Session> converted = dbMapper.toAppSessions(singletonList(dbSession), speakersById);

        // Then
        assertThat(converted).hasSize(1);
        Session first = converted.get(0);
        assertThat(first.getId()).isEqualTo(2);
        assertThat(first.getFromTime()).isEqualTo(now);
        assertThat(first.getTitle()).isEqualTo("title");
        assertThat(first.getDescription()).isEqualTo("description");
        assertThat(first.getSpeakers().get(0)).isEqualTo(expectedSpeaker);
    }

    @Test
    public void should_convert_app_session_to_db_session() {
        // Given: an app session in the "none" room with one speaker.
        Speaker speaker7 = new Speaker(7, null, null, null, null, null, null, null);
        Session appSession = new Session(11, Room.NONE.label, singletonList(speaker7),
                "title", "description", now, now.plusMinutes(45));

        // When
        com.nilhcem.devfestnantes.data.database.model.Session dbSession = dbMapper.fromAppSession(appSession);

        // Then
        assertThat(dbSession.id).isEqualTo(11);
        assertThat(dbSession.roomId).isEqualTo(Room.NONE.id);
        assertThat(dbSession.speakersIds).isEqualTo("[7]");
        assertThat(dbSession.title).isEqualTo("title");
        assertThat(dbSession.description).isEqualTo("description");
    }

    @Test
    public void should_convert_app_speaker_to_db_speaker() {
        // Given
        Speaker appSpeaker = new Speaker(10, "name", "title", "bio", "website", "twitter", "github", "photo");

        // When
        com.nilhcem.devfestnantes.data.database.model.Speaker dbSpeaker = dbMapper.fromAppSpeaker(appSpeaker);

        // Then: every field is copied across one-to-one.
        assertThat(dbSpeaker.id).isEqualTo(10);
        assertThat(dbSpeaker.name).isEqualTo("name");
        assertThat(dbSpeaker.title).isEqualTo("title");
        assertThat(dbSpeaker.bio).isEqualTo("bio");
        assertThat(dbSpeaker.website).isEqualTo("website");
        assertThat(dbSpeaker.twitter).isEqualTo("twitter");
        assertThat(dbSpeaker.github).isEqualTo("github");
        assertThat(dbSpeaker.photo).isEqualTo("photo");
    }

    @Test
    public void should_convert_db_speakers_to_app_speakers() {
        // Given
        com.nilhcem.devfestnantes.data.database.model.Speaker dbSpeaker =
                new com.nilhcem.devfestnantes.data.database.model.Speaker(58, "nilh", "dev", "bio", "nilhcem.com", "Nilhcem", "nilhcem", "photo");

        // When
        List<Speaker> converted = dbMapper.toAppSpeakers(singletonList(dbSpeaker));

        // Then: every field is copied across one-to-one.
        assertThat(converted).hasSize(1);
        Speaker first = converted.get(0);
        assertThat(first.getId()).isEqualTo(58);
        assertThat(first.getName()).isEqualTo("nilh");
        assertThat(first.getTitle()).isEqualTo("dev");
        assertThat(first.getBio()).isEqualTo("bio");
        assertThat(first.getWebsite()).isEqualTo("nilhcem.com");
        assertThat(first.getTwitter()).isEqualTo("Nilhcem");
        assertThat(first.getGithub()).isEqualTo("nilhcem");
        assertThat(first.getPhoto()).isEqualTo("photo");
    }
}
| Nilhcem/devfestnantes-2016 | app/src/test/java/com/nilhcem/devfestnantes/data/database/DbMapperTest.java | Java | apache-2.0 | 5,055 |
class CheckPriceYDPage:
    """Page object for the LiteCart "Yellow Duck" product page.

    Wraps the XPath lookups used by the price-consistency checks so the
    tests never touch selectors directly.
    """

    def __init__(self, driver):
        # Selenium WebDriver instance shared with the calling test.
        self.driver = driver

    def get_page_yellow_duck(self):
        """Open the Yellow Duck product page via its category-listing link."""
        link = self.driver.find_element_by_xpath(
            "//a[@href='http://localhost/litecart/en/rubber-ducks-c-1/subcategory-c-2/yellow-duck-p-1']")
        link.click()

    def get_compaign_price_yd(self):
        """Return the campaign (discounted) price text of the product."""
        return self.driver.find_element_by_xpath(
            "//div[@class='price-wrapper']/strong[@class='campaign-price']").text

    def get_regular_price_yd(self):
        """Return the regular (struck-through) price text of the product."""
        return self.driver.find_element_by_xpath(
            "//div[@class='price-wrapper']/s[@class='regular-price']").text
| skostya64/Selenium_tasks | pages/check_price_yd_page.py | Python | apache-2.0 | 622 |
/*
* Copyright 2014-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.securitytoken.model;
import java.io.Serializable;
import javax.annotation.Generated;
import com.amazonaws.AmazonWebServiceRequest;
/**
*
* @see <a href="http://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity" target="_top">AWS API
* Documentation</a>
*/
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class GetCallerIdentityRequest extends com.amazonaws.AmazonWebServiceRequest implements Serializable, Cloneable {

    /**
     * Returns a string representation of this object. This is useful for testing and debugging. Sensitive data will be
     * redacted from this string using a placeholder value.
     *
     * @return A string representation of this object.
     *
     * @see java.lang.Object#toString()
     */
    @Override
    public String toString() {
        // The request carries no fields, so the representation is always the
        // same empty braces pair.
        return "{}";
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        // instanceof evaluates to false for null, which also covers the
        // null check; with no state, any other instance is equal.
        return obj instanceof GetCallerIdentityRequest;
    }

    @Override
    public int hashCode() {
        // No fields contribute to the hash, so every instance hashes to 1.
        return 1;
    }

    @Override
    public GetCallerIdentityRequest clone() {
        return (GetCallerIdentityRequest) super.clone();
    }
}
| jentfoo/aws-sdk-java | aws-java-sdk-sts/src/main/java/com/amazonaws/services/securitytoken/model/GetCallerIdentityRequest.java | Java | apache-2.0 | 2,159 |
using System.Linq;
using NUnit.Framework;
using SolrNet.Impl;
using SolrNet.Impl.DocumentPropertyVisitors;
using SolrNet.Impl.FieldParsers;
using SolrNet.Impl.ResponseParsers;
using SolrNet.Mapping;
using SolrNet.Tests.Utils;
namespace SolrNet.Tests
{
[TestFixture]
public class CollapseExpandResponseParserTests
{
    // Builds the parser under test, wired the same way production code
    // wires it: attribute-based mapping with default field parsing.
    private static CollapseExpandResponseParser<Doc> CreateParser()
    {
        var mappingManager = new AttributesMappingManager();
        var documentParser = new SolrDocumentResponseParser<Doc>(
            mappingManager,
            new DefaultDocumentVisitor(mappingManager, new DefaultFieldParser()),
            new SolrDocumentActivator<Doc>());
        return new CollapseExpandResponseParser<Doc>(documentParser);
    }

    [Test]
    public void Parse()
    {
        var queryResults = new SolrQueryResults<Doc>();
        var responseXml = EmbeddedResource.GetEmbeddedXml(GetType(), "Resources.collapseWithoutExpandResponse.xml");

        CreateParser().Parse(responseXml, queryResults);

        // Without an expand section there is nothing to populate.
        Assert.IsNull(queryResults.CollapseExpand);
    }

    [Test]
    public void Parse2()
    {
        var queryResults = new SolrQueryResults<Doc>();
        var responseXml = EmbeddedResource.GetEmbeddedXml(GetType(), "Resources.collapseWithExpandResponse.xml");

        CreateParser().Parse(responseXml, queryResults);

        Assert.IsNotNull(queryResults.CollapseExpand);
        Assert.AreEqual(4, queryResults.CollapseExpand.Groups.Count);

        var firstGroup = queryResults.CollapseExpand.Groups.ElementAt(0);
        Assert.AreEqual(firstGroup.Documents.Count, 2);
        Assert.AreEqual(firstGroup.GroupValue, "First");
        Assert.AreEqual(firstGroup.NumFound, 2);
    }

    class Doc {}
}
| vladen/SolrNet | SolrNet.Tests/CollapseExpandResponseParserTests.cs | C# | apache-2.0 | 1,825 |
def brancher(  # noqa: E302
    self, branches=None, all_branches=False, tags=None, all_tags=False
):
    """Generator that iterates over specified revisions.

    Args:
        branches (list): a list of branches to iterate over.
        all_branches (bool): iterate over all available branches.
        tags (list): a list of tags to iterate over.
        all_tags (bool): iterate over all available tags.

    Yields:
        str: display name of the currently selected tree; one of:
            - a git revision identifier
            - an empty string when there is nothing to iterate over
            - "Working Tree" when the SCM repo has uncommitted changes
    """
    if not (branches or all_branches or tags or all_tags):
        # Nothing requested: expose only the current tree.
        yield ""
        return

    saved_tree = self.tree
    scm = self.scm

    if scm.is_dirty():
        from dvc.scm.tree import WorkingTree

        self.tree = WorkingTree()
        yield "Working Tree"

    if all_branches:
        branches = scm.list_branches()
    if all_tags:
        tags = scm.list_tags()

    # Fall back to the active branch when no explicit branch list is given.
    revs = list(branches) if branches is not None else [scm.active_branch()]
    if tags is not None:
        revs.extend(tags)

    # NOTE: it might be a good idea to wrap this loop in try/finally block
    # to don't leave the tree on some unexpected branch after the
    # `brancher()`, but this could cause problems on exception handling
    # code which might expect the tree on which exception was raised to
    # stay in place. This behavior is a subject to change.
    for rev in revs:
        self.tree = scm.get_tree(rev)
        yield rev

    self.tree = saved_tree
| dataversioncontrol/dvc | dvc/repo/brancher.py | Python | apache-2.0 | 1,700 |
package com.sduwh.match;
import com.sduwh.match.enums.*;
import com.sduwh.match.jedis.JedisAdapter;
import com.sduwh.match.jedis.RedisKeyGenerator;
import com.sduwh.match.model.entity.*;
import com.sduwh.match.model.to.MatchItemTO;
import com.sduwh.match.model.wrapper.MatchItemWithScore;
import com.sduwh.match.service.MailSender;
import com.sduwh.match.service.apply.ApplyService;
import com.sduwh.match.service.concludingstagtement.middlecheck.ConcludingStatementService;
import com.sduwh.match.service.grade.GradeService;
import com.sduwh.match.service.matchinfo.MatchInfoService;
import com.sduwh.match.service.matchitem.MatchItemService;
import com.sduwh.match.service.middlecheck.MiddleCheckService;
import com.sduwh.match.service.pass.PassService;
import com.sduwh.match.service.personalmatchinfo.PersonalMatchInfoService;
import com.sduwh.match.service.researchlog.ResearchLogService;
import com.sduwh.match.service.stage.StageService;
import com.sduwh.match.service.tmprater.TmpRaterService;
import com.sduwh.match.service.transfermember.TransferMemberService;
import com.sduwh.match.service.user.UserService;
import com.sduwh.match.util.StringUtils;
import com.sduwh.match.util.TimestampFormatUtils;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.test.context.ContextConfiguration;
import org.springframework.test.context.junit4.SpringJUnit4ClassRunner;
import java.io.File;
import java.security.Key;
import java.util.*;
import java.util.stream.Collectors;
/**
* Created by qxg on 17-6-30.
*/
@RunWith(SpringJUnit4ClassRunner.class)
@ContextConfiguration(locations = {"classpath:/spring/spring-context.xml"})
public class TestMapper {
    // NOTE(review): these are integration-style scratch tests. They boot the
    // real Spring context from spring-context.xml, so they talk to whatever
    // database/redis that context is configured against — not mocks.

    @Autowired
    MiddleCheckService middleCheckService;
    @Autowired
    UserService userService;
    @Autowired
    TransferMemberService transferMemberService;
    @Autowired
    PersonalMatchInfoService personalMatchInfoService;
    @Autowired
    MatchItemService matchItemService;
    @Autowired
    MatchInfoService matchInfoService;
    @Autowired
    StageService stageService;
    @Autowired
    ConcludingStatementService concludingStatementService;
    @Autowired
    TmpRaterService tmpRaterService;
    @Autowired
    JedisAdapter jedisAdapter;
    @Autowired
    GradeService gradeService;
    @Autowired
    ApplyService applyService;
    @Autowired
    PassService passService;
    @Autowired
    ResearchLogService researchLogService;

    @Test
    public void test(){
        // Loads match item with primary key 4, flags it as not "by time" and
        // persists the change. NOTE(review): mutates real data and asserts
        // nothing — manual experimentation only, not a regression test.
        MatchItem matchItem = matchItemService.selectByPrimaryKey(4);
        matchItem.setByTime(MatchByTimeEnum.NO.getCode());
        matchItemService.updateByPrimaryKeySelective(matchItem);
    }

    @Test
    public void testRedis(){
        // Removes member "109" from the "show handle has done" redis set.
        // NOTE(review): also a destructive, assertion-free scratch test.
        String key = RedisKeyGenerator.getListShowHandleHasDoneKey();
        jedisAdapter.srem(key,"109");
    }
}
| 583462423/match | match-web/src/test/java/com/sduwh/match/TestMapper.java | Java | apache-2.0 | 2,898 |
/**
* Copyright 2015 Brendan Murray
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
**/
// Dependency - dht sensor package
var sensorLib = require("node-dht-sensor");
module.exports = function(RED) {
"use strict";
// The main node definition - most things happen in here
function dht22Sensor(config) {
// Mapping tables
var gpio = [ -1, -1, 8, -1, 9, -1, 7, 15, -1, 16,
0, 1, 2, -1, 3, 4, -1, 5, 12, -1,
13, 6, 14, 10, -1, 11, -1, -1, 21, -1,
22, 26, 23, -1, 24, 27, 25, 28, -1, 29 ];
var bcm1 = [ -1, -1, 0, -1, 1, -1, 4, 14, -1, 15,
17, 18, 21, -1, 22, 23, -1, 24, 10, -1,
9, 25, 11, 8, -1, 7, -1, -1, 5, -1,
6, 12, 13, -1, 19, 16, 26, 20, -1, 21 ];
var bcm2 = [ -1, -1, 2, -1, 3, -1, 4, 14, -1, 15,
17, 18, 27, -1, 22, 23, -1, 24, 10, -1,
9, 25, 11, 8, -1, 7, -1, -1, 5, -1,
6, 12, 13, -1, 19, 16, 26, 20, -1, 21 ];
// Create a RED node
RED.nodes.createNode(this, config);
// Store local copies of the node configuration (as defined in the .html)
var node = this;
this.topic = config.topic;
this.dht = config.dht;
if (config.pintype == 0) { // BCM GPIO pin
this.pin = config.pin;
} else if (config.pintype == 1) { // Physical pin number - Rev 1
this.pin = bcm1[config.pin-1];
} else if (config.pintype == 2) { // Physical pin number - Rev 2
this.pin = bcm2[config.pin-1];
} else if (config.pintype == 3) { // WiringPi pin number - Rev 1
for (var iX=0; iX<40; iX++) {
if (gpio[iX] == config.pin) {
this.pin = bcm1[iX];
break;
}
}
} else { // WiringPi pin number - Rev 2
for (var iX=0; iX<40; iX++) {
if (gpio[iX] == config.pin) {
this.pin = bcm2[iX];
break;
}
}
}
// Read the data & return a message object
this.read = function(msgIn) {
var msg = msgIn ? msgIn : {};
var reading = { temperature : 100.0, humidity : 110.0 };
if (this.dht === undefined || this.pin === undefined) {
// Miscommunication - use silly values
} else {
// Read the data from the sensors
reading = sensorLib.read(this.dht, this.pin);
}
msg.payload = reading.temperature.toFixed(2);
msg.humidity = reading.humidity.toFixed(2);
msg.isValid = reading.isValid;
msg.errors = reading.errors;
msg.topic = node.topic || node.name;
msg.location = node.name;
msg.sensorid = 'dht' + node.dht;
return msg;
};
// respond to inputs....
this.on('input', function (msg) {
msg = this.read(msg);
if (msg)
node.send(msg);
});
// var msg = this.read();
// // send out the message to the rest of the workspace.
// if (msg)
// this.send(msg);
}
// Register the node by name.
RED.nodes.registerType("rpi-dht22", dht22Sensor);
}
| bpmurray/node-red-contrib-dht-sensor | dht22-node/dht22-node.js | JavaScript | apache-2.0 | 3,759 |
package com.wisedu.cpdaily.ui.contact.teacher;
import com.wisedu.cpdaily.di.components.NetComponent;
import com.wisedu.cpdaily.di.modules.ApiModule;
import com.wisedu.cpdaily.di.scope.ScopeFragment;
import dagger.Component;
/**
 * Injector (Dagger component) for the teacher contact screen.
 */
@ScopeFragment
@Component(dependencies = NetComponent.class, modules = {TeacherModule.class, ApiModule.class})
interface TeacherComponent {
    /** Performs field injection into the fragment's annotated members. */
    void inject(TeacherFragment fragment);
}
| Gagarinwjj/Cpdaily | app/src/main/java/com/wisedu/cpdaily/ui/contact/teacher/TeacherComponent.java | Java | apache-2.0 | 434 |
package czsem.fs.query.restrictions;
import czsem.fs.query.QueryNode;
import czsem.fs.query.constants.MetaAttribute;
import czsem.fs.query.eval.obsolete.IterateSubtreeEvaluator;
/**
 * Base class for printable restrictions that expose a query node's meta
 * attributes as {@code attribute = value} pairs.
 */
public abstract class OtherPrintableRestriction implements PrintableRestriction {

    /** The query node whose meta attribute is being printed. */
    protected final QueryNode n;

    public OtherPrintableRestriction(QueryNode n) {
        this.n = n;
    }

    /** Every meta-attribute restriction prints with the equality comparator. */
    @Override
    public String getComparator() {
        return "=";
    }

    /** Pairs the node-name meta attribute with the node's name. */
    public static class PrintName extends OtherPrintableRestriction {

        public PrintName(QueryNode n) {
            super(n);
        }

        @Override
        public String getLeftArg() {
            return MetaAttribute.NODE_NAME;
        }

        @Override
        public String getRightArg() {
            return n.getName();
        }
    }

    /** Pairs the "optional" meta attribute with the node's optional flag. */
    public static class PrintOptional extends OtherPrintableRestriction {

        public PrintOptional(QueryNode n) {
            super(n);
        }

        @Override
        public String getLeftArg() {
            return MetaAttribute.OPTIONAL;
        }

        @Override
        public String getRightArg() {
            return Boolean.toString(n.isOptional());
        }
    }

    /** Pairs the "optional subtree" meta attribute with the node's flag. */
    public static class PrintOptionalSubtree extends OtherPrintableRestriction {

        public PrintOptionalSubtree(QueryNode n) {
            super(n);
        }

        @Override
        public String getLeftArg() {
            return MetaAttribute.OPTIONAL_SUBTREE;
        }

        @Override
        public String getRightArg() {
            return Boolean.toString(n.isOptionalSubtree());
        }
    }

    /** Pairs the "forbidden subtree" meta attribute with the node's flag. */
    public static class PrintForbiddenSubtree extends OtherPrintableRestriction {

        public PrintForbiddenSubtree(QueryNode n) {
            super(n);
        }

        @Override
        public String getLeftArg() {
            return MetaAttribute.FORBIDDEN_SUBTREE;
        }

        @Override
        public String getRightArg() {
            return Boolean.toString(n.isForbiddenSubtree());
        }
    }

    /** Pairs the subtree-depth meta attribute with the node's depth value. */
    public static class PrintSubtreeDepth extends OtherPrintableRestriction {

        public PrintSubtreeDepth(QueryNode n) {
            super(n);
        }

        @Override
        public String getLeftArg() {
            return IterateSubtreeEvaluator.META_ATTR_SUBTREE_DEPTH;
        }

        @Override
        public String getRightArg() {
            return Integer.toString(n.getSubtreeDepth());
        }
    }
}
| datlowe/czsem-gate-tools | modules/fs-query/src/main/java/czsem/fs/query/restrictions/OtherPrintableRestriction.java | Java | apache-2.0 | 1,941 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.apache.jmeter.report.dashboard;
import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.util.HashMap;
import java.util.Map;
import java.util.TimeZone;
import java.util.regex.Pattern;
import java.util.regex.PatternSyntaxException;
import org.apache.commons.io.FileUtils;
import org.apache.commons.lang3.StringEscapeUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.Validate;
import org.apache.jmeter.JMeter;
import org.apache.jmeter.report.config.ConfigurationException;
import org.apache.jmeter.report.config.ExporterConfiguration;
import org.apache.jmeter.report.config.GraphConfiguration;
import org.apache.jmeter.report.config.ReportGeneratorConfiguration;
import org.apache.jmeter.report.config.SubConfiguration;
import org.apache.jmeter.report.core.DataContext;
import org.apache.jmeter.report.core.TimeHelper;
import org.apache.jmeter.report.processor.ListResultData;
import org.apache.jmeter.report.processor.MapResultData;
import org.apache.jmeter.report.processor.ResultData;
import org.apache.jmeter.report.processor.ResultDataVisitor;
import org.apache.jmeter.report.processor.SampleContext;
import org.apache.jmeter.report.processor.ValueResultData;
import org.apache.jmeter.report.processor.graph.AbstractGraphConsumer;
import org.apache.jmeter.util.JMeterUtils;
import org.apache.jorphan.util.JOrphanUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import freemarker.template.Configuration;
import freemarker.template.TemplateExceptionHandler;
/**
* The class HtmlTemplateExporter provides a data exporter that generates and
* processes template files using freemarker.
*
* @since 3.0
*/
public class HtmlTemplateExporter extends AbstractDataExporter {
private static final String CUSTOM_GRAPH_PREFIX = "custom_";
/** Format used for non null check of parameters. */
private static final String MUST_NOT_BE_NULL = "%s must not be null";
private static final Logger log = LoggerFactory.getLogger(HtmlTemplateExporter.class);
public static final String DATA_CTX_REPORT_TITLE = "reportTitle";
public static final String DATA_CTX_TESTFILE = "testFile";
public static final String DATA_CTX_BEGINDATE = "beginDate";
public static final String DATA_CTX_ENDDATE = "endDate";
public static final String DATA_CTX_TIMEZONE = "timeZone";
public static final String DATA_CTX_TIMEZONE_OFFSET = "timeZoneOffset";
public static final String DATA_CTX_OVERALL_FILTER = "overallFilter";
public static final String DATA_CTX_SHOW_CONTROLLERS_ONLY = "showControllersOnly";
public static final String DATA_CTX_RESULT = "result";
public static final String DATA_CTX_EXTRA_OPTIONS = "extraOptions";
public static final String DATA_CTX_SERIES_FILTER = "seriesFilter";
public static final String DATA_CTX_FILTERS_ONLY_SAMPLE_SERIES = "filtersOnlySampleSeries";
public static final String TIMESTAMP_FORMAT_MS = "ms";
private static final String INVALID_TEMPLATE_DIRECTORY_FMT = "\"%s\" is not a valid template directory";
private static final String INVALID_PROPERTY_CONFIG_FMT = "Wrong property \"%s\" in \"%s\" export configuration";
// Template directory
private static final String TEMPLATE_DIR = "template_dir";
private static final String TEMPLATE_DIR_NAME_DEFAULT = "report-template";
// Output directory
private static final String OUTPUT_DIR = "output_dir";
// Default output folder name
private static final String OUTPUT_DIR_NAME_DEFAULT = "report-output";
/**
* Adds to context the value surrounding it with quotes
* @param key Key
* @param value Value
* @param context {@link DataContext}
*/
private void addToContext(String key, Object value, DataContext context) {
if (value instanceof String) {
value = '"' + (String) value + '"';
}
context.put(key, value);
}
/**
* This class allows to customize data before exporting them
*
*/
    private interface ResultCustomizer {
        /** Returns the (possibly wrapped) result to expose to templates. */
        ResultData customizeResult(ResultData result);
    }
/**
* This class allows to inject graph_options properties to the exported data
*
*/
    private class ExtraOptionsResultCustomizer implements ResultCustomizer {
        // Optional graph_options-style configuration to merge into the output.
        private SubConfiguration extraOptions;

        /**
         * Sets the extra options to inject in the result data.
         *
         * @param extraOptions to inject; may be null, in which case only the
         *            original result is exposed
         */
        public final void setExtraOptions(SubConfiguration extraOptions) {
            this.extraOptions = extraOptions;
        }

        /*
         * Wraps the original result under the "result" key and, when extra
         * options are present, adds each of their string properties under the
         * "extraOptions" key (see DATA_CTX_RESULT / DATA_CTX_EXTRA_OPTIONS).
         *
         * @see org.apache.jmeter.report.dashboard.HtmlTemplateExporter.
         * ResultCustomizer#customizeResult(org.apache.jmeter.report.processor.
         * ResultData)
         */
        @Override
        public ResultData customizeResult(ResultData result) {
            MapResultData customizedResult = new MapResultData();

            // Add graph result
            customizedResult.setResult(DATA_CTX_RESULT, result);

            // Add extra options, each property becoming a ValueResultData entry
            if (extraOptions != null) {
                MapResultData extraResult = new MapResultData();
                for (Map.Entry<String, String> extraEntry : extraOptions
                        .getProperties().entrySet()) {
                    extraResult.setResult(extraEntry.getKey(),
                            new ValueResultData(extraEntry.getValue()));
                }
                customizedResult.setResult(DATA_CTX_EXTRA_OPTIONS, extraResult);
            }
            return customizedResult;
        }
    }
/**
* This class allows to check exported data
*
*/
    private interface ResultChecker {
        /** Returns false when the result should be reported as problematic. */
        boolean checkResult(DataContext dataContext, ResultData result);
    }
/**
* This class allows to detect empty graphs
*
*/
    private class EmptyGraphChecker implements ResultChecker {

        // Exporter-wide settings, fixed at construction time.
        private final boolean filtersOnlySampleSeries;
        private final boolean showControllerSeriesOnly;
        private final Pattern filterPattern;

        // Per-graph state, injected before each checkResult call.
        private boolean excludesControllers;
        private String graphId;

        public final void setExcludesControllers(boolean excludesControllers) {
            this.excludesControllers = excludesControllers;
        }

        public final void setGraphId(String graphId) {
            this.graphId = graphId;
        }

        /**
         * Instantiates a new EmptyGraphChecker.
         *
         * @param filtersOnlySampleSeries flag to control filter for samples
         * @param showControllerSeriesOnly flag to control visibility of controller
         * @param filterPattern to use
         */
        public EmptyGraphChecker(boolean filtersOnlySampleSeries,
                boolean showControllerSeriesOnly, Pattern filterPattern) {
            this.filtersOnlySampleSeries = filtersOnlySampleSeries;
            this.showControllerSeriesOnly = showControllerSeriesOnly;
            this.filterPattern = filterPattern;
        }

        /*
         * Detects graphs whose configuration guarantees an empty rendering:
         * either the exporter shows controller series only while the graph
         * excludes controllers, or no series survives the series filter.
         * Returns false (after logging a warning) in those cases.
         *
         * @see
         * org.apache.jmeter.report.dashboard.HtmlTemplateExporter.ResultChecker
         * #checkResult( org.apache.jmeter.report.core.DataContext dataContext, org.apache.jmeter.report.processor.ResultData)
         */
        @Override
        public boolean checkResult(DataContext dataContext, ResultData result) {
            Boolean supportsControllerDiscrimination = findValue(Boolean.class,
                    AbstractGraphConsumer.RESULT_SUPPORTS_CONTROLLERS_DISCRIMINATION,
                    result);

            if (supportsControllerDiscrimination.booleanValue() && showControllerSeriesOnly
                    && excludesControllers) {
                // Exporter shows controller series only
                // whereas the current graph supports controller
                // discrimination and excludes controllers: nothing to show.
                log.warn("{} is set while the graph {} excludes controllers.",
                        ReportGeneratorConfiguration.EXPORTER_KEY_SHOW_CONTROLLERS_ONLY, graphId);
                return false;
            } else {
                if (filterPattern != null) {
                    // Detect whether none of the series matches
                    // the series filter.
                    ResultData seriesResult = findData(
                            AbstractGraphConsumer.RESULT_SERIES, result);
                    if (seriesResult instanceof ListResultData) {
                        // Scan until at least one series survives filtering.
                        ListResultData seriesList = (ListResultData) seriesResult;
                        int count = seriesList.getSize();
                        int index = 0;
                        boolean matches = false;
                        while (index < count && !matches) {
                            ResultData currentResult = seriesList.get(index);
                            if (currentResult instanceof MapResultData) {
                                MapResultData seriesData = (MapResultData) currentResult;
                                String name = findValue(String.class,
                                        AbstractGraphConsumer.RESULT_SERIES_NAME,
                                        seriesData);

                                // Is the current series a controller series ?
                                boolean isController = findValue(Boolean.class,
                                        AbstractGraphConsumer.RESULT_SERIES_IS_CONTROLLER,
                                        seriesData).booleanValue();

                                matches = filterPattern.matcher(name).matches();
                                if (matches) {
                                    // If the name matches the pattern, other
                                    // properties can still discard the series
                                    matches = !filtersOnlySampleSeries
                                            || !supportsControllerDiscrimination.booleanValue()
                                            || isController
                                            || !showControllerSeriesOnly;
                                    if(log.isDebugEnabled()) {
                                        log.debug(
                                                "name:{} matches pattern:{}, supportsControllerDiscrimination:{}, "
                                                        + "isController:{}, showControllerSeriesOnly:{}",
                                                name, filterPattern.pattern(),
                                                supportsControllerDiscrimination,
                                                isController,
                                                showControllerSeriesOnly);
                                    }
                                } else {
                                    // If the name does not match the pattern,
                                    // other properties can still hold the series
                                    matches = filtersOnlySampleSeries
                                            && !supportsControllerDiscrimination.booleanValue();
                                    if(log.isDebugEnabled()) {
                                        log.debug("name:{} does not match pattern:{}, filtersOnlySampleSeries:{},"
                                                + " supportsControllerDiscrimination:{}",
                                                name, filterPattern.pattern(),
                                                filtersOnlySampleSeries,
                                                supportsControllerDiscrimination);
                                    }
                                }
                            }
                            index++;
                        }
                        if (!matches) {
                            // No series matched the pattern: the graph is empty.
                            log.warn("No serie matches the series_filter: {} in graph: {}",
                                    ReportGeneratorConfiguration.EXPORTER_KEY_SERIES_FILTER, graphId);
                            return false;
                        }
                    }
                }
            }
            return true;
        }
    }
    // Convenience overload: adds a stored result to the data context with
    // neither a customizer nor a checker.
    private <TVisit> void addResultToContext(String resultKey,
            Map<String, Object> storage, DataContext dataContext,
            ResultDataVisitor<TVisit> visitor) {
        addResultToContext(resultKey, storage, dataContext, visitor, null,
                null);
    }
    // Looks up resultKey in the consumer storage; when the stored value is a
    // ResultData, optionally validates it (checker), optionally wraps it
    // (customizer), then stores the visitor-converted value in the context
    // under the same key. Non-ResultData entries are silently ignored.
    private <TVisit> void addResultToContext(String resultKey,
            Map<String, Object> storage, DataContext dataContext,
            ResultDataVisitor<TVisit> visitor, ResultCustomizer customizer,
            ResultChecker checker) {
        Object data = storage.get(resultKey);
        if (data instanceof ResultData) {
            ResultData result = (ResultData) data;
            // NOTE(review): the checker's boolean outcome is ignored here —
            // the result is exported even when checkResult returns false.
            if (checker != null) {
                checker.checkResult(dataContext, result);
            }
            if (customizer != null) {
                result = customizer.customizeResult(result);
            }
            dataContext.put(resultKey, result.accept(visitor));
        }
    }
    // Replaces the raw epoch value stored under `key` in the context with a
    // quoted, formatted timestamp string, and returns the epoch value as a
    // long. The double->long cast truncates any fractional part.
    private long formatTimestamp(String key, DataContext context) {
        // FIXME Why convert to double then long (rounding ?)
        double result = Double.parseDouble((String) context.get(key));
        long timestamp = (long) result;
        // Quote the string to respect Json spec.
        context.put(key, '"' + TimeHelper.formatTimeStamp(timestamp) + '"');
        return timestamp;
    }
    // Reads a typed property from the exporter sub-configuration, translating
    // any ConfigurationException into an ExportException that names the
    // offending property and this exporter.
    private <TProperty> TProperty getPropertyFromConfig(SubConfiguration cfg,
            String property, TProperty defaultValue, Class<TProperty> clazz)
            throws ExportException {
        try {
            return cfg.getProperty(property, defaultValue, clazz);
        } catch (ConfigurationException ex) {
            throw new ExportException(String.format(INVALID_PROPERTY_CONFIG_FMT,
                    property, getName()), ex);
        }
    }
/*
* (non-Javadoc)
*
* @see
* org.apache.jmeter.report.dashboard.DataExporter#Export(org.apache.jmeter
* .report.processor.SampleContext,
* org.apache.jmeter.report.config.ReportGeneratorConfiguration)
*/
@Override
public void export(SampleContext context, File file,
ReportGeneratorConfiguration configuration) throws ExportException {
Validate.notNull(context, MUST_NOT_BE_NULL, "context");
Validate.notNull(file, MUST_NOT_BE_NULL, "file");
Validate.notNull(configuration, MUST_NOT_BE_NULL, "configuration");
log.debug("Start template processing");
// Create data context and populate it
DataContext dataContext = new DataContext();
// Get the configuration of the current exporter
final ExporterConfiguration exportCfg = configuration
.getExportConfigurations().get(getName());
// Get template directory property value
File templateDirectory = getPropertyFromConfig(exportCfg, TEMPLATE_DIR,
new File(JMeterUtils.getJMeterBinDir(), TEMPLATE_DIR_NAME_DEFAULT), File.class);
if (!templateDirectory.isDirectory()) {
String message = String.format(INVALID_TEMPLATE_DIRECTORY_FMT,
templateDirectory.getAbsolutePath());
log.error(message);
throw new ExportException(message);
}
// Get output directory property value
File outputDir = getPropertyFromConfig(exportCfg, OUTPUT_DIR,
new File(JMeterUtils.getJMeterBinDir(), OUTPUT_DIR_NAME_DEFAULT), File.class);
String globallyDefinedOutputDir = JMeterUtils.getProperty(JMeter.JMETER_REPORT_OUTPUT_DIR_PROPERTY);
if(!StringUtils.isEmpty(globallyDefinedOutputDir)) {
outputDir = new File(globallyDefinedOutputDir);
}
JOrphanUtils.canSafelyWriteToFolder(outputDir);
if (log.isInfoEnabled()) {
log.info("Will generate dashboard in folder: {}", outputDir.getAbsolutePath());
}
// Add the flag defining whether only sample series are filtered to the
// context
final boolean filtersOnlySampleSeries = exportCfg
.filtersOnlySampleSeries();
addToContext(DATA_CTX_FILTERS_ONLY_SAMPLE_SERIES,
Boolean.valueOf(filtersOnlySampleSeries), dataContext);
// Add the series filter to the context
final String seriesFilter = exportCfg.getSeriesFilter();
Pattern filterPattern = null;
if (StringUtils.isNotBlank(seriesFilter)) {
try {
filterPattern = Pattern.compile(seriesFilter);
} catch (PatternSyntaxException ex) {
log.error("Invalid series filter: '{}', {}", seriesFilter, ex.getDescription());
}
}
addToContext(DATA_CTX_SERIES_FILTER, seriesFilter, dataContext);
// Add the flag defining whether only controller series are displayed
final boolean showControllerSeriesOnly = exportCfg
.showControllerSeriesOnly();
addToContext(DATA_CTX_SHOW_CONTROLLERS_ONLY,
Boolean.valueOf(showControllerSeriesOnly), dataContext);
JsonizerVisitor jsonizer = new JsonizerVisitor();
Map<String, Object> storedData = context.getData();
// Add begin date consumer result to the data context
addResultToContext(ReportGenerator.BEGIN_DATE_CONSUMER_NAME, storedData,
dataContext, jsonizer);
// Add end date summary consumer result to the data context
addResultToContext(ReportGenerator.END_DATE_CONSUMER_NAME, storedData,
dataContext, jsonizer);
// Add Apdex summary consumer result to the data context
addResultToContext(ReportGenerator.APDEX_SUMMARY_CONSUMER_NAME,
storedData, dataContext, jsonizer);
// Add errors summary consumer result to the data context
addResultToContext(ReportGenerator.ERRORS_SUMMARY_CONSUMER_NAME,
storedData, dataContext, jsonizer);
// Add requests summary consumer result to the data context
addResultToContext(ReportGenerator.REQUESTS_SUMMARY_CONSUMER_NAME,
storedData, dataContext, jsonizer);
// Add statistics summary consumer result to the data context
addResultToContext(ReportGenerator.STATISTICS_SUMMARY_CONSUMER_NAME,
storedData, dataContext, jsonizer);
// Add Top 5 errors by sampler consumer result to the data context
addResultToContext(ReportGenerator.TOP5_ERRORS_BY_SAMPLER_CONSUMER_NAME,
storedData, dataContext, jsonizer);
// Collect graph results from sample context and transform them into
// Json strings to inject in the data context
ExtraOptionsResultCustomizer customizer = new ExtraOptionsResultCustomizer();
EmptyGraphChecker checker = new EmptyGraphChecker(
filtersOnlySampleSeries, showControllerSeriesOnly,
filterPattern);
DataContext customGraphs = new DataContext();
Map<String, GraphConfiguration> mapConfiguration = new HashMap<>();
for (Map.Entry<String, GraphConfiguration> graphEntry : configuration
.getGraphConfigurations().entrySet()) {
final String graphId = graphEntry.getKey();
final GraphConfiguration graphConfiguration = graphEntry.getValue();
final SubConfiguration extraOptions = exportCfg.getGraphExtraConfigurations().get(graphId);
// Initialize customizer and checker
customizer.setExtraOptions(extraOptions);
checker.setExcludesControllers(
graphConfiguration.excludesControllers());
checker.setGraphId(graphId);
mapConfiguration.put(graphId, graphConfiguration);
if(graphId.startsWith(CUSTOM_GRAPH_PREFIX)) {
addResultToContext(graphId, storedData, customGraphs, jsonizer,
customizer, checker);
} else {
// Export graph data
addResultToContext(graphId, storedData, dataContext, jsonizer,
customizer, checker);
}
}
dataContext.put("graphConfigurations", mapConfiguration);
dataContext.put("customsGraphsData", customGraphs);
// Replace the begin date with its formatted string and store the old
// timestamp
long oldTimestamp = formatTimestamp(
ReportGenerator.BEGIN_DATE_CONSUMER_NAME, dataContext);
// Replace the end date with its formatted string
formatTimestamp(ReportGenerator.END_DATE_CONSUMER_NAME, dataContext);
// Add time zone offset (that matches the begin date) to the context
TimeZone timezone = TimeZone.getDefault();
addToContext(DATA_CTX_TIMEZONE_OFFSET,
Integer.valueOf(timezone.getOffset(oldTimestamp)), dataContext);
// Add report title to the context
if(!StringUtils.isEmpty(configuration.getReportTitle())) {
dataContext.put(DATA_CTX_REPORT_TITLE, StringEscapeUtils.escapeHtml4(configuration.getReportTitle()));
}
// Add the test file name to the context
addToContext(DATA_CTX_TESTFILE, file.getName(), dataContext);
// Add the overall filter property to the context
addToContext(DATA_CTX_OVERALL_FILTER, configuration.getSampleFilter(),
dataContext);
// Walk template directory to copy files and process templated ones
Configuration templateCfg = new Configuration(
Configuration.getVersion());
try {
templateCfg.setDirectoryForTemplateLoading(templateDirectory);
templateCfg.setTemplateExceptionHandler(
TemplateExceptionHandler.RETHROW_HANDLER);
if (log.isInfoEnabled()) {
log.info("Report will be generated in: {}, creating folder structure", outputDir.getAbsolutePath());
}
FileUtils.forceMkdir(outputDir);
TemplateVisitor visitor = new TemplateVisitor(
templateDirectory.toPath(), outputDir.toPath(), templateCfg,
dataContext);
Files.walkFileTree(templateDirectory.toPath(), visitor);
} catch (IOException ex) {
throw new ExportException("Unable to process template files.", ex);
}
log.debug("End of template processing");
}
}
| vherilier/jmeter | src/core/org/apache/jmeter/report/dashboard/HtmlTemplateExporter.java | Java | apache-2.0 | 23,630 |
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.trino.plugin.base.security;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.annotation.JsonValue;
import com.google.common.collect.ImmutableSet;
import java.util.Locale;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.regex.Pattern;
import static com.google.common.base.MoreObjects.toStringHelper;
import static com.google.common.base.Preconditions.checkState;
import static com.google.common.collect.ImmutableMap.toImmutableMap;
import static java.util.Arrays.stream;
import static java.util.Objects.requireNonNull;
import static java.util.function.Function.identity;
/**
 * One rule of the file-based query access control configuration.
 * <p>
 * A rule grants a set of {@link AccessMode}s when the requesting identity
 * (user, roles, groups) and, optionally, the owner of the target query match
 * the rule's regular expressions. An absent regex matches everything.
 */
public class QueryAccessRule
{
    // Access modes granted when this rule matches.
    private final Set<AccessMode> allow;
    private final Optional<Pattern> userRegex;
    private final Optional<Pattern> roleRegex;
    private final Optional<Pattern> groupRegex;
    private final Optional<Pattern> queryOwnerRegex;

    @JsonCreator
    public QueryAccessRule(
            @JsonProperty("allow") Set<AccessMode> allow,
            @JsonProperty("user") Optional<Pattern> userRegex,
            @JsonProperty("role") Optional<Pattern> roleRegex,
            @JsonProperty("group") Optional<Pattern> groupRegex,
            @JsonProperty("queryOwner") Optional<Pattern> queryOwnerRegex)
    {
        this.allow = ImmutableSet.copyOf(requireNonNull(allow, "allow is null"));
        this.userRegex = requireNonNull(userRegex, "userRegex is null");
        this.roleRegex = requireNonNull(roleRegex, "roleRegex is null");
        this.groupRegex = requireNonNull(groupRegex, "queryOwnerRegex is null" == null ? null : "groupRegex is null");
        this.queryOwnerRegex = requireNonNull(queryOwnerRegex, "queryOwnerRegex is null");
        // 'execute' applies to queries that do not exist yet, so an owner
        // condition can never be satisfied for it.
        checkState(
                queryOwnerRegex.isEmpty() || !allow.contains(AccessMode.EXECUTE),
                "A valid query rule cannot combine a queryOwner condition with access mode 'execute'");
    }

    /**
     * Matches this rule against the given identity and query owner.
     *
     * @return the granted access modes when every configured regex matches,
     *         otherwise {@link Optional#empty()}
     */
    public Optional<Set<AccessMode>> match(String user, Set<String> roles, Set<String> groups, Optional<String> queryOwner)
    {
        if (userRegex.map(regex -> regex.matcher(user).matches()).orElse(true) &&
                roleRegex.map(regex -> roles.stream().anyMatch(role -> regex.matcher(role).matches())).orElse(true) &&
                groupRegex.map(regex -> groups.stream().anyMatch(group -> regex.matcher(group).matches())).orElse(true) &&
                ((queryOwner.isEmpty() && queryOwnerRegex.isEmpty()) || (queryOwner.isPresent() && queryOwnerRegex.map(regex -> regex.matcher(queryOwner.get()).matches()).orElse(true)))) {
            return Optional.of(allow);
        }
        return Optional.empty();
    }

    @Override
    public String toString()
    {
        return toStringHelper(this)
                .omitNullValues()
                .add("allow", allow)
                .add("userRegex", userRegex.orElse(null))
                .add("roleRegex", roleRegex.orElse(null))
                .add("groupRegex", groupRegex.orElse(null))
                .add("ownerRegex", queryOwnerRegex.orElse(null))
                .toString();
    }

    /**
     * The kinds of access a rule can grant on a query. Serialized to/from
     * JSON as the lower-case string value.
     */
    public enum AccessMode
    {
        EXECUTE("execute"),
        VIEW("view"),
        KILL("kill");

        // Lookup table from the JSON string value to the enum constant.
        private static final Map<String, AccessMode> modeByName = stream(AccessMode.values()).collect(toImmutableMap(AccessMode::toString, identity()));

        private final String stringValue;

        AccessMode(String stringValue)
        {
            this.stringValue = requireNonNull(stringValue, "stringValue is null");
        }

        @JsonValue
        @Override
        public String toString()
        {
            return stringValue;
        }

        /**
         * Case-insensitive JSON deserialization of an access mode.
         *
         * @throws IllegalArgumentException if the value is not a known mode
         */
        @JsonCreator
        public static AccessMode fromJson(Object value)
        {
            if (value instanceof String) {
                AccessMode accessMode = modeByName.get(((String) value).toLowerCase(Locale.US));
                if (accessMode != null) {
                    return accessMode;
                }
            }
            throw new IllegalArgumentException("Unknown " + AccessMode.class.getSimpleName() + ": " + value);
        }
    }
}
| ebyhr/presto | lib/trino-plugin-toolkit/src/main/java/io/trino/plugin/base/security/QueryAccessRule.java | Java | apache-2.0 | 4,722 |
// This file was created automatically, do not modify the contents of this file.
// ReSharper disable InvalidXmlDocComment
// ReSharper disable InconsistentNaming
// ReSharper disable CheckNamespace
// ReSharper disable MemberCanBePrivate.Global
using System;
using System.Runtime.InteropServices;
// Source file C:\Program Files\Epic Games\UE_4.22\Engine\Source\Runtime\AIModule\Classes\BehaviorTree\Tasks\BTTask_RunEQSQuery.h:25
namespace UnrealEngine
{
/// <summary>
/// Auto-generated managed wrapper around the native UBTTask_RunEQSQuery type.
/// Each override forwards to the corresponding native function via P/Invoke.
/// NOTE: this file is generated — do not hand-edit the code.
/// </summary>
[ManageType("ManageBTTask_RunEQSQuery")]
public partial class ManageBTTask_RunEQSQuery : UBTTask_RunEQSQuery, IManageWrapper
{
	public ManageBTTask_RunEQSQuery(IntPtr adress)
		: base(adress)
	{
	}

	#region DLLInmport
	[DllImport(NativeManager.UnrealDotNetDll, CallingConvention = CallingConvention.Cdecl)]
	private static extern void E__Supper__UBTTask_RunEQSQuery_BeginDestroy(IntPtr self);

	[DllImport(NativeManager.UnrealDotNetDll, CallingConvention = CallingConvention.Cdecl)]
	private static extern void E__Supper__UBTTask_RunEQSQuery_FinishDestroy(IntPtr self);

	[DllImport(NativeManager.UnrealDotNetDll, CallingConvention = CallingConvention.Cdecl)]
	private static extern void E__Supper__UBTTask_RunEQSQuery_MarkAsEditorOnlySubobject(IntPtr self);

	[DllImport(NativeManager.UnrealDotNetDll, CallingConvention = CallingConvention.Cdecl)]
	private static extern void E__Supper__UBTTask_RunEQSQuery_PostCDOContruct(IntPtr self);

	[DllImport(NativeManager.UnrealDotNetDll, CallingConvention = CallingConvention.Cdecl)]
	private static extern void E__Supper__UBTTask_RunEQSQuery_PostEditImport(IntPtr self);

	[DllImport(NativeManager.UnrealDotNetDll, CallingConvention = CallingConvention.Cdecl)]
	private static extern void E__Supper__UBTTask_RunEQSQuery_PostInitProperties(IntPtr self);

	[DllImport(NativeManager.UnrealDotNetDll, CallingConvention = CallingConvention.Cdecl)]
	private static extern void E__Supper__UBTTask_RunEQSQuery_PostLoad(IntPtr self);

	[DllImport(NativeManager.UnrealDotNetDll, CallingConvention = CallingConvention.Cdecl)]
	private static extern void E__Supper__UBTTask_RunEQSQuery_PostNetReceive(IntPtr self);

	[DllImport(NativeManager.UnrealDotNetDll, CallingConvention = CallingConvention.Cdecl)]
	private static extern void E__Supper__UBTTask_RunEQSQuery_PostRepNotifies(IntPtr self);

	[DllImport(NativeManager.UnrealDotNetDll, CallingConvention = CallingConvention.Cdecl)]
	private static extern void E__Supper__UBTTask_RunEQSQuery_PostSaveRoot(IntPtr self, bool bCleanupIsRequired);

	[DllImport(NativeManager.UnrealDotNetDll, CallingConvention = CallingConvention.Cdecl)]
	private static extern void E__Supper__UBTTask_RunEQSQuery_PreDestroyFromReplication(IntPtr self);

	[DllImport(NativeManager.UnrealDotNetDll, CallingConvention = CallingConvention.Cdecl)]
	private static extern void E__Supper__UBTTask_RunEQSQuery_PreNetReceive(IntPtr self);

	[DllImport(NativeManager.UnrealDotNetDll, CallingConvention = CallingConvention.Cdecl)]
	private static extern void E__Supper__UBTTask_RunEQSQuery_ShutdownAfterError(IntPtr self);

	[DllImport(NativeManager.UnrealDotNetDll, CallingConvention = CallingConvention.Cdecl)]
	private static extern void E__Supper__UBTTask_RunEQSQuery_CreateCluster(IntPtr self);

	[DllImport(NativeManager.UnrealDotNetDll, CallingConvention = CallingConvention.Cdecl)]
	private static extern void E__Supper__UBTTask_RunEQSQuery_OnClusterMarkedAsPendingKill(IntPtr self);
	#endregion

	#region Methods
	/// <summary>
	/// Called before destroying the object. This is called immediately upon deciding to destroy the object, to allow the object to begin an
	/// <para>asynchronous cleanup process. </para>
	/// </summary>
	public override void BeginDestroy()
		=> E__Supper__UBTTask_RunEQSQuery_BeginDestroy(this);

	/// <summary>
	/// Called to finish destroying the object. After UObject::FinishDestroy is called, the object's memory should no longer be accessed.
	/// <para>@warning Because properties are destroyed here, Super::FinishDestroy() should always be called at the end of your child class's FinishDestroy() method, rather than at the beginning. </para>
	/// </summary>
	public override void FinishDestroy()
		=> E__Supper__UBTTask_RunEQSQuery_FinishDestroy(this);

	/// <summary>
	/// Called during subobject creation to mark this component as editor only, which causes it to get stripped in packaged builds
	/// </summary>
	public override void MarkAsEditorOnlySubobject()
		=> E__Supper__UBTTask_RunEQSQuery_MarkAsEditorOnlySubobject(this);

	/// <summary>
	/// Called after the C++ constructor has run on the CDO for a class. This is an obscure routine used to deal with the recursion
	/// <para>in the construction of the default materials </para>
	/// </summary>
	public override void PostCDOContruct()
		=> E__Supper__UBTTask_RunEQSQuery_PostCDOContruct(this);

	/// <summary>
	/// Called after importing property values for this object (paste, duplicate or .t3d import)
	/// <para>Allow the object to perform any cleanup for properties which shouldn't be duplicated or </para>
	/// are unsupported by the script serialization
	/// </summary>
	public override void PostEditImport()
		=> E__Supper__UBTTask_RunEQSQuery_PostEditImport(this);

	/// <summary>
	/// Called after the C++ constructor and after the properties have been initialized, including those loaded from config.
	/// <para>This is called before any serialization or other setup has happened. </para>
	/// </summary>
	public override void PostInitProperties()
		=> E__Supper__UBTTask_RunEQSQuery_PostInitProperties(this);

	/// <summary>
	/// Do any object-specific cleanup required immediately after loading an object.
	/// <para>This is not called for newly-created objects, and by default will always execute on the game thread. </para>
	/// </summary>
	public override void PostLoad()
		=> E__Supper__UBTTask_RunEQSQuery_PostLoad(this);

	/// <summary>
	/// Called right after receiving a bunch
	/// </summary>
	public override void PostNetReceive()
		=> E__Supper__UBTTask_RunEQSQuery_PostNetReceive(this);

	/// <summary>
	/// Called right after calling all OnRep notifies (called even when there are no notifies)
	/// </summary>
	public override void PostRepNotifies()
		=> E__Supper__UBTTask_RunEQSQuery_PostRepNotifies(this);

	/// <summary>
	/// Called from within SavePackage on the passed in base/root object.
	/// <para>This function is called after the package has been saved and can perform cleanup. </para>
	/// </summary>
	/// <param name="bCleanupIsRequired">Whether PreSaveRoot dirtied state that needs to be cleaned up</param>
	public override void PostSaveRoot(bool bCleanupIsRequired)
		=> E__Supper__UBTTask_RunEQSQuery_PostSaveRoot(this, bCleanupIsRequired);

	/// <summary>
	/// Called right before being marked for destruction due to network replication
	/// </summary>
	public override void PreDestroyFromReplication()
		=> E__Supper__UBTTask_RunEQSQuery_PreDestroyFromReplication(this);

	/// <summary>
	/// Called right before receiving a bunch
	/// </summary>
	public override void PreNetReceive()
		=> E__Supper__UBTTask_RunEQSQuery_PreNetReceive(this);

	/// <summary>
	/// After a critical error, perform any mission-critical cleanup, such as restoring the video mode or releasing hardware resources.
	/// </summary>
	public override void ShutdownAfterError()
		=> E__Supper__UBTTask_RunEQSQuery_ShutdownAfterError(this);

	/// <summary>
	/// Called after PostLoad to create UObject cluster
	/// </summary>
	public override void CreateCluster()
		=> E__Supper__UBTTask_RunEQSQuery_CreateCluster(this);

	/// <summary>
	/// Called during Garbage Collection to perform additional cleanup when the cluster is about to be destroyed due to PendingKill flag being set on it.
	/// </summary>
	public override void OnClusterMarkedAsPendingKill()
		=> E__Supper__UBTTask_RunEQSQuery_OnClusterMarkedAsPendingKill(this);
	#endregion

	/// <summary>
	/// Implicit conversion to the underlying native pointer (IntPtr.Zero for null wrappers).
	/// </summary>
	public static implicit operator IntPtr(ManageBTTask_RunEQSQuery self)
	{
		return self?.NativePointer ?? IntPtr.Zero;
	}

	/// <summary>
	/// Implicit conversion from a native pointer description to a managed wrapper instance.
	/// </summary>
	public static implicit operator ManageBTTask_RunEQSQuery(ObjectPointerDescription PtrDesc)
	{
		return NativeManager.GetWrapper<ManageBTTask_RunEQSQuery>(PtrDesc);
	}
}
}
| mrkriv/UnrealDotNet | Plugins/UnrealDotNet/Source/UnrealEngineSharp/Generate/Manage/ManageBTTask_RunEQSQuery.cs | C# | apache-2.0 | 8,609 |
<?php
/**
* @created Alexey Kutuzov <lexus27.khv@gmail.com>
* @Project: php-abac
*/
namespace Jungle\ABAC;
/**
 * Sketch of the ABAC condition/target/rule code generator.
 *
 * NOTE(review): the body of a() is exploratory example code illustrating the
 * shapes of code the generator should emit; it is not meant to be executed
 * as-is (e.g. call_user_func('operator_123', ...) appears before the helper
 * function is declared later in the same body).
 */
class Generator{

	public function a(){
		// Placeholder context object standing in for the real evaluation context.
		$ctx=new \stdClass();

		// target(any_of, all_of; n-conditions)
		// rule(1-condition)
		// if target->conformed===true

		/**
		/* FULL condition code generate
		/* left operand : code for fetch val from ctx by path OR value render
		/* operator identifier code render, OR @operator_special_function code generate
		 * right operand : code for fetch val from ctx by path OR value render
		 */

		// one condition: plain operator rendered inline
		if($ctx->{'path_to_parameter'} !== 'example'){

		}
		// one condition: special operator dispatched through a generated function
		if(call_user_func('operator_123',$ctx->{'path_to_parameter'}, 'example')){

		}

		// target composition ANY_OF ALL_OF
		if(
			/* ANY_OF */($ctx->{'path_to_parameter'} !== 'example' || $ctx->{'path_to_parameter'} !== 'example') &&
			/* ALL_OF */($ctx->{'path_to_parameter'} !== 'example' && $ctx->{'path_to_parameter'} !== 'example')
		){

		}

		// combiner (the trickiest part so far)
		/*
		 * The idea is to arrange the child rules and policies according to the combiner.
		 * One case: when a rule does not match, all child rules that follow are
		 * ignored (they can be skipped over with goto).
		 * Another case: the first rule matched - which means success - and the
		 * handlers of every rule must be invoked.
		 */

		// operator special function (example of the emitted helper)
		function operator_123($a,$b){
			return true;
		}
	}

}
| lexus27/php-abac | src/Generator.php | PHP | apache-2.0 | 1,720 |
/*
* Copyright 2000-2009 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* Created by IntelliJ IDEA.
* User: cdr
* Date: Jul 30, 2002
*/
package com.intellij.codeInsight.daemon.impl.analysis;
import com.intellij.codeInsight.ExceptionUtil;
import com.intellij.codeInsight.daemon.JavaErrorMessages;
import com.intellij.codeInsight.daemon.impl.HighlightInfo;
import com.intellij.codeInsight.daemon.impl.HighlightInfoType;
import com.intellij.codeInsight.daemon.impl.quickfix.*;
import com.intellij.codeInsight.highlighting.HighlightUsagesDescriptionLocation;
import com.intellij.codeInsight.intention.IntentionAction;
import com.intellij.codeInsight.intention.QuickFixFactory;
import com.intellij.codeInsight.quickfix.ChangeVariableTypeQuickFixProvider;
import com.intellij.codeInsight.quickfix.UnresolvedReferenceQuickFixProvider;
import com.intellij.lang.StdLanguages;
import com.intellij.lang.findUsages.LanguageFindUsages;
import com.intellij.openapi.diagnostic.Logger;
import com.intellij.openapi.extensions.Extensions;
import com.intellij.openapi.project.Project;
import com.intellij.openapi.util.Comparing;
import com.intellij.openapi.util.Pair;
import com.intellij.openapi.util.TextRange;
import com.intellij.psi.*;
import com.intellij.psi.impl.source.jsp.jspJava.JspHolderMethod;
import com.intellij.psi.javadoc.PsiDocComment;
import com.intellij.psi.jsp.JspFile;
import com.intellij.psi.scope.processor.VariablesNotProcessor;
import com.intellij.psi.scope.util.PsiScopesUtil;
import com.intellij.psi.tree.IElementType;
import com.intellij.psi.util.*;
import com.intellij.util.ArrayUtil;
import com.intellij.util.IncorrectOperationException;
import com.intellij.xml.util.XmlStringUtil;
import gnu.trove.THashMap;
import gnu.trove.THashSet;
import org.intellij.lang.annotations.Language;
import org.jetbrains.annotations.NonNls;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import java.util.*;
public class HighlightUtil {
private static final Logger LOG = Logger.getInstance("#com.intellij.codeInsight.daemon.impl.analysis.HighlightUtil");
// Each map: modifier name -> set of modifiers it conflicts with, for the
// corresponding kind of declaration. Populated once in the static initializer.
private static final Map<String, Set<String>> ourInterfaceIncompatibleModifiers;
private static final Map<String, Set<String>> ourMethodIncompatibleModifiers;
private static final Map<String, Set<String>> ourFieldIncompatibleModifiers;
private static final Map<String, Set<String>> ourClassIncompatibleModifiers;
private static final Map<String, Set<String>> ourClassInitializerIncompatibleModifiers;
// Modifiers that are never allowed on constructors.
private static final Set<String> ourConstructorNotAllowedModifiers;
@NonNls private static final String SERIAL_VERSION_UID_FIELD_NAME = "serialVersionUID";
@NonNls private static final String SERIAL_PERSISTENT_FIELDS_FIELD_NAME = "serialPersistentFields";
private static final QuickFixFactory QUICK_FIX_FACTORY = QuickFixFactory.getInstance();

// Utility class; not instantiable.
private HighlightUtil() {
}
// Builds the incompatibility tables used by getIncompatibleModifier().
// Note: the 'modifiers' local is reused; a fresh set is created before each put.
static {
  // ---- classes ----
  ourClassIncompatibleModifiers = new THashMap<String, Set<String>>(8);
  Set<String> modifiers = new THashSet<String>(1);
  modifiers.add(PsiModifier.FINAL);
  ourClassIncompatibleModifiers.put(PsiModifier.ABSTRACT, modifiers);
  modifiers = new THashSet<String>(1);
  modifiers.add(PsiModifier.ABSTRACT);
  ourClassIncompatibleModifiers.put(PsiModifier.FINAL, modifiers);
  // each access modifier conflicts with the three others
  modifiers = new THashSet<String>(3);
  modifiers.add(PsiModifier.PRIVATE);
  modifiers.add(PsiModifier.PUBLIC);
  modifiers.add(PsiModifier.PROTECTED);
  ourClassIncompatibleModifiers.put(PsiModifier.PACKAGE_LOCAL, modifiers);
  modifiers = new THashSet<String>(3);
  modifiers.add(PsiModifier.PACKAGE_LOCAL);
  modifiers.add(PsiModifier.PUBLIC);
  modifiers.add(PsiModifier.PROTECTED);
  ourClassIncompatibleModifiers.put(PsiModifier.PRIVATE, modifiers);
  modifiers = new THashSet<String>(3);
  modifiers.add(PsiModifier.PACKAGE_LOCAL);
  modifiers.add(PsiModifier.PRIVATE);
  modifiers.add(PsiModifier.PROTECTED);
  ourClassIncompatibleModifiers.put(PsiModifier.PUBLIC, modifiers);
  modifiers = new THashSet<String>(3);
  modifiers.add(PsiModifier.PACKAGE_LOCAL);
  modifiers.add(PsiModifier.PUBLIC);
  modifiers.add(PsiModifier.PRIVATE);
  ourClassIncompatibleModifiers.put(PsiModifier.PROTECTED, modifiers);
  // strictfp and static conflict with nothing on classes
  ourClassIncompatibleModifiers.put(PsiModifier.STRICTFP, Collections.<String>emptySet());
  ourClassIncompatibleModifiers.put(PsiModifier.STATIC, Collections.<String>emptySet());
  // ---- interfaces ----
  ourInterfaceIncompatibleModifiers = new THashMap<String, Set<String>>(7);
  ourInterfaceIncompatibleModifiers.put(PsiModifier.ABSTRACT, Collections.<String>emptySet());
  modifiers = new THashSet<String>(3);
  modifiers.add(PsiModifier.PRIVATE);
  modifiers.add(PsiModifier.PUBLIC);
  modifiers.add(PsiModifier.PROTECTED);
  ourInterfaceIncompatibleModifiers.put(PsiModifier.PACKAGE_LOCAL, modifiers);
  modifiers = new THashSet<String>(3);
  modifiers.add(PsiModifier.PACKAGE_LOCAL);
  modifiers.add(PsiModifier.PUBLIC);
  modifiers.add(PsiModifier.PROTECTED);
  ourInterfaceIncompatibleModifiers.put(PsiModifier.PRIVATE, modifiers);
  modifiers = new THashSet<String>(3);
  modifiers.add(PsiModifier.PRIVATE);
  modifiers.add(PsiModifier.PACKAGE_LOCAL);
  modifiers.add(PsiModifier.PROTECTED);
  ourInterfaceIncompatibleModifiers.put(PsiModifier.PUBLIC, modifiers);
  modifiers = new THashSet<String>(3);
  modifiers.add(PsiModifier.PRIVATE);
  modifiers.add(PsiModifier.PUBLIC);
  modifiers.add(PsiModifier.PACKAGE_LOCAL);
  ourInterfaceIncompatibleModifiers.put(PsiModifier.PROTECTED, modifiers);
  ourInterfaceIncompatibleModifiers.put(PsiModifier.STRICTFP, Collections.<String>emptySet());
  ourInterfaceIncompatibleModifiers.put(PsiModifier.STATIC, Collections.<String>emptySet());
  // ---- methods ----
  ourMethodIncompatibleModifiers = new THashMap<String, Set<String>>(10);
  modifiers = new THashSet<String>(6);
  modifiers.addAll(Arrays.asList(PsiModifier.NATIVE, PsiModifier.STATIC, PsiModifier.FINAL, PsiModifier.PRIVATE, PsiModifier.STRICTFP,
                                 PsiModifier.SYNCHRONIZED));
  ourMethodIncompatibleModifiers.put(PsiModifier.ABSTRACT, modifiers);
  modifiers = new THashSet<String>(2);
  modifiers.add(PsiModifier.ABSTRACT);
  modifiers.add(PsiModifier.STRICTFP);
  ourMethodIncompatibleModifiers.put(PsiModifier.NATIVE, modifiers);
  modifiers = new THashSet<String>(3);
  modifiers.add(PsiModifier.PRIVATE);
  modifiers.add(PsiModifier.PUBLIC);
  modifiers.add(PsiModifier.PROTECTED);
  ourMethodIncompatibleModifiers.put(PsiModifier.PACKAGE_LOCAL, modifiers);
  // private additionally conflicts with abstract on methods
  modifiers = new THashSet<String>(4);
  modifiers.add(PsiModifier.ABSTRACT);
  modifiers.add(PsiModifier.PACKAGE_LOCAL);
  modifiers.add(PsiModifier.PUBLIC);
  modifiers.add(PsiModifier.PROTECTED);
  ourMethodIncompatibleModifiers.put(PsiModifier.PRIVATE, modifiers);
  modifiers = new THashSet<String>(3);
  modifiers.add(PsiModifier.PACKAGE_LOCAL);
  modifiers.add(PsiModifier.PRIVATE);
  modifiers.add(PsiModifier.PROTECTED);
  ourMethodIncompatibleModifiers.put(PsiModifier.PUBLIC, modifiers);
  modifiers = new THashSet<String>(3);
  modifiers.add(PsiModifier.PACKAGE_LOCAL);
  modifiers.add(PsiModifier.PUBLIC);
  modifiers.add(PsiModifier.PRIVATE);
  ourMethodIncompatibleModifiers.put(PsiModifier.PROTECTED, modifiers);
  // static/synchronized/strictfp/final all conflict (only) with abstract —
  // the same singleton set instance is shared by the four entries
  modifiers = new THashSet<String>(1);
  modifiers.add(PsiModifier.ABSTRACT);
  ourMethodIncompatibleModifiers.put(PsiModifier.STATIC, modifiers);
  ourMethodIncompatibleModifiers.put(PsiModifier.SYNCHRONIZED, modifiers);
  ourMethodIncompatibleModifiers.put(PsiModifier.STRICTFP, modifiers);
  ourMethodIncompatibleModifiers.put(PsiModifier.FINAL, modifiers);
  // ---- fields ----
  ourFieldIncompatibleModifiers = new THashMap<String, Set<String>>(8);
  modifiers = new THashSet<String>(1);
  modifiers.add(PsiModifier.VOLATILE);
  ourFieldIncompatibleModifiers.put(PsiModifier.FINAL, modifiers);
  modifiers = new THashSet<String>(3);
  modifiers.add(PsiModifier.PRIVATE);
  modifiers.add(PsiModifier.PUBLIC);
  modifiers.add(PsiModifier.PROTECTED);
  ourFieldIncompatibleModifiers.put(PsiModifier.PACKAGE_LOCAL, modifiers);
  modifiers = new THashSet<String>(3);
  modifiers.add(PsiModifier.PACKAGE_LOCAL);
  modifiers.add(PsiModifier.PUBLIC);
  modifiers.add(PsiModifier.PROTECTED);
  ourFieldIncompatibleModifiers.put(PsiModifier.PRIVATE, modifiers);
  modifiers = new THashSet<String>(3);
  modifiers.add(PsiModifier.PACKAGE_LOCAL);
  modifiers.add(PsiModifier.PRIVATE);
  modifiers.add(PsiModifier.PROTECTED);
  ourFieldIncompatibleModifiers.put(PsiModifier.PUBLIC, modifiers);
  modifiers = new THashSet<String>(3);
  modifiers.add(PsiModifier.PACKAGE_LOCAL);
  modifiers.add(PsiModifier.PRIVATE);
  modifiers.add(PsiModifier.PUBLIC);
  ourFieldIncompatibleModifiers.put(PsiModifier.PROTECTED, modifiers);
  ourFieldIncompatibleModifiers.put(PsiModifier.STATIC, Collections.<String>emptySet());
  ourFieldIncompatibleModifiers.put(PsiModifier.TRANSIENT, Collections.<String>emptySet());
  modifiers = new THashSet<String>(1);
  modifiers.add(PsiModifier.FINAL);
  ourFieldIncompatibleModifiers.put(PsiModifier.VOLATILE, modifiers);
  // ---- class initializers ----
  ourClassInitializerIncompatibleModifiers = new THashMap<String, Set<String>>(1);
  ourClassInitializerIncompatibleModifiers.put(PsiModifier.STATIC, Collections.<String>emptySet());
  // ---- constructors ----
  ourConstructorNotAllowedModifiers = new THashSet<String>(6);
  ourConstructorNotAllowedModifiers.add(PsiModifier.ABSTRACT);
  ourConstructorNotAllowedModifiers.add(PsiModifier.STATIC);
  ourConstructorNotAllowedModifiers.add(PsiModifier.NATIVE);
  ourConstructorNotAllowedModifiers.add(PsiModifier.FINAL);
  ourConstructorNotAllowedModifiers.add(PsiModifier.STRICTFP);
  ourConstructorNotAllowedModifiers.add(PsiModifier.SYNCHRONIZED);
}
/**
 * Returns a modifier from the given modifier list that conflicts with
 * {@code modifier} according to {@code incompatibleModifiersHash},
 * or {@code null} when there is no conflict.
 * A duplicated modifier is reported as conflicting with itself.
 */
@Nullable
public static String getIncompatibleModifier(String modifier,
                                             PsiModifierList modifierList,
                                             Map<String, Set<String>> incompatibleModifiersHash) {
  if (modifierList == null) return null;
  // A modifier repeated in the list conflicts with itself.
  PsiElement[] children = modifierList.getChildren();
  int occurrences = 0;
  for (int i = 0; i < children.length; i++) {
    if (Comparing.equal(modifier, children[i].getText(), true)) {
      occurrences++;
    }
  }
  if (occurrences > 1) return modifier;
  Set<String> conflictCandidates = incompatibleModifiersHash.get(modifier);
  if (conflictCandidates == null) return null;
  // Report the first conflicting modifier actually present on the list.
  for (@Modifier String candidate : conflictCandidates) {
    if (modifierList.hasModifierProperty(candidate)) {
      return candidate;
    }
  }
  return null;
}
/**
 * make element protected/package local/public suggestion
 * <p>
 * Registers "change visibility" quick fixes on {@code errorResult} for an
 * inaccessible member: tries each widening access modifier starting from the
 * minimal one that could help, and registers a fix for every modifier that
 * would actually make {@code refElement} accessible from {@code place}.
 */
static void registerAccessQuickFixAction(PsiMember refElement,
                                         PsiJavaCodeReferenceElement place,
                                         HighlightInfo errorResult,
                                         final PsiElement fileResolveScope) {
  // Compiled (library) elements cannot be modified.
  if (refElement instanceof PsiCompiledElement) return;
  PsiModifierList modifierList = refElement.getModifierList();
  if (modifierList == null) return;
  // If a package-local class in the qualifier chain is what blocks access,
  // suggest making that class public instead of changing the member.
  PsiClass packageLocalClassInTheMiddle = getPackageLocalClassInTheMiddle(place);
  if (packageLocalClassInTheMiddle != null) {
    IntentionAction fix =
      QUICK_FIX_FACTORY.createModifierListFix(packageLocalClassInTheMiddle, PsiModifier.PUBLIC, true, true);
    QuickFixAction.registerQuickFixAction(errorResult, fix);
    return;
  }
  try {
    Project project = refElement.getProject();
    JavaPsiFacade facade = JavaPsiFacade.getInstance(project);
    // Scratch modifier list used to test candidate visibilities without
    // touching the real element.
    PsiModifierList modifierListCopy = facade.getElementFactory().createFieldFromText("int a;", null).getModifierList();
    modifierListCopy.setModifierProperty(PsiModifier.STATIC, modifierList.hasModifierProperty(PsiModifier.STATIC));
    // Start from the first modifier strictly wider than the current one.
    @Modifier String minModifier = PsiModifier.PACKAGE_LOCAL;
    if (refElement.hasModifierProperty(PsiModifier.PACKAGE_LOCAL)) {
      minModifier = PsiModifier.PROTECTED;
    }
    if (refElement.hasModifierProperty(PsiModifier.PROTECTED)) {
      minModifier = PsiModifier.PUBLIC;
    }
    String[] modifiers = {PsiModifier.PACKAGE_LOCAL, PsiModifier.PROTECTED, PsiModifier.PUBLIC,};
    PsiClass accessObjectClass = null;
    PsiElement qualifier = place.getQualifier();
    if (qualifier instanceof PsiExpression) {
      accessObjectClass = (PsiClass)PsiUtil.getAccessObjectClass((PsiExpression)qualifier).getElement();
    }
    for (int i = ArrayUtil.indexOf(modifiers, minModifier); i < modifiers.length; i++) {
      @Modifier String modifier = modifiers[i];
      modifierListCopy.setModifierProperty(modifier, true);
      // Only offer the fix if this visibility would actually grant access.
      if (facade.getResolveHelper().isAccessible(refElement, modifierListCopy, place, accessObjectClass, fileResolveScope)) {
        IntentionAction fix = QUICK_FIX_FACTORY.createModifierListFix(refElement, modifier, true, true);
        // Extend the fix range to cover the reference name element as well.
        TextRange fixRange = new TextRange(errorResult.startOffset, errorResult.endOffset);
        PsiElement ref = place.getReferenceNameElement();
        if (ref != null) {
          fixRange = fixRange.union(ref.getTextRange());
        }
        QuickFixAction.registerQuickFixAction(errorResult, fixRange, fix, null);
      }
    }
  }
  catch (IncorrectOperationException e) {
    LOG.error(e);
  }
}
/**
 * Walks the qualifier chain of a reference expression and returns the first
 * field-containing class that is package-local and declared in a different
 * package than {@code place}, or {@code null} when no such class is found
 * (or {@code place} is not a reference expression).
 */
@Nullable
private static PsiClass getPackageLocalClassInTheMiddle(PsiJavaCodeReferenceElement place) {
  if (!(place instanceof PsiReferenceExpression)) return null;
  // check for package local classes in the middle of the qualifier chain
  for (PsiReferenceExpression expression = (PsiReferenceExpression)place; ; ) {
    PsiElement resolved = expression.resolve();
    if (resolved instanceof PsiField) {
      PsiClass containingClass = ((PsiField)resolved).getContainingClass();
      if (containingClass != null
          && containingClass.hasModifierProperty(PsiModifier.PACKAGE_LOCAL)
          && !JavaPsiFacade.getInstance(containingClass.getProject()).arePackagesTheSame(containingClass, place)) {
        return containingClass;
      }
    }
    PsiExpression qualifier = expression.getQualifierExpression();
    if (!(qualifier instanceof PsiReferenceExpression)) return null;
    expression = (PsiReferenceExpression)qualifier;
  }
}
/**
 * Reports an "inconvertible types" error when the operand of an {@code instanceof}
 * expression can never be of the checked type (either side is a non-null primitive,
 * or the types are not convertible). Returns null when the check is legal or when
 * the expression is too incomplete to analyze.
 */
@Nullable
static HighlightInfo checkInstanceOfApplicable(PsiInstanceOfExpression expression) {
  PsiTypeElement checkTypeElement = expression.getCheckType();
  if (checkTypeElement == null) return null;
  PsiType operandType = expression.getOperand().getType();
  if (operandType == null) return null;
  PsiType checkType = checkTypeElement.getType();
  boolean operandIsPrimitive = TypeConversionUtil.isPrimitiveAndNotNull(operandType);
  boolean checkIsPrimitive = TypeConversionUtil.isPrimitiveAndNotNull(checkType);
  if (operandIsPrimitive || checkIsPrimitive || !TypeConversionUtil.areTypesConvertible(operandType, checkType)) {
    String message = JavaErrorMessages.message("inconvertible.type.cast", formatType(operandType), formatType(checkType));
    return HighlightInfo.createHighlightInfo(HighlightInfoType.ERROR, expression, message);
  }
  return null;
}
/**
 * Reports an "inconvertible types" error for a cast expression whose operand type
 * cannot be converted to the cast type. Returns null when the cast is legal or the
 * expression is incomplete.
 */
@Nullable
static HighlightInfo checkInconvertibleTypeCast(PsiTypeCastExpression expression) {
  PsiTypeElement castTypeElement = expression.getCastType();
  // Guard against incomplete code like "(int" — getCastType() is nullable and the
  // original unconditional getType() call would throw an NPE here.
  if (castTypeElement == null) return null;
  PsiType castType = castTypeElement.getType();
  PsiExpression operand = expression.getOperand();
  PsiType operandType = operand == null ? null : operand.getType();
  if (operandType != null && !TypeConversionUtil.areTypesConvertible(operandType, castType)) {
    String message = JavaErrorMessages.message("inconvertible.type.cast", formatType(operandType), formatType(castType));
    return HighlightInfo.createHighlightInfo(HighlightInfoType.ERROR, expression, message);
  }
  return null;
}
/**
 * Highlights "variable expected" when the target of an assignment or of a ++/--
 * operation is not an l-value. Returns null for other expressions or valid targets.
 */
static HighlightInfo checkVariableExpected(PsiExpression expression) {
  PsiExpression target = null;
  if (expression instanceof PsiAssignmentExpression) {
    target = ((PsiAssignmentExpression)expression).getLExpression();
  }
  else if (PsiUtil.isIncrementDecrementOperation(expression)) {
    target = expression instanceof PsiPostfixExpression
             ? ((PsiPostfixExpression)expression).getOperand()
             : ((PsiPrefixExpression)expression).getOperand();
  }
  if (target == null || TypeConversionUtil.isLValue(target)) return null;
  return HighlightInfo.createHighlightInfo(HighlightInfoType.ERROR, target, JavaErrorMessages.message("variable.expected"));
}
/**
 * For a compound assignment (+=, -=, ...) checks that the underlying binary operator
 * is applicable to the operand types; also rejects compound assignment to
 * java.lang.Object. Returns null for plain "=" or when the operator applies.
 */
@Nullable
static HighlightInfo checkAssignmentOperatorApplicable(PsiAssignmentExpression assignment) {
  PsiJavaToken operationSign = assignment.getOperationSign();
  IElementType opSign = TypeConversionUtil.convertEQtoOperation(operationSign.getTokenType());
  if (opSign == null) return null; // simple "=", handled elsewhere
  PsiExpression rExpression = assignment.getRExpression();
  // Guard against incomplete code like "a +=": the original dereferenced
  // getRExpression() unconditionally when building the message, risking an NPE.
  if (rExpression == null) return null;
  final PsiType lType = assignment.getLExpression().getType();
  if (!TypeConversionUtil.isBinaryOperatorApplicable(opSign, assignment.getLExpression(), rExpression, true) ||
      PsiType.getJavaLangObject(assignment.getManager(), assignment.getResolveScope()).equals(lType)) {
    String signText = operationSign.getText();
    String operatorText = signText.substring(0, signText.length() - 1); // "+=" -> "+"
    String message = JavaErrorMessages.message("binary.operator.not.applicable", operatorText,
                                               formatType(lType),
                                               formatType(rExpression.getType()));
    return HighlightInfo.createHighlightInfo(HighlightInfoType.ERROR, assignment, message);
  }
  return null;
}
/**
 * For a simple "=" assignment, checks that the right-hand side is assignable to the
 * left-hand side; when it isn't and the target resolves to a variable, also offers
 * a "change variable type" quick fix.
 */
@Nullable
static HighlightInfo checkAssignmentCompatibleTypes(PsiAssignmentExpression assignment) {
  // Compound operators (+=, ...) are validated by checkAssignmentOperatorApplicable.
  if (!"=".equals(assignment.getOperationSign().getText())) return null;
  PsiExpression rExpr = assignment.getRExpression();
  if (rExpr == null) return null;
  PsiType rType = rExpr.getType();
  if (rType == null) return null;
  PsiExpression lExpr = assignment.getLExpression();
  HighlightInfo info = checkAssignability(lExpr.getType(), rType, rExpr, assignment);
  if (info == null) return null;
  if (lExpr instanceof PsiReferenceExpression) {
    PsiElement resolved = ((PsiReferenceExpression)lExpr).resolve();
    if (resolved instanceof PsiVariable) {
      registerChangeVariableTypeFixes((PsiVariable)resolved, rType, info);
    }
  }
  return info;
}
/**
 * True when {@code expression}, after peeling away any wrapping casts and
 * parentheses, has a type convertible to {@code toType} — i.e. an "add cast"
 * quick fix would produce legal code.
 */
private static boolean isCastIntentionApplicable(PsiExpression expression, PsiType toType) {
  // Unwrap arbitrarily nested "(T)(...)" layers down to the innermost expression.
  while (true) {
    if (expression instanceof PsiTypeCastExpression) {
      expression = ((PsiTypeCastExpression)expression).getOperand();
    }
    else if (expression instanceof PsiParenthesizedExpression) {
      expression = ((PsiParenthesizedExpression)expression).getExpression();
    }
    else {
      break;
    }
  }
  if (expression == null) return false;
  PsiType sourceType = expression.getType();
  return sourceType != null && toType != null && TypeConversionUtil.areTypesConvertible(sourceType, toType);
}
@Nullable
static HighlightInfo checkVariableInitializerType(PsiVariable variable) {
// Reports "incompatible types" when a variable's initializer cannot be assigned to the
// declared type, and offers a quick fix to change the variable's type to match.
PsiExpression initializer = variable.getInitializer();
// array initializer checked in checkArrayInitializerApplicable
if (initializer == null || initializer instanceof PsiArrayInitializerExpression) return null;
PsiType lType = variable.getType();
PsiType rType = initializer.getType();
// Highlight from the declared type element through the end of the declaration.
// NOTE(review): assumes getTypeElement() is non-null here (e.g. not an enum constant) — confirm callers.
int start = variable.getTypeElement().getTextRange().getStartOffset();
int end = variable.getTextRange().getEndOffset();
HighlightInfo highlightInfo = checkAssignability(lType, rType, initializer, new TextRange(start, end));
if (highlightInfo != null) {
registerChangeVariableTypeFixes(variable, rType, highlightInfo);
}
return highlightInfo;
}
/** Convenience overload: checks assignability highlighting the whole of {@code elementToHighlight}. */
@Nullable
static HighlightInfo checkAssignability(PsiType lType, PsiType rType, PsiExpression expression, PsiElement elementToHighlight) {
  return checkAssignability(lType, rType, expression, elementToHighlight.getTextRange());
}
@Nullable
public static HighlightInfo checkAssignability(@Nullable PsiType lType, @Nullable PsiType rType, @Nullable PsiExpression expression, TextRange textRange) {
// Core assignability check shared by assignments, initializers, return statements and
// array-index checks. Returns null when assignment is fine; otherwise builds an
// "incompatible types" error over textRange with cast/wrap quick fixes attached.
if (lType == rType) return null;
if (expression == null) {
// No expression available: decide purely on the declared types.
if (rType == null || lType == null || TypeConversionUtil.isAssignable(lType, rType)) return null;
}
else if (TypeConversionUtil.areTypesAssignmentCompatible(lType, expression)) {
if (lType == null || rType == null) return null;
// Assignment-compatible; the only remaining concern is raw-to-generic assignment.
return GenericsHighlightUtil.checkRawToGenericAssignment(lType, rType, expression);
}
if (rType == null) {
// expression is non-null here: the expression == null branch above already returned.
rType = expression.getType();
}
HighlightInfo highlightInfo = createIncompatibleTypeHighlightInfo(lType, rType, textRange);
if (rType != null && expression != null && isCastIntentionApplicable(expression, lType)) {
QuickFixAction.registerQuickFixAction(highlightInfo, new AddTypeCastFix(lType, expression));
}
if (expression != null && lType != null) {
QuickFixAction.registerQuickFixAction(highlightInfo, new WrapExpressionFix(lType, expression));
}
ChangeNewOperatorTypeFix.register(highlightInfo, expression, lType);
return highlightInfo;
}
@Nullable
static HighlightInfo checkReturnStatementType(PsiReturnStatement statement) {
// Validates a return statement against its enclosing method: return outside a method,
// returning a value from void, missing return value, or an incompatible value type.
// Attaches "change method return type" quick fixes where appropriate.
PsiMethod method = null;
PsiElement parent = statement.getParent();
// Climb to the nearest enclosing method; stop at file or class-initializer boundaries.
while (true) {
if (parent instanceof PsiFile) break;
if (parent instanceof PsiClassInitializer) break;
if (parent instanceof PsiMethod) {
method = (PsiMethod)parent;
break;
}
parent = parent.getParent();
}
String description;
// navigationShift moves the caret relative to the statement start when the error is shown.
int navigationShift = 0;
HighlightInfo errorResult = null;
if (method == null && !(parent instanceof JspFile)) {
description = JavaErrorMessages.message("return.outside.method");
errorResult = HighlightInfo.createHighlightInfo(HighlightInfoType.ERROR, statement, description);
}
else {
PsiType returnType = method != null ? method.getReturnType() : null/*JSP page returns void*/;
boolean isMethodVoid = returnType == null || PsiType.VOID.equals(returnType);
final PsiExpression returnValue = statement.getReturnValue();
if (returnValue != null) {
PsiType valueType = returnValue.getType();
if (isMethodVoid) {
// "return expr;" in a void method — offer to change the return type to the value's type.
description = JavaErrorMessages.message("return.from.void.method");
errorResult = HighlightInfo.createHighlightInfo(HighlightInfoType.ERROR, statement, description);
if (valueType != null) {
IntentionAction fix = QUICK_FIX_FACTORY.createMethodReturnFix(method, valueType, true);
QuickFixAction.registerQuickFixAction(errorResult, fix);
}
}
else {
// Non-void method: the returned value must be assignable to the declared return type.
errorResult = checkAssignability(returnType, valueType, returnValue, statement);
if (errorResult != null && valueType != null) {
IntentionAction fix = QUICK_FIX_FACTORY.createMethodReturnFix(method, valueType, true);
QuickFixAction.registerQuickFixAction(errorResult, fix);
// If the method returns T[] and the value is assignable to T, offer to wrap it in an array.
if (returnType instanceof PsiArrayType && TypeConversionUtil.isAssignable(((PsiArrayType)returnType).getComponentType(), valueType)) {
QuickFixAction.registerQuickFixAction(errorResult, new SurroundWithArrayFix(null){
@Override
protected PsiExpression getExpression(final PsiElement element) {
return returnValue.isValid() ? returnValue : null;
}
});
}
}
}
navigationShift = returnValue.getStartOffsetInParent();
}
else {
if (!isMethodVoid) {
// "return;" in a non-void method — offer to make the method void.
description = JavaErrorMessages.message("missing.return.value");
errorResult = HighlightInfo.createHighlightInfo(HighlightInfoType.ERROR, statement, description);
IntentionAction fix = QUICK_FIX_FACTORY.createMethodReturnFix(method, PsiType.VOID, true);
QuickFixAction.registerQuickFixAction(errorResult, fix);
navigationShift = PsiKeyword.RETURN.length();
}
}
}
if (errorResult != null) {
errorResult.navigationShift = navigationShift;
}
return errorResult;
}
/** Builds the "unhandled exception(s): A, B, ..." message text for the given exception types. */
public static String getUnhandledExceptionsDescriptor(Collection<PsiClassType> unhandledExceptions) {
  StringBuilder joined = new StringBuilder();
  for (PsiClassType exceptionType : unhandledExceptions) {
    if (joined.length() > 0) joined.append(", ");
    joined.append(formatType(exceptionType));
  }
  // The message key pluralizes based on the exception count.
  return JavaErrorMessages.message("unhandled.exceptions", joined.toString(), unhandledExceptions.size());
}
@Nullable
static HighlightInfo checkVariableAlreadyDefined(PsiVariable variable) {
// Reports "variable already defined" for duplicate local variables/catch/foreach parameters
// (via a scope walk), duplicate fields (by name within the class), and duplicate
// declarations among siblings for everything else. Offers a "reuse declaration" fix.
if (variable instanceof ExternallyDefinedPsiElement) return null;
boolean isIncorrect = false;
PsiIdentifier identifier = variable.getNameIdentifier();
String name = variable.getName();
if (variable instanceof PsiLocalVariable ||
variable instanceof PsiParameter && ((PsiParameter)variable).getDeclarationScope() instanceof PsiCatchSection ||
variable instanceof PsiParameter && ((PsiParameter)variable).getDeclarationScope() instanceof PsiForeachStatement) {
// Walk up from the identifier to the enclosing method/initializer/file collecting
// conflicting locals and parameters with the same name.
PsiElement scope = PsiTreeUtil.getParentOfType(variable, PsiFile.class, PsiMethod.class, PsiClassInitializer.class);
VariablesNotProcessor proc = new VariablesNotProcessor(variable, false){
protected boolean check(final PsiVariable var, final ResolveState state) {
// Only locals and parameters conflict here; fields are allowed to be shadowed.
return (var instanceof PsiLocalVariable || var instanceof PsiParameter) && super.check(var, state);
}
};
PsiScopesUtil.treeWalkUp(proc, identifier, scope);
if (proc.size() > 0) {
isIncorrect = true;
}
}
else if (variable instanceof PsiField) {
PsiField field = (PsiField)variable;
PsiClass aClass = field.getContainingClass();
if (aClass == null) return null;
// Only fields declared directly in this class conflict (checkBases = false).
PsiField fieldByName = aClass.findFieldByName(name, false);
if (fieldByName != null && fieldByName != field) {
isIncorrect = true;
}
}
else {
// Fallback: compare against sibling variables under the same parent.
PsiElement scope = variable.getParent();
PsiElement[] children = scope.getChildren();
for (PsiElement child : children) {
if (child instanceof PsiVariable) {
if (child.equals(variable)) continue;
if (name.equals(((PsiVariable)child).getName())) {
isIncorrect = true;
break;
}
}
}
}
if (isIncorrect) {
String description = JavaErrorMessages.message("variable.already.defined", name);
HighlightInfo highlightInfo = HighlightInfo.createHighlightInfo(HighlightInfoType.ERROR, identifier, description);
QuickFixAction.registerQuickFixAction(highlightInfo, new ReuseVariableDeclarationFix(variable, identifier));
return highlightInfo;
}
return null;
}
/** Formats a class name for error messages, including the fully qualified name. */
@NotNull
public static String formatClass(@NotNull PsiClass aClass) {
  return formatClass(aClass, /* fqn = */ true);
}
/**
 * Formats a class name for error messages.
 *
 * @param fqn when true, include the fully qualified name
 */
@NotNull
public static String formatClass(@NotNull PsiClass aClass, boolean fqn) {
  int options = PsiFormatUtilBase.SHOW_NAME | PsiFormatUtilBase.SHOW_ANONYMOUS_CLASS_VERBOSE;
  if (fqn) options |= PsiFormatUtilBase.SHOW_FQ_NAME;
  return PsiFormatUtil.formatClass(aClass, options);
}
/** Formats a method signature (name plus parameter types) for error messages. */
@NotNull
public static String formatMethod(@NotNull PsiMethod method) {
  int methodOptions = PsiFormatUtilBase.SHOW_NAME | PsiFormatUtilBase.SHOW_PARAMETERS;
  int parameterOptions = PsiFormatUtilBase.SHOW_TYPE;
  return PsiFormatUtil.formatMethod(method, PsiSubstitutor.EMPTY, methodOptions, parameterOptions);
}
/** Formats a type for error messages; a null type renders as the "null" keyword. */
@NotNull
public static String formatType(@Nullable PsiType type) {
  if (type == null) return PsiKeyword.NULL;
  String canonical = type.getInternalCanonicalText();
  return canonical != null ? canonical : PsiKeyword.NULL;
}
/**
 * Reports checked exceptions thrown by {@code element} that are neither caught nor
 * declared, registering catch/throws/try-catch quick fixes. {@code fixRange} overrides
 * the highlighted range; when null, the element's own range is used.
 */
public static HighlightInfo checkUnhandledExceptions(PsiElement element, TextRange fixRange) {
  List<PsiClassType> unhandled = ExceptionUtil.getUnhandledExceptions(element);
  if (unhandled.isEmpty()) return null;
  HighlightInfoType highlightType = getUnhandledExceptionHighlightType(element);
  if (highlightType == null) return null; // suppressed (e.g. JSP top level)
  TextRange range = fixRange != null ? fixRange : element.getTextRange();
  HighlightInfo info = HighlightInfo.createHighlightInfo(highlightType, range, getUnhandledExceptionsDescriptor(unhandled));
  QuickFixAction.registerQuickFixAction(info, new AddExceptionToCatchFix());
  QuickFixAction.registerQuickFixAction(info, new AddExceptionToThrowsFix(element));
  QuickFixAction.registerQuickFixAction(info, new SurroundWithTryCatchFix(element));
  if (unhandled.size() == 1) {
    QuickFixAction.registerQuickFixAction(info, new GeneralizeCatchFix(element, unhandled.get(0)));
  }
  return info;
}
/**
 * Chooses the highlight type for an unhandled exception, or null to suppress it.
 * JSP top-level code is suppressed because the UnhandledExceptionInJSP inspection
 * reports it instead.
 */
private static HighlightInfoType getUnhandledExceptionHighlightType(final PsiElement element) {
  if (!JspPsiUtil.isInJspFile(element)) return HighlightInfoType.UNHANDLED_EXCEPTION;
  PsiMethod enclosingMethod = PsiTreeUtil.getParentOfType(element, PsiMethod.class);
  if (enclosingMethod instanceof JspHolderMethod) {
    return null;
  }
  return HighlightInfoType.UNHANDLED_EXCEPTION;
}
/**
 * Reports an unlabeled {@code break} that has no enclosing loop or switch.
 * Labeled breaks are validated elsewhere.
 */
@Nullable
static HighlightInfo checkBreakOutsideLoop(PsiBreakStatement statement) {
  if (statement.getLabelIdentifier() != null) {
    // todo labeled
    return null;
  }
  PsiElement enclosing = new PsiMatcherImpl(statement).ancestor(EnclosingLoopOrSwitchMatcherExpression.INSTANCE).getElement();
  if (enclosing != null) return null;
  return HighlightInfo.createHighlightInfo(HighlightInfoType.ERROR, statement, JavaErrorMessages.message("break.outside.switch.or.loop"));
}
/**
 * Reports an unlabeled {@code continue} outside any loop, or a labeled
 * {@code continue} whose label does not denote a loop statement.
 */
@Nullable
static HighlightInfo checkContinueOutsideLoop(PsiContinueStatement statement) {
  PsiIdentifier label = statement.getLabelIdentifier();
  if (label == null) {
    boolean insideLoop = new PsiMatcherImpl(statement).ancestor(EnclosingLoopMatcherExpression.INSTANCE).getElement() != null;
    if (!insideLoop) {
      return HighlightInfo.createHighlightInfo(HighlightInfoType.ERROR, statement, JavaErrorMessages.message("continue.outside.loop"));
    }
    return null;
  }
  PsiStatement continued = statement.findContinuedStatement();
  if (continued == null) return null; // unresolved label reported elsewhere
  boolean targetIsLoop = continued instanceof PsiForStatement ||
                         continued instanceof PsiWhileStatement ||
                         continued instanceof PsiDoWhileStatement ||
                         continued instanceof PsiForeachStatement;
  if (!targetIsLoop) {
    String description = JavaErrorMessages.message("not.loop.label", label.getText());
    return HighlightInfo.createHighlightInfo(HighlightInfoType.ERROR, statement, description);
  }
  return null;
}
/**
 * Reports a modifier keyword that conflicts with another modifier already present in
 * the list (e.g. "abstract final"), offering a quick fix to remove it.
 */
static HighlightInfo checkIllegalModifierCombination(PsiKeyword keyword, PsiModifierList modifierList) {
  @Modifier String modifier = keyword.getText();
  String conflicting = getIncompatibleModifier(modifier, modifierList);
  if (conflicting == null) return null;
  String message = JavaErrorMessages.message("incompatible.modifiers", modifier, conflicting);
  HighlightInfo info = HighlightInfo.createHighlightInfo(HighlightInfoType.ERROR, keyword, message);
  QuickFixAction.registerQuickFixAction(info, QUICK_FIX_FACTORY.createModifierListFix(modifierList, modifier, false, false));
  return info;
}
/**
 * Picks the incompatibility table matching the kind of element that owns the modifier
 * list (interface, class, method, variable, or class initializer), or null when the
 * owner is absent, broken, or of an unsupported kind.
 */
@Nullable
private static Map<String, Set<String>> getIncompatibleModifierMap(PsiModifierList modifierList) {
  PsiElement parent = modifierList.getParent();
  if (parent == null || PsiUtilBase.hasErrorElementChild(parent)) return null;
  if (parent instanceof PsiClass) {
    return ((PsiClass)parent).isInterface() ? ourInterfaceIncompatibleModifiers : ourClassIncompatibleModifiers;
  }
  if (parent instanceof PsiMethod) return ourMethodIncompatibleModifiers;
  if (parent instanceof PsiVariable) return ourFieldIncompatibleModifiers;
  if (parent instanceof PsiClassInitializer) return ourClassInitializerIncompatibleModifiers;
  return null;
}
/**
 * Returns a modifier already in {@code modifierList} that is incompatible with
 * {@code modifier}, or null when there is no conflict or the owner cannot be analyzed.
 */
@Nullable
public static String getIncompatibleModifier(String modifier, PsiModifierList modifierList) {
  PsiElement owner = modifierList.getParent();
  if (owner == null || PsiUtilBase.hasErrorElementChild(owner)) return null;
  Map<String, Set<String>> incompatibleModifierMap = getIncompatibleModifierMap(modifierList);
  return incompatibleModifierMap == null ? null : getIncompatibleModifier(modifier, modifierList, incompatibleModifierMap);
}
@Nullable
public static HighlightInfo checkNotAllowedModifier(PsiKeyword keyword, PsiModifierList modifierList) {
// Reports a modifier that is syntactically illegal for its owner per the JLS rules
// encoded below (e.g. "private" on a top-level class, "static" on a local variable),
// offering a quick fix that removes the modifier.
PsiElement modifierOwner = modifierList.getParent();
if (modifierOwner == null) return null;
if (PsiUtilBase.hasErrorElementChild(modifierOwner)) return null;
@Modifier String modifier = keyword.getText();
final Map<String, Set<String>> incompatibleModifierMap = getIncompatibleModifierMap(modifierList);
if (incompatibleModifierMap == null) return null;
Set<String> incompatibles = incompatibleModifierMap.get(modifier);
// For members, the relevant enclosing scope is the containing class; fall back to the
// syntactic parent (e.g. for local classes whose containing class is null).
PsiElement modifierOwnerParent = modifierOwner instanceof PsiMember ? ((PsiMember)modifierOwner).getContainingClass() : modifierOwner.getParent();
if (modifierOwnerParent == null) modifierOwnerParent = modifierOwner.getParent();
boolean isAllowed = true;
if (modifierOwner instanceof PsiClass) {
PsiClass aClass = (PsiClass)modifierOwner;
if (aClass.isInterface()) {
// Access/static modifiers on an interface are only legal when it is nested in a class.
if (PsiModifier.STATIC.equals(modifier) || PsiModifier.PRIVATE.equals(modifier) || PsiModifier.PROTECTED.equals(modifier) ||
PsiModifier.PACKAGE_LOCAL.equals(modifier)) {
isAllowed = modifierOwnerParent instanceof PsiClass;
}
}
else {
if (PsiModifier.PUBLIC.equals(modifier)) {
isAllowed = modifierOwnerParent instanceof PsiJavaFile || modifierOwnerParent instanceof PsiClass;
}
else if (PsiModifier.STATIC.equals(modifier) || PsiModifier.PRIVATE.equals(modifier) || PsiModifier.PROTECTED.equals(modifier) ||
PsiModifier.PACKAGE_LOCAL.equals(modifier)) {
isAllowed = modifierOwnerParent instanceof PsiClass;
}
if (aClass.isEnum()) {
// Enums are implicitly final and never abstract; spelling either out is illegal.
isAllowed &= !(PsiModifier.FINAL.equals(modifier) || PsiModifier.ABSTRACT.equals(modifier));
}
if (aClass.getContainingClass() instanceof PsiAnonymousClass) {
isAllowed &= !(PsiModifier.PRIVATE.equals(modifier) || PsiModifier.PROTECTED.equals(modifier));
}
}
}
else if (modifierOwner instanceof PsiMethod) {
PsiMethod method = (PsiMethod)modifierOwner;
isAllowed = !(method.isConstructor() && ourConstructorNotAllowedModifiers.contains(modifier));
PsiClass containingClass = method.getContainingClass();
// Enum constructors are implicitly private; public/protected is illegal.
if ((method.hasModifierProperty(PsiModifier.PUBLIC) || method.hasModifierProperty(PsiModifier.PROTECTED)) && method.isConstructor() &&
containingClass != null && containingClass.isEnum()) {
isAllowed = false;
}
if (PsiModifier.PRIVATE.equals(modifier) || PsiModifier.PROTECTED.equals(modifier) || PsiModifier.TRANSIENT.equals(modifier) ||
PsiModifier.STRICTFP.equals(modifier) || PsiModifier.SYNCHRONIZED.equals(modifier)) {
isAllowed &= modifierOwnerParent instanceof PsiClass && !((PsiClass)modifierOwnerParent).isInterface();
}
}
else if (modifierOwner instanceof PsiField) {
if (PsiModifier.PRIVATE.equals(modifier) || PsiModifier.PROTECTED.equals(modifier) || PsiModifier.TRANSIENT.equals(modifier) ||
PsiModifier.STRICTFP.equals(modifier) || PsiModifier.SYNCHRONIZED.equals(modifier)) {
isAllowed = modifierOwnerParent instanceof PsiClass && !((PsiClass)modifierOwnerParent).isInterface();
}
}
else if (modifierOwner instanceof PsiClassInitializer) {
isAllowed = PsiModifier.STATIC.equals(modifier);
}
else if (modifierOwner instanceof PsiLocalVariable || modifierOwner instanceof PsiParameter) {
isAllowed = PsiModifier.FINAL.equals(modifier);
}
// A modifier missing from the incompatibility table is unknown for this owner kind.
isAllowed &= incompatibles != null;
if (!isAllowed) {
String message = JavaErrorMessages.message("modifier.not.allowed", modifier);
HighlightInfo highlightInfo = HighlightInfo.createHighlightInfo(HighlightInfoType.ERROR, keyword, message);
QuickFixAction.registerQuickFixAction(highlightInfo, QUICK_FIX_FACTORY.createModifierListFix(modifierList, modifier, false, false));
return highlightInfo;
}
return null;
}
/** Surfaces a literal's parsing error (malformed number, bad escape, etc.) as an error highlight. */
@Nullable
static HighlightInfo checkLiteralExpressionParsingError(PsiLiteralExpression expression) {
  String parsingError = expression.getParsingError();
  if (parsingError == null) return null;
  return HighlightInfo.createHighlightInfo(HighlightInfoType.ERROR, expression, parsingError);
}
@Nullable
static HighlightInfo checkMustBeBoolean(PsiExpression expr) {
// Checks that an expression used as the condition of if/while/for/do-while has boolean
// type; when the condition is a call to a void method, offers to change the method's
// return type to boolean.
PsiElement parent = expr.getParent();
// Only fire when expr actually is the condition, not some other child of the loop.
if (parent instanceof PsiIfStatement || parent instanceof PsiWhileStatement ||
parent instanceof PsiForStatement && expr.equals(((PsiForStatement)parent).getCondition()) ||
parent instanceof PsiDoWhileStatement && expr.equals(((PsiDoWhileStatement)parent).getCondition())) {
// A trailing error element means the parser already complained; don't double-report.
if (expr.getNextSibling() instanceof PsiErrorElement) return null;
PsiType type = expr.getType();
if (!TypeConversionUtil.isBooleanType(type)) {
final HighlightInfo info = createIncompatibleTypeHighlightInfo(PsiType.BOOLEAN, type, expr.getTextRange());
if (expr instanceof PsiMethodCallExpression) {
final PsiMethodCallExpression methodCall = (PsiMethodCallExpression) expr;
final PsiMethod method = methodCall.resolveMethod();
if (method != null && PsiType.VOID.equals(method.getReturnType())) {
IntentionAction fix = QUICK_FIX_FACTORY.createMethodReturnFix(method, PsiType.BOOLEAN, true);
QuickFixAction.registerQuickFixAction(info, fix);
}
}
return info;
}
}
return null;
}
@Nullable
static HighlightInfo checkExceptionThrownInTry(PsiParameter parameter) {
// Reports a catch clause whose checked exception type can never be thrown by the
// corresponding try block, and offers to delete the catch section.
PsiElement declarationScope = parameter.getDeclarationScope();
if (!(declarationScope instanceof PsiCatchSection)) return null;
PsiTryStatement statement = ((PsiCatchSection)declarationScope).getTryStatement();
// NOTE(review): the try block is passed as both arguments here — presumably the second
// is a "top element" bound for collection; confirm against ExceptionUtil's contract.
Collection<PsiClassType> classes = ExceptionUtil.collectUnhandledExceptions(statement.getTryBlock(), statement.getTryBlock());
PsiType caughtType = parameter.getType();
if (!(caughtType instanceof PsiClassType)) return null;
// Unchecked exceptions (and their superclasses) may always be caught.
if (ExceptionUtil.isUncheckedExceptionOrSuperclass((PsiClassType)caughtType)) return null;
// The catch is legal if any thrown type relates to the caught type in either direction.
for (PsiClassType exceptionType : classes) {
if (exceptionType.isAssignableFrom(caughtType) || caughtType.isAssignableFrom(exceptionType)) return null;
}
String description = JavaErrorMessages.message("exception.never.thrown.try", formatType(caughtType));
HighlightInfo errorResult = HighlightInfo.createHighlightInfo(HighlightInfoType.ERROR, parameter, description);
QuickFixAction.registerQuickFixAction(errorResult, new DeleteCatchFix(parameter));
return errorResult;
}
/** Reports "not a statement" for expression statements with no effect, unless the parser already flagged the code. */
@Nullable
static HighlightInfo checkNotAStatement(PsiStatement statement) {
  if (PsiUtil.isStatement(statement)) return null;
  // Don't pile an error on top of code the parser already marked as broken.
  if (PsiUtilBase.hasErrorElementChild(statement)) return null;
  return HighlightInfo.createHighlightInfo(HighlightInfoType.ERROR, statement, JavaErrorMessages.message("not.a.statement"));
}
/**
 * Validates the type of a switch selector expression; for long/float/double selectors
 * also offers a cast-to-int quick fix. Returns null when the selector is valid or absent.
 */
public static HighlightInfo checkSwitchSelectorType(PsiSwitchStatement statement) {
  PsiExpression selector = statement.getExpression();
  if (selector == null) return null;
  PsiType selectorType = selector.getType();
  if (selectorType == null) return null;
  if (isValidTypeForSwitchSelector(selectorType, selector)) return null;
  String message =
    JavaErrorMessages.message("incompatible.types", JavaErrorMessages.message("valid.switch.selector.types"), formatType(selectorType));
  HighlightInfo info = HighlightInfo.createHighlightInfo(HighlightInfoType.ERROR, selector, message);
  if (PsiType.LONG.equals(selectorType) || PsiType.FLOAT.equals(selectorType) || PsiType.DOUBLE.equals(selectorType)) {
    QuickFixAction.registerQuickFixAction(info, new AddTypeCastFix(PsiType.INT, selector));
  }
  return info;
}
/**
 * True when {@code type} is legal as a switch selector: integral types up to int rank,
 * enum types, or (at language level 7+) java.lang.String.
 */
private static boolean isValidTypeForSwitchSelector(PsiType type, PsiExpression expression) {
  if (TypeConversionUtil.getTypeRank(type) <= TypeConversionUtil.INT_RANK) return true;
  if (!(type instanceof PsiClassType)) return false;
  PsiClass resolved = ((PsiClassType)type).resolve();
  if (resolved == null) return false;
  if (resolved.isEnum()) return true;
  // Strings in switch were introduced in Java 7.
  return PsiUtil.isLanguageLevel7OrHigher(expression) &&
         Comparing.strEqual(resolved.getQualifiedName(), CommonClassNames.JAVA_LANG_STRING);
}
/** Reports "operator not applicable" when a binary operator cannot act on its operand types. */
@Nullable
static HighlightInfo checkBinaryOperatorApplicable(PsiBinaryExpression expression) {
  PsiJavaToken sign = expression.getOperationSign();
  PsiExpression lOperand = expression.getLOperand();
  PsiExpression rOperand = expression.getROperand();
  if (TypeConversionUtil.isBinaryOperatorApplicable(sign.getTokenType(), lOperand, rOperand, false)) return null;
  String message = JavaErrorMessages
    .message("binary.operator.not.applicable", sign.getText(), formatType(lOperand.getType()), formatType(rOperand.getType()));
  return HighlightInfo.createHighlightInfo(HighlightInfoType.ERROR, expression, message);
}
/**
 * Reports "operator not applicable" when a unary operator cannot act on its operand's
 * type; for a misapplied "!" offers the broaden-negation-scope quick fix.
 */
@Nullable
public static HighlightInfo checkUnaryOperatorApplicable(PsiJavaToken token, PsiExpression expression) {
  if (token == null || expression == null) return null;
  if (TypeConversionUtil.isUnaryOperatorApplicable(token, expression)) return null;
  PsiType operandType = expression.getType();
  if (operandType == null) return null;
  String message = JavaErrorMessages.message("unary.operator.not.applicable", token.getText(), formatType(operandType));
  PsiElement operatorExpression = token.getParent();
  HighlightInfo info = HighlightInfo.createHighlightInfo(HighlightInfoType.ERROR, operatorExpression, message);
  if (operatorExpression instanceof PsiPrefixExpression && token.getTokenType() == JavaTokenType.EXCL) {
    QuickFixAction.registerQuickFixAction(info, new NegationBroadScopeFix((PsiPrefixExpression)operatorExpression));
  }
  return info;
}
@Nullable
public static HighlightInfo checkThisOrSuperExpressionInIllegalContext(PsiExpression expr, @Nullable PsiJavaCodeReferenceElement qualifier) {
// Validates this/super expressions: bare "super" must be followed by a dot; a qualifier
// must resolve to a class, not an interface; and the named class must be an enclosing
// instance actually in scope.
if (expr instanceof PsiSuperExpression && !(expr.getParent() instanceof PsiReferenceExpression)) {
// like in 'Object o = super;'
return HighlightInfo.createHighlightInfo(HighlightInfoType.ERROR, expr.getTextRange().getEndOffset(),
expr.getTextRange().getEndOffset() + 1,
JavaErrorMessages.message("dot.expected.after.super.or.this"));
}
// When there is no qualifier, use the nearest enclosing class; otherwise resolve the
// qualifier (note: 'resolved' is assigned inside the conditional expression below).
PsiElement resolved = null;
PsiClass aClass = qualifier == null ? PsiTreeUtil.getParentOfType(expr, PsiClass.class) : (resolved = qualifier.resolve()) instanceof PsiClass ? (PsiClass)resolved : null;
if (resolved != null && !(resolved instanceof PsiClass)) {
return HighlightInfo.createHighlightInfo(HighlightInfoType.ERROR, qualifier, JavaErrorMessages.message("class.expected"));
}
if (aClass == null) return null;
if (qualifier != null && aClass.isInterface()) {
return HighlightInfo.createHighlightInfo(HighlightInfoType.ERROR, qualifier, HighlightClassUtil.NO_INTERFACE_EXPECTED);
}
if (!HighlightClassUtil.hasEnclosingInstanceInScope(aClass, expr, false)) {
return HighlightClassUtil.reportIllegalEnclosingUsage(expr, null, aClass, expr);
}
return null;
}
/** Builds the "non-static X 'name' referenced from a static context" message for the given element. */
static String buildProblemWithStaticDescription(PsiElement refElement) {
  String symbolKind = LanguageFindUsages.INSTANCE.forLanguage(StdLanguages.JAVA).getType(refElement);
  String symbolName = HighlightMessageUtil.getSymbolName(refElement, PsiSubstitutor.EMPTY);
  return JavaErrorMessages.message("non.static.symbol.referenced.from.static.context", symbolKind, symbolName);
}
/**
 * Registers the two complementary fixes for a static-context violation: make the
 * referenced member static, or make the enclosing static context non-static.
 */
static void registerStaticProblemQuickFixAction(@NotNull PsiElement refElement, HighlightInfo errorResult, PsiJavaCodeReferenceElement place) {
  if (refElement instanceof PsiModifierListOwner) {
    QuickFixAction.registerQuickFixAction(
      errorResult, QUICK_FIX_FACTORY.createModifierListFix((PsiModifierListOwner)refElement, PsiModifier.STATIC, true, false));
  }
  // make context non static
  PsiModifierListOwner staticParent = PsiUtil.getEnclosingStaticElement(place, null);
  if (staticParent != null && isInstanceReference(place)) {
    QuickFixAction.registerQuickFixAction(
      errorResult, QUICK_FIX_FACTORY.createModifierListFix(staticParent, PsiModifier.STATIC, false, false));
  }
}
/**
 * Heuristic: does {@code place} refer through an instance rather than a class?
 * Unqualified references count as instance references; a qualifier resolving to a class
 * does not. For unresolvable qualifiers, falls back on the naming convention that a
 * lower-case-initial name denotes a variable rather than a class.
 */
private static boolean isInstanceReference(PsiJavaCodeReferenceElement place) {
  PsiElement qualifier = place.getQualifier();
  if (qualifier == null) return true;
  if (!(qualifier instanceof PsiJavaCodeReferenceElement)) return false;
  PsiElement resolved = ((PsiReference)qualifier).resolve();
  if (resolved instanceof PsiClass) return false;
  if (resolved != null) return true;
  String qname = ((PsiJavaCodeReferenceElement)qualifier).getQualifiedName();
  // Guard the empty-name case (incomplete code): charAt(0) would previously throw
  // StringIndexOutOfBoundsException.
  return qname == null || qname.length() == 0 || !Character.isLowerCase(qname.charAt(0));
}
static String buildProblemWithAccessDescription(PsiJavaCodeReferenceElement reference, JavaResolveResult result) {
// Builds the "'x' has private/protected/package-local access in 'Y'" message for an
// inaccessible symbol, accounting for a package-local class in the qualifier chain.
PsiModifierListOwner refElement = (PsiModifierListOwner)result.getElement();
String symbolName = HighlightMessageUtil.getSymbolName(refElement, result.getSubstitutor());
if (refElement.hasModifierProperty(PsiModifier.PRIVATE)) {
String containerName = HighlightMessageUtil.getSymbolName(refElement.getParent(), result.getSubstitutor());
return JavaErrorMessages.message("private.symbol", symbolName, containerName);
}
else {
if (refElement.hasModifierProperty(PsiModifier.PROTECTED)) {
String containerName = HighlightMessageUtil.getSymbolName(refElement.getParent(), result.getSubstitutor());
return JavaErrorMessages.message("protected.symbol", symbolName, containerName);
}
else {
// Even a public member is inaccessible when reached through a package-local class
// from another package; report that class instead of the member.
PsiClass packageLocalClass = getPackageLocalClassInTheMiddle(reference);
if (packageLocalClass != null) {
refElement = packageLocalClass;
symbolName = HighlightMessageUtil.getSymbolName(refElement, result.getSubstitutor());
}
if (refElement.hasModifierProperty(PsiModifier.PACKAGE_LOCAL) || packageLocalClass != null) {
String containerName = HighlightMessageUtil.getSymbolName(refElement.getParent(), result.getSubstitutor());
return JavaErrorMessages.message("package.local.symbol", symbolName, containerName);
}
else {
// Generic fallback; for type parameters the "container" is the declaring member's owner.
String containerName = HighlightMessageUtil.getSymbolName(
refElement instanceof PsiTypeParameter ? refElement.getParent().getParent() : refElement.getParent(), result.getSubstitutor());
return JavaErrorMessages.message("visibility.access.problem", symbolName, containerName);
}
}
}
}
/**
 * Validates an array access: the qualifier must have an array type and the index must
 * be assignable to int. Returns null when valid or when the code is too incomplete.
 */
@Nullable
static HighlightInfo checkValidArrayAccessExpression(PsiExpression arrayExpression, PsiExpression indexExpression) {
  PsiType arrayExpressionType = arrayExpression == null ? null : arrayExpression.getType();
  if (arrayExpressionType != null && !(arrayExpressionType instanceof PsiArrayType)) {
    String description = JavaErrorMessages.message("array.type.expected", formatType(arrayExpressionType));
    return HighlightInfo.createHighlightInfo(HighlightInfoType.ERROR, arrayExpression, description);
  }
  // Guard against incomplete code like "a[": the index is nullable and the original
  // dereferenced it unconditionally, risking an NPE.
  if (indexExpression == null) return null;
  return checkAssignability(PsiType.INT, indexExpression.getType(), indexExpression, indexExpression);
}
/** For a catch-section parameter, checks that its type is a Throwable subtype; other parameters are ignored. */
@Nullable
public static HighlightInfo checkCatchParameterIsThrowable(PsiParameter parameter) {
  if (!(parameter.getDeclarationScope() instanceof PsiCatchSection)) return null;
  return checkMustBeThrowable(parameter.getType(), parameter, true);
}
  /**
   * Highlights elements of an array initializer that are not assignable to the array's
   * component type. When all elements do share one common type, a "change variable type"
   * quick fix is registered on every reported element.
   */
  public static void checkArrayInitalizer(final PsiExpression initializer, final HighlightInfoHolder holder) {
    if (! (initializer instanceof PsiArrayInitializerExpression)) return;
    final PsiType arrayInitializerType = initializer.getType();
    if (! (arrayInitializerType instanceof PsiArrayType)) return;
    final PsiType componentType = ((PsiArrayType) arrayInitializerType).getComponentType();
    final PsiArrayInitializerExpression arrayInitializer = (PsiArrayInitializerExpression) initializer;
    // The quick fix is computed lazily, at most once, on the first incompatible element.
    boolean arrayTypeFixChecked = false;
    VariableArrayTypeFix fix = null;
    final PsiExpression[] initializers = arrayInitializer.getInitializers();
    for (PsiExpression expression : initializers) {
      final HighlightInfo info = checkArrayInitalizerCompatibleTypes(expression, componentType);
      if (info != null) {
        holder.add(info);
        if (!arrayTypeFixChecked) {
          // Only offer the fix when every element agrees on a single type.
          final PsiType checkResult = sameType(initializers);
          fix = checkResult != null ? new VariableArrayTypeFix(arrayInitializer, checkResult) : null;
          arrayTypeFixChecked = true;
        }
        if (fix != null) {
          QuickFixAction.registerQuickFixAction(info, fix);
        }
      }
    }
  }
@Nullable
private static PsiType getArrayInitializerType(final PsiArrayInitializerExpression element) {
final PsiType typeCheckResult = sameType(element.getInitializers());
if (typeCheckResult != null) {
return typeCheckResult.createArrayType();
}
return null;
}
private static PsiType sameType(PsiExpression[] expressions) {
PsiType type = null;
for (PsiExpression expression : expressions) {
final PsiType currentType;
if (expression instanceof PsiArrayInitializerExpression) {
currentType = getArrayInitializerType((PsiArrayInitializerExpression)expression);
}
else {
currentType = expression.getType();
}
if (type == null) {
type = currentType;
}
else if (!type.equals(currentType)) {
return null;
}
}
return type;
}
@Nullable
private static HighlightInfo checkArrayInitalizerCompatibleTypes(PsiExpression initializer, final PsiType componentType) {
PsiType initializerType = initializer.getType();
if (initializerType == null) {
return HighlightInfo.createHighlightInfo(HighlightInfoType.ERROR, initializer,
JavaErrorMessages.message("illegal.initializer", formatType(componentType)));
}
PsiExpression expression = initializer instanceof PsiArrayInitializerExpression ? null : initializer;
return checkAssignability(componentType, initializerType, expression, initializer);
}
@Nullable
public static HighlightInfo checkExpressionRequired(PsiReferenceExpression expression) {
if (expression.getNextSibling() instanceof PsiErrorElement) return null;
PsiElement resolved = expression.advancedResolve(true).getElement();
if (resolved == null) return null;
PsiElement parent = expression.getParent();
// String.class or String() are both correct
if (parent instanceof PsiReferenceExpression || parent instanceof PsiMethodCallExpression) return null;
if (resolved instanceof PsiVariable) return null;
return HighlightInfo.createHighlightInfo(HighlightInfoType.ERROR, expression, JavaErrorMessages.message("expression.expected"));
}
@Nullable
public static HighlightInfo checkArrayInitializerApplicable(PsiArrayInitializerExpression expression) {
/*
JLS 10.6 Array Initializers
An array initializer may be specified in a declaration, or as part of an array creation expression
*/
PsiElement parent = expression.getParent();
if (parent instanceof PsiVariable) {
PsiVariable variable = (PsiVariable)parent;
if (variable.getType() instanceof PsiArrayType) return null;
}
else if (parent instanceof PsiNewExpression) {
return null;
}
else if (parent instanceof PsiArrayInitializerExpression) {
return null;
}
HighlightInfo info =
HighlightInfo.createHighlightInfo(HighlightInfoType.ERROR, expression, JavaErrorMessages.message("expression.expected"));
QuickFixAction.registerQuickFixAction(info, new AddNewArrayExpressionFix(expression));
return info;
}
  /**
   * Validates a {@code case}/{@code default} label: it must be inside a switch, its
   * constant must be assignable to the switch expression's type, must be a compile-time
   * constant (or an unqualified enum constant), must not duplicate another label, and
   * must be terminated by a colon.
   */
  @Nullable
  public static HighlightInfo checkCaseStatement(PsiSwitchLabelStatement statement) {
    PsiSwitchStatement switchStatement = statement.getEnclosingSwitchStatement();
    if (switchStatement == null) {
      return HighlightInfo
        .createHighlightInfo(HighlightInfoType.ERROR, statement, JavaErrorMessages.message("case.statement.outside.switch"));
    }
    if (switchStatement.getBody() == null) return null;
    PsiExpression switchExpression = switchStatement.getExpression();
    // Incomplete switch without an expression: fall back to int so constant checks still run.
    PsiType switchType = switchExpression == null ? PsiType.INT : switchExpression.getType();
    // check constant expression
    PsiExpression caseValue = statement.getCaseValue();
    // Every case constant expression associated with a switch statement must be assignable ($5.2) to the type of the switch Expression.
    if (caseValue != null && switchExpression != null) {
      HighlightInfo highlightInfo = checkAssignability(switchType, caseValue.getType(), caseValue, caseValue);
      if (highlightInfo != null) return highlightInfo;
    }
    Object value = null;
    boolean isEnumSwitch = false;
    if (!statement.isDefaultCase() && caseValue != null) {
      if (caseValue instanceof PsiReferenceExpression) {
        PsiElement element = ((PsiReferenceExpression)caseValue).resolve();
        if (element instanceof PsiEnumConstant) {
          // Enum labels are compared by constant name rather than by a computed value.
          isEnumSwitch = true;
          value = ((PsiEnumConstant)element).getName();
          // JLS requires the enum constant to be unqualified in a case label.
          if (!(((PsiReferenceExpression)caseValue).getQualifier() == null)) {
            String message = JavaErrorMessages.message("qualified.enum.constant.in.switch");
            return HighlightInfo.createHighlightInfo(HighlightInfoType.ERROR, caseValue, message);
          }
        }
      }
      if (!isEnumSwitch) {
        value = ConstantExpressionUtil.computeCastTo(caseValue, switchType);
      }
      if (value == null) {
        return HighlightInfo
          .createHighlightInfo(HighlightInfoType.ERROR, caseValue, JavaErrorMessages.message("constant.expression.required"));
      }
    }
    // check duplicate
    PsiStatement[] statements = switchStatement.getBody().getStatements();
    for (PsiStatement st : statements) {
      if (st == statement) continue;
      if (!(st instanceof PsiSwitchLabelStatement)) continue;
      PsiSwitchLabelStatement labelStatement = (PsiSwitchLabelStatement)st;
      if (labelStatement.isDefaultCase() != statement.isDefaultCase()) continue;
      PsiExpression caseExpr = labelStatement.getCaseValue();
      if (isEnumSwitch && caseExpr instanceof PsiReferenceExpression) {
        // Enum labels duplicate when they resolve to the same-named constant.
        PsiElement element = ((PsiReferenceExpression)caseExpr).resolve();
        if (!(element instanceof PsiEnumConstant && Comparing.equal(((PsiEnumConstant)element).getName(), value))) continue;
      }
      else {
        // not assignable error already caught
        if (!TypeConversionUtil.areTypesAssignmentCompatible(switchType, caseExpr)) continue;
        if (!Comparing.equal(ConstantExpressionUtil.computeCastTo(caseExpr, switchType), value)) continue;
      }
      String description = statement.isDefaultCase()
                           ? JavaErrorMessages.message("duplicate.default.switch.label")
                           : JavaErrorMessages.message("duplicate.switch.label", value);
      return HighlightInfo.createHighlightInfo(HighlightInfoType.ERROR, value == null ? statement : caseValue, description);
    }
    // must be followed with colon
    PsiElement lastChild = statement.getLastChild();
    while (lastChild instanceof PsiComment || lastChild instanceof PsiWhiteSpace) {
      lastChild = lastChild.getPrevSibling();
    }
    if (!(lastChild instanceof PsiJavaToken && ((PsiJavaToken)lastChild).getTokenType() == JavaTokenType.COLON)) {
      int start = statement.getTextRange().getEndOffset();
      int end = statement.getTextRange().getEndOffset() + 1;
      String description = JavaErrorMessages.message("switch.colon.expected.after.case.label");
      CharSequence chars = statement.getContainingFile().getViewProvider().getContents();
      // Render as an after-end-of-line marker when the missing colon falls at EOF or a line break.
      boolean isAfterEndOfLine = end >= chars.length() || chars.charAt(start) == '\n' || chars.charAt(start) == '\r';
      return HighlightInfo.createHighlightInfo(HighlightInfoType.ERROR, null,start, end, description, description,isAfterEndOfLine, null);
    }
    return null;
  }
  /**
   * Reports an illegal forward reference to a field from a field initializer or an
   * instance/static initializer block of the same class.
   * see JLS 8.3.2.3
   */
  @Nullable
  public static HighlightInfo checkIllegalForwardReferenceToField(PsiReferenceExpression expression, PsiField referencedField) {
    PsiClass containingClass = referencedField.getContainingClass();
    if (containingClass == null) return null;
    // References from another file cannot be forward references.
    if (expression.getContainingFile() != referencedField.getContainingFile()) return null;
    // Only references textually before the end of the field declaration qualify.
    if (expression.getTextRange().getStartOffset() >= referencedField.getTextRange().getEndOffset()) return null;
    // only simple reference can be illegal
    if (expression.getQualifierExpression() != null) return null;
    PsiField initField = findEnclosingFieldInitializer(expression);
    PsiClassInitializer classInitializer = findParentClassInitializer(expression);
    if (initField == null && classInitializer == null) return null;
    // instance initializers may access static fields
    boolean isStaticClassInitializer = classInitializer != null && classInitializer.hasModifierProperty(PsiModifier.STATIC);
    boolean isStaticInitField = initField != null && initField.hasModifierProperty(PsiModifier.STATIC);
    boolean inStaticContext = isStaticInitField || isStaticClassInitializer;
    if (!inStaticContext && referencedField.hasModifierProperty(PsiModifier.STATIC)) return null;
    // Pure assignment to the field (not a read) is permitted by the JLS.
    if (PsiUtil.isOnAssignmentLeftHand(expression) && !PsiUtil.isAccessedForReading(expression)) return null;
    // The restriction only applies when the reference occurs in the field's own class.
    if (!containingClass.getManager().areElementsEquivalent(containingClass, PsiTreeUtil.getParentOfType(expression, PsiClass.class))) {
      return null;
    }
    return HighlightInfo.createHighlightInfo(HighlightInfoType.ERROR, expression, JavaErrorMessages.message("illegal.forward.reference"));
  }
/**
* @return field that has initializer with this element as subexpression or null if not found
*/
@Nullable
static PsiField findEnclosingFieldInitializer(PsiElement element) {
while (element != null) {
PsiElement parent = element.getParent();
if (parent instanceof PsiField) {
PsiField field = (PsiField)parent;
if (element == field.getInitializer()) return field;
if (field instanceof PsiEnumConstant && element == ((PsiEnumConstant)field).getArgumentList()) return field;
}
if (element instanceof PsiClass || element instanceof PsiMethod) return null;
element = parent;
}
return null;
}
@Nullable
private static PsiClassInitializer findParentClassInitializer(PsiElement element) {
while (element != null) {
if (element instanceof PsiClassInitializer) return (PsiClassInitializer)element;
if (element instanceof PsiClass || element instanceof PsiMethod) return null;
element = element.getParent();
}
return null;
}
@Nullable
public static HighlightInfo checkIllegalType(PsiTypeElement typeElement) {
if (typeElement == null || typeElement.getParent() instanceof PsiTypeElement) return null;
if (PsiUtil.isInsideJavadocComment(typeElement)) return null;
PsiType type = typeElement.getType();
PsiType componentType = type.getDeepComponentType();
if (componentType instanceof PsiClassType) {
PsiClass aClass = PsiUtil.resolveClassInType(componentType);
if (aClass == null) {
String canonicalText = type.getCanonicalText();
String description = JavaErrorMessages.message("unknown.class", canonicalText);
return HighlightInfo.createHighlightInfo(HighlightInfoType.ERROR, typeElement, description);
}
}
return null;
}
  /**
   * Reports the {@code void} keyword used in a position where the void type is illegal.
   * Allowed positions: a method's return type, {@code void.class}, incomplete
   * declarations (which already carry a parse error), and code fragments that opted in
   * via {@code PsiUtil.VALID_VOID_TYPE_IN_CODE_FRAGMENT}.
   */
  @Nullable
  public static HighlightInfo checkIllegalVoidType(PsiKeyword type) {
    if (!PsiKeyword.VOID.equals(type.getText())) return null;
    PsiElement parent = type.getParent();
    if (parent instanceof PsiTypeElement) {
      PsiElement typeOwner = parent.getParent();
      if (typeOwner instanceof PsiMethod) {
        // void is legal only as the method's declared return type.
        if (((PsiMethod)typeOwner).getReturnTypeElement() == parent) return null;
      }
      else if (typeOwner instanceof PsiClassObjectAccessExpression &&
               TypeConversionUtil.isVoidType(((PsiClassObjectAccessExpression)typeOwner).getOperand().getType())) {
        // like in Class c = void.class;
        return null;
      }
      else if (typeOwner != null && PsiUtilBase.hasErrorElementChild(typeOwner)) {
        // do not highlight incomplete declarations
        return null;
      }
      else if (typeOwner instanceof JavaCodeFragment) {
        // Code fragments (debugger expressions etc.) may explicitly allow void.
        if (typeOwner.getUserData(PsiUtil.VALID_VOID_TYPE_IN_CODE_FRAGMENT) != null) return null;
      }
    }
    String description = JavaErrorMessages.message("illegal.type.void");
    return HighlightInfo.createHighlightInfo(HighlightInfoType.ERROR, type, description);
  }
  /**
   * Reports a reference to an instance member (field, method, this, or inner class) of
   * the class under construction made before the super()/this() constructor call has
   * completed (JLS 8.8.7.1). Resolves what is being referenced and which class owns it,
   * then delegates the positional check to
   * {@link #checkReferenceToOurInstanceInsideThisOrSuper}.
   */
  @Nullable
  public static HighlightInfo checkMemberReferencedBeforeConstructorCalled(PsiElement expression, PsiElement resolved) {
    PsiClass referencedClass;
    @NonNls String resolvedName;
    PsiType type;
    if (expression instanceof PsiJavaCodeReferenceElement) {
      // redirected ctr
      if (PsiKeyword.THIS.equals(((PsiJavaCodeReferenceElement)expression).getReferenceName())
          && resolved instanceof PsiMethod
          && ((PsiMethod)resolved).isConstructor()) return null;
      PsiElement qualifier = ((PsiJavaCodeReferenceElement)expression).getQualifier();
      type = qualifier instanceof PsiExpression ? ((PsiExpression)qualifier).getType() : null;
      referencedClass = PsiUtil.resolveClassInType(type);
      boolean isSuperCall = isSuperMethodCall(expression.getParent());
      if (resolved == null && isSuperCall) {
        // super(...) with an unresolved target: re-resolve via the qualifier instead.
        if (qualifier instanceof PsiReferenceExpression) {
          resolved = ((PsiReferenceExpression)qualifier).resolve();
          expression = qualifier;
          type = ((PsiReferenceExpression)qualifier).getType();
          referencedClass = PsiUtil.resolveClassInType(type);
        }
        else if (qualifier instanceof PsiThisExpression || qualifier == null) {
          // Unqualified or this-qualified super call: treat the enclosing method as the target.
          resolved = PsiTreeUtil.getParentOfType(expression, PsiMethod.class, true, PsiMember.class);
          expression = qualifier == null ? expression : qualifier;
          if (resolved instanceof PsiMethod) {
            referencedClass = ((PsiMethod)resolved).getContainingClass();
          }
        }
      }
      if (resolved instanceof PsiField) {
        PsiField referencedField = (PsiField)resolved;
        // Static members are always accessible before the constructor call.
        if (referencedField.hasModifierProperty(PsiModifier.STATIC)) return null;
        resolvedName = PsiFormatUtil.formatVariable(referencedField, PsiFormatUtilBase.SHOW_CONTAINING_CLASS | PsiFormatUtilBase.SHOW_NAME, PsiSubstitutor.EMPTY);
        referencedClass = referencedField.getContainingClass();
      }
      else if (resolved instanceof PsiMethod) {
        PsiMethod method = (PsiMethod)resolved;
        if (method.hasModifierProperty(PsiModifier.STATIC)) return null;
        PsiElement nameElement = expression instanceof PsiThisExpression ? expression : ((PsiJavaCodeReferenceElement)expression).getReferenceNameElement();
        String name = nameElement == null ? null : nameElement.getText();
        if (isSuperCall) {
          if (referencedClass == null) return null;
          if (qualifier == null) {
            PsiClass superClass = referencedClass.getSuperClass();
            if (superClass != null
                && PsiUtil.isInnerClass(superClass)
                && InheritanceUtil.isInheritorOrSelf(referencedClass, superClass.getContainingClass(), true)) {
              // by default super() is considered this. - qualified
              resolvedName = PsiKeyword.THIS;
            }
            else {
              return null;
            }
          }
          else {
            resolvedName = qualifier.getText();
          }
        }
        else if (PsiKeyword.THIS.equals(name)) {
          resolvedName = PsiKeyword.THIS;
        }
        else {
          resolvedName = PsiFormatUtil.formatMethod(method, PsiSubstitutor.EMPTY, PsiFormatUtilBase.SHOW_CONTAINING_CLASS |
                                                                                  PsiFormatUtilBase.SHOW_NAME, 0);
          if (referencedClass == null) referencedClass = method.getContainingClass();
        }
      }
      else if (resolved instanceof PsiClass) {
        PsiClass aClass = (PsiClass)resolved;
        if (aClass.hasModifierProperty(PsiModifier.STATIC)) return null;
        // Only references to inner classes (which capture the outer instance) matter.
        referencedClass = aClass.getContainingClass();
        if (referencedClass == null) return null;
        resolvedName = PsiFormatUtil.formatClass(aClass, PsiFormatUtilBase.SHOW_NAME);
      }
      else {
        return null;
      }
    }
    else if (expression instanceof PsiThisExpression) {
      PsiThisExpression thisExpression = (PsiThisExpression)expression;
      type = thisExpression.getType();
      referencedClass = PsiUtil.resolveClassInType(type);
      if (thisExpression.getQualifier() != null) {
        // Qualified this: report it as "Outer.this".
        resolvedName = referencedClass == null
                       ? null
                       : PsiFormatUtil.formatClass(referencedClass, PsiFormatUtilBase.SHOW_CONTAINING_CLASS | PsiFormatUtilBase.SHOW_NAME) + ".this";
      }
      else {
        resolvedName = "this";
      }
    }
    else {
      return null;
    }
    if (referencedClass == null) return null;
    return checkReferenceToOurInstanceInsideThisOrSuper(expression, referencedClass, resolvedName);
  }
  /**
   * Reports the reference when it occurs inside a super()/this() constructor-call
   * argument and points to an instance member of the class under construction (or of
   * one of its superclasses). Returns null when the reference is legal.
   */
  private static HighlightInfo checkReferenceToOurInstanceInsideThisOrSuper(final PsiElement expression,
                                                                            final PsiClass referencedClass,
                                                                            final String resolvedName) {
    // Type arguments (e.g. new Foo<X>(...)) are not evaluated at runtime — always legal.
    if (PsiTreeUtil.getParentOfType(expression, PsiReferenceParameterList.class) != null) return null;
    PsiElement element = expression.getParent();
    while (element != null) {
      // check if expression inside super()/this() call
      if (isSuperOrThisMethodCall(element)) {
        // Matcher chain: the call must be the statement of a constructor body's code block.
        PsiElement parentClass = new PsiMatcherImpl(element)
          .parent(PsiMatchers.hasClass(PsiExpressionStatement.class))
          .parent(PsiMatchers.hasClass(PsiCodeBlock.class))
          .parent(PsiMatchers.hasClass(PsiMethod.class))
          .dot(PsiMatchers.isConstructor(true))
          .parent(PsiMatchers.hasClass(PsiClass.class))
          .getElement();
        if (parentClass == null) {
          return null;
        }
        // only this class/superclasses instance methods are not allowed to call
        PsiClass aClass = (PsiClass)parentClass;
        if (PsiUtil.isInnerClass(aClass) && referencedClass == aClass.getContainingClass()) return null;
        // field or method should be declared in this class or super
        if (!InheritanceUtil.isInheritorOrSelf(aClass, referencedClass, true)) return null;
        // and point to our instance
        if (expression instanceof PsiReferenceExpression &&
            !thisOrSuperReference(((PsiReferenceExpression)expression).getQualifierExpression(), aClass)) {
          return null;
        }
        return createMemberReferencedError(resolvedName, expression.getTextRange());
      }
      element = element.getParent();
    }
    return null;
  }
private static HighlightInfo createMemberReferencedError(@NonNls final String resolvedName, TextRange textRange) {
String description = JavaErrorMessages.message("member.referenced.before.constructor.called", resolvedName);
return HighlightInfo.createHighlightInfo(HighlightInfoType.ERROR, textRange, description);
}
public static HighlightInfo checkImplicitThisReferenceBeforeSuper(PsiClass aClass) {
if (aClass instanceof PsiAnonymousClass) return null;
PsiClass superClass = aClass.getSuperClass();
if (superClass == null || !PsiUtil.isInnerClass(superClass)) return null;
PsiClass outerClass = superClass.getContainingClass();
if (!InheritanceUtil.isInheritorOrSelf(aClass, outerClass, true)) {
return null;
}
// 'this' can be used as an (implicit) super() qualifier
PsiMethod[] constructors = aClass.getConstructors();
if (constructors.length == 0) {
TextRange range = HighlightNamesUtil.getClassDeclarationTextRange(aClass);
return createMemberReferencedError(aClass.getName()+".this", range);
}
for (PsiMethod constructor : constructors) {
if (!isSuperCalledInConstructor(constructor)) {
return createMemberReferencedError(aClass.getName()+".this", HighlightNamesUtil.getMethodDeclarationTextRange(constructor));
}
}
return null;
}
  /**
   * Returns true when the constructor's first statement is an explicit super(...) call.
   * The matcher chain descends: expression statement → method call → reference
   * expression → keyword, and requires the keyword text to be "super".
   */
  private static boolean isSuperCalledInConstructor(final PsiMethod constructor) {
    final PsiCodeBlock body = constructor.getBody();
    if (body == null) return false;
    final PsiStatement[] statements = body.getStatements();
    if (statements.length == 0) return false;
    // Only the very first statement may be an explicit constructor invocation.
    final PsiStatement statement = statements[0];
    final PsiElement element = new PsiMatcherImpl(statement)
      .dot(PsiMatchers.hasClass(PsiExpressionStatement.class))
      .firstChild(PsiMatchers.hasClass(PsiMethodCallExpression.class))
      .firstChild(PsiMatchers.hasClass(PsiReferenceExpression.class))
      .firstChild(PsiMatchers.hasClass(PsiKeyword.class))
      .dot(PsiMatchers.hasText(PsiKeyword.SUPER))
      .getElement();
    return element != null;
  }
private static String getMethodExpressionName(PsiElement element) {
if (!(element instanceof PsiMethodCallExpression)) return null;
PsiReferenceExpression methodExpression = ((PsiMethodCallExpression)element).getMethodExpression();
return methodExpression.getReferenceName();
}
public static boolean isSuperOrThisMethodCall(PsiElement element) {
String name = getMethodExpressionName(element);
return PsiKeyword.SUPER.equals(name) || PsiKeyword.THIS.equals(name);
}
public static boolean isSuperMethodCall(PsiElement element) {
String name = getMethodExpressionName(element);
return PsiKeyword.SUPER.equals(name);
}
private static boolean thisOrSuperReference(PsiExpression qualifierExpression, PsiClass aClass) {
if (qualifierExpression == null) return true;
PsiJavaCodeReferenceElement qualifier;
if (qualifierExpression instanceof PsiThisExpression) {
qualifier = ((PsiThisExpression)qualifierExpression).getQualifier();
}
else if (qualifierExpression instanceof PsiSuperExpression) {
qualifier = ((PsiSuperExpression)qualifierExpression).getQualifier();
}
else {
return false;
}
if (qualifier == null) return true;
PsiElement resolved = qualifier.resolve();
return resolved instanceof PsiClass && InheritanceUtil.isInheritorOrSelf(aClass, (PsiClass)resolved, true);
}
@Nullable
public static HighlightInfo checkLabelWithoutStatement(PsiLabeledStatement statement) {
if (statement.getStatement() == null) {
return HighlightInfo.createHighlightInfo(HighlightInfoType.ERROR, statement, JavaErrorMessages.message("label.without.statement"));
}
return null;
}
@Nullable
public static HighlightInfo checkLabelAlreadyInUse(PsiLabeledStatement statement) {
PsiIdentifier identifier = statement.getLabelIdentifier();
String text = identifier.getText();
PsiElement element = statement;
while (element != null) {
if (element instanceof PsiMethod || element instanceof PsiClass) break;
if (element instanceof PsiLabeledStatement && element != statement &&
Comparing.equal(((PsiLabeledStatement)element).getLabelIdentifier().getText(), text)) {
String description = JavaErrorMessages.message("duplicate.label", text);
return HighlightInfo.createHighlightInfo(HighlightInfoType.ERROR, identifier, description);
}
element = element.getParent();
}
return null;
}
@Nullable
public static HighlightInfo checkUnclosedComment(PsiComment comment) {
if (!(comment instanceof PsiDocComment) && !(comment.getTokenType() == JavaTokenType.C_STYLE_COMMENT)) return null;
if (!comment.getText().endsWith("*/")) {
int start = comment.getTextRange().getEndOffset() - 1;
int end = start + 1;
return HighlightInfo.createHighlightInfo(HighlightInfoType.ERROR, start, end, JavaErrorMessages.message("nonterminated.comment"));
}
return null;
}
  /**
   * Reports a catch clause whose exception class is already fully covered by an earlier
   * catch section of the same try statement (the later clause is unreachable).
   * Registers quick fixes to move the clause up or delete it.
   */
  @Nullable
  public static HighlightInfo checkExceptionAlreadyCaught(PsiJavaCodeReferenceElement element, PsiElement resolved) {
    if (!(resolved instanceof PsiClass)) return null;
    PsiClass catchClass = (PsiClass)resolved;
    // The reference must be the type of a catch-section parameter.
    if (!(element.getParent() instanceof PsiTypeElement)) return null;
    PsiElement catchParameter = element.getParent().getParent();
    if (!(catchParameter instanceof PsiParameter) || !(((PsiParameter)catchParameter).getDeclarationScope() instanceof PsiCatchSection)) {
      return null;
    }
    PsiCatchSection catchSection = (PsiCatchSection)((PsiParameter)catchParameter).getDeclarationScope();
    PsiTryStatement statement = catchSection.getTryStatement();
    PsiCatchSection[] catchSections = statement.getCatchSections();
    int i = ArrayUtil.find(catchSections, catchSection);
    // Scan only sections textually above this one; a later section cannot shadow it.
    for (i--; i >= 0; i--) {
      PsiCatchSection section = catchSections[i];
      PsiType type = section.getCatchType();
      PsiClass upCatchClass = PsiUtil.resolveClassInType(type);
      if (upCatchClass == null) continue;
      // Shadowed when the earlier clause catches this class or a superclass of it.
      if (InheritanceUtil.isInheritorOrSelf(catchClass, upCatchClass, true)) {
        String description = JavaErrorMessages
          .message("exception.already.caught", PsiFormatUtil.formatClass(catchClass, PsiFormatUtilBase.SHOW_NAME |
                                                                                     PsiFormatUtilBase.SHOW_FQ_NAME));
        HighlightInfo highlightInfo = HighlightInfo.createHighlightInfo(HighlightInfoType.ERROR, element, description);
        QuickFixAction.registerQuickFixAction(highlightInfo, new MoveCatchUpFix(catchSection, section));
        QuickFixAction.registerQuickFixAction(highlightInfo, new DeleteCatchFix((PsiParameter)catchParameter));
        return highlightInfo;
      }
    }
    return null;
  }
@Nullable
public static HighlightInfo checkTernaryOperatorConditionIsBoolean(PsiExpression expression) {
if (expression.getParent() instanceof PsiConditionalExpression &&
((PsiConditionalExpression)expression.getParent()).getCondition() == expression && expression.getType() != null &&
!TypeConversionUtil.isBooleanType(expression.getType())) {
PsiType foundType = expression.getType();
return createIncompatibleTypeHighlightInfo(PsiType.BOOLEAN, foundType, expression.getTextRange());
}
return null;
}
@Nullable
public static HighlightInfo checkStatementPrependedWithCaseInsideSwitch(PsiStatement statement) {
if (!(statement instanceof PsiSwitchLabelStatement) && statement.getParent() instanceof PsiCodeBlock &&
statement.getParent().getParent() instanceof PsiSwitchStatement &&
((PsiCodeBlock)statement.getParent()).getStatements().length != 0 &&
statement == ((PsiCodeBlock)statement.getParent()).getStatements()[0]) {
return HighlightInfo
.createHighlightInfo(HighlightInfoType.ERROR, statement, JavaErrorMessages.message("statement.must.be.prepended.with.case.label"));
}
return null;
}
@Nullable
public static HighlightInfo checkAssertOperatorTypes(PsiExpression expression) {
if (!(expression.getParent() instanceof PsiAssertStatement)) {
return null;
}
PsiAssertStatement assertStatement = (PsiAssertStatement)expression.getParent();
PsiType type = expression.getType();
if (type == null) return null;
if (expression == assertStatement.getAssertCondition() && !TypeConversionUtil.isBooleanType(type)) {
// addTypeCast quickfix is not applicable here since no type can be cast to boolean
return createIncompatibleTypeHighlightInfo(PsiType.BOOLEAN, type, expression.getTextRange());
}
else if (expression == assertStatement.getAssertDescription() && TypeConversionUtil.isVoidType(type)) {
String description = JavaErrorMessages.message("void.type.is.not.allowed");
return HighlightInfo.createHighlightInfo(HighlightInfoType.ERROR, expression, description);
}
return null;
}
@Nullable
public static HighlightInfo checkSynchronizedExpressionType(PsiExpression expression) {
if (expression.getParent() instanceof PsiSynchronizedStatement) {
PsiType type = expression.getType();
if (type == null) return null;
PsiSynchronizedStatement synchronizedStatement = (PsiSynchronizedStatement)expression.getParent();
if (expression == synchronizedStatement.getLockExpression() &&
(type instanceof PsiPrimitiveType || TypeConversionUtil.isNullType(type))) {
PsiClassType objectType = PsiType.getJavaLangObject(expression.getManager(), expression.getResolveScope());
return createIncompatibleTypeHighlightInfo(objectType, type, expression.getTextRange());
}
}
return null;
}
@Nullable
public static HighlightInfo checkConditionalExpressionBranchTypesMatch(PsiExpression expression) {
if (!(expression.getParent() instanceof PsiConditionalExpression)) {
return null;
}
PsiConditionalExpression conditionalExpression = (PsiConditionalExpression)expression.getParent();
// check else branches only
if (conditionalExpression.getElseExpression() != expression) return null;
final PsiExpression thenExpression = conditionalExpression.getThenExpression();
assert thenExpression != null;
PsiType thenType = thenExpression.getType();
PsiType elseType = expression.getType();
if (thenType == null || elseType == null) return null;
if (conditionalExpression.getType() == null) {
// cannot derive type of conditional expression
// elsetype will never be castable to thentype, so no quick fix here
return createIncompatibleTypeHighlightInfo(thenType, elseType, expression.getTextRange());
}
return null;
}
private static HighlightInfo createIncompatibleTypeHighlightInfo(final PsiType lType, final PsiType rType, final TextRange textRange) {
PsiType lType1 = lType;
PsiType rType1 = rType;
PsiTypeParameter[] lTypeParams = PsiTypeParameter.EMPTY_ARRAY;
PsiSubstitutor lTypeSubstitutor = PsiSubstitutor.EMPTY;
if (lType1 instanceof PsiClassType) {
PsiClassType.ClassResolveResult resolveResult = ((PsiClassType)lType1).resolveGenerics();
lTypeSubstitutor = resolveResult.getSubstitutor();
PsiClass psiClass = resolveResult.getElement();
if (psiClass instanceof PsiAnonymousClass) {
lType1 = ((PsiAnonymousClass)psiClass).getBaseClassType();
resolveResult = ((PsiClassType)lType1).resolveGenerics();
lTypeSubstitutor = resolveResult.getSubstitutor();
psiClass = resolveResult.getElement();
}
lTypeParams = psiClass == null ? PsiTypeParameter.EMPTY_ARRAY : psiClass.getTypeParameters();
}
PsiTypeParameter[] rTypeParams = PsiTypeParameter.EMPTY_ARRAY;
PsiSubstitutor rTypeSubstitutor = PsiSubstitutor.EMPTY;
if (rType1 instanceof PsiClassType) {
PsiClassType.ClassResolveResult resolveResult = ((PsiClassType)rType1).resolveGenerics();
rTypeSubstitutor = resolveResult.getSubstitutor();
PsiClass psiClass = resolveResult.getElement();
if (psiClass instanceof PsiAnonymousClass) {
rType1 = ((PsiAnonymousClass)psiClass).getBaseClassType();
resolveResult = ((PsiClassType)rType1).resolveGenerics();
rTypeSubstitutor = resolveResult.getSubstitutor();
psiClass = resolveResult.getElement();
}
rTypeParams = psiClass == null ? PsiTypeParameter.EMPTY_ARRAY : psiClass.getTypeParameters();
}
int typeParamColumns = Math.max(lTypeParams.length, rTypeParams.length);
@Language("HTML") @NonNls String requiredRow = "";
@Language("HTML") @NonNls String foundRow = "";
for (int i = 0; i < typeParamColumns; i++) {
PsiTypeParameter lTypeParameter = i >= lTypeParams.length ? null : lTypeParams[i];
PsiTypeParameter rTypeParameter = i >= rTypeParams.length ? null : rTypeParams[i];
PsiType lSubstitutedType = lTypeParameter == null ? null : lTypeSubstitutor.substitute(lTypeParameter);
PsiType rSubstitutedType = rTypeParameter == null ? null : rTypeSubstitutor.substitute(rTypeParameter);
boolean matches = Comparing.equal(lSubstitutedType, rSubstitutedType);
@NonNls String openBrace = i == 0 ? "<" : "";
@NonNls String closeBrace = i == typeParamColumns - 1 ? ">" : ",";
requiredRow += "<td>" + (lTypeParams.length == 0 ? "" : openBrace) + redIfNotMatch(lSubstitutedType, matches) +
(i < lTypeParams.length ? closeBrace : "") + "</td>";
foundRow += "<td>" + (rTypeParams.length == 0 ? "" : openBrace) + redIfNotMatch(rSubstitutedType, matches) +
(i < rTypeParams.length ? closeBrace : "") + "</td>";
}
PsiType lRawType = lType1 instanceof PsiClassType ? ((PsiClassType)lType1).rawType() : lType1;
PsiType rRawType = rType1 instanceof PsiClassType ? ((PsiClassType)rType1).rawType() : rType1;
boolean assignable = lRawType == null || rRawType == null || TypeConversionUtil.isAssignable(lRawType, rRawType);
String toolTip = JavaErrorMessages.message("incompatible.types.html.tooltip",
redIfNotMatch(lRawType, assignable), requiredRow,
redIfNotMatch(rRawType, assignable), foundRow);
String description = JavaErrorMessages.message("incompatible.types", formatType(lType1), formatType(rType1));
return HighlightInfo.createHighlightInfo(HighlightInfoType.ERROR, null, textRange.getStartOffset(), textRange.getEndOffset(),
description, toolTip);
}
@Nullable
public static HighlightInfo checkSingleImportClassConflict(PsiImportStatement statement,
Map<String, Pair<PsiImportStatementBase, PsiClass>> singleImportedClasses) {
if (statement.isOnDemand()) return null;
PsiElement element = statement.resolve();
if (element instanceof PsiClass) {
String name = ((PsiClass)element).getName();
Pair<PsiImportStatementBase, PsiClass> imported = singleImportedClasses.get(name);
PsiClass importedClass = imported == null ? null : imported.getSecond();
if (importedClass != null && !element.getManager().areElementsEquivalent(importedClass, element)) {
String description = JavaErrorMessages.message("single.import.class.conflict", formatClass(importedClass));
return HighlightInfo.createHighlightInfo(HighlightInfoType.ERROR, statement, description);
}
singleImportedClasses.put(name, Pair.<PsiImportStatementBase, PsiClass>create(statement, (PsiClass)element));
}
return null;
}
@NonNls
private static String redIfNotMatch(PsiType type, boolean matches) {
if (matches) return getFQName(type, false);
return "<font color=red><b>" + getFQName(type, true) + "</b></font>";
}
private static String getFQName(PsiType type, boolean longName) {
if (type == null) return "";
return XmlStringUtil.escapeString(longName ? type.getInternalCanonicalText() : type.getPresentableText());
}
/**
 * Reports an incompatible-type error when {@code type} is not assignable to
 * {@code java.lang.Throwable}. When {@code addCastIntention} is set and a cast
 * would make the expression convertible, an add-cast quick fix is registered.
 */
@Nullable
public static HighlightInfo checkMustBeThrowable(PsiType type, PsiElement context, boolean addCastIntention) {
  if (type == null) return null;
  PsiElementFactory factory = JavaPsiFacade.getInstance(context.getProject()).getElementFactory();
  PsiClassType throwable = factory.createTypeByFQClassName("java.lang.Throwable", context.getResolveScope());
  if (TypeConversionUtil.isAssignable(throwable, type)) return null;
  HighlightInfo highlightInfo = createIncompatibleTypeHighlightInfo(throwable, type, context.getTextRange());
  boolean castWouldHelp = addCastIntention && TypeConversionUtil.areTypesConvertible(type, throwable);
  if (castWouldHelp && context instanceof PsiExpression) {
    QuickFixAction.registerQuickFixAction(highlightInfo, new AddTypeCastFix(throwable, (PsiExpression)context));
  }
  return highlightInfo;
}
// Convenience overload: wraps the class into a class type and delegates to the
// type-based check (without the add-cast intention).
@Nullable
private static HighlightInfo checkMustBeThrowable(PsiClass aClass, PsiElement context) {
  if (aClass == null) return null;
  PsiElementFactory factory = JavaPsiFacade.getInstance(aClass.getProject()).getElementFactory();
  return checkMustBeThrowable(factory.createType(aClass), context, false);
}
/**
 * Reports an "unresolved label" error for a break/continue label whose target
 * statement could not be found; returns null when the label resolves.
 */
@Nullable
public static HighlightInfo checkLabelDefined(PsiIdentifier labelIdentifier, PsiStatement exitedStatement) {
  if (labelIdentifier == null) return null;
  String label = labelIdentifier.getText();
  if (label == null || exitedStatement != null) return null;
  String message = JavaErrorMessages.message("unresolved.label", label);
  return HighlightInfo.createHighlightInfo(HighlightInfoType.ERROR, labelIdentifier, message);
}
/**
 * Checks a java code reference for highlighting problems: ambiguous or
 * unresolvable symbols, inaccessible members, references to instance members
 * from a wrong (static) context, and captured local variables/parameters that
 * must be final.
 *
 * @param ref the reference being checked
 * @param result the resolve result for {@code ref}
 * @param resolved the element {@code ref} resolved to, or null when unresolved
 * @return a highlight describing the problem, or null when the reference is fine
 */
@Nullable
public static HighlightInfo checkReference(PsiJavaCodeReferenceElement ref, JavaResolveResult result, PsiElement resolved) {
  PsiElement refName = ref.getReferenceNameElement();
  if (!(refName instanceof PsiIdentifier) && !(refName instanceof PsiKeyword)) return null;
  HighlightInfo highlightInfo = checkMemberReferencedBeforeConstructorCalled(ref, resolved);
  if (highlightInfo != null) return highlightInfo;
  PsiElement refParent = ref.getParent();
  if (refParent instanceof PsiMethodCallExpression) {
    return null; // methods checked elsewhere
  }
  if (resolved == null) {
    // do not highlight unknown packages - javac does not care about illegal package names
    if (isInsidePackageStatement(refName)) return null;
    if (result.isPackagePrefixPackageReference()) return null;
    // Distinguish "ambiguous" (several candidates) from "cannot resolve" (none).
    JavaResolveResult[] results = ref.multiResolve(true);
    String description;
    if (results.length > 1) {
      String t1 = format(results[0].getElement());
      String t2 = format(results[1].getElement());
      description = JavaErrorMessages.message("ambiguous.reference", refName.getText(), t1, t2);
    }
    else {
      description = JavaErrorMessages.message("cannot.resolve.symbol", refName.getText());
    }
    HighlightInfoType type = HighlightInfoType.WRONG_REF;
    // References inside javadoc are not errors.
    if (PsiUtil.isInsideJavadocComment(ref)) return null;
    HighlightInfo info = HighlightInfo.createHighlightInfo(type, refName, description);
    UnresolvedReferenceQuickFixProvider.registerReferenceFixes(ref, new QuickFixActionRegistrarImpl(info));
    return info;
  }
  if (!result.isValidResult() && !PsiUtil.isInsideJavadocComment(ref)) {
    if (!result.isAccessible()) {
      // Resolved, but not visible from here: offer access-modifier / rename fixes.
      String description = buildProblemWithAccessDescription(ref, result);
      HighlightInfo info = HighlightInfo.createHighlightInfo(HighlightInfoType.WRONG_REF, ref.getReferenceNameElement(), description);
      if (result.isStaticsScopeCorrect()) {
        registerAccessQuickFixAction((PsiMember)resolved, ref, info, result.getCurrentFileResolveScope());
        if (ref instanceof PsiReferenceExpression) {
          QuickFixAction.registerQuickFixAction(info, new RenameWrongRefFix((PsiReferenceExpression)ref));
        }
      }
      return info;
    }
    if (!result.isStaticsScopeCorrect()) {
      // e.g. instance member referenced from a static context.
      String description = buildProblemWithStaticDescription(resolved);
      HighlightInfo info = HighlightInfo.createHighlightInfo(HighlightInfoType.WRONG_REF, ref.getReferenceNameElement(), description);
      registerStaticProblemQuickFixAction(resolved, info, ref);
      if (ref instanceof PsiReferenceExpression) {
        QuickFixAction.registerQuickFixAction(info, new RenameWrongRefFix((PsiReferenceExpression)ref));
      }
      return info;
    }
  }
  // Locals/parameters captured by inner contexts must be (effectively) final.
  if ((resolved instanceof PsiLocalVariable || resolved instanceof PsiParameter) && !(resolved instanceof ImplicitVariable)) {
    highlightInfo = HighlightControlFlowUtil.checkVariableMustBeFinal((PsiVariable)resolved, ref);
  }
  return highlightInfo;
}
// Human-readable description of a resolve candidate, used in the
// "ambiguous reference" error message.
private static String format(PsiElement candidate) {
  if (candidate instanceof PsiClass) {
    return formatClass((PsiClass)candidate);
  }
  if (candidate instanceof PsiMethod) {
    return formatMethod((PsiMethod)candidate);
  }
  return ElementDescriptionUtil.getElementDescription(candidate, HighlightUsagesDescriptionLocation.INSTANCE);
}
// Walks up through identifiers and code references only; returns true as soon
// as a package statement encloses the element, false when anything else
// interrupts the chain.
private static boolean isInsidePackageStatement(PsiElement element) {
  for (PsiElement current = element; current != null; current = current.getParent()) {
    if (current instanceof PsiPackageStatement) return true;
    boolean partOfReference = current instanceof PsiIdentifier || current instanceof PsiJavaCodeReferenceElement;
    if (!partOfReference) return false;
  }
  return false;
}
/**
 * Validates a single reference inside a reference list: a class's
 * extends/implements clause, a type parameter's bound list, or a method's
 * throws list.
 *
 * @param ref the reference being checked
 * @param referenceList the list the reference appears in
 * @param resolveResult resolve result for {@code ref}
 * @return a highlight describing the problem, or null when the reference is fine
 */
@Nullable
public static HighlightInfo checkElementInReferenceList(PsiJavaCodeReferenceElement ref,
                                                        PsiReferenceList referenceList,
                                                        JavaResolveResult resolveResult) {
  PsiElement resolved = resolveResult.getElement();
  HighlightInfo highlightInfo = null;
  PsiElement refGrandParent = referenceList.getParent();
  if (resolved instanceof PsiClass) {
    PsiClass aClass = (PsiClass)resolved;
    if (refGrandParent instanceof PsiClass) {
      if (refGrandParent instanceof PsiTypeParameter) {
        // Bound list of a type parameter.
        highlightInfo = GenericsHighlightUtil.checkElementInTypeParameterExtendsList(referenceList, resolveResult, ref);
      }
      else {
        // extends/implements clause: run the inheritance checks in order,
        // stopping at the first problem found.
        highlightInfo = HighlightClassUtil.checkExtendsClassAndImplementsInterface(referenceList, resolveResult, ref);
        if (highlightInfo == null) {
          highlightInfo = HighlightClassUtil.checkCannotInheritFromFinal(aClass, ref);
        }
        if (highlightInfo == null) {
          highlightInfo = GenericsHighlightUtil.checkCannotInheritFromEnum(aClass, ref);
        }
        if (highlightInfo == null) {
          highlightInfo = GenericsHighlightUtil.checkCannotInheritFromTypeParameter(aClass, ref);
        }
      }
    }
    else if (refGrandParent instanceof PsiMethod && ((PsiMethod)refGrandParent).getThrowsList() == referenceList) {
      // Classes in a throws list must extend Throwable.
      highlightInfo = checkMustBeThrowable(aClass, ref);
    }
  }
  else if (refGrandParent instanceof PsiMethod && referenceList == ((PsiMethod)refGrandParent).getThrowsList()) {
    // Something that is not a class at all in a throws list.
    highlightInfo = HighlightInfo.createHighlightInfo(HighlightInfoType.ERROR, ref, JavaErrorMessages.message("class.name.expected"));
  }
  return highlightInfo;
}
/** Returns true when the class (transitively) inherits from {@code java.io.Serializable}. */
public static boolean isSerializable(PsiClass aClass) {
  PsiManager manager = aClass.getManager();
  JavaPsiFacade facade = JavaPsiFacade.getInstance(manager.getProject());
  PsiClass serializable = facade.findClass("java.io.Serializable", aClass.getResolveScope());
  return serializable != null && aClass.isInheritor(serializable, true);
}
/**
 * Returns true for the static {@code serialVersionUID} /
 * {@code serialPersistentFields} fields of a serializable class (or of a field
 * with no containing class), which serialization uses reflectively.
 */
public static boolean isSerializationImplicitlyUsedField(PsiField field) {
  String name = field.getName();
  boolean serialName = SERIAL_VERSION_UID_FIELD_NAME.equals(name) || SERIAL_PERSISTENT_FIELDS_FIELD_NAME.equals(name);
  if (!serialName || !field.hasModifierProperty(PsiModifier.STATIC)) return false;
  PsiClass containingClass = field.getContainingClass();
  return containingClass == null || isSerializable(containingClass);
}
/**
 * Reports an error when a class reference is qualified by an expression that
 * is neither a class nor a package (e.g. {@code someInstance.NestedClass}).
 * Registers a quick fix that removes the bogus qualifier.
 */
public static HighlightInfo checkClassReferenceAfterQualifier(final PsiReferenceExpression expression, final PsiElement resolved) {
  if (!(resolved instanceof PsiClass)) return null;
  final PsiExpression qualifier = expression.getQualifierExpression();
  if (qualifier == null) return null;
  if (qualifier instanceof PsiReferenceExpression) {
    PsiElement qualifierTarget = ((PsiReferenceExpression)qualifier).resolve();
    if (qualifierTarget instanceof PsiClass || qualifierTarget instanceof PsiPackage) return null;
  }
  String description = JavaErrorMessages.message("expected.class.or.package");
  HighlightInfo info = HighlightInfo.createHighlightInfo(HighlightInfoType.ERROR, qualifier, description);
  QuickFixAction.registerQuickFixAction(info, new RemoveQualifierFix(qualifier, expression, (PsiClass)resolved));
  return info;
}
// Asks every registered ChangeVariableTypeQuickFixProvider extension for fixes
// that would change the variable's type to accept itemType, and attaches each
// one to the given highlight.
public static void registerChangeVariableTypeFixes(PsiVariable parameter, PsiType itemType, HighlightInfo highlightInfo) {
  for (ChangeVariableTypeQuickFixProvider provider : Extensions.getExtensions(ChangeVariableTypeQuickFixProvider.EP_NAME)) {
    for (IntentionAction fix : provider.getFixes(parameter, itemType)) {
      QuickFixAction.registerQuickFixAction(highlightInfo, fix, null);
    }
  }
}
}
| joewalnes/idea-community | java/java-impl/src/com/intellij/codeInsight/daemon/impl/analysis/HighlightUtil.java | Java | apache-2.0 | 95,504 |
/*
* Copyright (C) 2014 Stratio (http://stratio.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.stratio.cassandra.lucene.testsAT.varia;
import com.stratio.cassandra.lucene.testsAT.BaseIT;
import com.stratio.cassandra.lucene.testsAT.util.CassandraUtils;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import java.util.ArrayList;
import java.util.List;
import static com.stratio.cassandra.lucene.builder.Builder.all;
import static com.stratio.cassandra.lucene.builder.Builder.field;
/**
* Test the retrieval of large volumes of rows, specially above 65535 rows.
*
* @author Andres de la Pena <adelapena@stratio.com>
*/
public class SearchMatchingManyRowsIT extends BaseIT {

    private static CassandraUtils utils;

    /**
     * Creates the keyspace, table and index, then writes
     * {@code numPartitions * partitionSize} rows (66600), comfortably above the
     * 65535 threshold these tests exercise.
     */
    @BeforeClass
    public static void before() {
        utils = CassandraUtils.builder("search_matching_many_rows")
                              .withPartitionKey("pk")
                              .withClusteringKey("ck")
                              .withColumn("pk", "int")
                              .withColumn("ck", "int")
                              .withColumn("rc", "int")
                              .build()
                              .createKeyspace()
                              .createTable()
                              .createIndex();
        int numPartitions = 666;
        int partitionSize = 100;
        String[] names = new String[]{"pk", "ck", "rc"};
        for (int pk = 0; pk < numPartitions; pk++) {
            List<Object[]> values = new ArrayList<>();
            // Fixed off-by-one: the previous 'ck <= partitionSize' wrote
            // partitionSize + 1 rows per partition, contradicting the variable
            // name. Each partition now holds exactly partitionSize rows.
            for (int ck = 0; ck < partitionSize; ck++) {
                values.add(new Object[]{pk, ck, pk * partitionSize + ck});
            }
            utils.insert(names, values);
        }
        utils.refresh();
    }

    /** Drops everything created by {@link #before()}. */
    @AfterClass
    public static void after() {
        utils.dropIndex().dropTable().dropKeyspace();
    }

    /** A relevance query should page through more than 65535 rows. */
    @Test
    public void testQuery() {
        utils.query(all()).fetchSize(500).limit(65536).check(65536);
    }

    /** A filter should page through more than 65535 rows. */
    @Test
    public void testFilter() {
        utils.filter(all()).fetchSize(500).limit(65536).check(65536);
    }

    /** A sorted search should page through more than 65535 rows. */
    @Test
    public void testSort() {
        utils.sort(field("rc")).fetchSize(500).limit(65536).check(65536);
    }
}
| adelapena/cassandra-lucene-index | testsAT/src/test/java/com/stratio/cassandra/lucene/testsAT/varia/SearchMatchingManyRowsIT.java | Java | apache-2.0 | 2,800 |
'use strict';
/**
* Removes server error when user updates input
*/
/**
 * Attribute directive that clears the server-side (mongoose) validation error
 * on an input as soon as the user starts typing again.
 */
angular.module('fluxApp')
  .directive('mongooseError', function () {
    // Mark the model valid again for the 'mongoose' key.
    var clearServerError = function (ngModel) {
      return ngModel.$setValidity('mongoose', true);
    };
    return {
      restrict: 'A',
      require: 'ngModel',
      link: function (scope, element, attrs, ngModel) {
        element.on('keydown', function () {
          return clearServerError(ngModel);
        });
      }
    };
  });
/*
* Copyright 2014-present Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License. You may obtain
* a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package com.facebook.buck.cxx;
import com.facebook.buck.core.cell.CellPathResolver;
import com.facebook.buck.core.model.BuildTarget;
import com.facebook.buck.core.rulekey.AddToRuleKey;
import com.facebook.buck.core.rules.ActionGraphBuilder;
import com.facebook.buck.core.rules.BuildRuleResolver;
import com.facebook.buck.core.rules.SourcePathRuleFinder;
import com.facebook.buck.core.sourcepath.SourcePath;
import com.facebook.buck.core.sourcepath.resolver.SourcePathResolver;
import com.facebook.buck.core.sourcepath.resolver.impl.DefaultSourcePathResolver;
import com.facebook.buck.core.util.log.Logger;
import com.facebook.buck.cxx.toolchain.CxxBuckConfig;
import com.facebook.buck.cxx.toolchain.CxxPlatform;
import com.facebook.buck.cxx.toolchain.LinkerMapMode;
import com.facebook.buck.cxx.toolchain.linker.HasImportLibrary;
import com.facebook.buck.cxx.toolchain.linker.HasLinkerMap;
import com.facebook.buck.cxx.toolchain.linker.HasThinLTO;
import com.facebook.buck.cxx.toolchain.linker.Linker;
import com.facebook.buck.cxx.toolchain.linker.Linker.ExtraOutputsDeriver;
import com.facebook.buck.cxx.toolchain.linker.Linker.LinkableDepType;
import com.facebook.buck.cxx.toolchain.nativelink.NativeLinkable;
import com.facebook.buck.cxx.toolchain.nativelink.NativeLinkableInput;
import com.facebook.buck.cxx.toolchain.nativelink.NativeLinkables;
import com.facebook.buck.io.file.MorePaths;
import com.facebook.buck.io.filesystem.ProjectFilesystem;
import com.facebook.buck.rules.args.AddsToRuleKeyFunction;
import com.facebook.buck.rules.args.Arg;
import com.facebook.buck.rules.args.SanitizedArg;
import com.facebook.buck.rules.args.SourcePathArg;
import com.facebook.buck.rules.args.StringArg;
import com.facebook.buck.rules.coercer.FrameworkPath;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.ImmutableSortedSet;
import com.google.common.collect.Ordering;
import com.google.common.collect.Streams;
import java.nio.file.Path;
import java.util.EnumSet;
import java.util.Objects;
import java.util.Optional;
import java.util.function.Consumer;
import java.util.stream.Collectors;
import java.util.stream.Stream;
/** Static helpers that assemble {@link CxxLink} build rules from linker inputs. */
public class CxxLinkableEnhancer {
  private static final Logger LOG = Logger.get(CxxLinkableEnhancer.class);

  // Link types for which callers may supply a soname (see the checkState in
  // the deps-based createCxxLinkableBuildRule overload).
  private static final EnumSet<Linker.LinkType> SONAME_REQUIRED_LINK_TYPES =
      EnumSet.of(Linker.LinkType.SHARED, Linker.LinkType.MACH_O_BUNDLE);

  // Utility class doesn't instantiate.
  private CxxLinkableEnhancer() {}
  /**
   * Builds a {@link CxxLink} rule from an already-assembled list of linker args,
   * prepending linker-map / thinLTO / import-library flags and the platform's
   * ldflags, and appending the platform runtime ldflags for {@code runtimeDepType}.
   *
   * @param extraOutputs named extra outputs requested by the caller; the linker
   *     may derive additional ones from the final arg list
   */
  public static CxxLink createCxxLinkableBuildRule(
      CellPathResolver cellPathResolver,
      CxxBuckConfig cxxBuckConfig,
      CxxPlatform cxxPlatform,
      ProjectFilesystem projectFilesystem,
      BuildRuleResolver ruleResolver,
      SourcePathRuleFinder ruleFinder,
      BuildTarget target,
      Path output,
      ImmutableMap<String, Path> extraOutputs,
      ImmutableList<Arg> args,
      LinkableDepType runtimeDepType,
      CxxLinkOptions linkOptions,
      Optional<LinkOutputPostprocessor> postprocessor) {
    Linker linker = cxxPlatform.getLd().resolve(ruleResolver);

    // Build up the arguments to pass to the linker.
    ImmutableList.Builder<Arg> argsBuilder = ImmutableList.builder();

    // Add flags to generate linker map if supported.
    if (linker instanceof HasLinkerMap && LinkerMapMode.isLinkerMapEnabledForBuildTarget(target)) {
      argsBuilder.addAll(((HasLinkerMap) linker).linkerMap(output));
    }

    // Add lto object path if thin LTO is on.
    if (linker instanceof HasThinLTO && linkOptions.getThinLto()) {
      argsBuilder.addAll(((HasThinLTO) linker).thinLTO(output));
    }

    // Add import-library flags when the linker supports them.
    if (linker instanceof HasImportLibrary) {
      argsBuilder.addAll(((HasImportLibrary) linker).importLibrary(output));
    }

    // Pass any platform specific or extra linker flags.
    argsBuilder.addAll(
        SanitizedArg.from(
            cxxPlatform.getCompilerDebugPathSanitizer().sanitize(Optional.empty()),
            cxxPlatform.getLdflags()));
    argsBuilder.addAll(args);

    // Add all arguments needed to link in the C/C++ platform runtime.
    argsBuilder.addAll(StringArg.from(cxxPlatform.getRuntimeLdflags().get(runtimeDepType)));
    ImmutableList<Arg> ldArgs = argsBuilder.build();

    // Let the linker derive further named outputs from the stringified args.
    ImmutableMap<String, Path> allExtraOutputs = extraOutputs;
    Optional<ExtraOutputsDeriver> extraOutputsDeriver = linker.getExtraOutputsDeriver();
    if (extraOutputsDeriver.isPresent()) {
      ImmutableMap<String, Path> derivedExtraOutputs =
          extraOutputsDeriver
              .get()
              .deriveExtraOutputsFromArgs(
                  Arg.stringify(ldArgs, DefaultSourcePathResolver.from(ruleFinder)), output);
      if (!derivedExtraOutputs.isEmpty()) {
        allExtraOutputs =
            ImmutableMap.<String, Path>builder()
                .putAll(extraOutputs)
                .putAll(derivedExtraOutputs)
                .build();
      }
    }

    return new CxxLink(
        target,
        projectFilesystem,
        ruleFinder,
        cellPathResolver,
        linker,
        output,
        allExtraOutputs,
        ldArgs,
        postprocessor,
        cxxBuckConfig.getLinkScheduleInfo(),
        cxxBuckConfig.shouldCacheLinks(),
        linkOptions.getThinLto());
  }
  /**
   * Construct a {@link CxxLink} rule that builds a native linkable from top-level input objects and
   * a dependency tree of {@link NativeLinkable} dependencies.
   *
   * @param nativeLinkableDeps library dependencies that the linkable links in
   * @param immediateLinkableInput framework and libraries of the linkable itself
   * @param cellPathResolver resolver for cell-qualified paths, forwarded to the link rule
   * @param blacklist build targets whose linkable input must be excluded from the link
   * @param linkWholeDeps targets whose input is requested with link-whole semantics
   */
  public static CxxLink createCxxLinkableBuildRule(
      CxxBuckConfig cxxBuckConfig,
      CxxPlatform cxxPlatform,
      ProjectFilesystem projectFilesystem,
      ActionGraphBuilder graphBuilder,
      SourcePathResolver resolver,
      SourcePathRuleFinder ruleFinder,
      BuildTarget target,
      Linker.LinkType linkType,
      Optional<String> soname,
      Path output,
      ImmutableList<String> extraOutputNames,
      Linker.LinkableDepType depType,
      CxxLinkOptions linkOptions,
      Iterable<? extends NativeLinkable> nativeLinkableDeps,
      Optional<Linker.CxxRuntimeType> cxxRuntimeType,
      Optional<SourcePath> bundleLoader,
      ImmutableSet<BuildTarget> blacklist,
      ImmutableSet<BuildTarget> linkWholeDeps,
      NativeLinkableInput immediateLinkableInput,
      Optional<LinkOutputPostprocessor> postprocessor,
      CellPathResolver cellPathResolver) {
    // Soname should only ever be set when linking a "shared" library.
    Preconditions.checkState(!soname.isPresent() || SONAME_REQUIRED_LINK_TYPES.contains(linkType));

    // Bundle loaders are only supported for Mach-O bundle libraries
    Preconditions.checkState(
        !bundleLoader.isPresent() || linkType == Linker.LinkType.MACH_O_BUNDLE);

    // Collect and topologically sort our deps that contribute to the link.
    Stream<NativeLinkableInput> nativeLinkableInputs =
        graphBuilder
            .getParallelizer()
            .maybeParallelize(
                NativeLinkables.getNativeLinkables(
                    cxxPlatform, graphBuilder, nativeLinkableDeps, depType)
                    .stream())
            .filter(linkable -> !blacklist.contains(linkable.getBuildTarget()))
            .map(
                nativeLinkable -> {
                  // Each dep contributes input according to its preferred
                  // linkage and whether it was requested link-whole.
                  NativeLinkable.Linkage link =
                      nativeLinkable.getPreferredLinkage(cxxPlatform, graphBuilder);
                  NativeLinkableInput input =
                      nativeLinkable.getNativeLinkableInput(
                          cxxPlatform,
                          NativeLinkables.getLinkStyle(link, depType),
                          linkWholeDeps.contains(nativeLinkable.getBuildTarget()),
                          graphBuilder);
                  LOG.verbose("Native linkable %s returned input %s", nativeLinkable, input);
                  return input;
                });
    nativeLinkableInputs = Stream.concat(Stream.of(immediateLinkableInput), nativeLinkableInputs);
    // Construct a list out of the stream rather than passing in an iterable via ::iterator as
    // the latter will never evaluate stream elements in parallel.
    NativeLinkableInput linkableInput =
        NativeLinkableInput.concat(nativeLinkableInputs.collect(Collectors.toList()));

    // Build up the arguments to pass to the linker.
    ImmutableList.Builder<Arg> argsBuilder = ImmutableList.builder();

    // If we're doing a shared build, pass the necessary flags to the linker, including setting
    // the soname.
    if (linkType == Linker.LinkType.SHARED) {
      argsBuilder.addAll(cxxPlatform.getLd().resolve(graphBuilder).getSharedLibFlag());
    } else if (linkType == Linker.LinkType.MACH_O_BUNDLE) {
      argsBuilder.add(StringArg.of("-bundle"));
      // It's possible to build a Mach-O bundle without a bundle loader (logic tests, for example).
      if (bundleLoader.isPresent()) {
        argsBuilder.add(StringArg.of("-bundle_loader"), SourcePathArg.of(bundleLoader.get()));
      }
    }
    if (soname.isPresent()) {
      argsBuilder.addAll(
          StringArg.from(cxxPlatform.getLd().resolve(graphBuilder).soname(soname.get())));
    }

    // Add all arguments from our dependencies.
    argsBuilder.addAll(linkableInput.getArgs());

    // Add all shared libraries
    if (!linkableInput.getLibraries().isEmpty()) {
      addSharedLibrariesLinkerArgs(
          cxxPlatform,
          resolver,
          ImmutableSortedSet.copyOf(linkableInput.getLibraries()),
          argsBuilder);
    }

    // Add framework args
    if (!linkableInput.getFrameworks().isEmpty()) {
      addFrameworkLinkerArgs(
          cxxPlatform,
          resolver,
          ImmutableSortedSet.copyOf(linkableInput.getFrameworks()),
          argsBuilder);
    }

    // A statically-linked C++ runtime forces STATIC runtime ldflags.
    Linker.LinkableDepType runtimeDepType = depType;
    if (cxxRuntimeType.orElse(Linker.CxxRuntimeType.DYNAMIC) == Linker.CxxRuntimeType.STATIC) {
      runtimeDepType = Linker.LinkableDepType.STATIC;
    }

    ImmutableList<Arg> allArgs = argsBuilder.build();

    return createCxxLinkableBuildRule(
        cellPathResolver,
        cxxBuckConfig,
        cxxPlatform,
        projectFilesystem,
        graphBuilder,
        ruleFinder,
        target,
        output,
        deriveSupplementaryOutputPathsFromMainOutputPath(output, extraOutputNames),
        allArgs,
        runtimeDepType,
        linkOptions,
        postprocessor);
  }
private static void addSharedLibrariesLinkerArgs(
CxxPlatform cxxPlatform,
SourcePathResolver resolver,
ImmutableSortedSet<FrameworkPath> allLibraries,
ImmutableList.Builder<Arg> argsBuilder) {
argsBuilder.add(new SharedLibraryLinkArgs(allLibraries, cxxPlatform, resolver));
// Add all libraries link args
argsBuilder.add(new FrameworkLibraryLinkArgs(allLibraries));
}
private static void addFrameworkLinkerArgs(
CxxPlatform cxxPlatform,
SourcePathResolver resolver,
ImmutableSortedSet<FrameworkPath> allFrameworks,
ImmutableList.Builder<Arg> argsBuilder) {
argsBuilder.add(new FrameworkLinkerArgs(allFrameworks, cxxPlatform, resolver));
// Add all framework link args
argsBuilder.add(frameworksToLinkerArg(allFrameworks));
}
  /** Wraps framework paths into an {@link Arg} that emits "-framework <name>" pairs. */
  @VisibleForTesting
  static Arg frameworksToLinkerArg(ImmutableSortedSet<FrameworkPath> frameworkPaths) {
    return new FrameworkToLinkerArg(frameworkPaths);
  }
public static CxxLink createCxxLinkableSharedBuildRule(
CxxBuckConfig cxxBuckConfig,
CxxPlatform cxxPlatform,
ProjectFilesystem projectFilesystem,
BuildRuleResolver ruleResolver,
SourcePathRuleFinder ruleFinder,
BuildTarget target,
Path output,
ImmutableMap<String, Path> extraOutputs,
Optional<String> soname,
ImmutableList<? extends Arg> args,
CellPathResolver cellPathResolver) {
ImmutableList.Builder<Arg> linkArgsBuilder = ImmutableList.builder();
linkArgsBuilder.addAll(cxxPlatform.getLd().resolve(ruleResolver).getSharedLibFlag());
if (soname.isPresent()) {
linkArgsBuilder.addAll(
StringArg.from(cxxPlatform.getLd().resolve(ruleResolver).soname(soname.get())));
}
linkArgsBuilder.addAll(args);
ImmutableList<Arg> linkArgs = linkArgsBuilder.build();
return createCxxLinkableBuildRule(
cellPathResolver,
cxxBuckConfig,
cxxPlatform,
projectFilesystem,
ruleResolver,
ruleFinder,
target,
output,
extraOutputs,
linkArgs,
Linker.LinkableDepType.SHARED,
CxxLinkOptions.of(),
Optional.empty());
}
/**
* Derive supplementary output paths based on the main output path.
*
* @param output main output path.
* @param supplementaryOutputNames supplementary output names.
* @return Map of names to supplementary output paths.
*/
public static ImmutableMap<String, Path> deriveSupplementaryOutputPathsFromMainOutputPath(
Path output, Iterable<String> supplementaryOutputNames) {
return Streams.stream(supplementaryOutputNames)
.collect(
ImmutableMap.toImmutableMap(
name -> name,
name -> output.getParent().resolve(output.getFileName() + "-" + name)));
}
  /** Arg that expands framework paths into sorted, de-duplicated "-F <dir>" search-path flags. */
  private static class FrameworkLinkerArgs extends FrameworkPathArg {
    // Maps a FrameworkPath to the directory the linker should search.
    @AddToRuleKey final AddsToRuleKeyFunction<FrameworkPath, Path> frameworkPathToSearchPath;

    public FrameworkLinkerArgs(
        ImmutableSortedSet<FrameworkPath> allFrameworks,
        CxxPlatform cxxPlatform,
        SourcePathResolver resolver) {
      super(allFrameworks);
      frameworkPathToSearchPath =
          CxxDescriptionEnhancer.frameworkPathToSearchPath(cxxPlatform, resolver);
    }

    @Override
    public void appendToCommandLine(Consumer<String> consumer, SourcePathResolver resolver) {
      // Collect into a sorted set so the command line is deterministic and
      // free of duplicate search paths.
      ImmutableSortedSet<Path> searchPaths =
          frameworkPaths
              .stream()
              .map(frameworkPathToSearchPath)
              .collect(ImmutableSortedSet.toImmutableSortedSet(Ordering.natural()));
      for (Path searchPath : searchPaths) {
        consumer.accept("-F");
        consumer.accept(searchPath.toString());
      }
    }
  }
private static class FrameworkToLinkerArg extends FrameworkPathArg {
public FrameworkToLinkerArg(ImmutableSortedSet<FrameworkPath> frameworkPaths) {
super(frameworkPaths);
}
@Override
public void appendToCommandLine(Consumer<String> consumer, SourcePathResolver resolver) {
for (FrameworkPath frameworkPath : frameworkPaths) {
consumer.accept("-framework");
consumer.accept(frameworkPath.getName(resolver::getAbsolutePath));
}
}
}
  /**
   * Arg that renders library entries as "-l<name>" flags, stripping the "lib"
   * prefix and the extension from each file name; entries whose stripped name
   * is empty (bare search paths) are skipped.
   */
  private static class FrameworkLibraryLinkArgs extends FrameworkPathArg {
    public FrameworkLibraryLinkArgs(ImmutableSortedSet<FrameworkPath> allLibraries) {
      super(allLibraries);
    }

    @Override
    public void appendToCommandLine(Consumer<String> consumer, SourcePathResolver resolver) {
      for (FrameworkPath frameworkPath : frameworkPaths) {
        String libName =
            MorePaths.stripPathPrefixAndExtension(
                frameworkPath.getFileName(resolver::getAbsolutePath), "lib");
        // libraries set can contain path-qualified libraries, or just library
        // search paths.
        // Assume these end in '../lib' and filter out here.
        if (libName.isEmpty()) {
          continue;
        }
        consumer.accept("-l" + libName);
      }
    }
  }
  /**
   * Arg that expands library locations into sorted, de-duplicated "-L <dir>"
   * search-path flags; entries that map to a null search path are dropped.
   */
  private static class SharedLibraryLinkArgs extends FrameworkPathArg {
    // Maps a FrameworkPath to the directory the linker should search.
    @AddToRuleKey final AddsToRuleKeyFunction<FrameworkPath, Path> frameworkPathToSearchPath;

    public SharedLibraryLinkArgs(
        ImmutableSortedSet<FrameworkPath> allLibraries,
        CxxPlatform cxxPlatform,
        SourcePathResolver resolver) {
      super(allLibraries);
      frameworkPathToSearchPath =
          CxxDescriptionEnhancer.frameworkPathToSearchPath(cxxPlatform, resolver);
    }

    @Override
    public void appendToCommandLine(Consumer<String> consumer, SourcePathResolver resolver) {
      // Unlike FrameworkLinkerArgs, null search paths are filtered out here.
      ImmutableSortedSet<Path> searchPaths =
          frameworkPaths
              .stream()
              .map(frameworkPathToSearchPath)
              .filter(Objects::nonNull)
              .collect(ImmutableSortedSet.toImmutableSortedSet(Ordering.natural()));
      for (Path searchPath : searchPaths) {
        consumer.accept("-L");
        consumer.accept(searchPath.toString());
      }
    }
  }
}
| brettwooldridge/buck | src/com/facebook/buck/cxx/CxxLinkableEnhancer.java | Java | apache-2.0 | 17,412 |
# Adds the upstream OpenVPN apt repository matching the node's distribution.
# Behavior is unchanged from the previous per-version case tree; the seven
# near-identical apt_repository declarations are collapsed into one
# data-driven lookup.
flavor = node[:openvpn][:community_repo_flavor]

# Resolve the repository codename for this platform/version combination.
# Unknown platforms/versions resolve to nil and get no repository (as before).
codename =
  case node[:platform]
  when "debian"
    { 5 => "lenny", 6 => "squeeze", 7 => "wheezy" }[node[:platform_version].to_i]
  when "ubuntu"
    {
      "10.04" => "lucid",
      "10.10" => "lucid",
      "11.04" => "lucid",
      "11.10" => "lucid",
      "12.04" => "precise",
      "13.04" => "raring",
      "13.10" => "saucy",
    }[node[:platform_version]]
  end

if codename == "lenny"
  # Debian 5 uses the legacy flavor-specific repository layout and plain-http key.
  apt_repository "openvpn-lenny" do
    uri "http://repos.openvpn.net/repos/apt/lenny-#{flavor}"
    components ["lenny", "main"]
    key "http://repos.openvpn.net/repos/repo-public.gpg"
  end
elsif codename
  apt_repository "openvpn-#{codename}" do
    uri "http://swupdate.openvpn.net/apt"
    components [codename, "main"]
    key "https://swupdate.openvpn.net/repos/repo-public.gpg"
  end
end
| ClodoCorp/cookbook-openvpn | recipes/use_community_repos.rb | Ruby | apache-2.0 | 1,639 |
package com.cdd.allpay.callback;
import android.app.Activity;
import android.widget.Toast;
import com.cdd.allpay.Pay;
/**
* 泡泡龙2015
*
* @author lei
*/
/**
 * Billing callback for the "PaoPaoLong 2015" title: maps pay items to the
 * game's internal pay codes and shows a toast with the payment result.
 *
 * @author lei
 */
public class PaoPaoLong2015Pay extends ActivityCallback {

    public PaoPaoLong2015Pay(Activity activity) {
        super(activity);
    }

    @Override
    public void onSucess(String item) {
        onPaymentSucceeded(lookupPayCode(item));
        Toast.makeText(activity, "支付成功(Sucess)", Toast.LENGTH_LONG).show();
    }

    @Override
    public void onFailed(String item) {
        onPaymentFailed(lookupPayCode(item));
        Toast.makeText(activity, "支付失败(Failed)", Toast.LENGTH_LONG).show();
    }

    // Items pass through unchanged for this title.
    @Override
    public String convertItem(String payCode) {
        return payCode;
    }

    // Resolves the pay item id to the game's internal pay code.
    private int lookupPayCode(String item) {
        return Pay.payMap.get(item);
    }

    // Hook for game-specific handling of a successful payment (currently empty).
    private void onPaymentSucceeded(int payCode) {
    }

    // Hook for game-specific handling of a failed payment (currently empty).
    private void onPaymentFailed(int payCode) {
    }
}
| hsoftxl/PlayProject | app/src/main/java/com/cdd/allpay/callback/PaoPaoLong2015Pay.java | Java | apache-2.0 | 958 |
# Copyright 2010 OpenStack Foundation
# Copyright 2012 University Of Minho
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import copy
import datetime
import errno
import glob
import os
import random
import re
import shutil
import signal
import threading
import time
import uuid
import eventlet
from eventlet import greenthread
import fixtures
from lxml import etree
import mock
from mox3 import mox
from os_brick.initiator import connector
from oslo_concurrency import lockutils
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_serialization import jsonutils
from oslo_service import loopingcall
from oslo_utils import encodeutils
from oslo_utils import fileutils
from oslo_utils import importutils
from oslo_utils import timeutils
from oslo_utils import units
from oslo_utils import uuidutils
import six
from six.moves import builtins
from six.moves import range
from nova.api.metadata import base as instance_metadata
from nova.compute import arch
from nova.compute import cpumodel
from nova.compute import manager
from nova.compute import power_state
from nova.compute import task_states
from nova.compute import vm_mode
from nova.compute import vm_states
from nova import context
from nova import db
from nova import exception
from nova.network import model as network_model
from nova import objects
from nova.objects import fields
from nova.pci import manager as pci_manager
from nova import test
from nova.tests.unit import fake_block_device
from nova.tests.unit import fake_instance
from nova.tests.unit import fake_network
import nova.tests.unit.image.fake
from nova.tests.unit import matchers
from nova.tests.unit.objects import test_pci_device
from nova.tests.unit.objects import test_vcpu_model
from nova.tests.unit.virt.libvirt import fake_imagebackend
from nova.tests.unit.virt.libvirt import fake_libvirt_utils
from nova.tests.unit.virt.libvirt import fakelibvirt
from nova import utils
from nova import version
from nova.virt import block_device as driver_block_device
from nova.virt import configdrive
from nova.virt.disk import api as disk
from nova.virt import driver
from nova.virt import fake
from nova.virt import firewall as base_firewall
from nova.virt import hardware
from nova.virt.image import model as imgmodel
from nova.virt.libvirt import blockinfo
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import driver as libvirt_driver
from nova.virt.libvirt import firewall
from nova.virt.libvirt import guest as libvirt_guest
from nova.virt.libvirt import host
from nova.virt.libvirt import imagebackend
from nova.virt.libvirt.storage import dmcrypt
from nova.virt.libvirt.storage import lvm
from nova.virt.libvirt.storage import rbd_utils
from nova.virt.libvirt import utils as libvirt_utils
from nova.virt.libvirt.volume import volume as volume_drivers
# Point the driver, host and guest modules at the in-tree fakelibvirt so
# these tests never open a real libvirt connection.
libvirt_driver.libvirt = fakelibvirt
host.libvirt = fakelibvirt
libvirt_guest.libvirt = fakelibvirt
# Pull in the config options read by the tests below (instances_path,
# image cache dir, host/my_ip) from the modules that register them.
CONF = cfg.CONF
CONF.import_opt('compute_manager', 'nova.service')
CONF.import_opt('host', 'nova.netconf')
CONF.import_opt('my_ip', 'nova.netconf')
CONF.import_opt('image_cache_subdirectory_name', 'nova.virt.imagecache')
CONF.import_opt('instances_path', 'nova.compute.manager')
_fake_network_info = fake_network.fake_get_instance_nw_info
_fake_NodeDevXml = \
{"pci_0000_04_00_3": """
<device>
<name>pci_0000_04_00_3</name>
<parent>pci_0000_00_01_1</parent>
<driver>
<name>igb</name>
</driver>
<capability type='pci'>
<domain>0</domain>
<bus>4</bus>
<slot>0</slot>
<function>3</function>
<product id='0x1521'>I350 Gigabit Network Connection</product>
<vendor id='0x8086'>Intel Corporation</vendor>
<capability type='virt_functions'>
<address domain='0x0000' bus='0x04' slot='0x10' function='0x3'/>
<address domain='0x0000' bus='0x04' slot='0x10' function='0x7'/>
<address domain='0x0000' bus='0x04' slot='0x11' function='0x3'/>
<address domain='0x0000' bus='0x04' slot='0x11' function='0x7'/>
</capability>
</capability>
</device>""",
"pci_0000_04_10_7": """
<device>
<name>pci_0000_04_10_7</name>
<parent>pci_0000_00_01_1</parent>
<driver>
<name>igbvf</name>
</driver>
<capability type='pci'>
<domain>0</domain>
<bus>4</bus>
<slot>16</slot>
<function>7</function>
<product id='0x1520'>I350 Ethernet Controller Virtual Function
</product>
<vendor id='0x8086'>Intel Corporation</vendor>
<capability type='phys_function'>
<address domain='0x0000' bus='0x04' slot='0x00' function='0x3'/>
</capability>
<capability type='virt_functions'>
</capability>
</capability>
</device>""",
"pci_0000_04_11_7": """
<device>
<name>pci_0000_04_11_7</name>
<parent>pci_0000_00_01_1</parent>
<driver>
<name>igbvf</name>
</driver>
<capability type='pci'>
<domain>0</domain>
<bus>4</bus>
<slot>17</slot>
<function>7</function>
<product id='0x1520'>I350 Ethernet Controller Virtual Function
</product>
<vendor id='0x8086'>Intel Corporation</vendor>
<numa node='0'/>
<capability type='phys_function'>
<address domain='0x0000' bus='0x04' slot='0x00' function='0x3'/>
</capability>
<capability type='virt_functions'>
</capability>
</capability>
</device>"""}
_fake_cpu_info = {
"arch": "test_arch",
"model": "test_model",
"vendor": "test_vendor",
"topology": {
"sockets": 1,
"cores": 8,
"threads": 16
},
"features": ["feature1", "feature2"]
}
def _concurrency(signal, wait, done, target, is_block_dev=False):
signal.send()
wait.wait()
done.send()
class FakeVirDomainSnapshot(object):
    """Minimal stand-in for a libvirt domain snapshot object."""

    def __init__(self, dom=None):
        # Keep a reference to the (possibly fake) domain snapshotted.
        self.dom = dom

    def delete(self, flags):
        """No-op replacement for virDomainSnapshot.delete()."""
        pass
class FakeVirtDomain(object):
    """Minimal fake of a libvirt virDomain object.

    Implements just enough of the virDomain API (XMLDesc, info,
    UUIDString, attach/detach, snapshot and block-job entry points) for
    the driver tests; all mutating calls are no-ops.
    """

    def __init__(self, fake_xml=None, uuidstr=None, id=None, name=None):
        if uuidstr is None:
            uuidstr = str(uuid.uuid4())
        self.uuidstr = uuidstr
        self.id = id
        self.domname = name
        # Canned 5-element return value for info() (state, memory, vcpu
        # and cpu-time style fields; the trailing two are left as None).
        self._info = [power_state.RUNNING, 2048 * units.Mi, 1234 * units.Mi,
                     None, None]
        if fake_xml:
            self._fake_dom_xml = fake_xml
        else:
            # Default single-disk domain used when the test does not
            # provide its own XML.
            self._fake_dom_xml = """
                <domain type='kvm'>
                    <devices>
                        <disk type='file'>
                            <source file='filename'/>
                        </disk>
                    </devices>
                </domain>
            """

    def name(self):
        if self.domname is None:
            return "fake-domain %s" % self
        else:
            return self.domname

    def ID(self):
        return self.id

    def info(self):
        return self._info

    def create(self):
        pass

    def managedSave(self, *args):
        pass

    def createWithFlags(self, launch_flags):
        pass

    def XMLDesc(self, flags):
        # The ``flags`` argument is accepted but ignored.
        return self._fake_dom_xml

    def UUIDString(self):
        return self.uuidstr

    def attachDeviceFlags(self, xml, flags):
        pass

    def attachDevice(self, xml):
        pass

    def detachDeviceFlags(self, xml, flags):
        pass

    def snapshotCreateXML(self, xml, flags):
        pass

    def blockCommit(self, disk, base, top, bandwidth=0, flags=0):
        pass

    def blockRebase(self, disk, base, bandwidth=0, flags=0):
        pass

    def blockJobInfo(self, path, flags):
        pass

    def resume(self):
        pass

    def destroy(self):
        pass

    def fsFreeze(self, disks=None, flags=0):
        pass

    def fsThaw(self, disks=None, flags=0):
        pass
class CacheConcurrencyTestCase(test.NoDBTestCase):
    """Verify locking behaviour of the image backend cache.

    Two greenthreads race through imagebackend cache() calls; the tests
    assert that calls for the *same* filename serialise while calls for
    *different* filenames proceed concurrently.  The exact ordering of
    event sends/waits below is load-bearing -- do not reorder.
    """

    def setUp(self):
        super(CacheConcurrencyTestCase, self).setUp()
        self.flags(instances_path=self.useFixture(fixtures.TempDir()).path)
        # utils.synchronized() will create the lock_path for us if it
        # doesn't already exist. It will also delete it when it's done,
        # which can cause race conditions with the multiple threads we
        # use for tests. So, create the path here so utils.synchronized()
        # won't delete it out from under one of the threads.
        self.lock_path = os.path.join(CONF.instances_path, 'locks')
        fileutils.ensure_tree(self.lock_path)

        def fake_exists(fname):
            # Pretend only the image-cache base dir and lock dir exist.
            basedir = os.path.join(CONF.instances_path,
                                   CONF.image_cache_subdirectory_name)
            if fname == basedir or fname == self.lock_path:
                return True
            return False

        def fake_execute(*args, **kwargs):
            pass

        def fake_extend(image, size, use_cow=False):
            pass

        self.stubs.Set(os.path, 'exists', fake_exists)
        self.stubs.Set(utils, 'execute', fake_execute)
        self.stubs.Set(imagebackend.disk, 'extend', fake_extend)
        self.useFixture(fixtures.MonkeyPatch(
            'nova.virt.libvirt.imagebackend.libvirt_utils',
            fake_libvirt_utils))

    def _fake_instance(self, uuid):
        # Just enough of an Instance for imagebackend path construction.
        return objects.Instance(id=1, uuid=uuid)

    def test_same_fname_concurrency(self):
        # Ensures that the same fname cache runs at a sequentially.
        uuid = uuidutils.generate_uuid()

        backend = imagebackend.Backend(False)
        wait1 = eventlet.event.Event()
        done1 = eventlet.event.Event()
        sig1 = eventlet.event.Event()
        thr1 = eventlet.spawn(backend.image(self._fake_instance(uuid),
                                            'name').cache,
                _concurrency, 'fname', None,
                signal=sig1, wait=wait1, done=done1)
        eventlet.sleep(0)
        # Thread 1 should run before thread 2.
        sig1.wait()

        wait2 = eventlet.event.Event()
        done2 = eventlet.event.Event()
        sig2 = eventlet.event.Event()
        thr2 = eventlet.spawn(backend.image(self._fake_instance(uuid),
                                            'name').cache,
                _concurrency, 'fname', None,
                signal=sig2, wait=wait2, done=done2)

        wait2.send()
        eventlet.sleep(0)
        try:
            # Thread 2 must be blocked on the lock held by thread 1.
            self.assertFalse(done2.ready())
        finally:
            wait1.send()
        done1.wait()
        eventlet.sleep(0)
        self.assertTrue(done2.ready())
        # Wait on greenthreads to assert they didn't raise exceptions
        # during execution
        thr1.wait()
        thr2.wait()

    def test_different_fname_concurrency(self):
        # Ensures that two different fname caches are concurrent.
        uuid = uuidutils.generate_uuid()

        backend = imagebackend.Backend(False)
        wait1 = eventlet.event.Event()
        done1 = eventlet.event.Event()
        sig1 = eventlet.event.Event()
        thr1 = eventlet.spawn(backend.image(self._fake_instance(uuid),
                                            'name').cache,
                _concurrency, 'fname2', None,
                signal=sig1, wait=wait1, done=done1)
        eventlet.sleep(0)
        # Thread 1 should run before thread 2.
        sig1.wait()

        wait2 = eventlet.event.Event()
        done2 = eventlet.event.Event()
        sig2 = eventlet.event.Event()
        thr2 = eventlet.spawn(backend.image(self._fake_instance(uuid),
                                            'name').cache,
                _concurrency, 'fname1', None,
                signal=sig2, wait=wait2, done=done2)
        eventlet.sleep(0)
        # Wait for thread 2 to start.
        sig2.wait()

        wait2.send()
        tries = 0
        # Thread 1 still holds its (different) lock, so thread 2 should
        # be able to finish on its own within a few scheduler yields.
        while not done2.ready() and tries < 10:
            eventlet.sleep(0)
            tries += 1
        try:
            self.assertTrue(done2.ready())
        finally:
            wait1.send()
            eventlet.sleep(0)
        # Wait on greenthreads to assert they didn't raise exceptions
        # during execution
        thr1.wait()
        thr2.wait()
class FakeVolumeDriver(object):
    """Stand-in volume driver: all operations are no-ops except that
    get_config()/connect_volume() return a canned guest disk config."""

    def __init__(self, *args, **kwargs):
        pass

    def attach_volume(self, *args):
        pass

    def detach_volume(self, *args):
        pass

    def get_xml(self, *args):
        return ""

    def get_config(self, *args):
        """Connect the volume to a fake device."""
        conf = vconfig.LibvirtConfigGuestDisk()
        conf.source_type = "network"
        # Every other interesting attribute is simply "fake".
        for attr in ("source_protocol", "source_name",
                     "target_dev", "target_bus"):
            setattr(conf, attr, "fake")
        return conf

    def connect_volume(self, *args):
        """Connect the volume to a fake device."""
        return self.get_config()
class FakeConfigGuestDisk(object):
    """Bare-bones stand-in for a guest disk config object."""

    def __init__(self, *args, **kwargs):
        # Only the two attributes the tests inspect are modelled.
        self.source_type = self.driver_cache = None
class FakeConfigGuest(object):
    """Bare-bones stand-in for a guest config object."""

    def __init__(self, *args, **kwargs):
        # The tests only ever look at driver_cache.
        self.driver_cache = None
class FakeNodeDevice(object):
    """Fake libvirt node device that serves canned XML."""

    def __init__(self, fakexml):
        self.xml = fakexml

    def XMLDesc(self, flags):
        """Return the canned XML; ``flags`` is ignored."""
        return self.xml
def _create_test_instance():
    """Build the dict used to construct test Instance objects.

    Returns a plain dict (not an objects.Instance) so individual tests
    can tweak fields before instantiating.  The embedded Flavor mirrors
    the classic m1.small flavor (id=2, flavorid='1').
    """
    flavor = objects.Flavor(memory_mb=2048,
                            swap=0,
                            vcpu_weight=None,
                            root_gb=1,
                            id=2,
                            name=u'm1.small',
                            ephemeral_gb=0,
                            rxtx_factor=1.0,
                            flavorid=u'1',
                            vcpus=1,
                            extra_specs={})
    return {
        'id': 1,
        'uuid': '32dfcb37-5af1-552b-357c-be8c3aa38310',
        'memory_kb': '1024000',
        'basepath': '/some/path',
        'bridge_name': 'br100',
        'display_name': "Acme webserver",
        'vcpus': 2,
        'project_id': 'fake',
        'bridge': 'br101',
        'image_ref': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
        'root_gb': 10,
        'ephemeral_gb': 20,
        'instance_type_id': '5',  # m1.small
        'extra_specs': {},
        'system_metadata': {
            'image_disk_format': 'raw',
        },
        'flavor': flavor,
        'new_flavor': None,
        'old_flavor': None,
        'pci_devices': objects.PciDeviceList(),
        'numa_topology': None,
        'config_drive': None,
        'vm_mode': None,
        'kernel_id': None,
        'ramdisk_id': None,
        'os_type': 'linux',
        'user_id': '838a72b0-0d54-4827-8fd6-fb1227633ceb',
        'ephemeral_key_uuid': None,
        'vcpu_model': None,
        'host': 'fake-host',
    }
class LibvirtConnTestCase(test.NoDBTestCase):
REQUIRES_LOCKING = True
_EPHEMERAL_20_DEFAULT = ('ephemeral_20_%s' %
utils.get_hash_str(disk._DEFAULT_FILE_SYSTEM)[:7])
    def setUp(self):
        """Common fixture: temp dirs, fake libvirt, fake image service."""
        super(LibvirtConnTestCase, self).setUp()
        self.flags(fake_call=True)
        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = context.get_admin_context()
        temp_dir = self.useFixture(fixtures.TempDir()).path
        self.flags(instances_path=temp_dir)
        self.flags(snapshots_directory=temp_dir, group='libvirt')
        # Replace libvirt_utils in both the driver and the image backend
        # with the in-tree fake so no host commands run.
        self.useFixture(fixtures.MonkeyPatch(
            'nova.virt.libvirt.driver.libvirt_utils',
            fake_libvirt_utils))

        self.flags(sysinfo_serial="hardware", group="libvirt")

        self.useFixture(fixtures.MonkeyPatch(
            'nova.virt.libvirt.imagebackend.libvirt_utils',
            fake_libvirt_utils))

        def fake_extend(image, size, use_cow=False):
            pass

        self.stubs.Set(libvirt_driver.disk, 'extend', fake_extend)

        self.stubs.Set(imagebackend.Image, 'resolve_driver_format',
                       imagebackend.Image._get_driver_format)

        self.useFixture(fakelibvirt.FakeLibvirtFixture())
        self.test_instance = _create_test_instance()
        self.test_image_meta = {
            "disk_format": "raw",
        }
        self.image_service = nova.tests.unit.image.fake.stub_out_image_service(
                self.stubs)
        # Template domain XML with a single attached block device; tests
        # format in the device path they need.
        self.device_xml_tmpl = """
        <domain type='kvm'>
          <devices>
            <disk type='block' device='disk'>
              <driver name='qemu' type='raw' cache='none'/>
              <source dev='{device_path}'/>
              <target bus='virtio' dev='vdb'/>
              <serial>58a84f6d-3f0c-4e19-a0af-eb657b790657</serial>
              <address type='pci' domain='0x0' bus='0x0' slot='0x04' \
              function='0x0'/>
            </disk>
          </devices>
        </domain>
        """
    def relpath(self, path):
        # Convenience: path relative to the (per-test temp) instances dir.
        return os.path.relpath(path, CONF.instances_path)

    def tearDown(self):
        # Reset the module-level fake image service shared across tests.
        nova.tests.unit.image.fake.FakeImageService_reset()
        super(LibvirtConnTestCase, self).tearDown()
    def test_driver_capabilities(self):
        # The libvirt driver must advertise image caching and recreate
        # support, and must NOT claim same-host migration support.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.assertTrue(drvr.capabilities['has_imagecache'],
                        'Driver capabilities for \'has_imagecache\' '
                        'is invalid')
        self.assertTrue(drvr.capabilities['supports_recreate'],
                        'Driver capabilities for \'supports_recreate\' '
                        'is invalid')
        self.assertFalse(drvr.capabilities['supports_migrate_to_same_host'],
                         'Driver capabilities for '
                         '\'supports_migrate_to_same_host\' is invalid')
    def create_fake_libvirt_mock(self, **kwargs):
        """Defining mocks for LibvirtDriver(libvirt is not used)."""

        # A fake libvirt.virConnect
        class FakeLibvirtDriver(object):
            def defineXML(self, xml):
                return FakeVirtDomain()

        # Creating mocks
        volume_driver = ['iscsi=nova.tests.unit.virt.libvirt.test_driver'
                         '.FakeVolumeDriver']
        fake = FakeLibvirtDriver()
        # Customizing above fake if necessary: kwargs become attributes
        # on the fake connection object.
        for key, val in kwargs.items():
            fake.__setattr__(key, val)

        self.stubs.Set(libvirt_driver.LibvirtDriver, '_conn', fake)
        self.stubs.Set(libvirt_driver.LibvirtDriver, '_get_volume_drivers',
                       lambda x: volume_driver)
        self.stubs.Set(host.Host, 'get_connection', lambda x: fake)

    def fake_lookup(self, instance_name):
        # Stand-in for lookupByName(): always yields a fresh fake domain.
        return FakeVirtDomain()
def fake_execute(self, *args, **kwargs):
open(args[-1], "a").close()
    def _create_service(self, **kwargs):
        # Build an un-saved Service object; 'host' and 'disabled' may be
        # overridden via kwargs.
        service_ref = {'host': kwargs.get('host', 'dummy'),
                       'disabled': kwargs.get('disabled', False),
                       'binary': 'nova-compute',
                       'topic': 'compute',
                       'report_count': 0}

        return objects.Service(**service_ref)

    def _get_pause_flag(self, drvr, network_info, power_on=True,
                        vifs_already_plugged=False):
        # Mirror the driver's decision of whether to start paused while
        # waiting for neutron VIF-plugged events.
        timeout = CONF.vif_plugging_timeout
        events = []

        if (drvr._conn_supports_start_paused and
            utils.is_neutron() and
            not vifs_already_plugged and
            power_on and timeout):
            events = drvr._get_neutron_events(network_info)

        return bool(events)
    def test_public_api_signatures(self):
        # The libvirt driver must not diverge from the ComputeDriver API.
        baseinst = driver.ComputeDriver(None)
        inst = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.assertPublicAPISignatures(baseinst, inst)

    def test_legacy_block_device_info(self):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.assertFalse(drvr.need_legacy_block_device_info)

    @mock.patch.object(host.Host, "has_min_version")
    def test_min_version_start_ok(self, mock_version):
        # init_host succeeds when the minimum libvirt version is met.
        mock_version.return_value = True
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        drvr.init_host("dummyhost")

    @mock.patch.object(host.Host, "has_min_version")
    def test_min_version_start_abort(self, mock_version):
        # init_host must refuse to start on a too-old libvirt.
        mock_version.return_value = False
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.assertRaises(exception.NovaException,
                          drvr.init_host,
                          "dummyhost")
    @mock.patch.object(fakelibvirt.Connection, 'getLibVersion',
                       return_value=utils.convert_version_to_int(
                           libvirt_driver.NEXT_MIN_LIBVIRT_VERSION) - 1)
    @mock.patch.object(libvirt_driver.LOG, 'warning')
    def test_next_min_version_deprecation_warning(self, mock_warning,
                                                  mock_get_libversion):
        # Test that a warning is logged if the libvirt version is less than
        # the next required minimum version.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        drvr.init_host("dummyhost")
        # assert that the next min version is in a warning message
        expected_arg = {'version': '0.10.2'}
        version_arg_found = False
        for call in mock_warning.call_args_list:
            if call[0][1] == expected_arg:
                version_arg_found = True
                break
        self.assertTrue(version_arg_found)

    @mock.patch.object(fakelibvirt.Connection, 'getLibVersion',
                       return_value=utils.convert_version_to_int(
                           libvirt_driver.NEXT_MIN_LIBVIRT_VERSION))
    @mock.patch.object(libvirt_driver.LOG, 'warning')
    def test_next_min_version_ok(self, mock_warning, mock_get_libversion):
        # Test that a warning is not logged if the libvirt version is greater
        # than or equal to NEXT_MIN_LIBVIRT_VERSION.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        drvr.init_host("dummyhost")
        # assert that the next min version is in a warning message
        expected_arg = {'version': '0.10.2'}
        version_arg_found = False
        for call in mock_warning.call_args_list:
            if call[0][1] == expected_arg:
                version_arg_found = True
                break
        self.assertFalse(version_arg_found)
    # The next three tests pin the KVM-on-s390x version gates: too-old
    # libvirt aborts, too-old qemu aborts, both at minimum succeeds.
    @mock.patch.object(fakelibvirt.Connection, 'getLibVersion',
                       return_value=utils.convert_version_to_int(
                           libvirt_driver.MIN_LIBVIRT_KVM_S390_VERSION) - 1)
    @mock.patch.object(fakelibvirt.Connection, 'getVersion',
                       return_value=utils.convert_version_to_int(
                           libvirt_driver.MIN_QEMU_S390_VERSION))
    @mock.patch.object(arch, "from_host", return_value=arch.S390X)
    def test_min_version_s390_old_libvirt(self, mock_arch,
                                          mock_qemu_version, mock_lv_version):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.assertRaises(exception.NovaException,
                          drvr.init_host,
                          "dummyhost")

    @mock.patch.object(fakelibvirt.Connection, 'getLibVersion',
                       return_value=utils.convert_version_to_int(
                           libvirt_driver.MIN_LIBVIRT_KVM_S390_VERSION))
    @mock.patch.object(fakelibvirt.Connection, 'getVersion',
                       return_value=utils.convert_version_to_int(
                           libvirt_driver.MIN_QEMU_S390_VERSION) - 1)
    @mock.patch.object(arch, "from_host", return_value=arch.S390X)
    def test_min_version_s390_old_qemu(self, mock_arch,
                                       mock_qemu_version, mock_lv_version):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.assertRaises(exception.NovaException,
                          drvr.init_host,
                          "dummyhost")

    @mock.patch.object(fakelibvirt.Connection, 'getLibVersion',
                       return_value=utils.convert_version_to_int(
                           libvirt_driver.MIN_LIBVIRT_KVM_S390_VERSION))
    @mock.patch.object(fakelibvirt.Connection, 'getVersion',
                       return_value=utils.convert_version_to_int(
                           libvirt_driver.MIN_QEMU_S390_VERSION))
    @mock.patch.object(arch, "from_host", return_value=arch.S390X)
    def test_min_version_s390_ok(self, mock_arch,
                                 mock_qemu_version, mock_lv_version):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        drvr.init_host("dummyhost")
    # set_admin_password tests: the password is pushed via the qemu guest
    # agent, so the image must advertise hw_qemu_guest_agent and the host
    # must meet a minimum libvirt version; virt_type must be qemu/kvm.
    @mock.patch('nova.utils.get_image_from_system_metadata')
    @mock.patch.object(host.Host,
                       'has_min_version', return_value=True)
    @mock.patch('nova.virt.libvirt.host.Host.get_guest')
    def test_set_admin_password(self, mock_get_guest, ver, mock_image):
        self.flags(virt_type='kvm', group='libvirt')
        instance = objects.Instance(**self.test_instance)
        mock_image.return_value = {"properties": {
            "hw_qemu_guest_agent": "yes"}}
        mock_guest = mock.Mock(spec=libvirt_guest.Guest)
        mock_get_guest.return_value = mock_guest

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        drvr.set_admin_password(instance, "123")

        # Linux guests default to the "root" account.
        mock_guest.set_user_password.assert_called_once_with("root", "123")

    @mock.patch('nova.utils.get_image_from_system_metadata')
    @mock.patch.object(host.Host,
                       'has_min_version', return_value=True)
    @mock.patch('nova.virt.libvirt.host.Host.get_guest')
    def test_set_admin_password_windows(self, mock_get_guest, ver, mock_image):
        self.flags(virt_type='kvm', group='libvirt')
        instance = objects.Instance(**self.test_instance)
        instance.os_type = "windows"
        mock_image.return_value = {"properties": {
            "hw_qemu_guest_agent": "yes"}}
        mock_guest = mock.Mock(spec=libvirt_guest.Guest)
        mock_get_guest.return_value = mock_guest

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        drvr.set_admin_password(instance, "123")

        # Windows guests default to the "Administrator" account.
        mock_guest.set_user_password.assert_called_once_with(
            "Administrator", "123")

    @mock.patch('nova.utils.get_image_from_system_metadata')
    @mock.patch.object(host.Host,
                       'has_min_version', return_value=True)
    @mock.patch('nova.virt.libvirt.host.Host.get_guest')
    def test_set_admin_password_image(self, mock_get_guest, ver, mock_image):
        self.flags(virt_type='kvm', group='libvirt')
        instance = objects.Instance(**self.test_instance)
        # An explicit os_admin_user image property overrides the default.
        mock_image.return_value = {"properties": {
            "hw_qemu_guest_agent": "yes",
            "os_admin_user": "foo"
        }}
        mock_guest = mock.Mock(spec=libvirt_guest.Guest)
        mock_get_guest.return_value = mock_guest

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        drvr.set_admin_password(instance, "123")

        mock_guest.set_user_password.assert_called_once_with("foo", "123")

    @mock.patch('nova.utils.get_image_from_system_metadata')
    @mock.patch.object(host.Host,
                       'has_min_version', return_value=False)
    def test_set_admin_password_bad_version(self, mock_svc, mock_image):
        # Too-old libvirt -> SetAdminPasswdNotSupported.
        self.flags(virt_type='kvm', group='libvirt')
        instance = objects.Instance(**self.test_instance)
        mock_image.return_value = {"properties": {
            "hw_qemu_guest_agent": "yes"}}
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.assertRaises(exception.SetAdminPasswdNotSupported,
                          drvr.set_admin_password, instance, "123")

    @mock.patch('nova.utils.get_image_from_system_metadata')
    @mock.patch.object(host.Host,
                       'has_min_version', return_value=True)
    def test_set_admin_password_bad_hyp(self, mock_svc, mock_image):
        # Unsupported virt_type -> SetAdminPasswdNotSupported.
        self.flags(virt_type='foo', group='libvirt')
        instance = objects.Instance(**self.test_instance)
        mock_image.return_value = {"properties": {
            "hw_qemu_guest_agent": "yes"}}
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.assertRaises(exception.SetAdminPasswdNotSupported,
                          drvr.set_admin_password, instance, "123")

    @mock.patch.object(host.Host,
                       'has_min_version', return_value=True)
    def test_set_admin_password_guest_agent_not_running(self, mock_svc):
        # Image without hw_qemu_guest_agent -> QemuGuestAgentNotEnabled.
        self.flags(virt_type='kvm', group='libvirt')
        instance = objects.Instance(**self.test_instance)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.assertRaises(exception.QemuGuestAgentNotEnabled,
                          drvr.set_admin_password, instance, "123")

    @mock.patch('nova.utils.get_image_from_system_metadata')
    @mock.patch.object(host.Host,
                       'has_min_version', return_value=True)
    @mock.patch('nova.virt.libvirt.host.Host.get_guest')
    def test_set_admin_password_error(self, mock_get_guest, ver, mock_image):
        # A libvirtError from the agent is surfaced as NovaException.
        self.flags(virt_type='kvm', group='libvirt')
        instance = objects.Instance(**self.test_instance)
        mock_image.return_value = {"properties": {
            "hw_qemu_guest_agent": "yes"}}
        mock_guest = mock.Mock(spec=libvirt_guest.Guest)
        mock_guest.set_user_password.side_effect = (
            fakelibvirt.libvirtError("error"))
        mock_get_guest.return_value = mock_guest

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.assertRaises(exception.NovaException,
                          drvr.set_admin_password, instance, "123")
    @mock.patch.object(objects.Service, 'get_by_compute_host')
    def test_set_host_enabled_with_disable(self, mock_svc):
        # Tests disabling an enabled host.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        svc = self._create_service(host='fake-mini')
        mock_svc.return_value = svc
        drvr._set_host_enabled(False)
        self.assertTrue(svc.disabled)

    @mock.patch.object(objects.Service, 'get_by_compute_host')
    def test_set_host_enabled_with_enable(self, mock_svc):
        # Tests enabling a disabled host.
        # NOTE(review): the assertion below expects the host to STAY
        # disabled -- presumably because the driver only re-enables hosts
        # it disabled itself (not manually-disabled ones); confirm against
        # _set_host_enabled before changing this.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        svc = self._create_service(disabled=True, host='fake-mini')
        mock_svc.return_value = svc
        drvr._set_host_enabled(True)
        self.assertTrue(svc.disabled)

    @mock.patch.object(objects.Service, 'get_by_compute_host')
    def test_set_host_enabled_with_enable_state_enabled(self, mock_svc):
        # Tests enabling an enabled host.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        svc = self._create_service(disabled=False, host='fake-mini')
        mock_svc.return_value = svc
        drvr._set_host_enabled(True)
        self.assertFalse(svc.disabled)

    @mock.patch.object(objects.Service, 'get_by_compute_host')
    def test_set_host_enabled_with_disable_state_disabled(self, mock_svc):
        # Tests disabling a disabled host.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        svc = self._create_service(disabled=True, host='fake-mini')
        mock_svc.return_value = svc
        drvr._set_host_enabled(False)
        self.assertTrue(svc.disabled)

    def test_set_host_enabled_swallows_exceptions(self):
        # Tests that set_host_enabled will swallow exceptions coming from the
        # db_api code so they don't break anything calling it, e.g. the
        # _get_new_connection method.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        with mock.patch.object(db, 'service_get_by_compute_host') as db_mock:
            # Make db.service_get_by_compute_host raise NovaException; this
            # is more robust than just raising ComputeHostNotFound.
            db_mock.side_effect = exception.NovaException
            drvr._set_host_enabled(False)
    @mock.patch.object(fakelibvirt.virConnect, "nodeDeviceLookupByName")
    def test_prepare_pci_device(self, mock_lookup):
        # Preparing a PCI device for use should succeed on xen.
        pci_devices = [dict(hypervisor_name='xxx')]

        self.flags(virt_type='xen', group='libvirt')

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

        conn = drvr._host.get_connection()

        mock_lookup.side_effect = lambda x: fakelibvirt.NodeDevice(conn)
        drvr._prepare_pci_devices_for_use(pci_devices)

    @mock.patch.object(fakelibvirt.virConnect, "nodeDeviceLookupByName")
    @mock.patch.object(fakelibvirt.virNodeDevice, "dettach")
    def test_prepare_pci_device_exception(self, mock_detach, mock_lookup):
        # A libvirtError while detaching from the host is wrapped as
        # PciDevicePrepareFailed.
        pci_devices = [dict(hypervisor_name='xxx',
                            id='id1',
                            instance_uuid='uuid')]

        self.flags(virt_type='xen', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        conn = drvr._host.get_connection()

        mock_lookup.side_effect = lambda x: fakelibvirt.NodeDevice(conn)
        mock_detach.side_effect = fakelibvirt.libvirtError("xxxx")

        self.assertRaises(exception.PciDevicePrepareFailed,
                          drvr._prepare_pci_devices_for_use, pci_devices)

    def test_detach_pci_devices_exception(self):
        # Detach fails if the host libvirt is below the minimum version.
        pci_devices = [dict(hypervisor_name='xxx',
                            id='id1',
                            instance_uuid='uuid')]

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.mox.StubOutWithMock(host.Host,
                                 'has_min_version')
        host.Host.has_min_version = lambda x, y: False

        self.assertRaises(exception.PciDeviceDetachFailed,
                          drvr._detach_pci_devices, None, pci_devices)
    def test_detach_pci_devices(self):
        # Success path: the fake domain XML still lists the hostdev, but
        # the FakeDomain marks the device when detachDeviceFlags is
        # called, proving the driver issued the detach.
        fake_domXML1 =\
            """<domain> <devices>
            <disk type='file' device='disk'>
            <driver name='qemu' type='qcow2' cache='none'/>
            <source file='xxx'/>
            <target dev='vda' bus='virtio'/>
            <alias name='virtio-disk0'/>
            <address type='pci' domain='0x0000' bus='0x00'
            slot='0x04' function='0x0'/>
            </disk>
            <hostdev mode="subsystem" type="pci" managed="yes">
            <source>
            <address function="0x1" slot="0x10" domain="0x0000"
             bus="0x04"/>
            </source>
            </hostdev></devices></domain>"""

        pci_devices = [dict(hypervisor_name='xxx',
                            id='id1',
                            instance_uuid='uuid',
                            address="0001:04:10:1")]

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.mox.StubOutWithMock(host.Host,
                                 'has_min_version')
        host.Host.has_min_version = lambda x, y: True

        self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver,
                                 '_get_guest_pci_device')

        class FakeDev(object):
            def to_xml(self):
                pass

        libvirt_driver.LibvirtDriver._get_guest_pci_device =\
            lambda x, y: FakeDev()

        class FakeDomain(object):
            def detachDeviceFlags(self, xml, flags):
                # Record that detach happened so the assertion below
                # can observe it.
                pci_devices[0]['hypervisor_name'] = 'marked'
                pass

            def XMLDesc(self, flags):
                return fake_domXML1
        guest = libvirt_guest.Guest(FakeDomain())
        drvr._detach_pci_devices(guest, pci_devices)
        self.assertEqual(pci_devices[0]['hypervisor_name'], 'marked')

    def test_detach_pci_devices_timeout(self):
        # Timeout path: the hostdev address matches the device being
        # detached and never disappears from the XML, so the driver
        # eventually raises PciDeviceDetachFailed.
        fake_domXML1 =\
            """<domain>
                <devices>
                  <hostdev mode="subsystem" type="pci" managed="yes">
                    <source>
            <address function="0x1" slot="0x10" domain="0x0000" bus="0x04"/>
                    </source>
                  </hostdev>
                </devices>
            </domain>"""

        pci_devices = [dict(hypervisor_name='xxx',
                            id='id1',
                            instance_uuid='uuid',
                            address="0000:04:10:1")]

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.mox.StubOutWithMock(host.Host,
                                 'has_min_version')
        host.Host.has_min_version = lambda x, y: True

        self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver,
                                 '_get_guest_pci_device')

        class FakeDev(object):
            def to_xml(self):
                pass

        libvirt_driver.LibvirtDriver._get_guest_pci_device =\
            lambda x, y: FakeDev()

        class FakeDomain(object):
            def detachDeviceFlags(self, xml, flags):
                pass

            def XMLDesc(self, flags):
                return fake_domXML1
        guest = libvirt_guest.Guest(FakeDomain())
        self.assertRaises(exception.PciDeviceDetachFailed,
                          drvr._detach_pci_devices, guest, pci_devices)
    @mock.patch.object(connector, 'get_connector_properties')
    def test_get_connector(self, fake_get_connector):
        # get_volume_connector returns the os-brick connector properties
        # verbatim.
        initiator = 'fake.initiator.iqn'
        ip = 'fakeip'
        host = 'fakehost'
        wwpns = ['100010604b019419']
        wwnns = ['200010604b019419']
        self.flags(my_ip=ip)
        self.flags(host=host)

        expected = {
            'ip': ip,
            'initiator': initiator,
            'host': host,
            'wwpns': wwpns,
            'wwnns': wwnns
        }
        volume = {
            'id': 'fake'
        }

        # TODO(walter-boring) add the fake in os-brick
        fake_get_connector.return_value = expected
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        result = drvr.get_volume_connector(volume)
        self.assertThat(expected, matchers.DictMatches(result))

    @mock.patch.object(connector, 'get_connector_properties')
    def test_get_connector_storage_ip(self, fake_get_connector):
        # my_block_storage_ip takes precedence over my_ip for the
        # connector's 'ip' property.
        ip = '100.100.100.100'
        storage_ip = '101.101.101.101'
        self.flags(my_block_storage_ip=storage_ip, my_ip=ip)
        volume = {
            'id': 'fake'
        }
        expected = {
            'ip': storage_ip
        }

        # TODO(walter-boring) add the fake in os-brick
        fake_get_connector.return_value = expected
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        result = drvr.get_volume_connector(volume)
        self.assertEqual(storage_ip, result['ip'])
    def test_lifecycle_event_registration(self):
        # The error handler must be registered BEFORE host capabilities
        # are queried during init_host; the calls list records order.
        calls = []

        def fake_registerErrorHandler(*args, **kwargs):
            calls.append('fake_registerErrorHandler')

        def fake_get_host_capabilities(**args):
            cpu = vconfig.LibvirtConfigGuestCPU()
            cpu.arch = arch.ARMV7

            caps = vconfig.LibvirtConfigCaps()
            caps.host = vconfig.LibvirtConfigCapsHost()
            caps.host.cpu = cpu
            calls.append('fake_get_host_capabilities')
            return caps

        @mock.patch.object(fakelibvirt, 'registerErrorHandler',
                           side_effect=fake_registerErrorHandler)
        @mock.patch.object(host.Host, "get_capabilities",
                            side_effect=fake_get_host_capabilities)
        def test_init_host(get_host_capabilities, register_error_handler):
            drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
            drvr.init_host("test_host")

        test_init_host()
        # NOTE(dkliban): Will fail if get_host_capabilities is called before
        # registerErrorHandler
        self.assertEqual(['fake_registerErrorHandler',
                          'fake_get_host_capabilities'], calls)

    def test_sanitize_log_to_xml(self):
        # setup fake data
        data = {'auth_password': 'scrubme'}
        bdm = [{'connection_info': {'data': data}}]
        bdi = {'block_device_mapping': bdm}

        # Tests that the parameters to the _get_guest_xml method
        # are sanitized for passwords when logged.
        def fake_debug(*args, **kwargs):
            if 'auth_password' in args[0]:
                self.assertNotIn('scrubme', args[0])

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        conf = mock.Mock()
        with contextlib.nested(
            mock.patch.object(libvirt_driver.LOG, 'debug',
                              side_effect=fake_debug),
            mock.patch.object(drvr, '_get_guest_config', return_value=conf)
        ) as (
            debug_mock, conf_mock
        ):
            drvr._get_guest_xml(self.context, self.test_instance,
                                network_info={}, disk_info={},
                                image_meta={}, block_device_info=bdi)
            # we don't care what the log message is, we just want to make sure
            # our stub method is called which asserts the password is scrubbed
            self.assertTrue(debug_mock.called)
    @mock.patch.object(time, "time")
    def test_get_guest_config(self, time_mock):
        # End-to-end check of _get_guest_config: basic guest settings,
        # the expected ten devices in order, and the full nova metadata
        # element (owner, flavor, creation time).
        time_mock.return_value = 1234567.89

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

        test_instance = copy.deepcopy(self.test_instance)
        test_instance["display_name"] = "purple tomatoes"

        ctxt = context.RequestContext(project_id=123,
                                      project_name="aubergine",
                                      user_id=456,
                                      user_name="pie")

        flavor = objects.Flavor(name='m1.small',
                                memory_mb=6,
                                vcpus=28,
                                root_gb=496,
                                ephemeral_gb=8128,
                                swap=33550336,
                                extra_specs={})
        instance_ref = objects.Instance(**test_instance)
        instance_ref.flavor = flavor
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)

        cfg = drvr._get_guest_config(instance_ref,
                                     _fake_network_info(self.stubs, 1),
                                     image_meta, disk_info,
                                     context=ctxt)

        self.assertEqual(cfg.uuid, instance_ref["uuid"])
        self.assertEqual(2, len(cfg.features))
        self.assertIsInstance(cfg.features[0],
                              vconfig.LibvirtConfigGuestFeatureACPI)
        self.assertIsInstance(cfg.features[1],
                              vconfig.LibvirtConfigGuestFeatureAPIC)
        self.assertEqual(cfg.memory, 6 * units.Ki)
        self.assertEqual(cfg.vcpus, 28)
        self.assertEqual(cfg.os_type, vm_mode.HVM)
        self.assertEqual(cfg.os_boot_dev, ["hd"])
        self.assertIsNone(cfg.os_root)
        # Expected devices: 3 disks, 1 interface, 2 serials, input,
        # graphics, video and memballoon -- in that order.
        self.assertEqual(len(cfg.devices), 10)
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestInterface)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[6],
                              vconfig.LibvirtConfigGuestInput)
        self.assertIsInstance(cfg.devices[7],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[8],
                              vconfig.LibvirtConfigGuestVideo)
        self.assertIsInstance(cfg.devices[9],
                              vconfig.LibvirtConfigMemoryBalloon)
        self.assertEqual(len(cfg.metadata), 1)
        self.assertIsInstance(cfg.metadata[0],
                              vconfig.LibvirtConfigGuestMetaNovaInstance)
        self.assertEqual(version.version_string_with_package(),
                         cfg.metadata[0].package)
        self.assertEqual("purple tomatoes",
                         cfg.metadata[0].name)
        self.assertEqual(1234567.89,
                         cfg.metadata[0].creationTime)
        self.assertEqual("image",
                         cfg.metadata[0].roottype)
        self.assertEqual(str(instance_ref["image_ref"]),
                         cfg.metadata[0].rootid)

        self.assertIsInstance(cfg.metadata[0].owner,
                              vconfig.LibvirtConfigGuestMetaNovaOwner)
        self.assertEqual(456,
                         cfg.metadata[0].owner.userid)
        self.assertEqual("pie",
                         cfg.metadata[0].owner.username)
        self.assertEqual(123,
                         cfg.metadata[0].owner.projectid)
        self.assertEqual("aubergine",
                         cfg.metadata[0].owner.projectname)

        self.assertIsInstance(cfg.metadata[0].flavor,
                              vconfig.LibvirtConfigGuestMetaNovaFlavor)
        self.assertEqual("m1.small",
                         cfg.metadata[0].flavor.name)
        self.assertEqual(6,
                         cfg.metadata[0].flavor.memory)
        self.assertEqual(28,
                         cfg.metadata[0].flavor.vcpus)
        self.assertEqual(496,
                         cfg.metadata[0].flavor.disk)
        self.assertEqual(8128,
                         cfg.metadata[0].flavor.ephemeral)
        self.assertEqual(33550336,
                         cfg.metadata[0].flavor.swap)
def test_get_guest_config_lxc(self):
self.flags(virt_type='lxc', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
cfg = drvr._get_guest_config(instance_ref,
_fake_network_info(self.stubs, 1),
image_meta, {'mapping': {}})
self.assertEqual(instance_ref["uuid"], cfg.uuid)
self.assertEqual(2 * units.Mi, cfg.memory)
self.assertEqual(1, cfg.vcpus)
self.assertEqual(vm_mode.EXE, cfg.os_type)
self.assertEqual("/sbin/init", cfg.os_init_path)
self.assertEqual("console=tty0 console=ttyS0", cfg.os_cmdline)
self.assertIsNone(cfg.os_root)
self.assertEqual(3, len(cfg.devices))
self.assertIsInstance(cfg.devices[0],
vconfig.LibvirtConfigGuestFilesys)
self.assertIsInstance(cfg.devices[1],
vconfig.LibvirtConfigGuestInterface)
self.assertIsInstance(cfg.devices[2],
vconfig.LibvirtConfigGuestConsole)
def test_get_guest_config_lxc_with_id_maps(self):
self.flags(virt_type='lxc', group='libvirt')
self.flags(uid_maps=['0:1000:100'], group='libvirt')
self.flags(gid_maps=['0:1000:100'], group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
cfg = drvr._get_guest_config(instance_ref,
_fake_network_info(self.stubs, 1),
image_meta, {'mapping': {}})
self.assertEqual(instance_ref["uuid"], cfg.uuid)
self.assertEqual(2 * units.Mi, cfg.memory)
self.assertEqual(1, cfg.vcpus)
self.assertEqual(vm_mode.EXE, cfg.os_type)
self.assertEqual("/sbin/init", cfg.os_init_path)
self.assertEqual("console=tty0 console=ttyS0", cfg.os_cmdline)
self.assertIsNone(cfg.os_root)
self.assertEqual(3, len(cfg.devices))
self.assertIsInstance(cfg.devices[0],
vconfig.LibvirtConfigGuestFilesys)
self.assertIsInstance(cfg.devices[1],
vconfig.LibvirtConfigGuestInterface)
self.assertIsInstance(cfg.devices[2],
vconfig.LibvirtConfigGuestConsole)
self.assertEqual(len(cfg.idmaps), 2)
self.assertIsInstance(cfg.idmaps[0],
vconfig.LibvirtConfigGuestUIDMap)
self.assertIsInstance(cfg.idmaps[1],
vconfig.LibvirtConfigGuestGIDMap)
    def test_get_guest_config_numa_host_instance_fits(self):
        """When a tiny instance (1 MB, 2 vcpus) fits the faked NUMA host
        and no pinning is requested, the guest config carries no cpuset,
        no vcpupin entries and no guest NUMA topology.
        """
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        flavor = objects.Flavor(memory_mb=1, vcpus=2, root_gb=496,
                                ephemeral_gb=8128, swap=33550336, name='fake',
                                extra_specs={})
        instance_ref.flavor = flavor
        # Fake an x86_64 host that reports a NUMA topology.
        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = vconfig.LibvirtConfigCPU()
        caps.host.cpu.arch = "x86_64"
        caps.host.topology = self._fake_caps_numa_topology()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        with contextlib.nested(
                mock.patch.object(host.Host, 'has_min_version',
                                  return_value=True),
                mock.patch.object(host.Host, "get_capabilities",
                                  return_value=caps)):
            cfg = drvr._get_guest_config(instance_ref, [],
                                         image_meta, disk_info)
            self.assertIsNone(cfg.cpuset)
            self.assertEqual(0, len(cfg.cputune.vcpupin))
            self.assertIsNone(cfg.cpu.numa)
    def test_get_guest_config_numa_host_instance_no_fit(self):
        """A 4 GB / 4-vcpu instance that does not fit a single host NUMA
        cell falls back to the vcpu_pin_set ({3}) as the guest cpuset,
        without consulting random.choice and without a guest NUMA
        topology.
        """
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        flavor = objects.Flavor(memory_mb=4096, vcpus=4, root_gb=496,
                                ephemeral_gb=8128, swap=33550336, name='fake',
                                extra_specs={})
        instance_ref.flavor = flavor
        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = vconfig.LibvirtConfigCPU()
        caps.host.cpu.arch = "x86_64"
        caps.host.topology = self._fake_caps_numa_topology()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        # The "as" tuple unpacks in the same order the patches are listed.
        with contextlib.nested(
                mock.patch.object(host.Host, "get_capabilities",
                                  return_value=caps),
                mock.patch.object(
                    hardware, 'get_vcpu_pin_set', return_value=set([3])),
                mock.patch.object(random, 'choice')
            ) as (get_host_cap_mock,
                  get_vcpu_pin_set_mock, choice_mock):
            cfg = drvr._get_guest_config(instance_ref, [],
                                         image_meta, disk_info)
            self.assertFalse(choice_mock.called)
            self.assertEqual(set([3]), cfg.cpuset)
            self.assertEqual(0, len(cfg.cputune.vcpupin))
            self.assertIsNone(cfg.cpu.numa)
def _test_get_guest_memory_backing_config(
self, host_topology, inst_topology, numatune):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
with mock.patch.object(
drvr, "_get_host_numa_topology",
return_value=host_topology):
return drvr._get_guest_memory_backing_config(
inst_topology, numatune)
    @mock.patch.object(host.Host,
                       'has_min_version', return_value=True)
    def test_get_guest_config_memory_backing_config_large_success(self, mock_version):
        """An instance cell requesting 2048 KiB pages on a host cell that
        has 2 MB pages available yields a backing config with exactly one
        hugepages element (size_kb=2048) targeting guest node 0.
        """
        # Host cell 3 offers 4K, 2M and (zero) 1G pages.
        host_topology = objects.NUMATopology(
            cells=[
                objects.NUMACell(
                    id=3, cpuset=set([1]), memory=1024, mempages=[
                        objects.NUMAPagesTopology(size_kb=4, total=2000,
                                                  used=0),
                        objects.NUMAPagesTopology(size_kb=2048, total=512,
                                                  used=0),
                        objects.NUMAPagesTopology(size_kb=1048576, total=0,
                                                  used=0),
                    ])])
        inst_topology = objects.InstanceNUMATopology(cells=[
            objects.InstanceNUMACell(
                id=3, cpuset=set([0, 1]), memory=1024, pagesize=2048)])
        # Guest cell 0 is placed on host node 3.
        numa_tune = vconfig.LibvirtConfigGuestNUMATune()
        numa_tune.memnodes = [vconfig.LibvirtConfigGuestNUMATuneMemNode()]
        numa_tune.memnodes[0].cellid = 0
        numa_tune.memnodes[0].nodeset = [3]
        result = self._test_get_guest_memory_backing_config(
            host_topology, inst_topology, numa_tune)
        self.assertEqual(1, len(result.hugepages))
        self.assertEqual(2048, result.hugepages[0].size_kb)
        # The hugepages nodeset uses the guest cell id, not the host's.
        self.assertEqual([0], result.hugepages[0].nodeset)
    @mock.patch.object(host.Host,
                       'has_min_version', return_value=True)
    def test_get_guest_memory_backing_config_smallest(self, mock_version):
        """Requesting the smallest (4 KiB, i.e. non-huge) page size must
        produce no memory backing config at all.
        """
        host_topology = objects.NUMATopology(
            cells=[
                objects.NUMACell(
                    id=3, cpuset=set([1]), memory=1024, mempages=[
                        objects.NUMAPagesTopology(size_kb=4, total=2000,
                                                  used=0),
                        objects.NUMAPagesTopology(size_kb=2048, total=512,
                                                  used=0),
                        objects.NUMAPagesTopology(size_kb=1048576, total=0,
                                                  used=0),
                    ])])
        # pagesize=4 means standard small pages - no hugepage backing.
        inst_topology = objects.InstanceNUMATopology(cells=[
            objects.InstanceNUMACell(
                id=3, cpuset=set([0, 1]), memory=1024, pagesize=4)])
        numa_tune = vconfig.LibvirtConfigGuestNUMATune()
        numa_tune.memnodes = [vconfig.LibvirtConfigGuestNUMATuneMemNode()]
        numa_tune.memnodes[0].cellid = 0
        numa_tune.memnodes[0].nodeset = [3]
        result = self._test_get_guest_memory_backing_config(
            host_topology, inst_topology, numa_tune)
        self.assertIsNone(result)
def test_get_guest_config_numa_host_instance_pci_no_numa_info(self):
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
flavor = objects.Flavor(memory_mb=1, vcpus=2, root_gb=496,
ephemeral_gb=8128, swap=33550336, name='fake',
extra_specs={})
instance_ref.flavor = flavor
caps = vconfig.LibvirtConfigCaps()
caps.host = vconfig.LibvirtConfigCapsHost()
caps.host.cpu = vconfig.LibvirtConfigCPU()
caps.host.cpu.arch = "x86_64"
caps.host.topology = self._fake_caps_numa_topology()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
pci_device_info = dict(test_pci_device.fake_db_dev)
pci_device_info.update(compute_node_id=1,
label='fake',
status=fields.PciDeviceStatus.AVAILABLE,
address='0000:00:00.1',
instance_uuid=None,
request_id=None,
extra_info={},
numa_node=None)
pci_device = objects.PciDevice(**pci_device_info)
with contextlib.nested(
mock.patch.object(host.Host, 'has_min_version',
return_value=True),
mock.patch.object(
host.Host, "get_capabilities", return_value=caps),
mock.patch.object(
hardware, 'get_vcpu_pin_set', return_value=set([3])),
mock.patch.object(host.Host, 'get_online_cpus',
return_value=set(range(8))),
mock.patch.object(pci_manager, "get_instance_pci_devs",
return_value=[pci_device])):
cfg = conn._get_guest_config(instance_ref, [],
image_meta, disk_info)
self.assertEqual(set([3]), cfg.cpuset)
self.assertEqual(0, len(cfg.cputune.vcpupin))
self.assertIsNone(cfg.cpu.numa)
def test_get_guest_config_numa_host_instance_2pci_no_fit(self):
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
flavor = objects.Flavor(memory_mb=4096, vcpus=4, root_gb=496,
ephemeral_gb=8128, swap=33550336, name='fake',
extra_specs={})
instance_ref.flavor = flavor
caps = vconfig.LibvirtConfigCaps()
caps.host = vconfig.LibvirtConfigCapsHost()
caps.host.cpu = vconfig.LibvirtConfigCPU()
caps.host.cpu.arch = "x86_64"
caps.host.topology = self._fake_caps_numa_topology()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
pci_device_info = dict(test_pci_device.fake_db_dev)
pci_device_info.update(compute_node_id=1,
label='fake',
status=fields.PciDeviceStatus.AVAILABLE,
address='0000:00:00.1',
instance_uuid=None,
request_id=None,
extra_info={},
numa_node=1)
pci_device = objects.PciDevice(**pci_device_info)
pci_device_info.update(numa_node=0, address='0000:00:00.2')
pci_device2 = objects.PciDevice(**pci_device_info)
with contextlib.nested(
mock.patch.object(
host.Host, "get_capabilities", return_value=caps),
mock.patch.object(
hardware, 'get_vcpu_pin_set', return_value=set([3])),
mock.patch.object(random, 'choice'),
mock.patch.object(pci_manager, "get_instance_pci_devs",
return_value=[pci_device, pci_device2])
) as (get_host_cap_mock,
get_vcpu_pin_set_mock, choice_mock, pci_mock):
cfg = conn._get_guest_config(instance_ref, [],
image_meta, disk_info)
self.assertFalse(choice_mock.called)
self.assertEqual(set([3]), cfg.cpuset)
self.assertEqual(0, len(cfg.cputune.vcpupin))
self.assertIsNone(cfg.cpu.numa)
    @mock.patch.object(fakelibvirt.Connection, 'getType')
    @mock.patch.object(fakelibvirt.Connection, 'getVersion')
    @mock.patch.object(fakelibvirt.Connection, 'getLibVersion')
    @mock.patch.object(host.Host, 'get_capabilities')
    @mock.patch.object(libvirt_driver.LibvirtDriver, '_set_host_enabled')
    def _test_get_guest_config_numa_unsupported(self, fake_lib_version,
                                                fake_version, fake_type,
                                                fake_arch, exception_class,
                                                pagesize, mock_host,
                                                mock_caps, mock_lib_version,
                                                mock_version, mock_type):
        """Shared helper: fake a host with the given hypervisor type,
        versions and arch, then assert that building a guest config for
        an instance with a one-cell NUMA topology raises exception_class.

        The first six parameters are supplied positionally by callers;
        the five mock_* parameters are injected by the decorators above
        in bottom-up order (_set_host_enabled first, getType last).
        """
        instance_topology = objects.InstanceNUMATopology(
            cells=[objects.InstanceNUMACell(
                id=0, cpuset=set([0]),
                memory=1024, pagesize=pagesize)])
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.numa_topology = instance_topology
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        flavor = objects.Flavor(memory_mb=1, vcpus=2, root_gb=496,
                                ephemeral_gb=8128, swap=33550336, name='fake',
                                extra_specs={})
        instance_ref.flavor = flavor
        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = vconfig.LibvirtConfigCPU()
        caps.host.cpu.arch = fake_arch
        caps.host.topology = self._fake_caps_numa_topology()
        # Wire the fake host facts into the mocks.
        mock_type.return_value = fake_type
        mock_version.return_value = fake_version
        mock_lib_version.return_value = fake_lib_version
        mock_caps.return_value = caps
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        self.assertRaises(exception_class,
                          drvr._get_guest_config,
                          instance_ref, [],
                          image_meta, disk_info)
def test_get_guest_config_numa_old_version_libvirt(self):
self.flags(virt_type='kvm', group='libvirt')
self._test_get_guest_config_numa_unsupported(
utils.convert_version_to_int(
libvirt_driver.MIN_LIBVIRT_NUMA_VERSION) - 1,
utils.convert_version_to_int(
libvirt_driver.MIN_QEMU_NUMA_HUGEPAGE_VERSION),
host.HV_DRIVER_QEMU,
arch.X86_64,
exception.NUMATopologyUnsupported,
None)
def test_get_guest_config_numa_bad_version_libvirt(self):
self.flags(virt_type='kvm', group='libvirt')
self._test_get_guest_config_numa_unsupported(
utils.convert_version_to_int(
libvirt_driver.BAD_LIBVIRT_NUMA_VERSIONS[0]),
utils.convert_version_to_int(
libvirt_driver.MIN_QEMU_NUMA_HUGEPAGE_VERSION),
host.HV_DRIVER_QEMU,
arch.X86_64,
exception.NUMATopologyUnsupported,
None)
@mock.patch.object(libvirt_driver.LOG, 'warn')
def test_has_numa_support_bad_version_libvirt_log(self, mock_warn):
# Tests that a warning is logged once and only once when there is a bad
# BAD_LIBVIRT_NUMA_VERSIONS detected.
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
self.assertFalse(hasattr(drvr, '_bad_libvirt_numa_version_warn'))
with mock.patch.object(drvr._host, 'has_version', return_value=True):
for i in xrange(2):
self.assertFalse(drvr._has_numa_support())
self.assertTrue(drvr._bad_libvirt_numa_version_warn)
self.assertEqual(1, mock_warn.call_count)
# assert the version is logged properly
self.assertEqual('1.2.9.2', mock_warn.call_args[0][1])
def test_get_guest_config_numa_old_version_qemu(self):
self.flags(virt_type='kvm', group='libvirt')
self._test_get_guest_config_numa_unsupported(
utils.convert_version_to_int(
libvirt_driver.MIN_LIBVIRT_NUMA_VERSION),
utils.convert_version_to_int(
libvirt_driver.MIN_QEMU_NUMA_HUGEPAGE_VERSION) - 1,
host.HV_DRIVER_QEMU,
arch.X86_64,
exception.NUMATopologyUnsupported,
None)
def test_get_guest_config_numa_other_arch_qemu(self):
self.flags(virt_type='kvm', group='libvirt')
self._test_get_guest_config_numa_unsupported(
utils.convert_version_to_int(
libvirt_driver.MIN_LIBVIRT_NUMA_VERSION),
utils.convert_version_to_int(
libvirt_driver.MIN_QEMU_NUMA_HUGEPAGE_VERSION),
host.HV_DRIVER_QEMU,
arch.PPC64,
exception.NUMATopologyUnsupported,
None)
def test_get_guest_config_numa_xen(self):
self.flags(virt_type='xen', group='libvirt')
self._test_get_guest_config_numa_unsupported(
utils.convert_version_to_int(
libvirt_driver.MIN_LIBVIRT_NUMA_VERSION),
utils.convert_version_to_int((4, 5, 0)),
'XEN',
arch.X86_64,
exception.NUMATopologyUnsupported,
None)
def test_get_guest_config_numa_old_pages_libvirt(self):
self.flags(virt_type='kvm', group='libvirt')
self._test_get_guest_config_numa_unsupported(
utils.convert_version_to_int(
libvirt_driver.MIN_LIBVIRT_HUGEPAGE_VERSION) - 1,
utils.convert_version_to_int(
libvirt_driver.MIN_QEMU_NUMA_HUGEPAGE_VERSION),
host.HV_DRIVER_QEMU,
arch.X86_64,
exception.MemoryPagesUnsupported,
2048)
def test_get_guest_config_numa_old_pages_qemu(self):
self.flags(virt_type='kvm', group='libvirt')
self._test_get_guest_config_numa_unsupported(
utils.convert_version_to_int(
libvirt_driver.MIN_LIBVIRT_HUGEPAGE_VERSION),
utils.convert_version_to_int(
libvirt_driver.MIN_QEMU_NUMA_HUGEPAGE_VERSION) - 1,
host.HV_DRIVER_QEMU,
arch.X86_64,
exception.NUMATopologyUnsupported,
2048)
    def test_get_guest_config_numa_host_instance_fit_w_cpu_pinset(self):
        """With vcpu_pin_set={2, 3} and an instance that fits, the guest
        cpuset must be exactly the pin set, with no vcpupin entries and
        no guest NUMA topology.
        """
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        flavor = objects.Flavor(memory_mb=1024, vcpus=2, root_gb=496,
                                ephemeral_gb=8128, swap=33550336, name='fake',
                                extra_specs={})
        instance_ref.flavor = flavor
        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = vconfig.LibvirtConfigCPU()
        caps.host.cpu.arch = "x86_64"
        # Give the fake host enough memory (4 GB) to hold the instance.
        caps.host.topology = self._fake_caps_numa_topology(kb_mem=4194304)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        with contextlib.nested(
                mock.patch.object(host.Host, 'has_min_version',
                                  return_value=True),
                mock.patch.object(host.Host, "get_capabilities",
                                  return_value=caps),
                mock.patch.object(
                    hardware, 'get_vcpu_pin_set', return_value=set([2, 3])),
                mock.patch.object(host.Host, 'get_online_cpus',
                                  return_value=set(range(8)))
            ) as (has_min_version_mock, get_host_cap_mock,
                        get_vcpu_pin_set_mock, get_online_cpus_mock):
            cfg = drvr._get_guest_config(instance_ref, [],
                                         image_meta, disk_info)
            # NOTE(ndipanov): we make sure that pin_set was taken into account
            # when choosing viable cells
            self.assertEqual(set([2, 3]), cfg.cpuset)
            self.assertEqual(0, len(cfg.cputune.vcpupin))
            self.assertIsNone(cfg.cpu.numa)
    def test_get_guest_config_non_numa_host_instance_topo(self):
        """An instance with a NUMA topology on a host WITHOUT one
        (caps.host.topology is None) still gets guest cpu.numa cells
        mirroring the instance cells, but no cpuset, no vcpupin and no
        numatune.
        """
        instance_topology = objects.InstanceNUMATopology(
                    cells=[objects.InstanceNUMACell(
                        id=0, cpuset=set([0]), memory=1024),
                           objects.InstanceNUMACell(
                        id=1, cpuset=set([2]), memory=1024)])
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.numa_topology = instance_topology
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        flavor = objects.Flavor(memory_mb=2048, vcpus=2, root_gb=496,
                                ephemeral_gb=8128, swap=33550336, name='fake',
                                extra_specs={})
        instance_ref.flavor = flavor
        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = vconfig.LibvirtConfigCPU()
        caps.host.cpu.arch = "x86_64"
        # No host NUMA topology at all.
        caps.host.topology = None
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        with contextlib.nested(
                mock.patch.object(
                    objects.InstanceNUMATopology, "get_by_instance_uuid",
                    return_value=instance_topology),
                mock.patch.object(host.Host, 'has_min_version',
                                  return_value=True),
                mock.patch.object(host.Host, "get_capabilities",
                                  return_value=caps)):
            cfg = drvr._get_guest_config(instance_ref, [],
                                         image_meta, disk_info)
            self.assertIsNone(cfg.cpuset)
            self.assertEqual(0, len(cfg.cputune.vcpupin))
            self.assertIsNone(cfg.numatune)
            self.assertIsNotNone(cfg.cpu.numa)
            # Guest cells mirror the instance cells; memory is converted
            # from MiB to KiB.
            for instance_cell, numa_cfg_cell in zip(
                    instance_topology.cells, cfg.cpu.numa.cells):
                self.assertEqual(instance_cell.id, numa_cfg_cell.id)
                self.assertEqual(instance_cell.cpuset, numa_cfg_cell.cpus)
                self.assertEqual(instance_cell.memory * units.Ki,
                                 numa_cfg_cell.memory)
    def test_get_guest_config_numa_host_instance_topo(self):
        """Two instance NUMA cells placed onto host cells restricted by
        vcpu_pin_set={2, 3, 4, 5}: each guest vcpu is pinned to its host
        cell's allowed cpus, the emulator is pinned to the union, and
        numatune is strict.
        """
        instance_topology = objects.InstanceNUMATopology(
                    cells=[objects.InstanceNUMACell(
                        id=1, cpuset=set([0, 1]), memory=1024, pagesize=None),
                           objects.InstanceNUMACell(
                                id=2, cpuset=set([2, 3]), memory=1024,
                                pagesize=None)])
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.numa_topology = instance_topology
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        flavor = objects.Flavor(memory_mb=2048, vcpus=4, root_gb=496,
                                ephemeral_gb=8128, swap=33550336, name='fake',
                                extra_specs={})
        instance_ref.flavor = flavor
        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = vconfig.LibvirtConfigCPU()
        caps.host.cpu.arch = "x86_64"
        caps.host.topology = self._fake_caps_numa_topology()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        with contextlib.nested(
                mock.patch.object(
                    objects.InstanceNUMATopology, "get_by_instance_uuid",
                    return_value=instance_topology),
                mock.patch.object(host.Host, 'has_min_version',
                                  return_value=True),
                mock.patch.object(host.Host, "get_capabilities",
                                  return_value=caps),
                mock.patch.object(
                    hardware, 'get_vcpu_pin_set',
                    return_value=set([2, 3, 4, 5])),
                mock.patch.object(host.Host, 'get_online_cpus',
                                  return_value=set(range(8))),
                ):
            cfg = drvr._get_guest_config(instance_ref, [],
                                         image_meta, disk_info)
            self.assertIsNone(cfg.cpuset)
            # Test that the pinning is correct and limited to allowed only
            self.assertEqual(0, cfg.cputune.vcpupin[0].id)
            self.assertEqual(set([2, 3]), cfg.cputune.vcpupin[0].cpuset)
            self.assertEqual(1, cfg.cputune.vcpupin[1].id)
            self.assertEqual(set([2, 3]), cfg.cputune.vcpupin[1].cpuset)
            self.assertEqual(2, cfg.cputune.vcpupin[2].id)
            self.assertEqual(set([4, 5]), cfg.cputune.vcpupin[2].cpuset)
            self.assertEqual(3, cfg.cputune.vcpupin[3].id)
            self.assertEqual(set([4, 5]), cfg.cputune.vcpupin[3].cpuset)
            self.assertIsNotNone(cfg.cpu.numa)
            # Emulator thread is pinned to the union of all vcpu pins.
            self.assertIsInstance(cfg.cputune.emulatorpin,
                                  vconfig.LibvirtConfigGuestCPUTuneEmulatorPin)
            self.assertEqual(set([2, 3, 4, 5]), cfg.cputune.emulatorpin.cpuset)
            # Guest cells are renumbered 0..n-1 regardless of the
            # instance cell ids; memory is converted from MiB to KiB.
            for instance_cell, numa_cfg_cell, index in zip(
                    instance_topology.cells,
                    cfg.cpu.numa.cells,
                    range(len(instance_topology.cells))):
                self.assertEqual(index, numa_cfg_cell.id)
                self.assertEqual(instance_cell.cpuset, numa_cfg_cell.cpus)
                self.assertEqual(instance_cell.memory * units.Ki,
                                 numa_cfg_cell.memory)
            allnodes = [cell.id for cell in instance_topology.cells]
            self.assertEqual(allnodes, cfg.numatune.memory.nodeset)
            self.assertEqual("strict", cfg.numatune.memory.mode)
            # memnodes map each guest cell to its instance cell's host
            # node, also in strict mode.
            for instance_cell, memnode, index in zip(
                    instance_topology.cells,
                    cfg.numatune.memnodes,
                    range(len(instance_topology.cells))):
                self.assertEqual(index, memnode.cellid)
                self.assertEqual([instance_cell.id], memnode.nodeset)
                self.assertEqual("strict", memnode.mode)
    def test_get_guest_config_numa_host_instance_topo_reordered(self):
        """Instance cells listed out of host order (ids 3 then 0) must
        still pin vcpus to the correct host cells and renumber the guest
        cells 0..n-1.
        """
        instance_topology = objects.InstanceNUMATopology(
                    cells=[objects.InstanceNUMACell(
                        id=3, cpuset=set([0, 1]), memory=1024),
                           objects.InstanceNUMACell(
                        id=0, cpuset=set([2, 3]), memory=1024)])
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.numa_topology = instance_topology
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        flavor = objects.Flavor(memory_mb=2048, vcpus=4, root_gb=496,
                                ephemeral_gb=8128, swap=33550336, name='fake',
                                extra_specs={})
        instance_ref.flavor = flavor
        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = vconfig.LibvirtConfigCPU()
        caps.host.cpu.arch = "x86_64"
        caps.host.topology = self._fake_caps_numa_topology()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        with contextlib.nested(
                mock.patch.object(
                    objects.InstanceNUMATopology, "get_by_instance_uuid",
                    return_value=instance_topology),
                mock.patch.object(host.Host, 'has_min_version',
                                  return_value=True),
                mock.patch.object(host.Host, "get_capabilities",
                                  return_value=caps),
                mock.patch.object(host.Host, 'get_online_cpus',
                                  return_value=set(range(8))),
                ):
            cfg = drvr._get_guest_config(instance_ref, [],
                                         image_meta, disk_info)
            self.assertIsNone(cfg.cpuset)
            # Test that the pinning is correct and limited to allowed only
            # (cell id 3 -> host cpus {6, 7}, cell id 0 -> host cpus {0, 1}).
            self.assertEqual(0, cfg.cputune.vcpupin[0].id)
            self.assertEqual(set([6, 7]), cfg.cputune.vcpupin[0].cpuset)
            self.assertEqual(1, cfg.cputune.vcpupin[1].id)
            self.assertEqual(set([6, 7]), cfg.cputune.vcpupin[1].cpuset)
            self.assertEqual(2, cfg.cputune.vcpupin[2].id)
            self.assertEqual(set([0, 1]), cfg.cputune.vcpupin[2].cpuset)
            self.assertEqual(3, cfg.cputune.vcpupin[3].id)
            self.assertEqual(set([0, 1]), cfg.cputune.vcpupin[3].cpuset)
            self.assertIsNotNone(cfg.cpu.numa)
            # Emulator thread is pinned to the union of all vcpu pins.
            self.assertIsInstance(cfg.cputune.emulatorpin,
                                  vconfig.LibvirtConfigGuestCPUTuneEmulatorPin)
            self.assertEqual(set([0, 1, 6, 7]), cfg.cputune.emulatorpin.cpuset)
            # Guest cells are renumbered 0..n-1; memory is MiB -> KiB.
            for index, (instance_cell, numa_cfg_cell) in enumerate(zip(
                    instance_topology.cells,
                    cfg.cpu.numa.cells)):
                self.assertEqual(index, numa_cfg_cell.id)
                self.assertEqual(instance_cell.cpuset, numa_cfg_cell.cpus)
                self.assertEqual(instance_cell.memory * units.Ki,
                                 numa_cfg_cell.memory)
                self.assertIsNone(numa_cfg_cell.memAccess)
            allnodes = set([cell.id for cell in instance_topology.cells])
            self.assertEqual(allnodes, set(cfg.numatune.memory.nodeset))
            self.assertEqual("strict", cfg.numatune.memory.mode)
            for index, (instance_cell, memnode) in enumerate(zip(
                    instance_topology.cells,
                    cfg.numatune.memnodes)):
                self.assertEqual(index, memnode.cellid)
                self.assertEqual([instance_cell.id], memnode.nodeset)
                self.assertEqual("strict", memnode.mode)
def test_get_guest_config_numa_host_instance_topo_cpu_pinning(self):
instance_topology = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(
id=1, cpuset=set([0, 1]), memory=1024,
cpu_pinning={0: 24, 1: 25}),
objects.InstanceNUMACell(
id=0, cpuset=set([2, 3]), memory=1024,
cpu_pinning={2: 0, 3: 1})])
instance_ref = objects.Instance(**self.test_instance)
instance_ref.numa_topology = instance_topology
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
flavor = objects.Flavor(memory_mb=2048, vcpus=2, root_gb=496,
ephemeral_gb=8128, swap=33550336, name='fake',
extra_specs={})
instance_ref.flavor = flavor
caps = vconfig.LibvirtConfigCaps()
caps.host = vconfig.LibvirtConfigCapsHost()
caps.host.cpu = vconfig.LibvirtConfigCPU()
caps.host.cpu.arch = "x86_64"
caps.host.topology = self._fake_caps_numa_topology(
sockets_per_cell=4, cores_per_socket=3, threads_per_core=2)
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
with contextlib.nested(
mock.patch.object(
objects.InstanceNUMATopology, "get_by_instance_uuid",
return_value=instance_topology),
mock.patch.object(host.Host, 'has_min_version',
return_value=True),
mock.patch.object(host.Host, "get_capabilities",
return_value=caps),
mock.patch.object(host.Host, 'get_online_cpus',
return_value=set(range(8))),
):
cfg = conn._get_guest_config(instance_ref, [],
image_meta, disk_info)
self.assertIsNone(cfg.cpuset)
# Test that the pinning is correct and limited to allowed only
self.assertEqual(0, cfg.cputune.vcpupin[0].id)
self.assertEqual(set([24]), cfg.cputune.vcpupin[0].cpuset)
self.assertEqual(1, cfg.cputune.vcpupin[1].id)
self.assertEqual(set([25]), cfg.cputune.vcpupin[1].cpuset)
self.assertEqual(2, cfg.cputune.vcpupin[2].id)
self.assertEqual(set([0]), cfg.cputune.vcpupin[2].cpuset)
self.assertEqual(3, cfg.cputune.vcpupin[3].id)
self.assertEqual(set([1]), cfg.cputune.vcpupin[3].cpuset)
self.assertIsNotNone(cfg.cpu.numa)
# Emulator must be pinned to union of cfg.cputune.vcpupin[*].cpuset
self.assertIsInstance(cfg.cputune.emulatorpin,
vconfig.LibvirtConfigGuestCPUTuneEmulatorPin)
self.assertEqual(set([0, 1, 24, 25]),
cfg.cputune.emulatorpin.cpuset)
for i, (instance_cell, numa_cfg_cell) in enumerate(zip(
instance_topology.cells, cfg.cpu.numa.cells)):
self.assertEqual(i, numa_cfg_cell.id)
self.assertEqual(instance_cell.cpuset, numa_cfg_cell.cpus)
self.assertEqual(instance_cell.memory * units.Ki,
numa_cfg_cell.memory)
self.assertIsNone(numa_cfg_cell.memAccess)
allnodes = set([cell.id for cell in instance_topology.cells])
self.assertEqual(allnodes, set(cfg.numatune.memory.nodeset))
self.assertEqual("strict", cfg.numatune.memory.mode)
for i, (instance_cell, memnode) in enumerate(zip(
instance_topology.cells, cfg.numatune.memnodes)):
self.assertEqual(i, memnode.cellid)
self.assertEqual([instance_cell.id], memnode.nodeset)
self.assertEqual("strict", memnode.mode)
def test_get_guest_config_numa_host_mempages_shared(self):
instance_topology = objects.InstanceNUMATopology(
cells=[
objects.InstanceNUMACell(
id=1, cpuset=set([0, 1]),
memory=1024, pagesize=2048),
objects.InstanceNUMACell(
id=2, cpuset=set([2, 3]),
memory=1024, pagesize=2048)])
instance_ref = objects.Instance(**self.test_instance)
instance_ref.numa_topology = instance_topology
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
flavor = objects.Flavor(memory_mb=2048, vcpus=4, root_gb=496,
ephemeral_gb=8128, swap=33550336, name='fake',
extra_specs={})
instance_ref.flavor = flavor
caps = vconfig.LibvirtConfigCaps()
caps.host = vconfig.LibvirtConfigCapsHost()
caps.host.cpu = vconfig.LibvirtConfigCPU()
caps.host.cpu.arch = "x86_64"
caps.host.topology = self._fake_caps_numa_topology()
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
with contextlib.nested(
mock.patch.object(
objects.InstanceNUMATopology, "get_by_instance_uuid",
return_value=instance_topology),
mock.patch.object(host.Host, 'has_min_version',
return_value=True),
mock.patch.object(host.Host, "get_capabilities",
return_value=caps),
mock.patch.object(
hardware, 'get_vcpu_pin_set',
return_value=set([2, 3, 4, 5])),
mock.patch.object(host.Host, 'get_online_cpus',
return_value=set(range(8))),
):
cfg = drvr._get_guest_config(instance_ref, [],
image_meta, disk_info)
for instance_cell, numa_cfg_cell, index in zip(
instance_topology.cells,
cfg.cpu.numa.cells,
range(len(instance_topology.cells))):
self.assertEqual(index, numa_cfg_cell.id)
self.assertEqual(instance_cell.cpuset, numa_cfg_cell.cpus)
self.assertEqual(instance_cell.memory * units.Ki,
numa_cfg_cell.memory)
self.assertEqual("shared", numa_cfg_cell.memAccess)
allnodes = [cell.id for cell in instance_topology.cells]
self.assertEqual(allnodes, cfg.numatune.memory.nodeset)
self.assertEqual("strict", cfg.numatune.memory.mode)
for instance_cell, memnode, index in zip(
instance_topology.cells,
cfg.numatune.memnodes,
range(len(instance_topology.cells))):
self.assertEqual(index, memnode.cellid)
self.assertEqual([instance_cell.id], memnode.nodeset)
self.assertEqual("strict", memnode.mode)
def test_get_cpu_numa_config_from_instance(self):
topology = objects.InstanceNUMATopology(cells=[
objects.InstanceNUMACell(id=0, cpuset=set([1, 2]), memory=128),
objects.InstanceNUMACell(id=1, cpuset=set([3, 4]), memory=128),
])
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
conf = drvr._get_cpu_numa_config_from_instance(topology, True)
self.assertIsInstance(conf, vconfig.LibvirtConfigGuestCPUNUMA)
self.assertEqual(0, conf.cells[0].id)
self.assertEqual(set([1, 2]), conf.cells[0].cpus)
self.assertEqual(131072, conf.cells[0].memory)
self.assertEqual("shared", conf.cells[0].memAccess)
self.assertEqual(1, conf.cells[1].id)
self.assertEqual(set([3, 4]), conf.cells[1].cpus)
self.assertEqual(131072, conf.cells[1].memory)
self.assertEqual("shared", conf.cells[1].memAccess)
def test_get_cpu_numa_config_from_instance_none(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
conf = drvr._get_cpu_numa_config_from_instance(None, False)
self.assertIsNone(conf)
@mock.patch.object(host.Host, 'has_version', return_value=True)
def test_has_cpu_policy_support(self, mock_has_version):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
self.assertRaises(exception.CPUPinningNotSupported,
drvr._has_cpu_policy_support)
    @mock.patch.object(libvirt_driver.LibvirtDriver, "_has_numa_support",
                       return_value=True)
    @mock.patch.object(libvirt_driver.LibvirtDriver, "_has_hugepage_support",
                       return_value=True)
    @mock.patch.object(host.Host, "get_capabilities")
    def test_does_not_want_hugepages(self, mock_caps, mock_numa, mock_hp):
        """_wants_hugepages is False when either topology is missing or
        when every instance cell requests the small (4 KiB) page size.
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        # pagesize=4 on every cell, i.e. standard small pages.
        instance_topology = objects.InstanceNUMATopology(
            cells=[
                objects.InstanceNUMACell(
                    id=1, cpuset=set([0, 1]),
                    memory=1024, pagesize=4),
                objects.InstanceNUMACell(
                    id=2, cpuset=set([2, 3]),
                    memory=1024, pagesize=4)])
        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = vconfig.LibvirtConfigCPU()
        caps.host.cpu.arch = "x86_64"
        caps.host.topology = self._fake_caps_numa_topology()
        mock_caps.return_value = caps
        host_topology = drvr._get_host_numa_topology()
        self.assertFalse(drvr._wants_hugepages(None, None))
        self.assertFalse(drvr._wants_hugepages(host_topology, None))
        self.assertFalse(drvr._wants_hugepages(None, instance_topology))
        self.assertFalse(drvr._wants_hugepages(host_topology,
                                               instance_topology))
    @mock.patch.object(libvirt_driver.LibvirtDriver, "_has_numa_support",
                       return_value=True)
    @mock.patch.object(libvirt_driver.LibvirtDriver, "_has_hugepage_support",
                       return_value=True)
    @mock.patch.object(host.Host, "get_capabilities")
    def test_does_want_hugepages(self, mock_caps, mock_numa, mock_hp):
        """_wants_hugepages is True when both topologies exist and the
        instance cells request a 2048 KiB page size.
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_topology = objects.InstanceNUMATopology(
            cells=[
                objects.InstanceNUMACell(
                    id=1, cpuset=set([0, 1]),
                    memory=1024, pagesize=2048),
                objects.InstanceNUMACell(
                    id=2, cpuset=set([2, 3]),
                    memory=1024, pagesize=2048)])
        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = vconfig.LibvirtConfigCPU()
        caps.host.cpu.arch = "x86_64"
        caps.host.topology = self._fake_caps_numa_topology()
        mock_caps.return_value = caps
        host_topology = drvr._get_host_numa_topology()
        self.assertTrue(drvr._wants_hugepages(host_topology,
                                              instance_topology))
def test_get_guest_config_clock(self):
    """Guest clock is UTC with pit/rtc timers; hpet only on x86 arches.

    Iterates over a map of guest architectures; on x86/i686 a third,
    disabled hpet timer entry is expected, on other arches only two
    timers appear.
    """
    self.flags(virt_type='kvm', group='libvirt')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_ref = objects.Instance(**self.test_instance)
    image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)
    # Arch -> whether an hpet timer entry is expected in the config.
    hpet_map = {
        arch.X86_64: True,
        arch.I686: True,
        arch.PPC: False,
        arch.PPC64: False,
        arch.ARMV7: False,
        arch.AARCH64: False,
        }
    for guestarch, expect_hpet in hpet_map.items():
        with mock.patch.object(libvirt_driver.libvirt_utils,
                               'get_arch',
                               return_value=guestarch):
            cfg = drvr._get_guest_config(instance_ref, [],
                                         image_meta,
                                         disk_info)
            self.assertIsInstance(cfg.clock,
                                  vconfig.LibvirtConfigGuestClock)
            self.assertEqual(cfg.clock.offset, "utc")
            self.assertIsInstance(cfg.clock.timers[0],
                                  vconfig.LibvirtConfigGuestTimer)
            self.assertIsInstance(cfg.clock.timers[1],
                                  vconfig.LibvirtConfigGuestTimer)
            self.assertEqual(cfg.clock.timers[0].name, "pit")
            self.assertEqual(cfg.clock.timers[0].tickpolicy,
                             "delay")
            self.assertEqual(cfg.clock.timers[1].name, "rtc")
            self.assertEqual(cfg.clock.timers[1].tickpolicy,
                             "catchup")
            if expect_hpet:
                self.assertEqual(3, len(cfg.clock.timers))
                self.assertIsInstance(cfg.clock.timers[2],
                                      vconfig.LibvirtConfigGuestTimer)
                self.assertEqual('hpet', cfg.clock.timers[2].name)
                # hpet timer entry exists but is explicitly not present.
                self.assertFalse(cfg.clock.timers[2].present)
            else:
                self.assertEqual(2, len(cfg.clock.timers))
@mock.patch.object(libvirt_utils, 'get_arch')
@mock.patch.object(host.Host, 'has_min_version')
def test_get_guest_config_windows(self, mock_version, mock_get_arch):
    """Windows guests get a localtime clock with pit/rtc and disabled hpet.

    has_min_version is forced False, so no hypervclock timer is added.
    """
    mock_version.return_value = False
    mock_get_arch.return_value = arch.I686
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_ref = objects.Instance(**self.test_instance)
    instance_ref['os_type'] = 'windows'
    image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)
    cfg = drvr._get_guest_config(instance_ref,
                                 _fake_network_info(self.stubs, 1),
                                 image_meta, disk_info)
    self.assertIsInstance(cfg.clock,
                          vconfig.LibvirtConfigGuestClock)
    self.assertEqual(cfg.clock.offset, "localtime")
    self.assertEqual(3, len(cfg.clock.timers), cfg.clock.timers)
    self.assertEqual("pit", cfg.clock.timers[0].name)
    self.assertEqual("rtc", cfg.clock.timers[1].name)
    self.assertEqual("hpet", cfg.clock.timers[2].name)
    self.assertFalse(cfg.clock.timers[2].present)
@mock.patch.object(libvirt_utils, 'get_arch')
@mock.patch.object(host.Host, 'has_min_version')
def test_get_guest_config_windows_timer(self, mock_version, mock_get_arch):
    """With a new enough libvirt, Windows guests also get hypervclock.

    has_min_version is forced True, so a fourth, present hypervclock
    timer and the HyperV guest feature are expected.
    """
    mock_version.return_value = True
    mock_get_arch.return_value = arch.I686
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_ref = objects.Instance(**self.test_instance)
    instance_ref['os_type'] = 'windows'
    image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)
    cfg = drvr._get_guest_config(instance_ref,
                                 _fake_network_info(self.stubs, 1),
                                 image_meta, disk_info)
    self.assertIsInstance(cfg.clock,
                          vconfig.LibvirtConfigGuestClock)
    self.assertEqual(cfg.clock.offset, "localtime")
    self.assertEqual(4, len(cfg.clock.timers), cfg.clock.timers)
    self.assertEqual("pit", cfg.clock.timers[0].name)
    self.assertEqual("rtc", cfg.clock.timers[1].name)
    self.assertEqual("hpet", cfg.clock.timers[2].name)
    self.assertFalse(cfg.clock.timers[2].present)
    self.assertEqual("hypervclock", cfg.clock.timers[3].name)
    self.assertTrue(cfg.clock.timers[3].present)
    # ACPI, APIC and HyperV features are all enabled for Windows.
    self.assertEqual(3, len(cfg.features))
    self.assertIsInstance(cfg.features[0],
                          vconfig.LibvirtConfigGuestFeatureACPI)
    self.assertIsInstance(cfg.features[1],
                          vconfig.LibvirtConfigGuestFeatureAPIC)
    self.assertIsInstance(cfg.features[2],
                          vconfig.LibvirtConfigGuestFeatureHyperV)
@mock.patch.object(host.Host, 'has_min_version')
def test_get_guest_config_windows_hyperv_feature1(self, mock_version):
    """Older libvirt/hypervisor combo: only the HyperV 'relaxed' flag is set.

    The fake version check only accepts lv_ver (1, 0, 0) / hv_ver
    (1, 1, 0), so spinlocks and vapic stay disabled.
    """
    def fake_version(lv_ver=None, hv_ver=None, hv_type=None):
        # Only the baseline version pair qualifies.
        if lv_ver == (1, 0, 0) and hv_ver == (1, 1, 0):
            return True
        return False
    mock_version.side_effect = fake_version
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_ref = objects.Instance(**self.test_instance)
    instance_ref['os_type'] = 'windows'
    image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)
    cfg = drvr._get_guest_config(instance_ref,
                                 _fake_network_info(self.stubs, 1),
                                 image_meta, disk_info)
    self.assertIsInstance(cfg.clock,
                          vconfig.LibvirtConfigGuestClock)
    self.assertEqual(cfg.clock.offset, "localtime")
    self.assertEqual(3, len(cfg.features))
    self.assertIsInstance(cfg.features[0],
                          vconfig.LibvirtConfigGuestFeatureACPI)
    self.assertIsInstance(cfg.features[1],
                          vconfig.LibvirtConfigGuestFeatureAPIC)
    self.assertIsInstance(cfg.features[2],
                          vconfig.LibvirtConfigGuestFeatureHyperV)
    self.assertTrue(cfg.features[2].relaxed)
    self.assertFalse(cfg.features[2].spinlocks)
    self.assertFalse(cfg.features[2].vapic)
@mock.patch.object(host.Host, 'has_min_version')
def test_get_guest_config_windows_hyperv_feature2(self, mock_version):
    """Newer versions enable all HyperV flags: relaxed, spinlocks, vapic."""
    mock_version.return_value = True
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_ref = objects.Instance(**self.test_instance)
    instance_ref['os_type'] = 'windows'
    image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)
    cfg = drvr._get_guest_config(instance_ref,
                                 _fake_network_info(self.stubs, 1),
                                 image_meta, disk_info)
    self.assertIsInstance(cfg.clock,
                          vconfig.LibvirtConfigGuestClock)
    self.assertEqual(cfg.clock.offset, "localtime")
    self.assertEqual(3, len(cfg.features))
    self.assertIsInstance(cfg.features[0],
                          vconfig.LibvirtConfigGuestFeatureACPI)
    self.assertIsInstance(cfg.features[1],
                          vconfig.LibvirtConfigGuestFeatureAPIC)
    self.assertIsInstance(cfg.features[2],
                          vconfig.LibvirtConfigGuestFeatureHyperV)
    self.assertTrue(cfg.features[2].relaxed)
    self.assertTrue(cfg.features[2].spinlocks)
    # 8191 is the spinlock retry count the driver configures.
    self.assertEqual(8191, cfg.features[2].spinlock_retries)
    self.assertTrue(cfg.features[2].vapic)
def test_get_guest_config_with_two_nics(self):
    """Two NICs yield two interface devices in the expected device order."""
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_ref = objects.Instance(**self.test_instance)
    image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)
    # Request two fake NICs.
    cfg = drvr._get_guest_config(instance_ref,
                                 _fake_network_info(self.stubs, 2),
                                 image_meta, disk_info)
    self.assertEqual(2, len(cfg.features))
    self.assertIsInstance(cfg.features[0],
                          vconfig.LibvirtConfigGuestFeatureACPI)
    self.assertIsInstance(cfg.features[1],
                          vconfig.LibvirtConfigGuestFeatureAPIC)
    self.assertEqual(cfg.memory, 2 * units.Mi)
    self.assertEqual(cfg.vcpus, 1)
    self.assertEqual(cfg.os_type, vm_mode.HVM)
    self.assertEqual(cfg.os_boot_dev, ["hd"])
    self.assertIsNone(cfg.os_root)
    # Devices: 2 disks, 2 interfaces, 2 serials, input, graphics,
    # video, memballoon.
    self.assertEqual(len(cfg.devices), 10)
    self.assertIsInstance(cfg.devices[0],
                          vconfig.LibvirtConfigGuestDisk)
    self.assertIsInstance(cfg.devices[1],
                          vconfig.LibvirtConfigGuestDisk)
    self.assertIsInstance(cfg.devices[2],
                          vconfig.LibvirtConfigGuestInterface)
    self.assertIsInstance(cfg.devices[3],
                          vconfig.LibvirtConfigGuestInterface)
    self.assertIsInstance(cfg.devices[4],
                          vconfig.LibvirtConfigGuestSerial)
    self.assertIsInstance(cfg.devices[5],
                          vconfig.LibvirtConfigGuestSerial)
    self.assertIsInstance(cfg.devices[6],
                          vconfig.LibvirtConfigGuestInput)
    self.assertIsInstance(cfg.devices[7],
                          vconfig.LibvirtConfigGuestGraphics)
    self.assertIsInstance(cfg.devices[8],
                          vconfig.LibvirtConfigGuestVideo)
    self.assertIsInstance(cfg.devices[9],
                          vconfig.LibvirtConfigMemoryBalloon)
def test_get_guest_config_bug_1118829(self):
    """Regression test for bug 1118829.

    Building a guest config with an empty block_device_info must still
    set the instance's root_device_name to /dev/vda.
    """
    self.flags(virt_type='uml', group='libvirt')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_ref = objects.Instance(**self.test_instance)
    disk_info = {'disk_bus': 'virtio',
                 'cdrom_bus': 'ide',
                 'mapping': {u'vda': {'bus': 'virtio',
                                      'type': 'disk',
                                      'dev': u'vda'},
                             'root': {'bus': 'virtio',
                                      'type': 'disk',
                                      'dev': 'vda'}}}
    # NOTE(jdg): For this specific test leave this blank
    # This will exercise the failed code path still,
    # and won't require fakes and stubs of the iscsi discovery
    block_device_info = {}
    image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
    drvr._get_guest_config(instance_ref, [], image_meta, disk_info,
                           None, block_device_info)
    self.assertEqual(instance_ref['root_device_name'], '/dev/vda')
def test_get_guest_config_with_root_device_name(self):
    """An explicit root_device_name in block_device_info becomes os_root."""
    self.flags(virt_type='uml', group='libvirt')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_ref = objects.Instance(**self.test_instance)
    image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
    block_device_info = {'root_device_name': '/dev/vdb'}
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta,
                                        block_device_info)
    cfg = drvr._get_guest_config(instance_ref, [],
                                 image_meta, disk_info,
                                 None, block_device_info)
    # UML guests get no features and no boot device list.
    self.assertEqual(0, len(cfg.features))
    self.assertEqual(cfg.memory, 2 * units.Mi)
    self.assertEqual(cfg.vcpus, 1)
    self.assertEqual(cfg.os_type, "uml")
    self.assertEqual(cfg.os_boot_dev, [])
    self.assertEqual(cfg.os_root, '/dev/vdb')
    self.assertEqual(len(cfg.devices), 3)
    self.assertIsInstance(cfg.devices[0],
                          vconfig.LibvirtConfigGuestDisk)
    self.assertIsInstance(cfg.devices[1],
                          vconfig.LibvirtConfigGuestDisk)
    self.assertIsInstance(cfg.devices[2],
                          vconfig.LibvirtConfigGuestConsole)
def test_get_guest_config_with_block_device(self):
    """Volume BDMs show up as disks with their requested target devices."""
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_ref = objects.Instance(**self.test_instance)
    image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
    conn_info = {'driver_volume_type': 'fake'}
    # Two volume block devices mapped to /dev/vdc and /dev/vdd.
    info = {'block_device_mapping': driver_block_device.convert_volumes([
                fake_block_device.FakeDbBlockDeviceDict(
                    {'id': 1,
                     'source_type': 'volume', 'destination_type': 'volume',
                     'device_name': '/dev/vdc'}),
                fake_block_device.FakeDbBlockDeviceDict(
                    {'id': 2,
                     'source_type': 'volume', 'destination_type': 'volume',
                     'device_name': '/dev/vdd'}),
            ])}
    info['block_device_mapping'][0]['connection_info'] = conn_info
    info['block_device_mapping'][1]['connection_info'] = conn_info
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta,
                                        info)
    # Patch save so BDM updates don't hit a real database.
    with mock.patch.object(
            driver_block_device.DriverVolumeBlockDevice, 'save'
    ) as mock_save:
        cfg = drvr._get_guest_config(instance_ref, [],
                                     image_meta, disk_info,
                                     None, info)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertEqual(cfg.devices[2].target_dev, 'vdc')
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertEqual(cfg.devices[3].target_dev, 'vdd')
        mock_save.assert_called_with()
def test_get_guest_config_lxc_with_attached_volume(self):
    """LXC guests include attached (non-boot) volumes as disk devices."""
    self.flags(virt_type='lxc', group='libvirt')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_ref = objects.Instance(**self.test_instance)
    image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
    conn_info = {'driver_volume_type': 'fake'}
    # Three volumes: one boot volume (boot_index 0) plus two data volumes.
    info = {'block_device_mapping': driver_block_device.convert_volumes([
                fake_block_device.FakeDbBlockDeviceDict(
                    {'id': 1,
                     'source_type': 'volume', 'destination_type': 'volume',
                     'boot_index': 0}),
                fake_block_device.FakeDbBlockDeviceDict(
                    {'id': 2,
                     'source_type': 'volume', 'destination_type': 'volume',
                    }),
                fake_block_device.FakeDbBlockDeviceDict(
                    {'id': 3,
                     'source_type': 'volume', 'destination_type': 'volume',
                    }),
            ])}
    info['block_device_mapping'][0]['connection_info'] = conn_info
    info['block_device_mapping'][1]['connection_info'] = conn_info
    info['block_device_mapping'][2]['connection_info'] = conn_info
    info['block_device_mapping'][0]['mount_device'] = '/dev/vda'
    info['block_device_mapping'][1]['mount_device'] = '/dev/vdc'
    info['block_device_mapping'][2]['mount_device'] = '/dev/vdd'
    # Patch save so BDM updates don't hit a real database.
    with mock.patch.object(
            driver_block_device.DriverVolumeBlockDevice, 'save'
    ) as mock_save:
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta,
                                            info)
        cfg = drvr._get_guest_config(instance_ref, [],
                                     image_meta, disk_info,
                                     None, info)
        # The two data volumes follow the root disk in the device list.
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertEqual(cfg.devices[1].target_dev, 'vdc')
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertEqual(cfg.devices[2].target_dev, 'vdd')
        mock_save.assert_called_with()
def test_get_guest_config_with_configdrive(self):
    """The config drive is attached as the last device of its bus.

    It's necessary to check if the architecture is power, because
    power doesn't have support to ide, and so libvirt translates
    all ide calls to scsi.
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_ref = objects.Instance(**self.test_instance)
    image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
    # make configdrive.required_by() return True
    instance_ref['config_drive'] = True
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)
    cfg = drvr._get_guest_config(instance_ref, [],
                                 image_meta, disk_info)
    # The last device is selected for this. on x86 is the last ide
    # device (hdd). Since power only support scsi, the last device
    # is sdz
    expect = {"ppc": "sdz", "ppc64": "sdz"}
    disk = expect.get(blockinfo.libvirt_utils.get_arch({}), "hdd")
    self.assertIsInstance(cfg.devices[2],
                          vconfig.LibvirtConfigGuestDisk)
    self.assertEqual(cfg.devices[2].target_dev, disk)
def test_get_guest_config_with_virtio_scsi_bus(self):
    """An image requesting hw_scsi_model=virtio-scsi gets a SCSI controller."""
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    image_meta = objects.ImageMeta.from_dict({
        "disk_format": "raw",
        "properties": {"hw_scsi_model": "virtio-scsi"}})
    instance_ref = objects.Instance(**self.test_instance)
    disk_info = blockinfo.get_disk_info(
        CONF.libvirt.virt_type, instance_ref, image_meta, [])

    cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info)

    # The first two devices are the instance disks, followed by the
    # virtio-scsi controller the image property asked for.
    for index in (0, 1):
        self.assertIsInstance(cfg.devices[index],
                              vconfig.LibvirtConfigGuestDisk)
    controller = cfg.devices[2]
    self.assertIsInstance(controller,
                          vconfig.LibvirtConfigGuestController)
    self.assertEqual(controller.model, 'virtio-scsi')
def test_get_guest_config_with_virtio_scsi_bus_bdm(self):
    """SCSI-bus volume BDMs attach on the virtio-scsi controller."""
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    image_meta = objects.ImageMeta.from_dict({
        "disk_format": "raw",
        "properties": {"hw_scsi_model": "virtio-scsi"}})
    instance_ref = objects.Instance(**self.test_instance)
    conn_info = {'driver_volume_type': 'fake'}
    # Two volumes explicitly requesting the scsi disk bus.
    bd_info = {
        'block_device_mapping': driver_block_device.convert_volumes([
            fake_block_device.FakeDbBlockDeviceDict(
                {'id': 1,
                 'source_type': 'volume', 'destination_type': 'volume',
                 'device_name': '/dev/sdc', 'disk_bus': 'scsi'}),
            fake_block_device.FakeDbBlockDeviceDict(
                {'id': 2,
                 'source_type': 'volume', 'destination_type': 'volume',
                 'device_name': '/dev/sdd', 'disk_bus': 'scsi'}),
        ])}
    bd_info['block_device_mapping'][0]['connection_info'] = conn_info
    bd_info['block_device_mapping'][1]['connection_info'] = conn_info
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta,
                                        bd_info)
    # Patch save so BDM updates don't hit a real database.
    with mock.patch.object(
            driver_block_device.DriverVolumeBlockDevice, 'save'
    ) as mock_save:
        cfg = drvr._get_guest_config(instance_ref, [], image_meta,
                                     disk_info, [], bd_info)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertEqual(cfg.devices[2].target_dev, 'sdc')
        self.assertEqual(cfg.devices[2].target_bus, 'scsi')
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertEqual(cfg.devices[3].target_dev, 'sdd')
        self.assertEqual(cfg.devices[3].target_bus, 'scsi')
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestController)
        self.assertEqual(cfg.devices[4].model, 'virtio-scsi')
        mock_save.assert_called_with()
def test_get_guest_config_with_vnc(self):
    """VNC on, tablet off, SPICE off: 7 devices and a VNC graphics device."""
    self.flags(enabled=True, group='vnc')
    self.flags(virt_type='kvm',
               use_usb_tablet=False,
               group='libvirt')
    self.flags(enabled=False, group='spice')

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_ref = objects.Instance(**self.test_instance)
    image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)
    cfg = drvr._get_guest_config(instance_ref, [],
                                 image_meta, disk_info)

    # Expected device layout: two disks, two serial ports, graphics,
    # video and the memory balloon (no USB tablet input device).
    expected_types = [vconfig.LibvirtConfigGuestDisk,
                      vconfig.LibvirtConfigGuestDisk,
                      vconfig.LibvirtConfigGuestSerial,
                      vconfig.LibvirtConfigGuestSerial,
                      vconfig.LibvirtConfigGuestGraphics,
                      vconfig.LibvirtConfigGuestVideo,
                      vconfig.LibvirtConfigMemoryBalloon]
    self.assertEqual(len(cfg.devices), len(expected_types))
    for device, device_type in zip(cfg.devices, expected_types):
        self.assertIsInstance(device, device_type)
    self.assertEqual(cfg.devices[4].type, "vnc")
def test_get_guest_config_with_vnc_and_tablet(self):
    """VNC plus USB tablet adds an input device before the graphics device."""
    self.flags(enabled=True, group='vnc')
    self.flags(virt_type='kvm',
               use_usb_tablet=True,
               group='libvirt')
    self.flags(enabled=False, group='spice')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_ref = objects.Instance(**self.test_instance)
    image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)
    cfg = drvr._get_guest_config(instance_ref, [],
                                 image_meta, disk_info)
    self.assertEqual(len(cfg.devices), 8)
    self.assertIsInstance(cfg.devices[0],
                          vconfig.LibvirtConfigGuestDisk)
    self.assertIsInstance(cfg.devices[1],
                          vconfig.LibvirtConfigGuestDisk)
    self.assertIsInstance(cfg.devices[2],
                          vconfig.LibvirtConfigGuestSerial)
    self.assertIsInstance(cfg.devices[3],
                          vconfig.LibvirtConfigGuestSerial)
    self.assertIsInstance(cfg.devices[4],
                          vconfig.LibvirtConfigGuestInput)
    self.assertIsInstance(cfg.devices[5],
                          vconfig.LibvirtConfigGuestGraphics)
    self.assertIsInstance(cfg.devices[6],
                          vconfig.LibvirtConfigGuestVideo)
    self.assertIsInstance(cfg.devices[7],
                          vconfig.LibvirtConfigMemoryBalloon)
    self.assertEqual(cfg.devices[4].type, "tablet")
    self.assertEqual(cfg.devices[5].type, "vnc")
def test_get_guest_config_with_spice_and_tablet(self):
    """SPICE without the agent keeps the USB tablet input device."""
    self.flags(enabled=False, group='vnc')
    self.flags(virt_type='kvm',
               use_usb_tablet=True,
               group='libvirt')
    self.flags(enabled=True,
               agent_enabled=False,
               group='spice')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_ref = objects.Instance(**self.test_instance)
    image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)
    cfg = drvr._get_guest_config(instance_ref, [],
                                 image_meta, disk_info)
    self.assertEqual(len(cfg.devices), 8)
    self.assertIsInstance(cfg.devices[0],
                          vconfig.LibvirtConfigGuestDisk)
    self.assertIsInstance(cfg.devices[1],
                          vconfig.LibvirtConfigGuestDisk)
    self.assertIsInstance(cfg.devices[2],
                          vconfig.LibvirtConfigGuestSerial)
    self.assertIsInstance(cfg.devices[3],
                          vconfig.LibvirtConfigGuestSerial)
    self.assertIsInstance(cfg.devices[4],
                          vconfig.LibvirtConfigGuestInput)
    self.assertIsInstance(cfg.devices[5],
                          vconfig.LibvirtConfigGuestGraphics)
    self.assertIsInstance(cfg.devices[6],
                          vconfig.LibvirtConfigGuestVideo)
    self.assertIsInstance(cfg.devices[7],
                          vconfig.LibvirtConfigMemoryBalloon)
    self.assertEqual(cfg.devices[4].type, "tablet")
    self.assertEqual(cfg.devices[5].type, "spice")
def test_get_guest_config_with_spice_and_agent(self):
    """SPICE with the agent replaces the tablet with a spicevmc channel."""
    self.flags(enabled=False, group='vnc')
    self.flags(virt_type='kvm',
               use_usb_tablet=True,
               group='libvirt')
    self.flags(enabled=True,
               agent_enabled=True,
               group='spice')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_ref = objects.Instance(**self.test_instance)
    image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)
    cfg = drvr._get_guest_config(instance_ref, [],
                                 image_meta, disk_info)
    self.assertEqual(len(cfg.devices), 8)
    self.assertIsInstance(cfg.devices[0],
                          vconfig.LibvirtConfigGuestDisk)
    self.assertIsInstance(cfg.devices[1],
                          vconfig.LibvirtConfigGuestDisk)
    self.assertIsInstance(cfg.devices[2],
                          vconfig.LibvirtConfigGuestSerial)
    self.assertIsInstance(cfg.devices[3],
                          vconfig.LibvirtConfigGuestSerial)
    self.assertIsInstance(cfg.devices[4],
                          vconfig.LibvirtConfigGuestChannel)
    self.assertIsInstance(cfg.devices[5],
                          vconfig.LibvirtConfigGuestGraphics)
    self.assertIsInstance(cfg.devices[6],
                          vconfig.LibvirtConfigGuestVideo)
    self.assertIsInstance(cfg.devices[7],
                          vconfig.LibvirtConfigMemoryBalloon)
    # SPICE agent channel plus qxl video are expected with the agent on.
    self.assertEqual(cfg.devices[4].target_name, "com.redhat.spice.0")
    self.assertEqual(cfg.devices[5].type, "spice")
    self.assertEqual(cfg.devices[6].type, "qxl")
@mock.patch('nova.console.serial.acquire_port')
@mock.patch('nova.virt.hardware.get_number_of_serial_ports',
            return_value=1)
@mock.patch.object(libvirt_driver.libvirt_utils, 'get_arch',)
def test_create_serial_console_devices_based_on_arch(self, mock_get_arch,
                                                     mock_get_port_number,
                                                     mock_acquire_port):
    """Serial consoles use <serial> on x86 but <console> on s390/s390x."""
    self.flags(enabled=True, group='serial_console')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    # Arch -> expected console device config class.
    expected = {arch.X86_64: vconfig.LibvirtConfigGuestSerial,
                arch.S390: vconfig.LibvirtConfigGuestConsole,
                arch.S390X: vconfig.LibvirtConfigGuestConsole}
    for guest_arch, device_type in expected.items():
        mock_get_arch.return_value = guest_arch
        guest = vconfig.LibvirtConfigGuest()
        drvr._create_serial_console_devices(guest, instance=None,
                                            flavor={}, image_meta={})
        self.assertEqual(1, len(guest.devices))
        console_device = guest.devices[0]
        self.assertIsInstance(console_device, device_type)
        self.assertEqual("tcp", console_device.type)
@mock.patch('nova.console.serial.acquire_port')
def test_get_guest_config_serial_console(self, acquire_port):
    """With serial_console enabled the serial device is TCP on the
    acquired port."""
    self.flags(enabled=True, group='serial_console')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_ref = objects.Instance(**self.test_instance)
    image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)
    acquire_port.return_value = 11111
    cfg = drvr._get_guest_config(instance_ref, [],
                                 image_meta, disk_info)
    self.assertEqual(8, len(cfg.devices))
    self.assertIsInstance(cfg.devices[0],
                          vconfig.LibvirtConfigGuestDisk)
    self.assertIsInstance(cfg.devices[1],
                          vconfig.LibvirtConfigGuestDisk)
    self.assertIsInstance(cfg.devices[2],
                          vconfig.LibvirtConfigGuestSerial)
    self.assertIsInstance(cfg.devices[3],
                          vconfig.LibvirtConfigGuestSerial)
    self.assertIsInstance(cfg.devices[4],
                          vconfig.LibvirtConfigGuestInput)
    self.assertIsInstance(cfg.devices[5],
                          vconfig.LibvirtConfigGuestGraphics)
    self.assertIsInstance(cfg.devices[6],
                          vconfig.LibvirtConfigGuestVideo)
    self.assertIsInstance(cfg.devices[7],
                          vconfig.LibvirtConfigMemoryBalloon)
    self.assertEqual("tcp", cfg.devices[2].type)
    self.assertEqual(11111, cfg.devices[2].listen_port)
def test_get_guest_config_serial_console_through_flavor(self):
    """hw:serial_port_count=3 in the flavor yields three TCP serial ports."""
    self.flags(enabled=True, group='serial_console')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_ref = objects.Instance(**self.test_instance)
    instance_ref.flavor.extra_specs = {'hw:serial_port_count': 3}
    image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)
    cfg = drvr._get_guest_config(instance_ref, [],
                                 image_meta, disk_info)
    self.assertEqual(10, len(cfg.devices))
    self.assertIsInstance(cfg.devices[0],
                          vconfig.LibvirtConfigGuestDisk)
    self.assertIsInstance(cfg.devices[1],
                          vconfig.LibvirtConfigGuestDisk)
    self.assertIsInstance(cfg.devices[2],
                          vconfig.LibvirtConfigGuestSerial)
    self.assertIsInstance(cfg.devices[3],
                          vconfig.LibvirtConfigGuestSerial)
    self.assertIsInstance(cfg.devices[4],
                          vconfig.LibvirtConfigGuestSerial)
    self.assertIsInstance(cfg.devices[5],
                          vconfig.LibvirtConfigGuestSerial)
    self.assertIsInstance(cfg.devices[6],
                          vconfig.LibvirtConfigGuestInput)
    self.assertIsInstance(cfg.devices[7],
                          vconfig.LibvirtConfigGuestGraphics)
    self.assertIsInstance(cfg.devices[8],
                          vconfig.LibvirtConfigGuestVideo)
    self.assertIsInstance(cfg.devices[9],
                          vconfig.LibvirtConfigMemoryBalloon)
    # NOTE(review): devices 2-4 are the flavor-requested serial ports;
    # device 5 appears to be an additional serial device — confirm
    # against _get_guest_config's serial handling.
    self.assertEqual("tcp", cfg.devices[2].type)
    self.assertEqual("tcp", cfg.devices[3].type)
    self.assertEqual("tcp", cfg.devices[4].type)
def test_get_guest_config_serial_console_invalid_flavor(self):
    """A non-numeric hw:serial_port_count extra spec raises
    ImageSerialPortNumberInvalid."""
    self.flags(enabled=True, group='serial_console')

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_ref = objects.Instance(**self.test_instance)
    # "a" cannot be parsed as a port count.
    instance_ref.flavor.extra_specs = {'hw:serial_port_count': "a"}
    image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
    disk_info = blockinfo.get_disk_info(
        CONF.libvirt.virt_type, instance_ref, image_meta)

    self.assertRaises(exception.ImageSerialPortNumberInvalid,
                      drvr._get_guest_config,
                      instance_ref, [], image_meta, disk_info)
def test_get_guest_config_serial_console_image_and_flavor(self):
    """When both image and flavor set a serial port count, the image's
    hw_serial_port_count (3) is the one reflected in the config."""
    self.flags(enabled=True, group='serial_console')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    image_meta = objects.ImageMeta.from_dict({
        "disk_format": "raw",
        "properties": {"hw_serial_port_count": "3"}})
    instance_ref = objects.Instance(**self.test_instance)
    instance_ref.flavor.extra_specs = {'hw:serial_port_count': 4}
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)
    cfg = drvr._get_guest_config(instance_ref, [], image_meta,
                                 disk_info)
    self.assertEqual(10, len(cfg.devices), cfg.devices)
    self.assertIsInstance(cfg.devices[0],
                          vconfig.LibvirtConfigGuestDisk)
    self.assertIsInstance(cfg.devices[1],
                          vconfig.LibvirtConfigGuestDisk)
    self.assertIsInstance(cfg.devices[2],
                          vconfig.LibvirtConfigGuestSerial)
    self.assertIsInstance(cfg.devices[3],
                          vconfig.LibvirtConfigGuestSerial)
    self.assertIsInstance(cfg.devices[4],
                          vconfig.LibvirtConfigGuestSerial)
    self.assertIsInstance(cfg.devices[5],
                          vconfig.LibvirtConfigGuestSerial)
    self.assertIsInstance(cfg.devices[6],
                          vconfig.LibvirtConfigGuestInput)
    self.assertIsInstance(cfg.devices[7],
                          vconfig.LibvirtConfigGuestGraphics)
    self.assertIsInstance(cfg.devices[8],
                          vconfig.LibvirtConfigGuestVideo)
    self.assertIsInstance(cfg.devices[9],
                          vconfig.LibvirtConfigMemoryBalloon)
    self.assertEqual("tcp", cfg.devices[2].type)
    self.assertEqual("tcp", cfg.devices[3].type)
    self.assertEqual("tcp", cfg.devices[4].type)
@mock.patch('nova.console.serial.acquire_port')
def test_get_guest_config_serial_console_through_port_rng_exhausted(
        self, acquire_port):
    """Port-range exhaustion during port acquisition propagates to the
    caller as SocketPortRangeExhaustedException."""
    self.flags(enabled=True, group='serial_console')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_ref = objects.Instance(**self.test_instance)
    image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)
    acquire_port.side_effect = exception.SocketPortRangeExhaustedException(
        '127.0.0.1')
    self.assertRaises(
        exception.SocketPortRangeExhaustedException,
        drvr._get_guest_config, instance_ref, [],
        image_meta, disk_info)
@mock.patch.object(libvirt_guest.Guest, "get_xml_desc")
def test_get_serial_ports_from_guest(self, mock_get_xml_desc):
    """With no mode filter, every serial port address is yielded."""
    ports = self._test_get_serial_ports_from_guest(None, mock_get_xml_desc)
    expected = [('127.0.0.1', 100),
                ('127.0.0.1', 101),
                ('127.0.0.2', 100),
                ('127.0.0.2', 101)]
    self.assertEqual(expected, list(ports))
@mock.patch.object(libvirt_guest.Guest, "get_xml_desc")
def test_get_serial_ports_from_guest_bind_only(self, mock_get_xml_desc):
    """mode='bind' yields only the bind-mode port addresses."""
    ports = self._test_get_serial_ports_from_guest('bind', mock_get_xml_desc)
    expected = [('127.0.0.1', 101),
                ('127.0.0.2', 100)]
    self.assertEqual(expected, list(ports))
@mock.patch.object(libvirt_guest.Guest, "get_xml_desc")
def test_get_serial_ports_from_guest_connect_only(self,
                                                  mock_get_xml_desc):
    """mode='connect' yields only the connect-mode port addresses."""
    ports = self._test_get_serial_ports_from_guest('connect',
                                                   mock_get_xml_desc)
    expected = [('127.0.0.1', 100),
                ('127.0.0.2', 101)]
    self.assertEqual(expected, list(ports))
@mock.patch.object(libvirt_guest.Guest, "get_xml_desc")
def test_get_serial_ports_from_guest_on_s390(self, mock_get_xml_desc):
    """<console> elements (as used on s390) are harvested like <serial>."""
    ports = self._test_get_serial_ports_from_guest(None,
                                                   mock_get_xml_desc,
                                                   'console')
    expected = [('127.0.0.1', 100),
                ('127.0.0.1', 101),
                ('127.0.0.2', 100),
                ('127.0.0.2', 101)]
    self.assertEqual(expected, list(ports))
def _test_get_serial_ports_from_guest(self, mode, mock_get_xml_desc,
                                      dev_name='serial'):
    """Drive _get_serial_ports_from_guest against a canned domain XML.

    :param mode: source mode filter passed through ('bind', 'connect'
                 or None for no filtering)
    :param mock_get_xml_desc: mocked Guest.get_xml_desc to feed the XML
    :param dev_name: device element name, 'serial' or 'console'
    :returns: the generator produced by _get_serial_ports_from_guest
    """
    # Two hosts x two ports, alternating connect/bind modes.
    xml = """
    <domain type='kvm'>
      <devices>
        <%(dev_name)s type="tcp">
          <source host="127.0.0.1" service="100" mode="connect"/>
        </%(dev_name)s>
        <%(dev_name)s type="tcp">
          <source host="127.0.0.1" service="101" mode="bind"/>
        </%(dev_name)s>
        <%(dev_name)s type="tcp">
          <source host="127.0.0.2" service="100" mode="bind"/>
        </%(dev_name)s>
        <%(dev_name)s type="tcp">
          <source host="127.0.0.2" service="101" mode="connect"/>
        </%(dev_name)s>
      </devices>
    </domain>""" % {'dev_name': dev_name}
    mock_get_xml_desc.return_value = xml
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    guest = libvirt_guest.Guest(FakeVirtDomain())
    return drvr._get_serial_ports_from_guest(guest, mode=mode)
def test_get_guest_config_with_type_xen(self):
    """Xen guests get a console device and xen-type video with VNC."""
    self.flags(enabled=True, group='vnc')
    self.flags(virt_type='xen',
               use_usb_tablet=False,
               group='libvirt')
    self.flags(enabled=False,
               group='spice')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_ref = objects.Instance(**self.test_instance)
    image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)
    cfg = drvr._get_guest_config(instance_ref, [],
                                 image_meta, disk_info)
    self.assertEqual(len(cfg.devices), 6)
    self.assertIsInstance(cfg.devices[0],
                          vconfig.LibvirtConfigGuestDisk)
    self.assertIsInstance(cfg.devices[1],
                          vconfig.LibvirtConfigGuestDisk)
    self.assertIsInstance(cfg.devices[2],
                          vconfig.LibvirtConfigGuestConsole)
    self.assertIsInstance(cfg.devices[3],
                          vconfig.LibvirtConfigGuestGraphics)
    self.assertIsInstance(cfg.devices[4],
                          vconfig.LibvirtConfigGuestVideo)
    self.assertIsInstance(cfg.devices[5],
                          vconfig.LibvirtConfigMemoryBalloon)
    self.assertEqual(cfg.devices[3].type, "vnc")
    self.assertEqual(cfg.devices[4].type, "xen")
@mock.patch.object(libvirt_driver.libvirt_utils, 'get_arch',
                   return_value=arch.S390X)
def test_get_guest_config_with_type_kvm_on_s390(self, mock_get_arch):
    """KVM on s390x uses sclp consoles and the s390-ccw-virtio machine."""
    self.flags(enabled=False, group='vnc')
    self.flags(virt_type='kvm',
               use_usb_tablet=False,
               group='libvirt')
    self._stub_host_capabilities_cpu_arch(arch.S390X)
    instance_ref = objects.Instance(**self.test_instance)
    cfg = self._get_guest_config_via_fake_api(instance_ref)
    self.assertIsInstance(cfg.devices[0],
                          vconfig.LibvirtConfigGuestDisk)
    self.assertIsInstance(cfg.devices[1],
                          vconfig.LibvirtConfigGuestDisk)
    # First console: the sclplm file-backed log device.
    log_file_device = cfg.devices[2]
    self.assertIsInstance(log_file_device,
                          vconfig.LibvirtConfigGuestConsole)
    self.assertEqual("sclplm", log_file_device.target_type)
    self.assertEqual("file", log_file_device.type)
    # Second console: the interactive sclp pty terminal.
    terminal_device = cfg.devices[3]
    self.assertIsInstance(terminal_device,
                          vconfig.LibvirtConfigGuestConsole)
    self.assertEqual("sclp", terminal_device.target_type)
    self.assertEqual("pty", terminal_device.type)
    self.assertEqual("s390-ccw-virtio", cfg.os_mach_type)
def _stub_host_capabilities_cpu_arch(self, cpu_arch):
    """Stub Host.get_capabilities so the host CPU reports ``cpu_arch``."""
    def _fake_get_capabilities(self):
        capabilities = vconfig.LibvirtConfigCaps()
        capabilities.host = vconfig.LibvirtConfigCapsHost()
        guest_cpu = vconfig.LibvirtConfigGuestCPU()
        guest_cpu.arch = cpu_arch
        capabilities.host.cpu = guest_cpu
        return capabilities

    self.stubs.Set(host.Host, "get_capabilities",
                   _fake_get_capabilities)
def _get_guest_config_via_fake_api(self, instance):
    """Build a guest config for ``instance`` through a fake virt API."""
    driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
    disk_info = blockinfo.get_disk_info(
        CONF.libvirt.virt_type, instance, image_meta)
    return driver._get_guest_config(instance, [], image_meta, disk_info)
def test_get_guest_config_with_type_xen_pae_hvm(self):
    """Xen HVM guests get the hvmloader path and PAE/ACPI/APIC features."""
    self.flags(enabled=True, group='vnc')
    self.flags(virt_type='xen',
               use_usb_tablet=False,
               group='libvirt')
    self.flags(enabled=False,
               group='spice')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_ref = objects.Instance(**self.test_instance)
    # Force full hardware virtualization mode for this instance.
    instance_ref['vm_mode'] = vm_mode.HVM
    image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)
    cfg = drvr._get_guest_config(instance_ref, [],
                                 image_meta, disk_info)
    self.assertEqual(cfg.os_type, vm_mode.HVM)
    self.assertEqual(cfg.os_loader, CONF.libvirt.xen_hvmloader_path)
    # HVM mode enables all three guest features.
    self.assertEqual(3, len(cfg.features))
    self.assertIsInstance(cfg.features[0],
                          vconfig.LibvirtConfigGuestFeaturePAE)
    self.assertIsInstance(cfg.features[1],
                          vconfig.LibvirtConfigGuestFeatureACPI)
    self.assertIsInstance(cfg.features[2],
                          vconfig.LibvirtConfigGuestFeatureAPIC)
def test_get_guest_config_with_type_xen_pae_pvm(self):
    """Xen paravirt guests (default vm_mode) get only the PAE feature."""
    self.flags(enabled=True, group='vnc')
    self.flags(virt_type='xen',
               use_usb_tablet=False,
               group='libvirt')
    self.flags(enabled=False,
               group='spice')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_ref = objects.Instance(**self.test_instance)
    image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)
    cfg = drvr._get_guest_config(instance_ref, [],
                                 image_meta, disk_info)
    # Unlike the HVM case, PV mode yields os_type "xen" and a single feature.
    self.assertEqual(cfg.os_type, vm_mode.XEN)
    self.assertEqual(1, len(cfg.features))
    self.assertIsInstance(cfg.features[0],
                          vconfig.LibvirtConfigGuestFeaturePAE)
def test_get_guest_config_with_vnc_and_spice(self):
    """With both VNC and SPICE enabled the guest gets two graphics devices.

    The SPICE agent also adds a "com.redhat.spice.0" channel, and the USB
    tablet is present because use_usb_tablet is True.
    """
    self.flags(enabled=True, group='vnc')
    self.flags(virt_type='kvm',
               use_usb_tablet=True,
               group='libvirt')
    self.flags(enabled=True,
               agent_enabled=True,
               group='spice')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_ref = objects.Instance(**self.test_instance)
    image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)
    cfg = drvr._get_guest_config(instance_ref, [],
                                 image_meta, disk_info)
    # Expected layout: 2 disks, 2 serials, tablet, spice channel,
    # vnc + spice graphics, video, memballoon.
    self.assertEqual(len(cfg.devices), 10)
    self.assertIsInstance(cfg.devices[0],
                          vconfig.LibvirtConfigGuestDisk)
    self.assertIsInstance(cfg.devices[1],
                          vconfig.LibvirtConfigGuestDisk)
    self.assertIsInstance(cfg.devices[2],
                          vconfig.LibvirtConfigGuestSerial)
    self.assertIsInstance(cfg.devices[3],
                          vconfig.LibvirtConfigGuestSerial)
    self.assertIsInstance(cfg.devices[4],
                          vconfig.LibvirtConfigGuestInput)
    self.assertIsInstance(cfg.devices[5],
                          vconfig.LibvirtConfigGuestChannel)
    self.assertIsInstance(cfg.devices[6],
                          vconfig.LibvirtConfigGuestGraphics)
    self.assertIsInstance(cfg.devices[7],
                          vconfig.LibvirtConfigGuestGraphics)
    self.assertIsInstance(cfg.devices[8],
                          vconfig.LibvirtConfigGuestVideo)
    self.assertIsInstance(cfg.devices[9],
                          vconfig.LibvirtConfigMemoryBalloon)
    self.assertEqual(cfg.devices[4].type, "tablet")
    self.assertEqual(cfg.devices[5].target_name, "com.redhat.spice.0")
    self.assertEqual(cfg.devices[6].type, "vnc")
    self.assertEqual(cfg.devices[7].type, "spice")
def test_get_guest_config_with_watchdog_action_image_meta(self):
    """The image property hw_watchdog_action adds a watchdog device."""
    self.flags(virt_type='kvm', group='libvirt')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_ref = objects.Instance(**self.test_instance)
    image_meta = objects.ImageMeta.from_dict({
        "disk_format": "raw",
        "properties": {"hw_watchdog_action": "none"}})
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)
    cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info)
    # 8 baseline devices plus the watchdog requested by the image.
    self.assertEqual(len(cfg.devices), 9)
    self.assertIsInstance(cfg.devices[0],
                          vconfig.LibvirtConfigGuestDisk)
    self.assertIsInstance(cfg.devices[1],
                          vconfig.LibvirtConfigGuestDisk)
    self.assertIsInstance(cfg.devices[2],
                          vconfig.LibvirtConfigGuestSerial)
    self.assertIsInstance(cfg.devices[3],
                          vconfig.LibvirtConfigGuestSerial)
    self.assertIsInstance(cfg.devices[4],
                          vconfig.LibvirtConfigGuestInput)
    self.assertIsInstance(cfg.devices[5],
                          vconfig.LibvirtConfigGuestGraphics)
    self.assertIsInstance(cfg.devices[6],
                          vconfig.LibvirtConfigGuestVideo)
    self.assertIsInstance(cfg.devices[7],
                          vconfig.LibvirtConfigGuestWatchdog)
    self.assertIsInstance(cfg.devices[8],
                          vconfig.LibvirtConfigMemoryBalloon)
    # Watchdog action comes straight from the image property value.
    self.assertEqual("none", cfg.devices[7].action)
def _test_get_guest_usb_tablet(self, vnc_enabled, spice_enabled, os_type,
                               agent_enabled=False):
    """Set console flags, then ask the driver for a USB tablet device."""
    self.flags(enabled=vnc_enabled, group='vnc')
    self.flags(enabled=spice_enabled, agent_enabled=agent_enabled,
               group='spice')
    driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    # Returns the tablet device object, or None when one is not wanted.
    return driver._get_guest_usb_tablet(os_type)
def test_get_guest_usb_tablet_wipe(self):
    """USB tablet appears only for HVM guests with a graphical console
    and no SPICE agent."""
    self.flags(use_usb_tablet=True, group='libvirt')
    # Any graphical console (vnc and/or spice) on HVM -> tablet present.
    tablet = self._test_get_guest_usb_tablet(True, True, vm_mode.HVM)
    self.assertIsNotNone(tablet)
    tablet = self._test_get_guest_usb_tablet(True, False, vm_mode.HVM)
    self.assertIsNotNone(tablet)
    tablet = self._test_get_guest_usb_tablet(False, True, vm_mode.HVM)
    self.assertIsNotNone(tablet)
    # No graphical console at all -> no tablet.
    tablet = self._test_get_guest_usb_tablet(False, False, vm_mode.HVM)
    self.assertIsNone(tablet)
    # Non-HVM os_type -> no tablet even with consoles enabled.
    tablet = self._test_get_guest_usb_tablet(True, True, "foo")
    self.assertIsNone(tablet)
    # SPICE agent enabled -> agent replaces the tablet.
    tablet = self._test_get_guest_usb_tablet(
        False, True, vm_mode.HVM, True)
    self.assertIsNone(tablet)
def _test_get_guest_config_with_watchdog_action_flavor(self,
        hw_watchdog_action="hw:watchdog_action"):
    """Verify a watchdog device is added when the flavor extra spec
    (under the given key name) requests a watchdog action.

    :param hw_watchdog_action: extra-spec key to set; defaults to the
        scoped name, callers may pass the legacy unscoped name.
    """
    self.flags(virt_type='kvm', group='libvirt')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_ref = objects.Instance(**self.test_instance)
    instance_ref.flavor.extra_specs = {hw_watchdog_action: 'none'}
    image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)
    cfg = drvr._get_guest_config(instance_ref, [],
                                 image_meta, disk_info)
    # 8 baseline devices plus the watchdog from the flavor extra spec.
    self.assertEqual(9, len(cfg.devices))
    self.assertIsInstance(cfg.devices[0],
                          vconfig.LibvirtConfigGuestDisk)
    self.assertIsInstance(cfg.devices[1],
                          vconfig.LibvirtConfigGuestDisk)
    self.assertIsInstance(cfg.devices[2],
                          vconfig.LibvirtConfigGuestSerial)
    self.assertIsInstance(cfg.devices[3],
                          vconfig.LibvirtConfigGuestSerial)
    self.assertIsInstance(cfg.devices[4],
                          vconfig.LibvirtConfigGuestInput)
    self.assertIsInstance(cfg.devices[5],
                          vconfig.LibvirtConfigGuestGraphics)
    self.assertIsInstance(cfg.devices[6],
                          vconfig.LibvirtConfigGuestVideo)
    self.assertIsInstance(cfg.devices[7],
                          vconfig.LibvirtConfigGuestWatchdog)
    self.assertIsInstance(cfg.devices[8],
                          vconfig.LibvirtConfigMemoryBalloon)
    self.assertEqual("none", cfg.devices[7].action)
def test_get_guest_config_with_watchdog_action_through_flavor(self):
    # Uses the default, scoped extra-spec key 'hw:watchdog_action'.
    self._test_get_guest_config_with_watchdog_action_flavor()
# TODO(pkholkin): the test accepting old property name 'hw_watchdog_action'
# should be removed in the next release
def test_get_guest_config_with_watchdog_action_through_flavor_no_scope(
        self):
    # Legacy unscoped extra-spec key is still honored for now.
    self._test_get_guest_config_with_watchdog_action_flavor(
        hw_watchdog_action="hw_watchdog_action")
def test_get_guest_config_with_watchdog_overrides_flavor(self):
    """The image's hw_watchdog_action overrides the flavor extra spec."""
    self.flags(virt_type='kvm', group='libvirt')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_ref = objects.Instance(**self.test_instance)
    # Flavor requests 'none' but the image requests 'pause'.
    instance_ref.flavor.extra_specs = {'hw_watchdog_action': 'none'}
    image_meta = objects.ImageMeta.from_dict({
        "disk_format": "raw",
        "properties": {"hw_watchdog_action": "pause"}})
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)
    cfg = drvr._get_guest_config(instance_ref, [],
                                 image_meta, disk_info)
    self.assertEqual(9, len(cfg.devices))
    self.assertIsInstance(cfg.devices[0],
                          vconfig.LibvirtConfigGuestDisk)
    self.assertIsInstance(cfg.devices[1],
                          vconfig.LibvirtConfigGuestDisk)
    self.assertIsInstance(cfg.devices[2],
                          vconfig.LibvirtConfigGuestSerial)
    self.assertIsInstance(cfg.devices[3],
                          vconfig.LibvirtConfigGuestSerial)
    self.assertIsInstance(cfg.devices[4],
                          vconfig.LibvirtConfigGuestInput)
    self.assertIsInstance(cfg.devices[5],
                          vconfig.LibvirtConfigGuestGraphics)
    self.assertIsInstance(cfg.devices[6],
                          vconfig.LibvirtConfigGuestVideo)
    self.assertIsInstance(cfg.devices[7],
                          vconfig.LibvirtConfigGuestWatchdog)
    self.assertIsInstance(cfg.devices[8],
                          vconfig.LibvirtConfigMemoryBalloon)
    # The image property wins.
    self.assertEqual("pause", cfg.devices[7].action)
def test_get_guest_config_with_video_driver_image_meta(self):
    """The hw_video_model image property selects the video device model."""
    self.flags(virt_type='kvm', group='libvirt')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_ref = objects.Instance(**self.test_instance)
    image_meta = objects.ImageMeta.from_dict({
        "disk_format": "raw",
        "properties": {"hw_video_model": "vmvga"}})
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)
    cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info)
    self.assertEqual(len(cfg.devices), 8)
    self.assertIsInstance(cfg.devices[0],
                          vconfig.LibvirtConfigGuestDisk)
    self.assertIsInstance(cfg.devices[1],
                          vconfig.LibvirtConfigGuestDisk)
    self.assertIsInstance(cfg.devices[2],
                          vconfig.LibvirtConfigGuestSerial)
    self.assertIsInstance(cfg.devices[3],
                          vconfig.LibvirtConfigGuestSerial)
    self.assertIsInstance(cfg.devices[4],
                          vconfig.LibvirtConfigGuestInput)
    self.assertIsInstance(cfg.devices[5],
                          vconfig.LibvirtConfigGuestGraphics)
    self.assertIsInstance(cfg.devices[6],
                          vconfig.LibvirtConfigGuestVideo)
    self.assertIsInstance(cfg.devices[7],
                          vconfig.LibvirtConfigMemoryBalloon)
    self.assertEqual(cfg.devices[5].type, "vnc")
    # Video model taken from the image property.
    self.assertEqual(cfg.devices[6].type, "vmvga")
def test_get_guest_config_with_qga_through_image_meta(self):
    """hw_qemu_guest_agent=yes adds a unix channel for the QEMU agent."""
    self.flags(virt_type='kvm', group='libvirt')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_ref = objects.Instance(**self.test_instance)
    image_meta = objects.ImageMeta.from_dict({
        "disk_format": "raw",
        "properties": {"hw_qemu_guest_agent": "yes"}})
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)
    cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info)
    # 8 baseline devices plus the guest-agent channel.
    self.assertEqual(len(cfg.devices), 9)
    self.assertIsInstance(cfg.devices[0],
                          vconfig.LibvirtConfigGuestDisk)
    self.assertIsInstance(cfg.devices[1],
                          vconfig.LibvirtConfigGuestDisk)
    self.assertIsInstance(cfg.devices[2],
                          vconfig.LibvirtConfigGuestSerial)
    self.assertIsInstance(cfg.devices[3],
                          vconfig.LibvirtConfigGuestSerial)
    self.assertIsInstance(cfg.devices[4],
                          vconfig.LibvirtConfigGuestInput)
    self.assertIsInstance(cfg.devices[5],
                          vconfig.LibvirtConfigGuestGraphics)
    self.assertIsInstance(cfg.devices[6],
                          vconfig.LibvirtConfigGuestVideo)
    self.assertIsInstance(cfg.devices[7],
                          vconfig.LibvirtConfigGuestChannel)
    self.assertIsInstance(cfg.devices[8],
                          vconfig.LibvirtConfigMemoryBalloon)
    self.assertEqual(cfg.devices[4].type, "tablet")
    self.assertEqual(cfg.devices[5].type, "vnc")
    # The agent channel is a unix socket with the standard QGA target name.
    self.assertEqual(cfg.devices[7].type, "unix")
    self.assertEqual(cfg.devices[7].target_name, "org.qemu.guest_agent.0")
def test_get_guest_config_with_video_driver_vram(self):
    """hw_video_ram within the flavor's ram_max_mb sets the video vram.

    vram is expressed in KiB in the config, hence the Mi/Ki conversion in
    the final assertion (64 MB requested by the image).
    """
    self.flags(enabled=False, group='vnc')
    self.flags(virt_type='kvm', group='libvirt')
    self.flags(enabled=True,
               agent_enabled=True,
               group='spice')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_ref = objects.Instance(**self.test_instance)
    # Flavor allows up to 100 MB of video RAM; image asks for 64 MB.
    instance_ref.flavor.extra_specs = {'hw_video:ram_max_mb': "100"}
    image_meta = objects.ImageMeta.from_dict({
        "disk_format": "raw",
        "properties": {"hw_video_model": "qxl",
                       "hw_video_ram": "64"}})
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)
    cfg = drvr._get_guest_config(instance_ref, [],
                                 image_meta, disk_info)
    self.assertEqual(len(cfg.devices), 8)
    self.assertIsInstance(cfg.devices[0],
                          vconfig.LibvirtConfigGuestDisk)
    self.assertIsInstance(cfg.devices[1],
                          vconfig.LibvirtConfigGuestDisk)
    self.assertIsInstance(cfg.devices[2],
                          vconfig.LibvirtConfigGuestSerial)
    self.assertIsInstance(cfg.devices[3],
                          vconfig.LibvirtConfigGuestSerial)
    self.assertIsInstance(cfg.devices[4],
                          vconfig.LibvirtConfigGuestChannel)
    self.assertIsInstance(cfg.devices[5],
                          vconfig.LibvirtConfigGuestGraphics)
    self.assertIsInstance(cfg.devices[6],
                          vconfig.LibvirtConfigGuestVideo)
    self.assertIsInstance(cfg.devices[7],
                          vconfig.LibvirtConfigMemoryBalloon)
    self.assertEqual(cfg.devices[5].type, "spice")
    self.assertEqual(cfg.devices[6].type, "qxl")
    self.assertEqual(cfg.devices[6].vram, 64 * units.Mi / units.Ki)
@mock.patch('nova.virt.disk.api.teardown_container')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.get_info')
@mock.patch('nova.virt.disk.api.setup_container')
@mock.patch('oslo_utils.fileutils.ensure_tree')
@mock.patch.object(fake_libvirt_utils, 'get_instance_path')
def test_unmount_fs_if_error_during_lxc_create_domain(self,
        mock_get_inst_path, mock_ensure_tree, mock_setup_container,
        mock_get_info, mock_teardown):
    """If we hit an error during a `_create_domain` call to `libvirt+lxc`
    we need to ensure the guest FS is unmounted from the host so that any
    future `lvremove` calls will work.
    """
    self.flags(virt_type='lxc', group='libvirt')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    mock_instance = mock.MagicMock()
    mock_get_inst_path.return_value = '/tmp/'
    mock_image_backend = mock.MagicMock()
    drvr.image_backend = mock_image_backend
    mock_image = mock.MagicMock()
    mock_image.path = '/tmp/test.img'
    drvr.image_backend.image.return_value = mock_image
    mock_setup_container.return_value = '/dev/nbd0'
    mock_get_info.side_effect = exception.InstanceNotFound(
        instance_id='foo')
    # Make the libvirt defineXML call blow up to simulate the failure.
    drvr._conn.defineXML = mock.Mock()
    drvr._conn.defineXML.side_effect = ValueError('somethingbad')
    with contextlib.nested(
            mock.patch.object(drvr, '_is_booted_from_volume',
                              return_value=False),
            mock.patch.object(drvr, 'plug_vifs'),
            mock.patch.object(drvr, 'firewall_driver'),
            mock.patch.object(drvr, 'cleanup')):
        # The original error must propagate...
        self.assertRaises(ValueError,
                          drvr._create_domain_and_network,
                          self.context,
                          'xml',
                          mock_instance, None, None)
        # ...and the container rootfs must still be torn down.
        mock_teardown.assert_called_with(container_dir='/tmp/rootfs')
def test_video_driver_flavor_limit_not_set(self):
    """Requesting hw_video_ram without a flavor ram_max_mb limit raises."""
    self.flags(virt_type='kvm', group='libvirt')
    self.flags(enabled=True,
               agent_enabled=True,
               group='spice')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_ref = objects.Instance(**self.test_instance)
    # Image asks for 64 MB of video RAM; no hw_video:ram_max_mb on flavor.
    image_meta = objects.ImageMeta.from_dict({
        "disk_format": "raw",
        "properties": {"hw_video_model": "qxl",
                       "hw_video_ram": "64"}})
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)
    with mock.patch.object(objects.Instance, 'save'):
        self.assertRaises(exception.RequestedVRamTooHigh,
                          drvr._get_guest_config,
                          instance_ref,
                          [],
                          image_meta,
                          disk_info)
def test_video_driver_ram_above_flavor_limit(self):
    """hw_video_ram above the flavor's ram_max_mb limit raises."""
    self.flags(virt_type='kvm', group='libvirt')
    self.flags(enabled=True,
               agent_enabled=True,
               group='spice')
    instance_ref = objects.Instance(**self.test_instance)
    instance_type = instance_ref.get_flavor()
    # Flavor caps video RAM at 50 MB; image asks for 64 MB.
    instance_type.extra_specs = {'hw_video:ram_max_mb': "50"}
    image_meta = objects.ImageMeta.from_dict({
        "disk_format": "raw",
        "properties": {"hw_video_model": "qxl",
                       "hw_video_ram": "64"}})
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)
    with mock.patch.object(objects.Instance, 'save'):
        self.assertRaises(exception.RequestedVRamTooHigh,
                          drvr._get_guest_config,
                          instance_ref,
                          [],
                          image_meta,
                          disk_info)
def test_get_guest_config_without_qga_through_image_meta(self):
    """hw_qemu_guest_agent=no yields no guest-agent channel device."""
    self.flags(virt_type='kvm', group='libvirt')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_ref = objects.Instance(**self.test_instance)
    image_meta = objects.ImageMeta.from_dict({
        "disk_format": "raw",
        "properties": {"hw_qemu_guest_agent": "no"}})
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)
    cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info)
    # Only the 8 baseline devices: no LibvirtConfigGuestChannel expected.
    self.assertEqual(len(cfg.devices), 8)
    self.assertIsInstance(cfg.devices[0],
                          vconfig.LibvirtConfigGuestDisk)
    self.assertIsInstance(cfg.devices[1],
                          vconfig.LibvirtConfigGuestDisk)
    self.assertIsInstance(cfg.devices[2],
                          vconfig.LibvirtConfigGuestSerial)
    self.assertIsInstance(cfg.devices[3],
                          vconfig.LibvirtConfigGuestSerial)
    self.assertIsInstance(cfg.devices[4],
                          vconfig.LibvirtConfigGuestInput)
    self.assertIsInstance(cfg.devices[5],
                          vconfig.LibvirtConfigGuestGraphics)
    self.assertIsInstance(cfg.devices[6],
                          vconfig.LibvirtConfigGuestVideo)
    self.assertIsInstance(cfg.devices[7],
                          vconfig.LibvirtConfigMemoryBalloon)
    self.assertEqual(cfg.devices[4].type, "tablet")
    self.assertEqual(cfg.devices[5].type, "vnc")
def test_get_guest_config_with_rng_device(self):
    """hw_rng_model=virtio plus flavor hw_rng:allowed adds an RNG device
    with default (unset) backend and rate limits."""
    self.flags(virt_type='kvm',
               use_usb_tablet=False,
               group='libvirt')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_ref = objects.Instance(**self.test_instance)
    instance_ref.flavor.extra_specs = {'hw_rng:allowed': 'True'}
    image_meta = objects.ImageMeta.from_dict({
        "disk_format": "raw",
        "properties": {"hw_rng_model": "virtio"}})
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)
    cfg = drvr._get_guest_config(instance_ref, [],
                                 image_meta, disk_info)
    self.assertEqual(len(cfg.devices), 8)
    self.assertIsInstance(cfg.devices[0],
                          vconfig.LibvirtConfigGuestDisk)
    self.assertIsInstance(cfg.devices[1],
                          vconfig.LibvirtConfigGuestDisk)
    self.assertIsInstance(cfg.devices[2],
                          vconfig.LibvirtConfigGuestSerial)
    self.assertIsInstance(cfg.devices[3],
                          vconfig.LibvirtConfigGuestSerial)
    self.assertIsInstance(cfg.devices[4],
                          vconfig.LibvirtConfigGuestGraphics)
    self.assertIsInstance(cfg.devices[5],
                          vconfig.LibvirtConfigGuestVideo)
    self.assertIsInstance(cfg.devices[6],
                          vconfig.LibvirtConfigGuestRng)
    self.assertIsInstance(cfg.devices[7],
                          vconfig.LibvirtConfigMemoryBalloon)
    # Default RNG: 'random' model, no explicit backend or rate limits.
    self.assertEqual(cfg.devices[6].model, 'random')
    self.assertIsNone(cfg.devices[6].backend)
    self.assertIsNone(cfg.devices[6].rate_bytes)
    self.assertIsNone(cfg.devices[6].rate_period)
def test_get_guest_config_with_rng_not_allowed(self):
    """Without flavor hw_rng:allowed, the image's RNG request is ignored."""
    self.flags(virt_type='kvm',
               use_usb_tablet=False,
               group='libvirt')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_ref = objects.Instance(**self.test_instance)
    image_meta = objects.ImageMeta.from_dict({
        "disk_format": "raw",
        "properties": {"hw_rng_model": "virtio"}})
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)
    cfg = drvr._get_guest_config(instance_ref, [],
                                 image_meta, disk_info)
    # Only 7 devices: no LibvirtConfigGuestRng in the list.
    self.assertEqual(len(cfg.devices), 7)
    self.assertIsInstance(cfg.devices[0],
                          vconfig.LibvirtConfigGuestDisk)
    self.assertIsInstance(cfg.devices[1],
                          vconfig.LibvirtConfigGuestDisk)
    self.assertIsInstance(cfg.devices[2],
                          vconfig.LibvirtConfigGuestSerial)
    self.assertIsInstance(cfg.devices[3],
                          vconfig.LibvirtConfigGuestSerial)
    self.assertIsInstance(cfg.devices[4],
                          vconfig.LibvirtConfigGuestGraphics)
    self.assertIsInstance(cfg.devices[5],
                          vconfig.LibvirtConfigGuestVideo)
    self.assertIsInstance(cfg.devices[6],
                          vconfig.LibvirtConfigMemoryBalloon)
def test_get_guest_config_with_rng_limits(self):
    """Flavor hw_rng:rate_bytes/rate_period set the RNG rate limits."""
    self.flags(virt_type='kvm',
               use_usb_tablet=False,
               group='libvirt')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_ref = objects.Instance(**self.test_instance)
    instance_ref.flavor.extra_specs = {'hw_rng:allowed': 'True',
                                       'hw_rng:rate_bytes': '1024',
                                       'hw_rng:rate_period': '2'}
    image_meta = objects.ImageMeta.from_dict({
        "disk_format": "raw",
        "properties": {"hw_rng_model": "virtio"}})
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)
    cfg = drvr._get_guest_config(instance_ref, [],
                                 image_meta, disk_info)
    self.assertEqual(len(cfg.devices), 8)
    self.assertIsInstance(cfg.devices[0],
                          vconfig.LibvirtConfigGuestDisk)
    self.assertIsInstance(cfg.devices[1],
                          vconfig.LibvirtConfigGuestDisk)
    self.assertIsInstance(cfg.devices[2],
                          vconfig.LibvirtConfigGuestSerial)
    self.assertIsInstance(cfg.devices[3],
                          vconfig.LibvirtConfigGuestSerial)
    self.assertIsInstance(cfg.devices[4],
                          vconfig.LibvirtConfigGuestGraphics)
    self.assertIsInstance(cfg.devices[5],
                          vconfig.LibvirtConfigGuestVideo)
    self.assertIsInstance(cfg.devices[6],
                          vconfig.LibvirtConfigGuestRng)
    self.assertIsInstance(cfg.devices[7],
                          vconfig.LibvirtConfigMemoryBalloon)
    self.assertEqual(cfg.devices[6].model, 'random')
    self.assertIsNone(cfg.devices[6].backend)
    # String extra specs are converted to integer rate limits.
    self.assertEqual(cfg.devices[6].rate_bytes, 1024)
    self.assertEqual(cfg.devices[6].rate_period, 2)
@mock.patch('nova.virt.libvirt.driver.os.path.exists')
def test_get_guest_config_with_rng_backend(self, mock_path):
    """A configured rng_dev_path that exists becomes the RNG backend."""
    self.flags(virt_type='kvm',
               use_usb_tablet=False,
               rng_dev_path='/dev/hw_rng',
               group='libvirt')
    # Pretend the host device path exists.
    mock_path.return_value = True
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_ref = objects.Instance(**self.test_instance)
    instance_ref.flavor.extra_specs = {'hw_rng:allowed': 'True'}
    image_meta = objects.ImageMeta.from_dict({
        "disk_format": "raw",
        "properties": {"hw_rng_model": "virtio"}})
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)
    cfg = drvr._get_guest_config(instance_ref, [],
                                 image_meta, disk_info)
    self.assertEqual(len(cfg.devices), 8)
    self.assertIsInstance(cfg.devices[0],
                          vconfig.LibvirtConfigGuestDisk)
    self.assertIsInstance(cfg.devices[1],
                          vconfig.LibvirtConfigGuestDisk)
    self.assertIsInstance(cfg.devices[2],
                          vconfig.LibvirtConfigGuestSerial)
    self.assertIsInstance(cfg.devices[3],
                          vconfig.LibvirtConfigGuestSerial)
    self.assertIsInstance(cfg.devices[4],
                          vconfig.LibvirtConfigGuestGraphics)
    self.assertIsInstance(cfg.devices[5],
                          vconfig.LibvirtConfigGuestVideo)
    self.assertIsInstance(cfg.devices[6],
                          vconfig.LibvirtConfigGuestRng)
    self.assertIsInstance(cfg.devices[7],
                          vconfig.LibvirtConfigMemoryBalloon)
    self.assertEqual(cfg.devices[6].model, 'random')
    # Backend path comes from the rng_dev_path config option.
    self.assertEqual(cfg.devices[6].backend, '/dev/hw_rng')
    self.assertIsNone(cfg.devices[6].rate_bytes)
    self.assertIsNone(cfg.devices[6].rate_period)
@mock.patch('nova.virt.libvirt.driver.os.path.exists')
def test_get_guest_config_with_rng_dev_not_present(self, mock_path):
    """A configured rng_dev_path that does not exist raises."""
    self.flags(virt_type='kvm',
               use_usb_tablet=False,
               rng_dev_path='/dev/hw_rng',
               group='libvirt')
    # Pretend the host device path is missing.
    mock_path.return_value = False
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_ref = objects.Instance(**self.test_instance)
    instance_ref.flavor.extra_specs = {'hw_rng:allowed': 'True'}
    image_meta = objects.ImageMeta.from_dict({
        "disk_format": "raw",
        "properties": {"hw_rng_model": "virtio"}})
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)
    self.assertRaises(exception.RngDeviceNotExist,
                      drvr._get_guest_config,
                      instance_ref,
                      [],
                      image_meta, disk_info)
def test_guest_cpu_shares_with_multi_vcpu(self):
    """A 4-vCPU flavor is expected to yield 4096 cputune shares."""
    self.flags(virt_type='kvm', group='libvirt')
    driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance = objects.Instance(**self.test_instance)
    instance.flavor.vcpus = 4
    meta = objects.ImageMeta.from_dict(self.test_image_meta)
    info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance, meta)
    guest_cfg = driver._get_guest_config(instance, [], meta, info)
    self.assertEqual(4096, guest_cfg.cputune.shares)
def test_get_guest_config_with_cpu_quota(self):
    """quota:cpu_shares / quota:cpu_period extra specs reach cputune."""
    self.flags(virt_type='kvm', group='libvirt')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_ref = objects.Instance(**self.test_instance)
    instance_ref.flavor.extra_specs = {'quota:cpu_shares': '10000',
                                       'quota:cpu_period': '20000'}
    image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)
    cfg = drvr._get_guest_config(instance_ref, [],
                                 image_meta, disk_info)
    # String extra specs are converted to integers in the config.
    self.assertEqual(10000, cfg.cputune.shares)
    self.assertEqual(20000, cfg.cputune.period)
def test_get_guest_config_with_bogus_cpu_quota(self):
    """A non-numeric quota:cpu_shares value raises ValueError."""
    self.flags(virt_type='kvm', group='libvirt')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_ref = objects.Instance(**self.test_instance)
    # 'fishfood' cannot be converted to an integer share count.
    instance_ref.flavor.extra_specs = {'quota:cpu_shares': 'fishfood',
                                       'quota:cpu_period': '20000'}
    image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)
    self.assertRaises(ValueError,
                      drvr._get_guest_config,
                      instance_ref, [], image_meta, disk_info)
def _test_get_guest_config_sysinfo_serial(self, expected_serial):
    """Build a sysinfo config and check its fields.

    :param expected_serial: value expected in cfg.system_serial; callers
        arrange the sysinfo_serial config/stubs before invoking this.
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_ref = objects.Instance(**self.test_instance)
    cfg = drvr._get_guest_config_sysinfo(instance_ref)
    self.assertIsInstance(cfg, vconfig.LibvirtConfigGuestSysinfo)
    # Manufacturer/product/version come from the Nova version module.
    self.assertEqual(version.vendor_string(),
                     cfg.system_manufacturer)
    self.assertEqual(version.product_string(),
                     cfg.system_product)
    self.assertEqual(version.version_string_with_package(),
                     cfg.system_version)
    self.assertEqual(expected_serial,
                     cfg.system_serial)
    self.assertEqual(instance_ref['uuid'],
                     cfg.system_uuid)
    self.assertEqual("Virtual Machine",
                     cfg.system_family)
def test_get_guest_config_sysinfo_serial_none(self):
    # sysinfo_serial="none" -> no serial in the sysinfo config.
    self.flags(sysinfo_serial="none", group="libvirt")
    self._test_get_guest_config_sysinfo_serial(None)
@mock.patch.object(libvirt_driver.LibvirtDriver,
                   "_get_host_sysinfo_serial_hardware")
def test_get_guest_config_sysinfo_serial_hardware(self, mock_uuid):
    # sysinfo_serial="hardware" -> serial is the host hardware UUID.
    self.flags(sysinfo_serial="hardware", group="libvirt")
    theuuid = "56b40135-a973-4eb3-87bb-a2382a3e6dbc"
    mock_uuid.return_value = theuuid
    self._test_get_guest_config_sysinfo_serial(theuuid)
@contextlib.contextmanager
def patch_exists(self, result):
    """Context manager: make os.path.exists("/etc/machine-id") return
    ``result`` while delegating every other path to the real function.
    """
    real_exists = os.path.exists

    def fake_exists(filename):
        if filename == "/etc/machine-id":
            return result
        # All other paths behave normally.
        return real_exists(filename)

    with mock.patch.object(os.path, "exists") as mock_exists:
        mock_exists.side_effect = fake_exists
        yield mock_exists
def test_get_guest_config_sysinfo_serial_os(self):
    # sysinfo_serial="os" -> serial is read from /etc/machine-id.
    self.flags(sysinfo_serial="os", group="libvirt")
    theuuid = "56b40135-a973-4eb3-87bb-a2382a3e6dbc"
    with contextlib.nested(
            mock.patch('__builtin__.open',
                       mock.mock_open(read_data=theuuid)),
            self.patch_exists(True)):
        self._test_get_guest_config_sysinfo_serial(theuuid)
def test_get_guest_config_sysinfo_serial_os_empty_machine_id(self):
    # An empty /etc/machine-id must raise rather than yield a blank serial.
    self.flags(sysinfo_serial="os", group="libvirt")
    with contextlib.nested(
            mock.patch('__builtin__.open', mock.mock_open(read_data="")),
            self.patch_exists(True)):
        self.assertRaises(exception.NovaException,
                          self._test_get_guest_config_sysinfo_serial,
                          None)
def test_get_guest_config_sysinfo_serial_os_no_machine_id_file(self):
    # Missing /etc/machine-id with sysinfo_serial="os" must raise.
    self.flags(sysinfo_serial="os", group="libvirt")
    with self.patch_exists(False):
        self.assertRaises(exception.NovaException,
                          self._test_get_guest_config_sysinfo_serial,
                          None)
def test_get_guest_config_sysinfo_serial_auto_hardware(self):
    """With sysinfo_serial="auto" and no /etc/machine-id, fall back to
    the host hardware UUID."""
    self.flags(sysinfo_serial="auto", group="libvirt")
    real_exists = os.path.exists
    with contextlib.nested(
            mock.patch.object(os.path, "exists"),
            mock.patch.object(libvirt_driver.LibvirtDriver,
                              "_get_host_sysinfo_serial_hardware")
    ) as (mock_exists, mock_uuid):
        def fake_exists(filename):
            # Only /etc/machine-id is faked as missing.
            if filename == "/etc/machine-id":
                return False
            return real_exists(filename)

        mock_exists.side_effect = fake_exists
        theuuid = "56b40135-a973-4eb3-87bb-a2382a3e6dbc"
        mock_uuid.return_value = theuuid
        self._test_get_guest_config_sysinfo_serial(theuuid)
def test_get_guest_config_sysinfo_serial_auto_os(self):
    """With sysinfo_serial="auto" and /etc/machine-id present, read the
    serial from that file."""
    self.flags(sysinfo_serial="auto", group="libvirt")
    real_exists = os.path.exists
    real_open = builtins.open
    with contextlib.nested(
            mock.patch.object(os.path, "exists"),
            mock.patch.object(builtins, "open"),
    ) as (mock_exists, mock_open):
        def fake_exists(filename):
            # Only /etc/machine-id is faked as present.
            if filename == "/etc/machine-id":
                return True
            return real_exists(filename)

        mock_exists.side_effect = fake_exists
        theuuid = "56b40135-a973-4eb3-87bb-a2382a3e6dbc"

        def fake_open(filename, *args, **kwargs):
            # Serve the fake machine-id content; pass everything else
            # through to the real open().
            if filename == "/etc/machine-id":
                h = mock.MagicMock()
                h.read.return_value = theuuid
                h.__enter__.return_value = h
                return h
            return real_open(filename, *args, **kwargs)

        mock_open.side_effect = fake_open
        self._test_get_guest_config_sysinfo_serial(theuuid)
def _create_fake_service_compute(self):
    """Return a (Service, ComputeNode) pair of fake records for tests."""
    service_ref = objects.Service(id=1729, host='fake', report_count=0)
    # The compute node is linked back to the service via id and host.
    compute_ref = objects.ComputeNode(
        id=1729,
        vcpus=2,
        memory_mb=1024,
        local_gb=2048,
        vcpus_used=0,
        memory_mb_used=0,
        local_gb_used=0,
        free_ram_mb=1024,
        free_disk_gb=2048,
        hypervisor_type='xen',
        hypervisor_version=1,
        running_vms=0,
        cpu_info='',
        current_workload=0,
        service_id=service_ref['id'],
        host=service_ref['host'])
    return (service_ref, compute_ref)
def test_get_guest_config_with_pci_passthrough_kvm(self):
    """An allocated PCI device yields one managed hostdev on KVM."""
    self.flags(virt_type='kvm', group='libvirt')
    service_ref, compute_ref = self._create_fake_service_compute()
    instance = objects.Instance(**self.test_instance)
    image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
    # Fake an allocated PCI device at address 0000:00:00.1.
    pci_device_info = dict(test_pci_device.fake_db_dev)
    pci_device_info.update(compute_node_id=1,
                           label='fake',
                           status=fields.PciDeviceStatus.ALLOCATED,
                           address='0000:00:00.1',
                           compute_id=compute_ref['id'],
                           instance_uuid=instance.uuid,
                           request_id=None,
                           extra_info={})
    pci_device = objects.PciDevice(**pci_device_info)
    pci_list = objects.PciDeviceList()
    pci_list.objects.append(pci_device)
    instance.pci_devices = pci_list
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance,
                                        image_meta)
    cfg = drvr._get_guest_config(instance, [],
                                 image_meta, disk_info)
    had_pci = 0
    # care only about the PCI devices
    for dev in cfg.devices:
        if type(dev) == vconfig.LibvirtConfigGuestHostdevPCI:
            had_pci += 1
            self.assertEqual(dev.type, 'pci')
            # KVM uses libvirt-managed device detach/reattach.
            self.assertEqual(dev.managed, 'yes')
            self.assertEqual(dev.mode, 'subsystem')
            # Address fields split out from '0000:00:00.1'.
            self.assertEqual(dev.domain, "0000")
            self.assertEqual(dev.bus, "00")
            self.assertEqual(dev.slot, "00")
            self.assertEqual(dev.function, "1")
    self.assertEqual(had_pci, 1)
def test_get_guest_config_with_pci_passthrough_xen(self):
    """An allocated PCI device yields one unmanaged hostdev on Xen."""
    self.flags(virt_type='xen', group='libvirt')
    service_ref, compute_ref = self._create_fake_service_compute()
    instance = objects.Instance(**self.test_instance)
    image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
    # Fake an allocated PCI device at address 0000:00:00.2.
    pci_device_info = dict(test_pci_device.fake_db_dev)
    pci_device_info.update(compute_node_id=1,
                           label='fake',
                           status=fields.PciDeviceStatus.ALLOCATED,
                           address='0000:00:00.2',
                           compute_id=compute_ref['id'],
                           instance_uuid=instance.uuid,
                           request_id=None,
                           extra_info={})
    pci_device = objects.PciDevice(**pci_device_info)
    pci_list = objects.PciDeviceList()
    pci_list.objects.append(pci_device)
    instance.pci_devices = pci_list
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance,
                                        image_meta)
    cfg = drvr._get_guest_config(instance, [],
                                 image_meta, disk_info)
    had_pci = 0
    # care only about the PCI devices
    for dev in cfg.devices:
        if type(dev) == vconfig.LibvirtConfigGuestHostdevPCI:
            had_pci += 1
            self.assertEqual(dev.type, 'pci')
            # Unlike KVM, Xen does not use libvirt-managed mode.
            self.assertEqual(dev.managed, 'no')
            self.assertEqual(dev.mode, 'subsystem')
            # Address fields split out from '0000:00:00.2'.
            self.assertEqual(dev.domain, "0000")
            self.assertEqual(dev.bus, "00")
            self.assertEqual(dev.slot, "00")
            self.assertEqual(dev.function, "2")
    self.assertEqual(had_pci, 1)
def test_get_guest_config_os_command_line_through_image_meta(self):
    """With a kernel_id set, the image's os_command_line becomes the
    guest kernel command line."""
    self.flags(virt_type="kvm",
               cpu_mode=None,
               group='libvirt')
    # A kernel is required for a custom command line to apply.
    self.test_instance['kernel_id'] = "fake_kernel_id"
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_ref = objects.Instance(**self.test_instance)
    image_meta = objects.ImageMeta.from_dict({
        "disk_format": "raw",
        "properties": {"os_command_line":
                       "fake_os_command_line"}})
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)
    cfg = drvr._get_guest_config(instance_ref,
                                 _fake_network_info(self.stubs, 1),
                                 image_meta, disk_info)
    self.assertEqual(cfg.os_cmdline, "fake_os_command_line")
def test_get_guest_config_os_command_line_without_kernel_id(self):
self.flags(virt_type="kvm",
cpu_mode=None,
group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict({
"disk_format": "raw",
"properties": {"os_command_line":
"fake_os_command_line"}})
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref,
_fake_network_info(self.stubs, 1),
image_meta, disk_info)
self.assertIsNone(cfg.os_cmdline)
def test_get_guest_config_os_command_empty(self):
self.flags(virt_type="kvm",
cpu_mode=None,
group='libvirt')
self.test_instance['kernel_id'] = "fake_kernel_id"
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict({
"disk_format": "raw",
"properties": {"os_command_line": ""}})
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
# the instance has 'root=/dev/vda console=tty0 console=ttyS0' set by
# default, so testing an empty string and None value in the
# os_command_line image property must pass
cfg = drvr._get_guest_config(instance_ref,
_fake_network_info(self.stubs, 1),
image_meta, disk_info)
self.assertNotEqual(cfg.os_cmdline, "")
def test_get_guest_config_armv7(self):
def get_host_capabilities_stub(self):
cpu = vconfig.LibvirtConfigGuestCPU()
cpu.arch = arch.ARMV7
caps = vconfig.LibvirtConfigCaps()
caps.host = vconfig.LibvirtConfigCapsHost()
caps.host.cpu = cpu
return caps
self.flags(virt_type="kvm",
group="libvirt")
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
self.stubs.Set(host.Host, "get_capabilities",
get_host_capabilities_stub)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
cfg = drvr._get_guest_config(instance_ref,
_fake_network_info(self.stubs, 1),
image_meta, disk_info)
self.assertEqual(cfg.os_mach_type, "vexpress-a15")
def test_get_guest_config_aarch64(self):
def get_host_capabilities_stub(self):
cpu = vconfig.LibvirtConfigGuestCPU()
cpu.arch = arch.AARCH64
caps = vconfig.LibvirtConfigCaps()
caps.host = vconfig.LibvirtConfigCapsHost()
caps.host.cpu = cpu
return caps
self.flags(virt_type="kvm",
group="libvirt")
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
self.stubs.Set(host.Host, "get_capabilities",
get_host_capabilities_stub)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
cfg = drvr._get_guest_config(instance_ref,
_fake_network_info(self.stubs, 1),
image_meta, disk_info)
self.assertEqual(cfg.os_mach_type, "virt")
def test_get_guest_config_machine_type_s390(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
caps = vconfig.LibvirtConfigCaps()
caps.host = vconfig.LibvirtConfigCapsHost()
caps.host.cpu = vconfig.LibvirtConfigGuestCPU()
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
host_cpu_archs = (arch.S390, arch.S390X)
for host_cpu_arch in host_cpu_archs:
caps.host.cpu.arch = host_cpu_arch
os_mach_type = drvr._get_machine_type(image_meta, caps)
self.assertEqual('s390-ccw-virtio', os_mach_type)
def test_get_guest_config_machine_type_through_image_meta(self):
self.flags(virt_type="kvm",
group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict({
"disk_format": "raw",
"properties": {"hw_machine_type":
"fake_machine_type"}})
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref,
_fake_network_info(self.stubs, 1),
image_meta, disk_info)
self.assertEqual(cfg.os_mach_type, "fake_machine_type")
    def test_get_guest_config_machine_type_from_config(self):
        # A [libvirt]hw_machine_type entry of the form "<arch>=<type>"
        # should be applied when the host arch matches and the image does
        # not request a machine type of its own.
        self.flags(virt_type='kvm', group='libvirt')
        self.flags(hw_machine_type=['x86_64=fake_machine_type'],
                   group='libvirt')
        def fake_getCapabilities():
            # Capabilities XML reporting an x86_64 host CPU.
            return """
            <capabilities>
                <host>
                    <uuid>cef19ce0-0ca2-11df-855d-b19fbce37686</uuid>
                    <cpu>
                      <arch>x86_64</arch>
                      <model>Penryn</model>
                      <vendor>Intel</vendor>
                      <topology sockets='1' cores='2' threads='1'/>
                      <feature name='xtpr'/>
                    </cpu>
                </host>
            </capabilities>
            """
        def fake_baselineCPU(cpu, flag):
            # Minimal baseline CPU XML matching the capabilities above.
            return """<cpu mode='custom' match='exact'>
                        <model fallback='allow'>Penryn</model>
                        <vendor>Intel</vendor>
                        <feature policy='require' name='xtpr'/>
                      </cpu>
                   """
        # Make sure the host arch is mocked as x86_64
        self.create_fake_libvirt_mock(getCapabilities=fake_getCapabilities,
                                      baselineCPU=fake_baselineCPU,
                                      getVersion=lambda: 1005001)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref,
                                     _fake_network_info(self.stubs, 1),
                                     image_meta, disk_info)
        # The configured mapping, not a libvirt default, must win.
        self.assertEqual(cfg.os_mach_type, "fake_machine_type")
def _test_get_guest_config_ppc64(self, device_index):
"""Test for nova.virt.libvirt.driver.LibvirtDriver._get_guest_config.
"""
self.flags(virt_type='kvm', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
expected = (arch.PPC64, arch.PPC)
for guestarch in expected:
with mock.patch.object(libvirt_driver.libvirt_utils,
'get_arch',
return_value=guestarch):
cfg = drvr._get_guest_config(instance_ref, [],
image_meta,
disk_info)
self.assertIsInstance(cfg.devices[device_index],
vconfig.LibvirtConfigGuestVideo)
self.assertEqual(cfg.devices[device_index].type, 'vga')
def test_get_guest_config_ppc64_through_image_meta_vnc_enabled(self):
self.flags(enabled=True, group='vnc')
self._test_get_guest_config_ppc64(6)
def test_get_guest_config_ppc64_through_image_meta_spice_enabled(self):
self.flags(enabled=True,
agent_enabled=True,
group='spice')
self._test_get_guest_config_ppc64(8)
def _test_get_guest_config_bootmenu(self, image_meta, extra_specs):
self.flags(virt_type='kvm', group='libvirt')
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
instance_ref.flavor.extra_specs = extra_specs
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref, image_meta)
conf = conn._get_guest_config(instance_ref, [], image_meta, disk_info)
self.assertTrue(conf.os_bootmenu)
def test_get_guest_config_bootmenu_via_image_meta(self):
image_meta = objects.ImageMeta.from_dict(
{"disk_format": "raw",
"properties": {"hw_boot_menu": "True"}})
self._test_get_guest_config_bootmenu(image_meta, {})
def test_get_guest_config_bootmenu_via_extra_specs(self):
image_meta = objects.ImageMeta.from_dict(
self.test_image_meta)
self._test_get_guest_config_bootmenu(image_meta,
{'hw:boot_menu': 'True'})
def test_get_guest_cpu_config_none(self):
self.flags(cpu_mode="none", group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
conf = drvr._get_guest_config(instance_ref,
_fake_network_info(self.stubs, 1),
image_meta, disk_info)
self.assertIsInstance(conf.cpu,
vconfig.LibvirtConfigGuestCPU)
self.assertIsNone(conf.cpu.mode)
self.assertIsNone(conf.cpu.model)
self.assertEqual(conf.cpu.sockets, 1)
self.assertEqual(conf.cpu.cores, 1)
self.assertEqual(conf.cpu.threads, 1)
def test_get_guest_cpu_config_default_kvm(self):
self.flags(virt_type="kvm",
cpu_mode=None,
group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
conf = drvr._get_guest_config(instance_ref,
_fake_network_info(self.stubs, 1),
image_meta, disk_info)
self.assertIsInstance(conf.cpu,
vconfig.LibvirtConfigGuestCPU)
self.assertEqual(conf.cpu.mode, "host-model")
self.assertIsNone(conf.cpu.model)
self.assertEqual(conf.cpu.sockets, 1)
self.assertEqual(conf.cpu.cores, 1)
self.assertEqual(conf.cpu.threads, 1)
def test_get_guest_cpu_config_default_uml(self):
self.flags(virt_type="uml",
cpu_mode=None,
group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
conf = drvr._get_guest_config(instance_ref,
_fake_network_info(self.stubs, 1),
image_meta, disk_info)
self.assertIsNone(conf.cpu)
def test_get_guest_cpu_config_default_lxc(self):
self.flags(virt_type="lxc",
cpu_mode=None,
group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
conf = drvr._get_guest_config(instance_ref,
_fake_network_info(self.stubs, 1),
image_meta, disk_info)
self.assertIsNone(conf.cpu)
def test_get_guest_cpu_config_host_passthrough(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
self.flags(cpu_mode="host-passthrough", group='libvirt')
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
conf = drvr._get_guest_config(instance_ref,
_fake_network_info(self.stubs, 1),
image_meta, disk_info)
self.assertIsInstance(conf.cpu,
vconfig.LibvirtConfigGuestCPU)
self.assertEqual(conf.cpu.mode, "host-passthrough")
self.assertIsNone(conf.cpu.model)
self.assertEqual(conf.cpu.sockets, 1)
self.assertEqual(conf.cpu.cores, 1)
self.assertEqual(conf.cpu.threads, 1)
def test_get_guest_cpu_config_host_model(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
self.flags(cpu_mode="host-model", group='libvirt')
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
conf = drvr._get_guest_config(instance_ref,
_fake_network_info(self.stubs, 1),
image_meta, disk_info)
self.assertIsInstance(conf.cpu,
vconfig.LibvirtConfigGuestCPU)
self.assertEqual(conf.cpu.mode, "host-model")
self.assertIsNone(conf.cpu.model)
self.assertEqual(conf.cpu.sockets, 1)
self.assertEqual(conf.cpu.cores, 1)
self.assertEqual(conf.cpu.threads, 1)
def test_get_guest_cpu_config_custom(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
self.flags(cpu_mode="custom",
cpu_model="Penryn",
group='libvirt')
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
conf = drvr._get_guest_config(instance_ref,
_fake_network_info(self.stubs, 1),
image_meta, disk_info)
self.assertIsInstance(conf.cpu,
vconfig.LibvirtConfigGuestCPU)
self.assertEqual(conf.cpu.mode, "custom")
self.assertEqual(conf.cpu.model, "Penryn")
self.assertEqual(conf.cpu.sockets, 1)
self.assertEqual(conf.cpu.cores, 1)
self.assertEqual(conf.cpu.threads, 1)
    @mock.patch.object(host.Host, "has_min_version", return_value=True)
    def test_get_guest_cpu_config_numa_topology(self, mock_has_min_version):
        # Building the guest config must fill in each NUMA cell's
        # cpu_topology (it starts out None) so that the resulting
        # topology covers exactly the flavor's vCPU count.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.flavor.vcpus = 2
        instance_ref.numa_topology = objects.InstanceNUMATopology(cells=[
            objects.InstanceNUMACell(
                id=0,
                cpuset=set([0, 1]),
                memory=1024,
                cpu_pinning={})])
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        # Sanity check: topology is unset before the config is generated.
        self.assertIsNone(instance_ref.numa_topology.cells[0].cpu_topology)
        drvr._get_guest_config(instance_ref,
                               _fake_network_info(self.stubs, 1),
                               image_meta, disk_info)
        # _get_guest_config mutates the cell in place; verify the
        # side effect rather than the returned config.
        topo = instance_ref.numa_topology.cells[0].cpu_topology
        self.assertIsNotNone(topo)
        self.assertEqual(topo.cores * topo.sockets * topo.threads,
                         instance_ref.flavor.vcpus)
def test_get_guest_cpu_topology(self):
instance_ref = objects.Instance(**self.test_instance)
instance_ref.flavor.vcpus = 8
instance_ref.flavor.extra_specs = {'hw:cpu_max_sockets': '4'}
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
conf = drvr._get_guest_config(instance_ref,
_fake_network_info(self.stubs, 1),
image_meta, disk_info)
self.assertIsInstance(conf.cpu,
vconfig.LibvirtConfigGuestCPU)
self.assertEqual(conf.cpu.mode, "host-model")
self.assertEqual(conf.cpu.sockets, 4)
self.assertEqual(conf.cpu.cores, 2)
self.assertEqual(conf.cpu.threads, 1)
def test_get_guest_memory_balloon_config_by_default(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref, [],
image_meta, disk_info)
for device in cfg.devices:
if device.root_name == 'memballoon':
self.assertIsInstance(device,
vconfig.LibvirtConfigMemoryBalloon)
self.assertEqual('virtio', device.model)
self.assertEqual(10, device.period)
def test_get_guest_memory_balloon_config_disable(self):
self.flags(mem_stats_period_seconds=0, group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref, [],
image_meta, disk_info)
no_exist = True
for device in cfg.devices:
if device.root_name == 'memballoon':
no_exist = False
break
self.assertTrue(no_exist)
def test_get_guest_memory_balloon_config_period_value(self):
self.flags(mem_stats_period_seconds=21, group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref, [],
image_meta, disk_info)
for device in cfg.devices:
if device.root_name == 'memballoon':
self.assertIsInstance(device,
vconfig.LibvirtConfigMemoryBalloon)
self.assertEqual('virtio', device.model)
self.assertEqual(21, device.period)
def test_get_guest_memory_balloon_config_qemu(self):
self.flags(virt_type='qemu', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref, [],
image_meta, disk_info)
for device in cfg.devices:
if device.root_name == 'memballoon':
self.assertIsInstance(device,
vconfig.LibvirtConfigMemoryBalloon)
self.assertEqual('virtio', device.model)
self.assertEqual(10, device.period)
def test_get_guest_memory_balloon_config_xen(self):
self.flags(virt_type='xen', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref, [],
image_meta, disk_info)
for device in cfg.devices:
if device.root_name == 'memballoon':
self.assertIsInstance(device,
vconfig.LibvirtConfigMemoryBalloon)
self.assertEqual('xen', device.model)
self.assertEqual(10, device.period)
def test_get_guest_memory_balloon_config_lxc(self):
self.flags(virt_type='lxc', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref, [],
image_meta, disk_info)
no_exist = True
for device in cfg.devices:
if device.root_name == 'memballoon':
no_exist = False
break
self.assertTrue(no_exist)
def test_xml_and_uri_no_ramdisk_no_kernel(self):
instance_data = dict(self.test_instance)
self._check_xml_and_uri(instance_data,
expect_kernel=False, expect_ramdisk=False)
def test_xml_and_uri_no_ramdisk_no_kernel_xen_hvm(self):
instance_data = dict(self.test_instance)
instance_data.update({'vm_mode': vm_mode.HVM})
self._check_xml_and_uri(instance_data, expect_kernel=False,
expect_ramdisk=False, expect_xen_hvm=True)
def test_xml_and_uri_no_ramdisk_no_kernel_xen_pv(self):
instance_data = dict(self.test_instance)
instance_data.update({'vm_mode': vm_mode.XEN})
self._check_xml_and_uri(instance_data, expect_kernel=False,
expect_ramdisk=False, expect_xen_hvm=False,
xen_only=True)
def test_xml_and_uri_no_ramdisk(self):
instance_data = dict(self.test_instance)
instance_data['kernel_id'] = 'aki-deadbeef'
self._check_xml_and_uri(instance_data,
expect_kernel=True, expect_ramdisk=False)
def test_xml_and_uri_no_kernel(self):
instance_data = dict(self.test_instance)
instance_data['ramdisk_id'] = 'ari-deadbeef'
self._check_xml_and_uri(instance_data,
expect_kernel=False, expect_ramdisk=False)
def test_xml_and_uri(self):
instance_data = dict(self.test_instance)
instance_data['ramdisk_id'] = 'ari-deadbeef'
instance_data['kernel_id'] = 'aki-deadbeef'
self._check_xml_and_uri(instance_data,
expect_kernel=True, expect_ramdisk=True)
def test_xml_and_uri_rescue(self):
instance_data = dict(self.test_instance)
instance_data['ramdisk_id'] = 'ari-deadbeef'
instance_data['kernel_id'] = 'aki-deadbeef'
self._check_xml_and_uri(instance_data, expect_kernel=True,
expect_ramdisk=True, rescue=instance_data)
def test_xml_and_uri_rescue_no_kernel_no_ramdisk(self):
instance_data = dict(self.test_instance)
self._check_xml_and_uri(instance_data, expect_kernel=False,
expect_ramdisk=False, rescue=instance_data)
def test_xml_and_uri_rescue_no_kernel(self):
instance_data = dict(self.test_instance)
instance_data['ramdisk_id'] = 'aki-deadbeef'
self._check_xml_and_uri(instance_data, expect_kernel=False,
expect_ramdisk=True, rescue=instance_data)
def test_xml_and_uri_rescue_no_ramdisk(self):
instance_data = dict(self.test_instance)
instance_data['kernel_id'] = 'aki-deadbeef'
self._check_xml_and_uri(instance_data, expect_kernel=True,
expect_ramdisk=False, rescue=instance_data)
    def test_xml_uuid(self):
        # The generated domain XML must carry the instance's UUID.
        self._check_xml_and_uuid(self.test_image_meta)
def test_lxc_container_and_uri(self):
instance_data = dict(self.test_instance)
self._check_xml_and_container(instance_data)
def test_xml_disk_prefix(self):
instance_data = dict(self.test_instance)
self._check_xml_and_disk_prefix(instance_data, None)
def test_xml_user_specified_disk_prefix(self):
instance_data = dict(self.test_instance)
self._check_xml_and_disk_prefix(instance_data, 'sd')
def test_xml_disk_driver(self):
instance_data = dict(self.test_instance)
self._check_xml_and_disk_driver(instance_data)
def test_xml_disk_bus_virtio(self):
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
self._check_xml_and_disk_bus(image_meta,
None,
(("disk", "virtio", "vda"),))
def test_xml_disk_bus_ide(self):
# It's necessary to check if the architecture is power, because
# power doesn't have support to ide, and so libvirt translate
# all ide calls to scsi
expected = {arch.PPC: ("cdrom", "scsi", "sda"),
arch.PPC64: ("cdrom", "scsi", "sda")}
expec_val = expected.get(blockinfo.libvirt_utils.get_arch({}),
("cdrom", "ide", "hda"))
image_meta = objects.ImageMeta.from_dict({
"disk_format": "iso"})
self._check_xml_and_disk_bus(image_meta,
None,
(expec_val,))
def test_xml_disk_bus_ide_and_virtio(self):
# It's necessary to check if the architecture is power, because
# power doesn't have support to ide, and so libvirt translate
# all ide calls to scsi
expected = {arch.PPC: ("cdrom", "scsi", "sda"),
arch.PPC64: ("cdrom", "scsi", "sda")}
swap = {'device_name': '/dev/vdc',
'swap_size': 1}
ephemerals = [{'device_type': 'disk',
'disk_bus': 'virtio',
'device_name': '/dev/vdb',
'size': 1}]
block_device_info = {
'swap': swap,
'ephemerals': ephemerals}
expec_val = expected.get(blockinfo.libvirt_utils.get_arch({}),
("cdrom", "ide", "hda"))
image_meta = objects.ImageMeta.from_dict({
"disk_format": "iso"})
self._check_xml_and_disk_bus(image_meta,
block_device_info,
(expec_val,
("disk", "virtio", "vdb"),
("disk", "virtio", "vdc")))
@mock.patch.object(host.Host, "list_instance_domains")
def test_list_instances(self, mock_list):
vm1 = FakeVirtDomain(id=3, name="instance00000001")
vm2 = FakeVirtDomain(id=17, name="instance00000002")
vm3 = FakeVirtDomain(name="instance00000003")
vm4 = FakeVirtDomain(name="instance00000004")
mock_list.return_value = [vm1, vm2, vm3, vm4]
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
names = drvr.list_instances()
self.assertEqual(names[0], vm1.name())
self.assertEqual(names[1], vm2.name())
self.assertEqual(names[2], vm3.name())
self.assertEqual(names[3], vm4.name())
mock_list.assert_called_with(only_running=False)
@mock.patch.object(host.Host, "list_instance_domains")
def test_list_instance_uuids(self, mock_list):
vm1 = FakeVirtDomain(id=3, name="instance00000001")
vm2 = FakeVirtDomain(id=17, name="instance00000002")
vm3 = FakeVirtDomain(name="instance00000003")
vm4 = FakeVirtDomain(name="instance00000004")
mock_list.return_value = [vm1, vm2, vm3, vm4]
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
uuids = drvr.list_instance_uuids()
self.assertEqual(len(uuids), 4)
self.assertEqual(uuids[0], vm1.UUIDString())
self.assertEqual(uuids[1], vm2.UUIDString())
self.assertEqual(uuids[2], vm3.UUIDString())
self.assertEqual(uuids[3], vm4.UUIDString())
mock_list.assert_called_with(only_running=False)
    @mock.patch.object(host.Host, "list_instance_domains")
    def test_get_all_block_devices(self, mock_list):
        # _get_all_block_devices() should gather every <disk type='block'>
        # source path across all domains, skipping file-backed disks.
        xml = [
            """
            <domain type='kvm'>
                <devices>
                    <disk type='file'>
                        <source file='filename'/>
                    </disk>
                    <disk type='block'>
                        <source dev='/path/to/dev/1'/>
                    </disk>
                </devices>
            </domain>
            """,
            """
            <domain type='kvm'>
                <devices>
                    <disk type='file'>
                        <source file='filename'/>
                    </disk>
                </devices>
            </domain>
            """,
            """
            <domain type='kvm'>
                <devices>
                    <disk type='file'>
                        <source file='filename'/>
                    </disk>
                    <disk type='block'>
                        <source dev='/path/to/dev/3'/>
                    </disk>
                </devices>
            </domain>
            """,
        ]
        mock_list.return_value = [
            FakeVirtDomain(xml[0], id=3, name="instance00000001"),
            FakeVirtDomain(xml[1], id=1, name="instance00000002"),
            FakeVirtDomain(xml[2], id=5, name="instance00000003")]
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        devices = drvr._get_all_block_devices()
        # Only the two block-device paths; the file-backed disks and the
        # block-device-free middle domain contribute nothing.
        self.assertEqual(devices, ['/path/to/dev/1', '/path/to/dev/3'])
        mock_list.assert_called_with()
@mock.patch('nova.virt.libvirt.host.Host.get_online_cpus')
def test_get_host_vcpus(self, get_online_cpus):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.flags(vcpu_pin_set="4-5")
get_online_cpus.return_value = set([4, 5, 6])
expected_vcpus = 2
vcpus = drvr._get_vcpu_total()
self.assertEqual(expected_vcpus, vcpus)
@mock.patch('nova.virt.libvirt.host.Host.get_online_cpus')
def test_get_host_vcpus_out_of_range(self, get_online_cpus):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.flags(vcpu_pin_set="4-6")
get_online_cpus.return_value = set([4, 5])
self.assertRaises(exception.Invalid, drvr._get_vcpu_total)
@mock.patch('nova.virt.libvirt.host.Host.get_online_cpus')
def test_get_host_vcpus_libvirt_error(self, get_online_cpus):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
not_supported_exc = fakelibvirt.make_libvirtError(
fakelibvirt.libvirtError,
'this function is not supported by the connection driver:'
' virNodeNumOfDevices',
error_code=fakelibvirt.VIR_ERR_NO_SUPPORT)
self.flags(vcpu_pin_set="4-6")
get_online_cpus.side_effect = not_supported_exc
self.assertRaises(exception.Invalid, drvr._get_vcpu_total)
@mock.patch('nova.virt.libvirt.host.Host.get_online_cpus')
def test_get_host_vcpus_libvirt_error_success(self, get_online_cpus):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
not_supported_exc = fakelibvirt.make_libvirtError(
fakelibvirt.libvirtError,
'this function is not supported by the connection driver:'
' virNodeNumOfDevices',
error_code=fakelibvirt.VIR_ERR_NO_SUPPORT)
self.flags(vcpu_pin_set="1")
get_online_cpus.side_effect = not_supported_exc
expected_vcpus = 1
vcpus = drvr._get_vcpu_total()
self.assertEqual(expected_vcpus, vcpus)
@mock.patch('nova.virt.libvirt.host.Host.get_cpu_count')
def test_get_host_vcpus_after_hotplug(self, get_cpu_count):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
get_cpu_count.return_value = 2
expected_vcpus = 2
vcpus = drvr._get_vcpu_total()
self.assertEqual(expected_vcpus, vcpus)
get_cpu_count.return_value = 3
expected_vcpus = 3
vcpus = drvr._get_vcpu_total()
self.assertEqual(expected_vcpus, vcpus)
@mock.patch.object(host.Host, "has_min_version", return_value=True)
def test_quiesce(self, mock_has_min_version):
self.create_fake_libvirt_mock(lookupByName=self.fake_lookup)
with mock.patch.object(FakeVirtDomain, "fsFreeze") as mock_fsfreeze:
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
instance = objects.Instance(**self.test_instance)
img_meta = {"properties": {"hw_qemu_guest_agent": "yes",
"os_require_quiesce": "yes"}}
self.assertIsNone(drvr.quiesce(self.context, instance, img_meta))
mock_fsfreeze.assert_called_once_with()
def test_quiesce_not_supported(self):
self.create_fake_libvirt_mock()
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
instance = objects.Instance(**self.test_instance)
self.assertRaises(exception.InstanceQuiesceNotSupported,
drvr.quiesce, self.context, instance, None)
@mock.patch.object(host.Host, "has_min_version", return_value=True)
def test_unquiesce(self, mock_has_min_version):
self.create_fake_libvirt_mock(getLibVersion=lambda: 1002005,
lookupByName=self.fake_lookup)
with mock.patch.object(FakeVirtDomain, "fsThaw") as mock_fsthaw:
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
instance = objects.Instance(**self.test_instance)
img_meta = {"properties": {"hw_qemu_guest_agent": "yes",
"os_require_quiesce": "yes"}}
self.assertIsNone(drvr.unquiesce(self.context, instance, img_meta))
mock_fsthaw.assert_called_once_with()
    def test_create_snapshot_metadata(self):
        # _create_snapshot_metadata() should build a glance-ready dict:
        # instance ids copied into properties, with 'raw'/'bare' used
        # when the base image does not specify formats.
        base = objects.ImageMeta.from_dict(
            {'disk_format': 'raw'})
        instance_data = {'kernel_id': 'kernel',
                         'project_id': 'prj_id',
                         'ramdisk_id': 'ram_id',
                         'os_type': None}
        instance = objects.Instance(**instance_data)
        img_fmt = 'raw'
        snp_name = 'snapshot_name'
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        ret = drvr._create_snapshot_metadata(base, instance, img_fmt, snp_name)
        expected = {'is_public': False,
                    'status': 'active',
                    'name': snp_name,
                    'properties': {
                        'kernel_id': instance['kernel_id'],
                        'image_location': 'snapshot',
                        'image_state': 'available',
                        'owner_id': instance['project_id'],
                        'ramdisk_id': instance['ramdisk_id'],
                        },
                    'disk_format': img_fmt,
                    'container_format': 'bare',
                    }
        self.assertEqual(ret, expected)
        # simulate an instance with os_type field defined
        # disk format equals to ami
        # container format not equals to bare
        instance['os_type'] = 'linux'
        base = objects.ImageMeta.from_dict(
            {'disk_format': 'ami',
             'container_format': 'test_container'})
        # The base image's explicit formats must win over the defaults,
        # and os_type is propagated once the instance has one.
        expected['properties']['os_type'] = instance['os_type']
        expected['disk_format'] = base.disk_format
        expected['container_format'] = base.container_format
        ret = drvr._create_snapshot_metadata(base, instance, img_fmt, snp_name)
        self.assertEqual(ret, expected)
def test_get_volume_driver(self):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
connection_info = {'driver_volume_type': 'fake',
'data': {'device_path': '/fake',
'access_mode': 'rw'}}
driver = conn._get_volume_driver(connection_info)
result = isinstance(driver, volume_drivers.LibvirtFakeVolumeDriver)
self.assertTrue(result)
def test_get_volume_driver_unknown(self):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
connection_info = {'driver_volume_type': 'unknown',
'data': {'device_path': '/fake',
'access_mode': 'rw'}}
self.assertRaises(
exception.VolumeDriverNotFound,
conn._get_volume_driver,
connection_info
)
@mock.patch.object(volume_drivers.LibvirtFakeVolumeDriver,
'connect_volume')
@mock.patch.object(volume_drivers.LibvirtFakeVolumeDriver, 'get_config')
def test_get_volume_config(self, get_config, connect_volume):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
connection_info = {'driver_volume_type': 'fake',
'data': {'device_path': '/fake',
'access_mode': 'rw'}}
bdm = {'device_name': 'vdb',
'disk_bus': 'fake-bus',
'device_type': 'fake-type'}
disk_info = {'bus': bdm['disk_bus'], 'type': bdm['device_type'],
'dev': 'vdb'}
mock_config = mock.MagicMock()
get_config.return_value = mock_config
config = drvr._get_volume_config(connection_info, disk_info)
get_config.assert_called_once_with(connection_info, disk_info)
self.assertEqual(mock_config, config)
def test_attach_invalid_volume_type(self):
self.create_fake_libvirt_mock()
libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
instance = objects.Instance(**self.test_instance)
self.mox.ReplayAll()
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertRaises(exception.VolumeDriverNotFound,
drvr.attach_volume, None,
{"driver_volume_type": "badtype"},
instance,
"/dev/sda")
def test_attach_blockio_invalid_hypervisor(self):
self.flags(virt_type='fake_type', group='libvirt')
self.create_fake_libvirt_mock()
libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
instance = objects.Instance(**self.test_instance)
self.mox.ReplayAll()
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertRaises(exception.InvalidHypervisorType,
drvr.attach_volume, None,
{"driver_volume_type": "fake",
"data": {"logical_block_size": "4096",
"physical_block_size": "4096"}
},
instance,
"/dev/sda")
@mock.patch.object(fakelibvirt.virConnect, "getLibVersion")
def test_attach_blockio_invalid_version(self, mock_version):
mock_version.return_value = (0 * 1000 * 1000) + (9 * 1000) + 8
self.flags(virt_type='qemu', group='libvirt')
self.create_fake_libvirt_mock()
libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
instance = objects.Instance(**self.test_instance)
self.mox.ReplayAll()
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertRaises(exception.Invalid,
drvr.attach_volume, None,
{"driver_volume_type": "fake",
"data": {"logical_block_size": "4096",
"physical_block_size": "4096"}
},
instance,
"/dev/sda")
    @mock.patch('nova.utils.get_image_from_system_metadata')
    @mock.patch('nova.virt.libvirt.blockinfo.get_info_from_bdm')
    @mock.patch('nova.virt.libvirt.host.Host.get_domain')
    def test_attach_volume_with_vir_domain_affect_live_flag(self,
            mock_get_domain, mock_get_info, get_image):
        # For a RUNNING or PAUSED domain, attach_volume must pass both
        # AFFECT_CONFIG and AFFECT_LIVE flags to attachDeviceFlags so the
        # device appears in the persistent config and the live guest.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        image_meta = {}
        get_image.return_value = image_meta
        mock_dom = mock.MagicMock()
        mock_get_domain.return_value = mock_dom
        connection_info = {"driver_volume_type": "fake",
                           "data": {"device_path": "/fake",
                                    "access_mode": "rw"}}
        bdm = {'device_name': 'vdb',
               'disk_bus': 'fake-bus',
               'device_type': 'fake-type'}
        disk_info = {'bus': bdm['disk_bus'], 'type': bdm['device_type'],
                     'dev': 'vdb'}
        mock_get_info.return_value = disk_info
        mock_conf = mock.MagicMock()
        flags = (fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG |
                 fakelibvirt.VIR_DOMAIN_AFFECT_LIVE)
        with contextlib.nested(
            mock.patch.object(drvr, '_connect_volume'),
            mock.patch.object(drvr, '_get_volume_config',
                              return_value=mock_conf),
            mock.patch.object(drvr, '_set_cache_mode')
        ) as (mock_connect_volume, mock_get_volume_config,
              mock_set_cache_mode):
            for state in (power_state.RUNNING, power_state.PAUSED):
                # domain.info() -> [state, maxMem, memory, nrVirtCpu,
                # cpuTime-ish values]; only the state matters here.
                mock_dom.info.return_value = [state, 512, 512, 2, 1234, 5678]
                drvr.attach_volume(self.context, connection_info, instance,
                                   "/dev/vdb", disk_bus=bdm['disk_bus'],
                                   device_type=bdm['device_type'])
                # Verify the whole collaboration chain for each state.
                mock_get_domain.assert_called_with(instance)
                mock_get_info.assert_called_with(
                    instance,
                    CONF.libvirt.virt_type,
                    test.MatchType(objects.ImageMeta),
                    bdm)
                mock_connect_volume.assert_called_with(
                    connection_info, disk_info)
                mock_get_volume_config.assert_called_with(
                    connection_info, disk_info)
                mock_set_cache_mode.assert_called_with(mock_conf)
                mock_dom.attachDeviceFlags.assert_called_with(
                    mock_conf.to_xml(), flags=flags)
@mock.patch('nova.virt.libvirt.host.Host.get_domain')
def test_detach_volume_with_vir_domain_affect_live_flag(self,
        mock_get_domain):
    """detach_volume on a RUNNING or PAUSED domain must pass both
    AFFECT_CONFIG and AFFECT_LIVE to detachDeviceFlags, and must
    disconnect the backing volume afterwards.
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(**self.test_instance)
    # Domain XML containing the disk the test asks to detach (vdc).
    mock_xml = """<domain>
<devices>
<disk type='file'>
<source file='/path/to/fake-volume'/>
<target dev='vdc' bus='virtio'/>
</disk>
</devices>
</domain>"""
    mock_dom = mock.MagicMock()
    mock_dom.XMLDesc.return_value = mock_xml

    connection_info = {"driver_volume_type": "fake",
                       "data": {"device_path": "/fake",
                                "access_mode": "rw"}}
    flags = (fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG |
             fakelibvirt.VIR_DOMAIN_AFFECT_LIVE)

    with mock.patch.object(drvr, '_disconnect_volume') as \
            mock_disconnect_volume:
        for state in (power_state.RUNNING, power_state.PAUSED):
            mock_dom.info.return_value = [state, 512, 512, 2, 1234, 5678]
            mock_get_domain.return_value = mock_dom
            drvr.detach_volume(connection_info, instance, '/dev/vdc')

            mock_get_domain.assert_called_with(instance)
            # The driver re-serializes the matched <disk> element; the
            # expected string must match its serializer output exactly.
            mock_dom.detachDeviceFlags.assert_called_with("""<disk type="file" device="disk">
<source file="/path/to/fake-volume"/>
<target bus="virtio" dev="vdc"/>
</disk>
""", flags=flags)
            mock_disconnect_volume.assert_called_with(
                connection_info, 'vdc')
def test_multi_nic(self):
    """A guest configured with two NICs renders two <interface>
    elements, the first of which is bridge-backed.
    """
    network_info = _fake_network_info(self.stubs, 2)
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_ref = objects.Instance(**self.test_instance)
    image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref, image_meta)
    xml = drvr._get_guest_xml(self.context, instance_ref, network_info,
                              disk_info, image_meta)
    interfaces = etree.fromstring(xml).findall("./devices/interface")
    self.assertEqual(len(interfaces), 2)
    self.assertEqual(interfaces[0].get('type'), 'bridge')
def _behave_supports_direct_io(self, raise_open=False, raise_write=False,
                               exc=ValueError()):
    """Record one round of mox expectations for _supports_direct_io.

    :param raise_open: make the stubbed os.open raise ``exc``.
    :param raise_write: make the stubbed os.write raise ``exc``.
    :param exc: the exception instance to raise from the failing call.
    """
    testfile = os.path.join('.', '.directio.test')
    open_behavior = os.open(testfile,
                            os.O_CREAT | os.O_WRONLY | os.O_DIRECT)
    if raise_open:
        open_behavior.AndRaise(exc)
    else:
        # write/close are only reached when open succeeds, so only
        # record those expectations on the success path.
        open_behavior.AndReturn(3)
        write_behavior = os.write(3, mox.IgnoreArg())
        if raise_write:
            write_behavior.AndRaise(exc)
        else:
            os.close(3)
    # The driver always removes the probe file in its cleanup path and
    # os.unlink() takes a path, not a file descriptor — the previous
    # expectation of os.unlink(3) could never match the driver's call.
    os.unlink(testfile)
def test_supports_direct_io(self):
    """Exercise every outcome of _supports_direct_io: success, a
    non-EINVAL failure (propagates), and an EINVAL failure (reported
    as "not supported").
    """
    # O_DIRECT is not supported on all Python runtimes, so on platforms
    # where it's not supported (e.g. Mac), we can still test the code-path
    # by stubbing out the value.
    if not hasattr(os, 'O_DIRECT'):
        # `mock` seems to have trouble stubbing an attr that doesn't
        # originally exist, so falling back to stubbing out the attribute
        # directly.
        os.O_DIRECT = 16384
        self.addCleanup(delattr, os, 'O_DIRECT')

    einval = OSError()
    einval.errno = errno.EINVAL
    self.mox.StubOutWithMock(os, 'open')
    self.mox.StubOutWithMock(os, 'write')
    self.mox.StubOutWithMock(os, 'close')
    self.mox.StubOutWithMock(os, 'unlink')
    _supports_direct_io = libvirt_driver.LibvirtDriver._supports_direct_io

    # Record five scenarios in the exact order they are asserted below.
    self._behave_supports_direct_io()
    self._behave_supports_direct_io(raise_write=True)
    self._behave_supports_direct_io(raise_open=True)
    self._behave_supports_direct_io(raise_write=True, exc=einval)
    self._behave_supports_direct_io(raise_open=True, exc=einval)

    self.mox.ReplayAll()
    self.assertTrue(_supports_direct_io('.'))
    # Arbitrary (non-EINVAL) errors must propagate to the caller.
    self.assertRaises(ValueError, _supports_direct_io, '.')
    self.assertRaises(ValueError, _supports_direct_io, '.')
    # EINVAL means the filesystem does not support O_DIRECT.
    self.assertFalse(_supports_direct_io('.'))
    self.assertFalse(_supports_direct_io('.'))
    self.mox.VerifyAll()
def _check_xml_and_container(self, instance):
    """Render guest XML for an LXC container and verify the container
    specific bits: domain type, os type 'exe', and the rootfs
    filesystem mount.
    """
    instance_ref = objects.Instance(**instance)
    image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

    self.flags(virt_type='lxc', group='libvirt')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

    self.assertEqual(drvr._uri(), 'lxc:///')

    network_info = _fake_network_info(self.stubs, 1)
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)
    xml = drvr._get_guest_xml(self.context, instance_ref,
                              network_info, disk_info,
                              image_meta)
    tree = etree.fromstring(xml)

    # (extractor, expected) pairs; NOTE the loop variable below
    # shadows this list name, which is harmless but confusing.
    check = [
        (lambda t: t.find('.').get('type'), 'lxc'),
        (lambda t: t.find('./os/type').text, 'exe'),
        (lambda t: t.find('./devices/filesystem/target').get('dir'), '/')]

    for i, (check, expected_result) in enumerate(check):
        self.assertEqual(check(tree),
                         expected_result,
                         '%s failed common check %d' % (xml, i))

    # The filesystem source dir is instance-specific; just ensure it
    # is non-empty.
    target = tree.find('./devices/filesystem/source').get('dir')
    self.assertTrue(len(target) > 0)
def _check_xml_and_disk_prefix(self, instance, prefix):
    """Verify the first disk's target device name per virt_type.

    With no ``prefix`` each hypervisor uses its native default
    (vda/xvda/ubda); with a configured disk_prefix every hypervisor
    must use ``prefix + 'a'``.
    """
    instance_ref = objects.Instance(**instance)
    image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

    def _get_prefix(p, default):
        # Expected device name: configured prefix + 'a', else default.
        if p:
            return p + 'a'
        return default

    type_disk_map = {
        'qemu': [
            (lambda t: t.find('.').get('type'), 'qemu'),
            (lambda t: t.find('./devices/disk/target').get('dev'),
             _get_prefix(prefix, 'vda'))],
        'xen': [
            (lambda t: t.find('.').get('type'), 'xen'),
            (lambda t: t.find('./devices/disk/target').get('dev'),
             _get_prefix(prefix, 'xvda'))],
        'kvm': [
            (lambda t: t.find('.').get('type'), 'kvm'),
            (lambda t: t.find('./devices/disk/target').get('dev'),
             _get_prefix(prefix, 'vda'))],
        'uml': [
            (lambda t: t.find('.').get('type'), 'uml'),
            (lambda t: t.find('./devices/disk/target').get('dev'),
             _get_prefix(prefix, 'ubda'))]
        }

    for (virt_type, checks) in six.iteritems(type_disk_map):
        self.flags(virt_type=virt_type, group='libvirt')
        if prefix:
            self.flags(disk_prefix=prefix, group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

        network_info = _fake_network_info(self.stubs, 1)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)

        xml = drvr._get_guest_xml(self.context, instance_ref,
                                  network_info, disk_info,
                                  image_meta)
        tree = etree.fromstring(xml)

        for i, (check, expected_result) in enumerate(checks):
            self.assertEqual(check(tree),
                             expected_result,
                             '%s != %s failed check %d' %
                             (check(tree), expected_result, i))
def _check_xml_and_disk_driver(self, image_meta):
    """Verify disk cache mode selection: cache='none' when O_DIRECT is
    available, 'writethrough' when it is not.

    NOTE(review): the ``image_meta`` parameter is shadowed below by
    self.test_image_meta, so the argument is effectively ignored —
    confirm against the callers whether that is intended.
    """
    os_open = os.open
    directio_supported = True

    def os_open_stub(path, flags, *args, **kwargs):
        # Simulate a filesystem with/without O_DIRECT support; strip
        # the flag before delegating to the real os.open so the call
        # still succeeds on filesystems that reject O_DIRECT.
        if flags & os.O_DIRECT:
            if not directio_supported:
                raise OSError(errno.EINVAL,
                              '%s: %s' % (os.strerror(errno.EINVAL), path))
            flags &= ~os.O_DIRECT
        return os_open(path, flags, *args, **kwargs)

    self.stubs.Set(os, 'open', os_open_stub)

    # Installed on the class, hence the staticmethod wrapper; reads
    # directio_supported from this enclosing scope at call time.
    @staticmethod
    def connection_supports_direct_io_stub(dirpath):
        return directio_supported

    self.stubs.Set(libvirt_driver.LibvirtDriver,
                   '_supports_direct_io', connection_supports_direct_io_stub)

    instance_ref = objects.Instance(**self.test_instance)
    image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
    network_info = _fake_network_info(self.stubs, 1)

    drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)
    xml = drv._get_guest_xml(self.context, instance_ref,
                             network_info, disk_info, image_meta)
    tree = etree.fromstring(xml)
    disks = tree.findall('./devices/disk/driver')
    for guest_disk in disks:
        self.assertEqual(guest_disk.get("cache"), "none")

    directio_supported = False

    # The O_DIRECT availability is cached on first use in
    # LibvirtDriver, hence we re-create it here
    drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)
    xml = drv._get_guest_xml(self.context, instance_ref,
                             network_info, disk_info, image_meta)
    tree = etree.fromstring(xml)
    disks = tree.findall('./devices/disk/driver')
    for guest_disk in disks:
        self.assertEqual(guest_disk.get("cache"), "writethrough")
def _check_xml_and_disk_bus(self, image_meta,
                            block_device_info, wantConfig):
    """Render guest XML and compare each disk's (device, bus, dev)
    triple against the expectations in ``wantConfig``.
    """
    instance_ref = objects.Instance(**self.test_instance)
    network_info = _fake_network_info(self.stubs, 1)

    drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta,
                                        block_device_info)
    xml = drv._get_guest_xml(self.context, instance_ref,
                             network_info, disk_info, image_meta,
                             block_device_info=block_device_info)
    tree = etree.fromstring(xml)

    got_disks = tree.findall('./devices/disk')
    got_disk_targets = tree.findall('./devices/disk/target')
    for i, (want_type, want_bus, want_dev) in enumerate(wantConfig):
        # Indexing (rather than zip) keeps the original behavior of
        # failing loudly if the XML has fewer disks than expected.
        self.assertEqual(got_disks[i].get('device'), want_type)
        self.assertEqual(got_disk_targets[i].get('bus'), want_bus)
        self.assertEqual(got_disk_targets[i].get('dev'), want_dev)
def _check_xml_and_uuid(self, image_meta):
    """Verify the generated guest XML carries the instance's uuid.

    NOTE(review): the ``image_meta`` parameter is immediately
    overwritten from self.test_image_meta, so the argument is
    ignored — confirm with the callers whether that is intended.
    """
    instance_ref = objects.Instance(**self.test_instance)
    image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
    network_info = _fake_network_info(self.stubs, 1)

    drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)
    xml = drv._get_guest_xml(self.context, instance_ref,
                             network_info, disk_info, image_meta)
    tree = etree.fromstring(xml)
    self.assertEqual(tree.find('./uuid').text,
                     instance_ref['uuid'])
@mock.patch.object(libvirt_driver.LibvirtDriver,
                   "_get_host_sysinfo_serial_hardware",)
def _check_xml_and_uri(self, instance, mock_serial,
                       expect_ramdisk=False, expect_kernel=False,
                       rescue=None, expect_xen_hvm=False, xen_only=False):
    """Render guest XML for several virt_types and validate it.

    For each hypervisor under test this checks the connection URI,
    os/type, kernel/ramdisk presence, kernel cmdline, sysinfo
    entries, serial/console devices, disk sources and the nwfilter
    reference.  Finally it verifies that an explicitly configured
    connection_uri always overrides the per-virt_type default.
    """
    mock_serial.return_value = "cef19ce0-0ca2-11df-855d-b19fbce37686"
    instance_ref = objects.Instance(**instance)
    image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

    # Xen guests default to paravirt (XEN) unless the caller asks for
    # full virtualization (HVM).
    xen_vm_mode = vm_mode.XEN
    if expect_xen_hvm:
        xen_vm_mode = vm_mode.HVM

    # virt_type -> (expected URI, [(extractor, expected), ...])
    type_uri_map = {'qemu': ('qemu:///system',
                         [(lambda t: t.find('.').get('type'), 'qemu'),
                          (lambda t: t.find('./os/type').text,
                           vm_mode.HVM),
                          (lambda t: t.find('./devices/emulator'), None)]),
                    'kvm': ('qemu:///system',
                         [(lambda t: t.find('.').get('type'), 'kvm'),
                          (lambda t: t.find('./os/type').text,
                           vm_mode.HVM),
                          (lambda t: t.find('./devices/emulator'), None)]),
                    'uml': ('uml:///system',
                         [(lambda t: t.find('.').get('type'), 'uml'),
                          (lambda t: t.find('./os/type').text,
                           vm_mode.UML)]),
                    'xen': ('xen:///',
                         [(lambda t: t.find('.').get('type'), 'xen'),
                          (lambda t: t.find('./os/type').text,
                           xen_vm_mode)])}

    if expect_xen_hvm or xen_only:
        hypervisors_to_check = ['xen']
    else:
        hypervisors_to_check = ['qemu', 'kvm', 'xen']

    # Extend each hypervisor's check list with the parameter-dependent
    # checks (kernel, ramdisk, sysinfo, serial devices).
    for hypervisor_type in hypervisors_to_check:
        check_list = type_uri_map[hypervisor_type][1]

        if rescue:
            suffix = '.rescue'
        else:
            suffix = ''
        if expect_kernel:
            check = (lambda t: self.relpath(t.find('./os/kernel').text).
                     split('/')[1], 'kernel' + suffix)
        else:
            check = (lambda t: t.find('./os/kernel'), None)
        check_list.append(check)

        if expect_kernel:
            # Only qemu guests get no_timer_check on the cmdline.
            check = (lambda t: "no_timer_check" in t.find('./os/cmdline').
                     text, hypervisor_type == "qemu")
            check_list.append(check)
        # Hypervisors that only support vm_mode.HVM and Xen
        # should not produce configuration that results in kernel
        # arguments
        if not expect_kernel and (hypervisor_type in
                                  ['qemu', 'kvm', 'xen']):
            check = (lambda t: t.find('./os/root'), None)
            check_list.append(check)
            check = (lambda t: t.find('./os/cmdline'), None)
            check_list.append(check)

        if expect_ramdisk:
            check = (lambda t: self.relpath(t.find('./os/initrd').text).
                     split('/')[1], 'ramdisk' + suffix)
        else:
            check = (lambda t: t.find('./os/initrd'), None)
        check_list.append(check)

        if hypervisor_type in ['qemu', 'kvm']:
            # sysinfo entries, in order: manufacturer, product,
            # version, serial, uuid.
            xpath = "./sysinfo/system/entry"
            check = (lambda t: t.findall(xpath)[0].get("name"),
                     "manufacturer")
            check_list.append(check)
            check = (lambda t: t.findall(xpath)[0].text,
                     version.vendor_string())
            check_list.append(check)

            check = (lambda t: t.findall(xpath)[1].get("name"),
                     "product")
            check_list.append(check)
            check = (lambda t: t.findall(xpath)[1].text,
                     version.product_string())
            check_list.append(check)

            check = (lambda t: t.findall(xpath)[2].get("name"),
                     "version")
            check_list.append(check)
            # NOTE(sirp): empty strings don't roundtrip in lxml (they are
            # converted to None), so we need an `or ''` to correct for that
            check = (lambda t: t.findall(xpath)[2].text or '',
                     version.version_string_with_package())
            check_list.append(check)

            check = (lambda t: t.findall(xpath)[3].get("name"),
                     "serial")
            check_list.append(check)
            check = (lambda t: t.findall(xpath)[3].text,
                     "cef19ce0-0ca2-11df-855d-b19fbce37686")
            check_list.append(check)

            check = (lambda t: t.findall(xpath)[4].get("name"),
                     "uuid")
            check_list.append(check)
            check = (lambda t: t.findall(xpath)[4].text,
                     instance['uuid'])
            check_list.append(check)

        if hypervisor_type in ['qemu', 'kvm']:
            check = (lambda t: t.findall('./devices/serial')[0].get(
                'type'), 'file')
            check_list.append(check)
            check = (lambda t: t.findall('./devices/serial')[1].get(
                'type'), 'pty')
            check_list.append(check)
            check = (lambda t: self.relpath(t.findall(
                './devices/serial/source')[0].get('path')).
                split('/')[1], 'console.log')
            check_list.append(check)
        else:
            check = (lambda t: t.find('./devices/console').get(
                'type'), 'pty')
            check_list.append(check)

    # Checks applied to every hypervisor type.
    common_checks = [
        (lambda t: t.find('.').tag, 'domain'),
        (lambda t: t.find('./memory').text, '2097152')]
    if rescue:
        common_checks += [
            (lambda t: self.relpath(t.findall('./devices/disk/source')[0].
                get('file')).split('/')[1], 'disk.rescue'),
            (lambda t: self.relpath(t.findall('./devices/disk/source')[1].
                get('file')).split('/')[1], 'disk')]
    else:
        common_checks += [(lambda t: self.relpath(t.findall(
            './devices/disk/source')[0].get('file')).split('/')[1],
            'disk')]
        common_checks += [(lambda t: self.relpath(t.findall(
            './devices/disk/source')[1].get('file')).split('/')[1],
            'disk.local')]

    for virt_type in hypervisors_to_check:
        expected_uri = type_uri_map[virt_type][0]
        checks = type_uri_map[virt_type][1]
        self.flags(virt_type=virt_type, group='libvirt')

        with mock.patch('nova.virt.libvirt.driver.libvirt') as old_virt:
            # Pretend the running libvirt lacks the baseline-CPU
            # feature-expansion capability.
            del old_virt.VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES

            drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

            self.assertEqual(drvr._uri(), expected_uri)

            network_info = _fake_network_info(self.stubs, 1)
            disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                                instance_ref,
                                                image_meta,
                                                rescue=rescue)

            xml = drvr._get_guest_xml(self.context, instance_ref,
                                      network_info, disk_info,
                                      image_meta,
                                      rescue=rescue)
            tree = etree.fromstring(xml)
            for i, (check, expected_result) in enumerate(checks):
                self.assertEqual(check(tree),
                                 expected_result,
                                 '%s != %s failed check %d' %
                                 (check(tree), expected_result, i))

            for i, (check, expected_result) in enumerate(common_checks):
                self.assertEqual(check(tree),
                                 expected_result,
                                 '%s != %s failed common check %d' %
                                 (check(tree), expected_result, i))

            # The interface must reference the per-NIC nwfilter.
            filterref = './devices/interface/filterref'
            vif = network_info[0]
            nic_id = vif['address'].replace(':', '')
            fw = firewall.NWFilterFirewall(fake.FakeVirtAPI(), drvr)
            instance_filter_name = fw._instance_filter_name(instance_ref,
                                                            nic_id)
            self.assertEqual(tree.find(filterref).get('filter'),
                             instance_filter_name)

    # This test is supposed to make sure we don't
    # override a specifically set uri
    #
    # Deliberately not just assigning this string to CONF.connection_uri
    # and checking against that later on. This way we make sure the
    # implementation doesn't fiddle around with the CONF.
    testuri = 'something completely different'
    self.flags(connection_uri=testuri, group='libvirt')
    for (virt_type, (expected_uri, checks)) in six.iteritems(type_uri_map):
        self.flags(virt_type=virt_type, group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.assertEqual(drvr._uri(), testuri)
def test_ensure_filtering_rules_for_instance_timeout(self):
    # ensure_filtering_fules_for_instance() finishes with timeout.
    # Preparing mocks
    def fake_none(self, *args):
        # Stub that reports "filter does not exist" on every poll.
        return

    class FakeTime(object):
        # Accumulates the total simulated sleep time.
        def __init__(self):
            self.counter = 0

        def sleep(self, t):
            self.counter += t

    fake_timer = FakeTime()

    def fake_sleep(t):
        fake_timer.sleep(t)

    # _fake_network_info must be called before create_fake_libvirt_mock(),
    # as _fake_network_info calls importutils.import_class() and
    # create_fake_libvirt_mock() mocks importutils.import_class().
    network_info = _fake_network_info(self.stubs, 1)
    self.create_fake_libvirt_mock()
    instance_ref = objects.Instance(**self.test_instance)

    # Start test
    self.mox.ReplayAll()
    try:
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.stubs.Set(drvr.firewall_driver,
                       'setup_basic_filtering',
                       fake_none)
        self.stubs.Set(drvr.firewall_driver,
                       'prepare_instance_filter',
                       fake_none)
        self.stubs.Set(drvr.firewall_driver,
                       'instance_filter_exists',
                       fake_none)
        self.stubs.Set(greenthread,
                       'sleep',
                       fake_sleep)
        drvr.ensure_filtering_rules_for_instance(instance_ref,
                                                 network_info)
    except exception.NovaException as e:
        msg = ('The firewall filter for %s does not exist' %
               instance_ref['name'])
        c1 = (0 <= six.text_type(e).find(msg))
        self.assertTrue(c1)

    # The retry loop must have slept for the full timeout budget.
    self.assertEqual(29, fake_timer.counter, "Didn't wait the expected "
                                             "amount of time")
@mock.patch.object(libvirt_driver.LibvirtDriver,
                   '_create_shared_storage_test_file')
@mock.patch.object(fakelibvirt.Connection, 'compareCPU')
def test_check_can_live_migrate_dest_all_pass_with_block_migration(
        self, mock_cpu, mock_test_file):
    """Destination pre-checks succeed for a block migration and report
    the available disk in MB.
    """
    instance_ref = objects.Instance(**self.test_instance)
    instance_ref.vcpu_model = test_vcpu_model.fake_vcpumodel
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    compute_info = {'disk_available_least': 400,
                    'cpu_info': 'asdf',
                    }
    filename = "file"

    # _check_cpu_match
    mock_cpu.return_value = 1
    # mounted_on_same_shared_storage
    mock_test_file.return_value = filename

    # No need for the src_compute_info
    return_value = drvr.check_can_live_migrate_destination(self.context,
        instance_ref, None, compute_info, True)
    # 400 GB becomes 409600 MB in the returned migration data.
    self.assertThat({"filename": "file",
                     'image_type': 'default',
                     'disk_available_mb': 409600,
                     "disk_over_commit": False,
                     "block_migration": True},
                    matchers.DictMatches(return_value))
@mock.patch.object(libvirt_driver.LibvirtDriver,
                   '_create_shared_storage_test_file')
@mock.patch.object(fakelibvirt.Connection, 'compareCPU')
def test_check_can_live_migrate_dest_all_pass_no_block_migration(
        self, mock_cpu, mock_test_file):
    """Destination pre-checks succeed for a non-block migration and
    leave disk_available_mb unset.
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance_ref = objects.Instance(**self.test_instance)
    instance_ref.vcpu_model = test_vcpu_model.fake_vcpumodel
    compute_info = {'disk_available_least': 400,
                    'cpu_info': 'asdf',
                    }
    # CPUs compare as compatible.
    mock_cpu.return_value = 1
    # Shared-storage probe file name handed back by the driver.
    mock_test_file.return_value = "file"

    # No need for the src_compute_info
    return_value = drvr.check_can_live_migrate_destination(
        self.context, instance_ref, None, compute_info, False)
    expected = {"filename": "file",
                "image_type": 'default',
                "block_migration": False,
                "disk_over_commit": False,
                "disk_available_mb": None}
    self.assertThat(expected, matchers.DictMatches(return_value))
@mock.patch.object(libvirt_driver.LibvirtDriver,
                   '_create_shared_storage_test_file',
                   return_value='fake')
@mock.patch.object(libvirt_driver.LibvirtDriver, '_compare_cpu')
def test_check_can_live_migrate_guest_cpu_none_model(
        self, mock_cpu, mock_test_file):
    # Tests that when instance.vcpu_model.model is None, the host cpu
    # model is used for live migration.
    instance_ref = objects.Instance(**self.test_instance)
    instance_ref.vcpu_model = test_vcpu_model.fake_vcpumodel
    instance_ref.vcpu_model.model = None
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    compute_info = {'cpu_info': 'asdf'}
    result = drvr.check_can_live_migrate_destination(
        self.context, instance_ref, compute_info, compute_info)
    # A None guest CPU model means the host's cpu_info is compared.
    mock_cpu.assert_called_once_with(None, 'asdf')
    expected_result = {"filename": 'fake',
                       "image_type": CONF.libvirt.images_type,
                       "block_migration": False,
                       "disk_over_commit": False,
                       "disk_available_mb": None}
    self.assertDictEqual(expected_result, result)
@mock.patch.object(libvirt_driver.LibvirtDriver,
                   '_create_shared_storage_test_file')
@mock.patch.object(fakelibvirt.Connection, 'compareCPU')
def test_check_can_live_migrate_dest_no_instance_cpu_info(
        self, mock_cpu, mock_test_file):
    """Without a vcpu_model on the instance, the source host's
    serialized cpu_info is used for the compatibility check.
    """
    instance_ref = objects.Instance(**self.test_instance)
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    compute_info = {'cpu_info': jsonutils.dumps({
        "vendor": "AMD",
        "arch": arch.I686,
        "features": ["sse3"],
        "model": "Opteron_G3",
        "topology": {"cores": 2, "threads": 1, "sockets": 4}
    })}
    filename = "file"

    # _check_cpu_match
    mock_cpu.return_value = 1
    # mounted_on_same_shared_storage
    mock_test_file.return_value = filename

    return_value = drvr.check_can_live_migrate_destination(self.context,
        instance_ref, compute_info, compute_info, False)
    self.assertThat({"filename": "file",
                     "image_type": 'default',
                     "block_migration": False,
                     "disk_over_commit": False,
                     "disk_available_mb": None},
                    matchers.DictMatches(return_value))
@mock.patch.object(fakelibvirt.Connection, 'compareCPU')
def test_check_can_live_migrate_dest_incompatible_cpu_raises(
        self, mock_cpu):
    """An incompatible CPU reported by compareCPU propagates as
    InvalidCPUInfo from the destination pre-check.
    """
    mock_cpu.side_effect = exception.InvalidCPUInfo(reason='foo')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance_ref = objects.Instance(**self.test_instance)
    instance_ref.vcpu_model = test_vcpu_model.fake_vcpumodel
    compute_info = {'cpu_info': 'asdf'}

    self.assertRaises(exception.InvalidCPUInfo,
                      drvr.check_can_live_migrate_destination,
                      self.context, instance_ref,
                      compute_info, compute_info, False)
@mock.patch.object(host.Host, 'compare_cpu')
@mock.patch.object(nova.virt.libvirt, 'config')
def test_compare_cpu_compatible_host_cpu(self, mock_vconfig, mock_compare):
    """A positive compare_cpu result means compatible: _compare_cpu
    returns None.
    """
    mock_compare.return_value = 5
    driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.assertIsNone(
        driver._compare_cpu(None, jsonutils.dumps(_fake_cpu_info)))
@mock.patch.object(host.Host, 'compare_cpu')
@mock.patch.object(nova.virt.libvirt, 'config')
def test_compare_cpu_handles_not_supported_error_gracefully(self,
                                                            mock_vconfig,
                                                            mock_compare):
    """VIR_ERR_NO_SUPPORT from compareCPU is treated as "cannot
    check", not as a failure: _compare_cpu returns None.
    """
    not_supported_exc = fakelibvirt.make_libvirtError(
        fakelibvirt.libvirtError,
        'this function is not supported by the connection driver:'
        ' virCompareCPU',
        error_code=fakelibvirt.VIR_ERR_NO_SUPPORT)
    mock_compare.side_effect = not_supported_exc
    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    ret = conn._compare_cpu(None, jsonutils.dumps(_fake_cpu_info))
    self.assertIsNone(ret)
@mock.patch.object(host.Host, 'compare_cpu')
@mock.patch.object(nova.virt.libvirt.LibvirtDriver,
                   '_vcpu_model_to_cpu_config')
def test_compare_cpu_compatible_guest_cpu(self, mock_vcpu_to_cpu,
                                          mock_compare):
    """Comparing against a guest vcpu model (first argument) also
    returns None when compareCPU reports compatibility.
    """
    mock_compare.return_value = 6
    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    ret = conn._compare_cpu(jsonutils.dumps(_fake_cpu_info), None)
    self.assertIsNone(ret)
def test_compare_cpu_virt_type_xen(self):
    """CPU comparison is skipped entirely under Xen: _compare_cpu
    returns None without consulting libvirt.
    """
    self.flags(virt_type='xen', group='libvirt')
    driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.assertIsNone(driver._compare_cpu(None, None))
@mock.patch.object(host.Host, 'compare_cpu')
@mock.patch.object(nova.virt.libvirt, 'config')
def test_compare_cpu_invalid_cpuinfo_raises(self, mock_vconfig,
                                            mock_compare):
    """A zero compare result (incompatible/indeterminate) raises
    InvalidCPUInfo.
    """
    mock_compare.return_value = 0
    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.assertRaises(exception.InvalidCPUInfo,
                      conn._compare_cpu, None,
                      jsonutils.dumps(_fake_cpu_info))
@mock.patch.object(host.Host, 'compare_cpu')
@mock.patch.object(nova.virt.libvirt, 'config')
def test_compare_cpu_incompatible_cpu_raises(self, mock_vconfig,
                                             mock_compare):
    """A libvirtError from compareCPU (other than NO_SUPPORT) is
    surfaced as MigrationPreCheckError.
    """
    mock_compare.side_effect = fakelibvirt.libvirtError('cpu')
    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.assertRaises(exception.MigrationPreCheckError,
                      conn._compare_cpu, None,
                      jsonutils.dumps(_fake_cpu_info))
def test_check_can_live_migrate_dest_cleanup_works_correctly(self):
    """Destination cleanup removes the shared-storage probe file named
    in dest_check_data.
    """
    objects.Instance(**self.test_instance)
    dest_check_data = {"filename": "file",
                       "block_migration": True,
                       "disk_over_commit": False,
                       "disk_available_mb": 1024}
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

    self.mox.StubOutWithMock(drvr, '_cleanup_shared_storage_test_file')
    # Expect exactly one cleanup call with the probe filename.
    drvr._cleanup_shared_storage_test_file("file")

    self.mox.ReplayAll()
    drvr.check_can_live_migrate_destination_cleanup(self.context,
                                                    dest_check_data)
def _mock_can_live_migrate_source(self, block_migration=False,
                                  is_shared_block_storage=False,
                                  is_shared_instance_path=False,
                                  is_booted_from_volume=False,
                                  disk_available_mb=1024,
                                  block_device_info=None,
                                  block_device_text=None):
    """Record the common mox expectations for
    check_can_live_migrate_source() tests.

    Expectations are recorded in the order the driver invokes them;
    callers must self.mox.ReplayAll() before running the method under
    test.

    :returns: (instance, dest_check_data, drvr) tuple for the caller.
    """
    instance = objects.Instance(**self.test_instance)
    dest_check_data = {'filename': 'file',
                       'image_type': 'default',
                       'block_migration': block_migration,
                       'disk_over_commit': False,
                       'disk_available_mb': disk_available_mb}
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

    self.mox.StubOutWithMock(drvr, '_is_shared_block_storage')
    drvr._is_shared_block_storage(instance, dest_check_data,
            block_device_info).AndReturn(is_shared_block_storage)
    self.mox.StubOutWithMock(drvr, '_check_shared_storage_test_file')
    drvr._check_shared_storage_test_file('file').AndReturn(
            is_shared_instance_path)
    self.mox.StubOutWithMock(drvr, "get_instance_disk_info")
    drvr.get_instance_disk_info(instance,
                                block_device_info=block_device_info).\
            AndReturn(block_device_text)
    self.mox.StubOutWithMock(drvr, '_is_booted_from_volume')
    drvr._is_booted_from_volume(instance, block_device_text).AndReturn(
        is_booted_from_volume)

    return (instance, dest_check_data, drvr)
def test_check_can_live_migrate_source_block_migration(self):
    """Block migration passes the source check and mirrors the shared
    instance-path flag into the legacy is_shared_storage key.
    """
    instance, dest_check_data, drvr = self._mock_can_live_migrate_source(
        block_migration=True)

    self.mox.StubOutWithMock(drvr, "_assert_dest_node_has_enough_disk")
    drvr._assert_dest_node_has_enough_disk(
        self.context, instance, dest_check_data['disk_available_mb'],
        False, None)

    self.mox.ReplayAll()
    ret = drvr.check_can_live_migrate_source(self.context, instance,
                                             dest_check_data)
    self.assertIsInstance(ret, dict)
    self.assertIn('is_shared_block_storage', ret)
    self.assertIn('is_shared_instance_path', ret)
    # Legacy key must track the instance-path flag.
    self.assertEqual(ret['is_shared_instance_path'],
                     ret['is_shared_storage'])
def test_check_can_live_migrate_source_shared_block_storage(self):
    """Shared block storage satisfies a non-block live migration."""
    instance, dest_check_data, drvr = self._mock_can_live_migrate_source(
        is_shared_block_storage=True)
    self.mox.ReplayAll()
    drvr.check_can_live_migrate_source(self.context, instance,
                                       dest_check_data)
def test_check_can_live_migrate_source_shared_instance_path(self):
    """A shared instance path satisfies a non-block live migration."""
    instance, dest_check_data, drvr = self._mock_can_live_migrate_source(
        is_shared_instance_path=True)
    self.mox.ReplayAll()
    drvr.check_can_live_migrate_source(self.context, instance,
                                       dest_check_data)
def test_check_can_live_migrate_source_non_shared_fails(self):
    """No shared storage of any kind fails a non-block migration."""
    instance, dest_check_data, drvr = self._mock_can_live_migrate_source()
    self.mox.ReplayAll()
    self.assertRaises(exception.InvalidSharedStorage,
                      drvr.check_can_live_migrate_source, self.context,
                      instance, dest_check_data)
def test_check_can_live_migrate_source_shared_block_migration_fails(self):
    """Requesting block migration on shared block storage is invalid."""
    instance, dest_check_data, drvr = self._mock_can_live_migrate_source(
        block_migration=True,
        is_shared_block_storage=True)
    self.mox.ReplayAll()
    self.assertRaises(exception.InvalidLocalStorage,
                      drvr.check_can_live_migrate_source,
                      self.context, instance, dest_check_data)
def test_check_can_live_migrate_shared_path_block_migration_fails(self):
    """Requesting block migration on a shared instance path is
    invalid.
    """
    instance, dest_check_data, drvr = self._mock_can_live_migrate_source(
        block_migration=True,
        is_shared_instance_path=True)
    self.mox.ReplayAll()
    self.assertRaises(exception.InvalidLocalStorage,
                      drvr.check_can_live_migrate_source,
                      self.context, instance, dest_check_data, None)
def test_check_can_live_migrate_non_shared_non_block_migration_fails(self):
    """Neither shared storage nor block migration requested: fail."""
    instance, dest_check_data, drvr = self._mock_can_live_migrate_source()
    self.mox.ReplayAll()
    self.assertRaises(exception.InvalidSharedStorage,
                      drvr.check_can_live_migrate_source,
                      self.context, instance, dest_check_data)
def test_check_can_live_migrate_source_with_dest_not_enough_disk(self):
    """Block migration fails with MigrationError when the destination
    reports less free disk than the instance needs.
    """
    instance, dest_check_data, drvr = self._mock_can_live_migrate_source(
        block_migration=True,
        disk_available_mb=0)

    # Second disk-info expectation: queried again when computing the
    # required disk for the over-commit check.
    drvr.get_instance_disk_info(instance,
                                block_device_info=None).AndReturn(
                                    '[{"virt_disk_size":2}]')

    self.mox.ReplayAll()
    self.assertRaises(exception.MigrationError,
                      drvr.check_can_live_migrate_source,
                      self.context, instance, dest_check_data)
def test_check_can_live_migrate_source_booted_from_volume(self):
    """A volume-booted instance with no local disks passes the source
    check without shared storage.
    """
    instance, dest_check_data, drvr = self._mock_can_live_migrate_source(
        is_booted_from_volume=True,
        block_device_text='[]')
    self.mox.ReplayAll()
    drvr.check_can_live_migrate_source(self.context, instance,
                                       dest_check_data)
def test_check_can_live_migrate_source_booted_from_volume_with_swap(self):
    """A volume-booted instance that still has a local swap disk needs
    shared storage, so the check fails.
    """
    instance, dest_check_data, drvr = self._mock_can_live_migrate_source(
        is_booted_from_volume=True,
        block_device_text='[{"path":"disk.swap"}]')
    self.mox.ReplayAll()
    self.assertRaises(exception.InvalidSharedStorage,
                      drvr.check_can_live_migrate_source,
                      self.context, instance, dest_check_data)
def _is_shared_block_storage_test_create_mocks(self, disks):
    """Build mock doubles for _is_shared_block_storage tests.

    Renders a domain XML document containing one <disk> element per
    entry in ``disks`` (file/block style for non-network disks,
    protocol-source style for network disks), wraps it in a fake
    virDomain whose XMLDesc() returns that XML, and returns a
    (mock_getsize, mock_lookup) pair; getsize reports 10 GiB.
    """
    # Test data
    instance_xml = ("<domain type='kvm'><name>instance-0000000a</name>"
                    "<devices>{}</devices></domain>")
    disks_xml = ''
    for dsk in disks:
        # Use '!=' here: the previous "is not 'network'" compared
        # object identity and only worked via CPython string interning
        # (and warns on modern Python).
        if dsk['type'] != 'network':
            disks_xml = ''.join([disks_xml,
                            "<disk type='{type}'>"
                            "<driver name='qemu' type='{driver}'/>"
                            "<source {source}='{source_path}'/>"
                            "<target dev='{target_dev}' bus='virtio'/>"
                            "</disk>".format(**dsk)])
        else:
            disks_xml = ''.join([disks_xml,
                            "<disk type='{type}'>"
                            "<driver name='qemu' type='{driver}'/>"
                            "<source protocol='{source_proto}'"
                            "name='{source_image}' >"
                            "<host name='hostname' port='7000'/>"
                            "<config file='/path/to/file'/>"
                            "</source>"
                            "<target dev='{target_dev}'"
                            "bus='ide'/>"
                            # Close the element so the generated XML
                            # stays well-formed (was missing before).
                            "</disk>".format(**dsk)])

    # Preparing mocks
    mock_virDomain = mock.Mock(fakelibvirt.virDomain)
    mock_virDomain.XMLDesc = mock.Mock()
    mock_virDomain.XMLDesc.return_value = (instance_xml.format(disks_xml))

    mock_lookup = mock.Mock()

    def mock_lookup_side_effect(name):
        return mock_virDomain
    mock_lookup.side_effect = mock_lookup_side_effect

    mock_getsize = mock.Mock()
    mock_getsize.return_value = "10737418240"

    return (mock_getsize, mock_lookup)
def test_is_shared_block_storage_rbd(self):
    """rbd on both sides counts as shared block storage without
    consulting per-instance disk info.
    """
    self.flags(images_type='rbd', group='libvirt')
    bdi = {'block_device_mapping': []}
    instance = objects.Instance(**self.test_instance)
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    mock_get_instance_disk_info = mock.Mock()
    # Patch the same driver instance we exercise.  The original code
    # re-created drvr inside the patch context, which left the tested
    # object unpatched and made the call-count assertion vacuous.
    with mock.patch.object(drvr, 'get_instance_disk_info',
                           mock_get_instance_disk_info):
        self.assertTrue(drvr._is_shared_block_storage(instance,
                        {'image_type': 'rbd'},
                        block_device_info=bdi))
        self.assertEqual(0, mock_get_instance_disk_info.call_count)
def test_is_shared_block_storage_lvm(self):
    """LVM-backed local storage is not shared block storage."""
    self.flags(images_type='lvm', group='libvirt')
    bdi = {'block_device_mapping': []}
    instance = objects.Instance(**self.test_instance)
    mock_get_instance_disk_info = mock.Mock()
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    with mock.patch.object(drvr, 'get_instance_disk_info',
                           mock_get_instance_disk_info):
        # BUG FIX: do not re-create drvr here — a fresh driver would not
        # be patched, making the call_count assertion meaningless.
        self.assertFalse(drvr._is_shared_block_storage(
                            instance, {'image_type': 'lvm'},
                            block_device_info=bdi))
        self.assertEqual(0, mock_get_instance_disk_info.call_count)
def test_is_shared_block_storage_qcow2(self):
    """qcow2 file-backed local storage is not shared block storage."""
    self.flags(images_type='qcow2', group='libvirt')
    bdi = {'block_device_mapping': []}
    instance = objects.Instance(**self.test_instance)
    mock_get_instance_disk_info = mock.Mock()
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    with mock.patch.object(drvr, 'get_instance_disk_info',
                           mock_get_instance_disk_info):
        # BUG FIX: do not re-create drvr here — a fresh driver would not
        # be patched, making the call_count assertion meaningless.
        self.assertFalse(drvr._is_shared_block_storage(
                            instance, {'image_type': 'qcow2'},
                            block_device_info=bdi))
        self.assertEqual(0, mock_get_instance_disk_info.call_count)
def test_is_shared_block_storage_rbd_only_source(self):
    """RBD only on the source (dest data lacks image_type) is not
    shared block storage.
    """
    self.flags(images_type='rbd', group='libvirt')
    bdi = {'block_device_mapping': []}
    instance = objects.Instance(**self.test_instance)
    mock_get_instance_disk_info = mock.Mock()
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    with mock.patch.object(drvr, 'get_instance_disk_info',
                           mock_get_instance_disk_info):
        # BUG FIX: do not re-create drvr here — a fresh driver would not
        # be patched, making the call_count assertion meaningless.
        self.assertFalse(drvr._is_shared_block_storage(
                            instance, {'is_shared_instance_path': False},
                            block_device_info=bdi))
        self.assertEqual(0, mock_get_instance_disk_info.call_count)
def test_is_shared_block_storage_rbd_only_dest(self):
    """RBD only on the destination (local images_type is the default)
    is not shared block storage.
    """
    bdi = {'block_device_mapping': []}
    instance = objects.Instance(**self.test_instance)
    mock_get_instance_disk_info = mock.Mock()
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    with mock.patch.object(drvr, 'get_instance_disk_info',
                           mock_get_instance_disk_info):
        # BUG FIX: do not re-create drvr here — a fresh driver would not
        # be patched, making the call_count assertion meaningless.
        self.assertFalse(drvr._is_shared_block_storage(
                            instance, {'image_type': 'rbd',
                                       'is_shared_instance_path': False},
                            block_device_info=bdi))
        self.assertEqual(0, mock_get_instance_disk_info.call_count)
def test_is_shared_block_storage_volume_backed(self):
    # Every disk of the guest is a mapped volume, so a volume-backed
    # instance counts as shared block storage even without a shared
    # instance path.
    disks = [{'type': 'block',
              'driver': 'raw',
              'source': 'dev',
              'source_path': '/dev/disk',
              'target_dev': 'vda'}]
    block_dev_info = {'block_device_mapping': [
        {'connection_info': 'info', 'mount_device': '/dev/vda'}]}
    instance = objects.Instance(**self.test_instance)
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    (mock_getsize,
     mock_lookup) = self._is_shared_block_storage_test_create_mocks(disks)
    dest_check_data = {'is_volume_backed': True,
                       'is_shared_instance_path': False}
    with mock.patch.object(host.Host, 'get_domain', mock_lookup):
        self.assertTrue(drvr._is_shared_block_storage(
            instance, dest_check_data, block_device_info=block_dev_info))
    mock_lookup.assert_called_once_with(instance)
def test_is_shared_block_storage_volume_backed_with_disk(self):
    """A local file disk (disk.local) alongside mapped volumes means
    the storage is not fully shared, so the check must return False.
    """
    disks = [{'type': 'block',
              'driver': 'raw',
              'source': 'dev',
              'source_path': '/dev/disk',
              'target_dev': 'vda'},
             {'type': 'file',
              'driver': 'raw',
              'source': 'file',
              'source_path': '/instance/disk.local',
              'target_dev': 'vdb'}]
    bdi = {'block_device_mapping': [
        {'connection_info': 'info', 'mount_device': '/dev/vda'}]}
    instance = objects.Instance(**self.test_instance)
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    (mock_getsize, mock_lookup) =\
        self._is_shared_block_storage_test_create_mocks(disks)
    # NOTE: contextlib.nested is deprecated (and removed in Python 3);
    # the multi-context form of ``with`` is equivalent here.
    with mock.patch.object(os.path, 'getsize', mock_getsize), \
            mock.patch.object(host.Host, 'get_domain', mock_lookup):
        self.assertFalse(drvr._is_shared_block_storage(
                            instance,
                            {'is_volume_backed': True,
                             'is_shared_instance_path': False},
                            block_device_info=bdi))
    mock_getsize.assert_called_once_with('/instance/disk.local')
    mock_lookup.assert_called_once_with(instance)
def test_is_shared_block_storage_nfs(self):
    # A shared instance path whose image backend reports the disk file
    # lives inside the instance path (e.g. NFS) is shared block
    # storage; the disk info helper must never be consulted.
    block_dev_info = {'block_device_mapping': []}
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    image_backend = mock.MagicMock()
    backend = mock.MagicMock()
    backend.is_file_in_instance_path.return_value = True
    image_backend.backend.return_value = backend
    drvr.image_backend = image_backend
    disk_info_mock = mock.Mock()
    with mock.patch.object(drvr, 'get_instance_disk_info',
                           disk_info_mock):
        result = drvr._is_shared_block_storage(
            'instance', {'is_shared_instance_path': True},
            block_device_info=block_dev_info)
        self.assertTrue(result)
        self.assertEqual(0, disk_info_mock.call_count)
def test_live_migration_update_graphics_xml(self):
    # Verifies _live_migration_operation rewrites the VNC/SPICE listen
    # addresses in the guest XML to the destination addresses from
    # pre_live_migration_result before calling migrateToURI2, and that
    # a libvirtError raised by migrateToURI2 propagates to the caller.
    self.compute = importutils.import_object(CONF.compute_manager)
    instance_dict = dict(self.test_instance)
    instance_dict.update({'host': 'fake',
                          'power_state': power_state.RUNNING,
                          'vm_state': vm_states.ACTIVE})
    instance_ref = objects.Instance(**instance_dict)

    xml_tmpl = ("<domain type='kvm'>"
                "<devices>"
                "<graphics type='vnc' listen='{vnc}'>"
                "<listen address='{vnc}'/>"
                "</graphics>"
                "<graphics type='spice' listen='{spice}'>"
                "<listen address='{spice}'/>"
                "</graphics>"
                "</devices>"
                "</domain>")

    initial_xml = xml_tmpl.format(vnc='1.2.3.4',
                                  spice='5.6.7.8')

    # Round-trip through etree so the expected string matches the
    # serialization the driver produces.
    target_xml = xml_tmpl.format(vnc='10.0.0.1',
                                 spice='10.0.0.2')
    target_xml = etree.tostring(etree.fromstring(target_xml))

    # Preparing mocks: mox verifies XMLDesc is queried with the
    # MIGRATABLE flag and migrateToURI2 receives the rewritten XML.
    vdmock = self.mox.CreateMock(fakelibvirt.virDomain)
    self.mox.StubOutWithMock(vdmock, "migrateToURI2")
    _bandwidth = CONF.libvirt.live_migration_bandwidth
    vdmock.XMLDesc(flags=fakelibvirt.VIR_DOMAIN_XML_MIGRATABLE).AndReturn(
            initial_xml)
    vdmock.migrateToURI2(CONF.libvirt.live_migration_uri % 'dest',
                         None,
                         target_xml,
                         mox.IgnoreArg(),
                         None,
                         _bandwidth).AndRaise(
                             fakelibvirt.libvirtError("ERR"))

    # start test
    migrate_data = {'pre_live_migration_result':
            {'graphics_listen_addrs':
                {'vnc': '10.0.0.1', 'spice': '10.0.0.2'}}}
    self.mox.ReplayAll()
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.assertRaises(fakelibvirt.libvirtError,
                      drvr._live_migration_operation,
                      self.context, instance_ref, 'dest',
                      False, migrate_data, vdmock)
def test_live_migration_update_volume_xml(self):
    # Verifies _live_migration_operation forwards the 'volume' mapping
    # from pre_live_migration_result to _update_xml so iSCSI device
    # paths can be rewritten for the destination host.
    self.compute = importutils.import_object(CONF.compute_manager)
    instance_dict = dict(self.test_instance)
    instance_dict.update({'host': 'fake',
                          'power_state': power_state.RUNNING,
                          'vm_state': vm_states.ACTIVE})
    instance_ref = objects.Instance(**instance_dict)
    target_xml = self.device_xml_tmpl.format(
        device_path='/dev/disk/by-path/'
                    'ip-1.2.3.4:3260-iqn.'
                    'cde.67890.opst-lun-Z')

    # start test
    migrate_data = {'pre_live_migration_result':
        {'volume': {u'58a84f6d-3f0c-4e19-a0af-eb657b790657':
            {'connection_info': {u'driver_volume_type': u'iscsi',
                'serial': u'58a84f6d-3f0c-4e19-a0af-eb657b790657',
                u'data': {u'access_mode': u'rw', u'target_discovered': False,
                    u'target_iqn': u'ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z',
                    u'volume_id': u'58a84f6d-3f0c-4e19-a0af-eb657b790657',
                    'device_path':
                    u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z'}},
             'disk_info': {'bus': u'virtio', 'type': u'disk', 'dev': u'vdb'}}}},
        'graphics_listen_addrs': {}}

    # Extract the volume mapping the same way the driver does, so the
    # assert below compares against the exact object passed through.
    pre_live_migrate_data = ((migrate_data or {}).
                             get('pre_live_migration_result', {}))
    volume = pre_live_migrate_data.get('volume')

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    test_mock = mock.MagicMock()

    with mock.patch.object(libvirt_driver.LibvirtDriver, 'get_info') as \
            mget_info,\
            mock.patch.object(drvr._host, 'get_domain') as mget_domain,\
            mock.patch.object(fakelibvirt.virDomain, 'migrateToURI2'),\
            mock.patch.object(drvr, '_update_xml') as mupdate:

        # InstanceNotFound from get_info ends the operation cleanly.
        mget_info.side_effect = exception.InstanceNotFound(
            instance_id='foo')
        mget_domain.return_value = test_mock
        test_mock.XMLDesc.return_value = target_xml
        self.assertFalse(drvr._live_migration_operation(
                         self.context, instance_ref, 'dest', False,
                         migrate_data, test_mock))
        mupdate.assert_called_once_with(target_xml, volume, None, None)
def test_update_volume_xml(self):
    # Verifies _update_volume_xml replaces a disk's source device path
    # (old iSCSI LUN) with the path from the supplied connection_info
    # (new iSCSI LUN), matching disks by serial.
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

    initial_xml = self.device_xml_tmpl.format(
        device_path='/dev/disk/by-path/'
                    'ip-1.2.3.4:3260-iqn.'
                    'abc.12345.opst-lun-X')
    target_xml = self.device_xml_tmpl.format(
        device_path='/dev/disk/by-path/'
                    'ip-1.2.3.4:3260-iqn.'
                    'cde.67890.opst-lun-Z')
    target_xml = etree.tostring(etree.fromstring(target_xml))
    serial = "58a84f6d-3f0c-4e19-a0af-eb657b790657"

    # Per-serial volume data as delivered by pre_live_migration.
    volume_xml = {'volume': {}}
    volume_xml['volume'][serial] = {'connection_info': {}, 'disk_info': {}}
    volume_xml['volume'][serial]['connection_info'] = \
        {u'driver_volume_type': u'iscsi',
         'serial': u'58a84f6d-3f0c-4e19-a0af-eb657b790657',
         u'data': {u'access_mode': u'rw', u'target_discovered': False,
                   u'target_iqn': u'ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z',
                   u'volume_id': u'58a84f6d-3f0c-4e19-a0af-eb657b790657',
                   'device_path':
                   u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z'}}
    volume_xml['volume'][serial]['disk_info'] = {'bus': u'virtio',
                                                 'type': u'disk',
                                                 'dev': u'vdb'}

    connection_info = volume_xml['volume'][serial]['connection_info']
    disk_info = volume_xml['volume'][serial]['disk_info']

    # Canned disk config returned by the stubbed _get_volume_config.
    conf = vconfig.LibvirtConfigGuestDisk()
    conf.source_device = disk_info['type']
    conf.driver_name = "qemu"
    conf.driver_format = "raw"
    conf.driver_cache = "none"
    conf.target_dev = disk_info['dev']
    conf.target_bus = disk_info['bus']
    conf.serial = connection_info.get('serial')
    conf.source_type = "block"
    conf.source_path = connection_info['data'].get('device_path')

    with mock.patch.object(drvr, '_get_volume_config',
                           return_value=conf):
        # remove_blank_text so the serialized comparison is not
        # sensitive to whitespace in the templates.
        parser = etree.XMLParser(remove_blank_text=True)
        xml_doc = etree.fromstring(initial_xml, parser)
        config = drvr._update_volume_xml(xml_doc,
                                         volume_xml['volume'])
        xml_doc = etree.fromstring(target_xml, parser)
        self.assertEqual(etree.tostring(xml_doc), etree.tostring(config))
def test_update_volume_xml_no_serial(self):
    # A disk whose <serial> element is empty cannot be matched to the
    # volume data by serial, so _update_volume_xml must leave the
    # domain XML unchanged (target equals initial).
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

    xml_tmpl = """
    <domain type='kvm'>
      <devices>
        <disk type='block' device='disk'>
          <driver name='qemu' type='raw' cache='none'/>
          <source dev='{device_path}'/>
          <target bus='virtio' dev='vdb'/>
          <serial></serial>
          <address type='pci' domain='0x0' bus='0x0' slot='0x04' \
    function='0x0'/>
        </disk>
      </devices>
    </domain>
    """

    initial_xml = xml_tmpl.format(device_path='/dev/disk/by-path/'
                                  'ip-1.2.3.4:3260-iqn.'
                                  'abc.12345.opst-lun-X')
    target_xml = xml_tmpl.format(device_path='/dev/disk/by-path/'
                                 'ip-1.2.3.4:3260-iqn.'
                                 'abc.12345.opst-lun-X')
    target_xml = etree.tostring(etree.fromstring(target_xml))
    serial = "58a84f6d-3f0c-4e19-a0af-eb657b790657"
    volume_xml = {'volume': {}}
    volume_xml['volume'][serial] = {'connection_info': {}, 'disk_info': {}}
    volume_xml['volume'][serial]['connection_info'] = \
        {u'driver_volume_type': u'iscsi',
         'serial': u'58a84f6d-3f0c-4e19-a0af-eb657b790657',
         u'data': {u'access_mode': u'rw', u'target_discovered': False,
                   u'target_iqn': u'ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z',
                   u'volume_id': u'58a84f6d-3f0c-4e19-a0af-eb657b790657',
                   'device_path':
                   u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z'}}
    volume_xml['volume'][serial]['disk_info'] = {'bus': u'virtio',
                                                 'type': u'disk',
                                                 'dev': u'vdb'}

    connection_info = volume_xml['volume'][serial]['connection_info']
    disk_info = volume_xml['volume'][serial]['disk_info']

    # Canned disk config; should never be applied since the disk has
    # no serial to match against.
    conf = vconfig.LibvirtConfigGuestDisk()
    conf.source_device = disk_info['type']
    conf.driver_name = "qemu"
    conf.driver_format = "raw"
    conf.driver_cache = "none"
    conf.target_dev = disk_info['dev']
    conf.target_bus = disk_info['bus']
    conf.serial = connection_info.get('serial')
    conf.source_type = "block"
    conf.source_path = connection_info['data'].get('device_path')

    with mock.patch.object(drvr, '_get_volume_config',
                           return_value=conf):
        xml_doc = etree.fromstring(initial_xml)
        config = drvr._update_volume_xml(xml_doc,
                                         volume_xml['volume'])
        self.assertEqual(target_xml, etree.tostring(config))
def test_update_volume_xml_no_connection_info(self):
    # If the per-serial volume entry carries no connection_info or
    # disk_info keys, _update_volume_xml must leave the domain XML
    # untouched.
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    device_path = ('/dev/disk/by-path/'
                   'ip-1.2.3.4:3260-iqn.'
                   'abc.12345.opst-lun-X')
    initial_xml = self.device_xml_tmpl.format(device_path=device_path)
    expected_xml = etree.tostring(etree.fromstring(
        self.device_xml_tmpl.format(device_path=device_path)))
    serial = "58a84f6d-3f0c-4e19-a0af-eb657b790657"
    volume_xml = {'volume': {serial: {'info1': {}, 'info2': {}}}}
    disk_conf = vconfig.LibvirtConfigGuestDisk()
    with mock.patch.object(drvr, '_get_volume_config',
                           return_value=disk_conf):
        updated = drvr._update_volume_xml(
            etree.fromstring(initial_xml), volume_xml['volume'])
        self.assertEqual(expected_xml, etree.tostring(updated))
@mock.patch.object(fakelibvirt.virDomain, "migrateToURI2")
@mock.patch.object(fakelibvirt.virDomain, "XMLDesc")
def test_live_migration_update_serial_console_xml(self, mock_xml,
                                                  mock_migrate):
    # Verifies the serial console <source> host address in the guest
    # XML is rewritten to serial_listen_addr from migrate_data before
    # migrateToURI2 is invoked, and that a libvirtError propagates.
    self.compute = importutils.import_object(CONF.compute_manager)
    instance_ref = self.test_instance

    xml_tmpl = ("<domain type='kvm'>"
                "<devices>"
                "<console type='tcp'>"
                "<source mode='bind' host='{addr}' service='10000'/>"
                "</console>"
                "</devices>"
                "</domain>")

    initial_xml = xml_tmpl.format(addr='9.0.0.1')

    target_xml = xml_tmpl.format(addr='9.0.0.12')
    target_xml = etree.tostring(etree.fromstring(target_xml))

    # Preparing mocks
    mock_xml.return_value = initial_xml
    mock_migrate.side_effect = fakelibvirt.libvirtError("ERR")

    # start test
    bandwidth = CONF.libvirt.live_migration_bandwidth
    migrate_data = {'pre_live_migration_result':
            {'graphics_listen_addrs':
                {'vnc': '10.0.0.1', 'spice': '10.0.0.2'},
             'serial_listen_addr': '9.0.0.12'}}
    dom = fakelibvirt.virDomain
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.assertRaises(fakelibvirt.libvirtError,
                      drvr._live_migration_operation,
                      self.context, instance_ref, 'dest',
                      False, migrate_data, dom)
    mock_xml.assert_called_once_with(
            flags=fakelibvirt.VIR_DOMAIN_XML_MIGRATABLE)
    mock_migrate.assert_called_once_with(
            CONF.libvirt.live_migration_uri % 'dest',
            None, target_xml, mock.ANY, None, bandwidth)
@mock.patch.object(fakelibvirt, 'VIR_DOMAIN_XML_MIGRATABLE', None,
                   create=True)
def test_live_migration_fails_with_serial_console_without_migratable(self):
    # With serial console enabled but no VIR_DOMAIN_XML_MIGRATABLE
    # support in libvirt, live migration must refuse to run.
    self.compute = importutils.import_object(CONF.compute_manager)
    instance_ref = self.test_instance
    CONF.set_override("enabled", True, "serial_console")
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.assertRaises(exception.MigrationError,
                      drvr._live_migration_operation,
                      self.context, instance_ref, 'dest',
                      False, None, fakelibvirt.virDomain)
@mock.patch.object(fakelibvirt, 'VIR_DOMAIN_XML_MIGRATABLE', None,
                   create=True)
def test_live_migration_uses_migrateToURI_without_migratable_flag(self):
    # Without VIR_DOMAIN_XML_MIGRATABLE the driver must fall back to
    # the plain migrateToURI API (no XML rewriting); graphics listen
    # addresses of 0.0.0.0 make that fallback acceptable.
    self.compute = importutils.import_object(CONF.compute_manager)
    instance_dict = dict(self.test_instance)
    instance_dict.update({'host': 'fake',
                          'power_state': power_state.RUNNING,
                          'vm_state': vm_states.ACTIVE})
    instance_ref = objects.Instance(**instance_dict)

    # Preparing mocks
    vdmock = self.mox.CreateMock(fakelibvirt.virDomain)
    self.mox.StubOutWithMock(vdmock, "migrateToURI")
    _bandwidth = CONF.libvirt.live_migration_bandwidth
    vdmock.migrateToURI(CONF.libvirt.live_migration_uri % 'dest',
                        mox.IgnoreArg(),
                        None,
                        _bandwidth).AndRaise(
                            fakelibvirt.libvirtError("ERR"))

    # start test
    migrate_data = {'pre_live_migration_result':
            {'graphics_listen_addrs':
                {'vnc': '0.0.0.0', 'spice': '0.0.0.0'}}}
    self.mox.ReplayAll()
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.assertRaises(fakelibvirt.libvirtError,
                      drvr._live_migration_operation,
                      self.context, instance_ref, 'dest',
                      False, migrate_data, vdmock)
def test_live_migration_uses_migrateToURI_without_dest_listen_addrs(self):
    # With no pre_live_migration_result at all (empty migrate_data)
    # the driver uses plain migrateToURI; the libvirtError it raises
    # must propagate.
    self.compute = importutils.import_object(CONF.compute_manager)
    instance_dict = dict(self.test_instance)
    instance_dict.update({'host': 'fake',
                          'power_state': power_state.RUNNING,
                          'vm_state': vm_states.ACTIVE})
    instance_ref = objects.Instance(**instance_dict)

    # Preparing mocks
    vdmock = self.mox.CreateMock(fakelibvirt.virDomain)
    self.mox.StubOutWithMock(vdmock, "migrateToURI")
    _bandwidth = CONF.libvirt.live_migration_bandwidth
    vdmock.migrateToURI(CONF.libvirt.live_migration_uri % 'dest',
                        mox.IgnoreArg(),
                        None,
                        _bandwidth).AndRaise(
                            fakelibvirt.libvirtError("ERR"))

    # start test
    migrate_data = {}
    self.mox.ReplayAll()
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.assertRaises(fakelibvirt.libvirtError,
                      drvr._live_migration_operation,
                      self.context, instance_ref, 'dest',
                      False, migrate_data, vdmock)
@mock.patch.object(fakelibvirt, 'VIR_DOMAIN_XML_MIGRATABLE', None,
                   create=True)
def test_live_migration_fails_without_migratable_flag_or_0_addr(self):
    # Without VIR_DOMAIN_XML_MIGRATABLE the XML cannot be rewritten,
    # and graphics bound to a specific address (1.2.3.4) cannot be
    # carried over — migration must fail with MigrationError before
    # any libvirt call (migrateToURI is stubbed but never expected).
    self.flags(enabled=True, vncserver_listen='1.2.3.4', group='vnc')
    self.compute = importutils.import_object(CONF.compute_manager)
    instance_dict = dict(self.test_instance)
    instance_dict.update({'host': 'fake',
                          'power_state': power_state.RUNNING,
                          'vm_state': vm_states.ACTIVE})
    instance_ref = objects.Instance(**instance_dict)

    # Preparing mocks
    vdmock = self.mox.CreateMock(fakelibvirt.virDomain)
    self.mox.StubOutWithMock(vdmock, "migrateToURI")

    # start test
    migrate_data = {'pre_live_migration_result':
            {'graphics_listen_addrs':
                {'vnc': '1.2.3.4', 'spice': '1.2.3.4'}}}
    self.mox.ReplayAll()
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.assertRaises(exception.MigrationError,
                      drvr._live_migration_operation,
                      self.context, instance_ref, 'dest',
                      False, migrate_data, vdmock)
def test_live_migration_raises_exception(self):
    # Confirms recover method is called when exceptions are raised.
    # Also checks the instance's vm_state/power_state are untouched
    # after the failure.

    # Preparing data
    self.compute = importutils.import_object(CONF.compute_manager)
    instance_dict = dict(self.test_instance)
    instance_dict.update({'host': 'fake',
                          'power_state': power_state.RUNNING,
                          'vm_state': vm_states.ACTIVE})
    instance_ref = objects.Instance(**instance_dict)

    # Preparing mocks
    vdmock = self.mox.CreateMock(fakelibvirt.virDomain)
    self.mox.StubOutWithMock(vdmock, "migrateToURI2")
    _bandwidth = CONF.libvirt.live_migration_bandwidth
    # Expect the API matching the fakelibvirt capabilities: plain
    # migrateToURI without the MIGRATABLE flag, migrateToURI2 with it.
    if getattr(fakelibvirt, 'VIR_DOMAIN_XML_MIGRATABLE', None) is None:
        vdmock.migrateToURI(CONF.libvirt.live_migration_uri % 'dest',
                            mox.IgnoreArg(),
                            None,
                            _bandwidth).AndRaise(
                                fakelibvirt.libvirtError('ERR'))
    else:
        vdmock.XMLDesc(flags=fakelibvirt.VIR_DOMAIN_XML_MIGRATABLE
                       ).AndReturn(FakeVirtDomain().XMLDesc(flags=0))
        vdmock.migrateToURI2(CONF.libvirt.live_migration_uri % 'dest',
                             None,
                             mox.IgnoreArg(),
                             mox.IgnoreArg(),
                             None,
                             _bandwidth).AndRaise(
                                 fakelibvirt.libvirtError('ERR'))

    # start test
    migrate_data = {'pre_live_migration_result':
            {'graphics_listen_addrs':
                {'vnc': '127.0.0.1', 'spice': '127.0.0.1'}}}
    self.mox.ReplayAll()
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.assertRaises(fakelibvirt.libvirtError,
                      drvr._live_migration_operation,
                      self.context, instance_ref, 'dest',
                      False, migrate_data, vdmock)

    self.assertEqual(vm_states.ACTIVE, instance_ref.vm_state)
    self.assertEqual(power_state.RUNNING, instance_ref.power_state)
def test_live_migration_raises_unsupported_config_exception(self):
    # Tests that when migrateToURI2 fails with VIR_ERR_CONFIG_UNSUPPORTED,
    # migrateToURI is used instead.

    # Preparing data
    instance_ref = objects.Instance(**self.test_instance)

    # Preparing mocks
    vdmock = self.mox.CreateMock(fakelibvirt.virDomain)
    self.mox.StubOutWithMock(vdmock, 'migrateToURI2')
    self.mox.StubOutWithMock(vdmock, 'migrateToURI')
    _bandwidth = CONF.libvirt.live_migration_bandwidth
    vdmock.XMLDesc(flags=fakelibvirt.VIR_DOMAIN_XML_MIGRATABLE).AndReturn(
        FakeVirtDomain().XMLDesc(flags=0))
    unsupported_config_error = fakelibvirt.libvirtError('ERR')
    unsupported_config_error.err = (
        fakelibvirt.VIR_ERR_CONFIG_UNSUPPORTED,)
    # This is the first error we hit but since the error code is
    # VIR_ERR_CONFIG_UNSUPPORTED we'll try migrateToURI.
    vdmock.migrateToURI2(CONF.libvirt.live_migration_uri % 'dest', None,
                         mox.IgnoreArg(), mox.IgnoreArg(), None,
                         _bandwidth).AndRaise(unsupported_config_error)
    # This is the second and final error that will actually kill the run,
    # we use TestingException to make sure it's not the same libvirtError
    # above.
    vdmock.migrateToURI(CONF.libvirt.live_migration_uri % 'dest',
                        mox.IgnoreArg(), None,
                        _bandwidth).AndRaise(test.TestingException('oops'))

    graphics_listen_addrs = {'vnc': '0.0.0.0', 'spice': '127.0.0.1'}
    migrate_data = {'pre_live_migration_result':
            {'graphics_listen_addrs': graphics_listen_addrs}}

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

    # The graphics address check must run exactly once before the
    # migration attempts.
    self.mox.StubOutWithMock(
        drvr, '_check_graphics_addresses_can_live_migrate')
    drvr._check_graphics_addresses_can_live_migrate(graphics_listen_addrs)
    self.mox.ReplayAll()

    # start test
    self.assertRaises(test.TestingException,
                      drvr._live_migration_operation,
                      self.context, instance_ref, 'dest',
                      False, migrate_data, vdmock)
@mock.patch('shutil.rmtree')
@mock.patch('os.path.exists', return_value=True)
@mock.patch('nova.virt.libvirt.utils.get_instance_path_at_destination')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.destroy')
def test_rollback_live_migration_at_dest_not_shared(self, mock_destroy,
                                                    mock_get_instance_path,
                                                    mock_exist,
                                                    mock_shutil
                                                    ):
    """With a non-shared instance path the rollback removes the
    destination instance directory even though destroy() raised.
    """
    # destroy method may raise InstanceTerminationFailure or
    # InstancePowerOffFailure, here use their base class Invalid.
    mock_destroy.side_effect = exception.Invalid(reason='just test')
    # BUG FIX: the joined component must be relative; a leading slash
    # ('/fake_instance_uuid') makes os.path.join discard
    # cfg.CONF.instances_path entirely.
    fake_instance_path = os.path.join(cfg.CONF.instances_path,
                                      'fake_instance_uuid')
    mock_get_instance_path.return_value = fake_instance_path
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

    migrate_data = {'is_shared_instance_path': False}
    self.assertRaises(exception.Invalid,
                      drvr.rollback_live_migration_at_destination,
                      "context", "instance", [], None, True, migrate_data)
    mock_exist.assert_called_once_with(fake_instance_path)
    mock_shutil.assert_called_once_with(fake_instance_path)
@mock.patch('shutil.rmtree')
@mock.patch('os.path.exists')
@mock.patch('nova.virt.libvirt.utils.get_instance_path_at_destination')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.destroy')
def test_rollback_live_migration_at_dest_shared(self, mock_destroy,
                                                mock_get_instance_path,
                                                mock_exist,
                                                mock_shutil
                                                ):
    # With a shared instance path only destroy() runs; the instance
    # directory is never looked up, checked, or removed.
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    migrate_data = {'is_shared_instance_path': True}
    drvr.rollback_live_migration_at_destination("context", "instance", [],
                                                None, True, migrate_data)
    mock_destroy.assert_called_once_with("context", "instance", [],
                                         None, True, migrate_data)
    for untouched in (mock_get_instance_path, mock_exist, mock_shutil):
        self.assertFalse(untouched.called)
@mock.patch.object(fakelibvirt.Domain, "XMLDesc")
def test_live_migration_copy_disk_paths(self, mock_xml):
    # Only writable, local, non-shared disks are candidates for block
    # copy: the shareable disk, the readonly disk and the network disk
    # must all be excluded from the returned paths.
    xml = """
    <domain>
      <name>dummy</name>
      <uuid>d4e13113-918e-42fe-9fc9-861693ffd432</uuid>
      <devices>
        <disk type="file">
           <source file="/var/lib/nova/instance/123/disk.root"/>
        </disk>
        <disk type="file">
           <source file="/var/lib/nova/instance/123/disk.shared"/>
           <shareable/>
        </disk>
        <disk type="file">
           <source file="/var/lib/nova/instance/123/disk.config"/>
           <readonly/>
        </disk>
        <disk type="block">
           <source dev="/dev/mapper/somevol"/>
        </disk>
        <disk type="network">
           <source protocol="https" name="url_path">
             <host name="hostname" port="443"/>
           </source>
        </disk>
      </devices>
    </domain>"""
    mock_xml.return_value = xml

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    guest = libvirt_guest.Guest(
        fakelibvirt.Domain(drvr._get_connection(), xml, False))

    expected = ["/var/lib/nova/instance/123/disk.root",
                "/dev/mapper/somevol"]
    self.assertEqual(expected,
                     drvr._live_migration_copy_disk_paths(guest))
@mock.patch.object(libvirt_driver.LibvirtDriver,
                   "_live_migration_copy_disk_paths")
def test_live_migration_data_gb_plain(self, mock_paths):
    # Without block migration only guest RAM (2 GB for the test
    # instance) is counted and disk paths are never collected.
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    guest = libvirt_guest.Guest(
        fakelibvirt.Domain(drvr._get_connection(), "<domain/>", False))
    instance = objects.Instance(**self.test_instance)
    self.assertEqual(
        2, drvr._live_migration_data_gb(instance, guest, False))
    self.assertEqual(0, mock_paths.call_count)
@mock.patch.object(libvirt_driver.LibvirtDriver,
                   "_live_migration_copy_disk_paths")
def test_live_migration_data_gb_block(self, mock_paths):
    # With block migration the total is guest RAM plus each copied
    # disk's size, with sizes rounded up to whole GB.
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    dom = fakelibvirt.Domain(drvr._get_connection(), "<domain/>", False)
    guest = libvirt_guest.Guest(dom)
    instance = objects.Instance(**self.test_instance)

    def fake_stat(path):
        # Minimal stand-in for an os.stat result exposing only st_size.
        class StatResult(object):
            def __init__(self, size):
                self._size = size

            @property
            def st_size(self):
                return self._size

        if path == "/var/lib/nova/instance/123/disk.root":
            return StatResult(10 * units.Gi)
        elif path == "/dev/mapper/somevol":
            return StatResult(1.5 * units.Gi)
        else:
            raise Exception("Should not be reached")

    mock_paths.return_value = ["/var/lib/nova/instance/123/disk.root",
                               "/dev/mapper/somevol"]
    with mock.patch.object(os, "stat") as mock_stat:
        mock_stat.side_effect = fake_stat
        data_gb = drvr._live_migration_data_gb(instance, guest, True)
        # Expecting 2 GB for RAM, plus 10 GB for disk.root
        # and 1.5 GB rounded to 2 GB for somevol, so 14 GB
        self.assertEqual(14, data_gb)
        self.assertEqual(1, mock_paths.call_count)
# Expected outcomes for _test_live_migration_monitoring:
EXPECT_SUCCESS = 1  # post method called, recover/abort not called
EXPECT_FAILURE = 2  # recover method called, abortJob not called
EXPECT_ABORT = 3    # recover method called after abortJob
@mock.patch.object(time, "time")
@mock.patch.object(time, "sleep",
                   side_effect=lambda x: eventlet.sleep(0))
@mock.patch.object(host.DomainJobInfo, "for_domain")
@mock.patch.object(objects.Instance, "save")
@mock.patch.object(fakelibvirt.Connection, "_mark_running")
@mock.patch.object(fakelibvirt.virDomain, "abortJob")
def _test_live_migration_monitoring(self,
                                    job_info_records,
                                    time_records,
                                    expect_result,
                                    mock_abort,
                                    mock_running,
                                    mock_save,
                                    mock_job_info,
                                    mock_sleep,
                                    mock_time):
    # Drives _live_migration_monitor through a scripted sequence of
    # job-info records, then asserts the post/recover/abort callbacks
    # match ``expect_result`` (one of the EXPECT_* constants).
    #
    # ``job_info_records`` mixes DomainJobInfo objects (returned to
    # the monitor one per poll) with the control strings
    # "thread-finish" (signals the migration thread's finish_event)
    # and "domain-stop" (destroys the fake domain).
    # ``time_records`` supplies successive time.time() values; when
    # exhausted, a fixed far-future timestamp is returned.
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(**self.test_instance)
    dom = fakelibvirt.Domain(drvr._get_connection(), "<domain/>", True)
    guest = libvirt_guest.Guest(dom)
    finish_event = eventlet.event.Event()

    def fake_job_info(hostself):
        while True:
            self.assertTrue(len(job_info_records) > 0)
            rec = job_info_records.pop(0)

            if type(rec) == str:
                if rec == "thread-finish":
                    finish_event.send()
                elif rec == "domain-stop":
                    dom.destroy()
            else:
                # Consume one fake timestamp per job-info poll so the
                # monitor's time.time() advances in step with records.
                if len(time_records) > 0:
                    time_records.pop(0)
                    return rec
                return rec

    def fake_time():
        # Return the current scripted timestamp without consuming it;
        # fake_job_info is what advances the clock.
        if len(time_records) > 0:
            return time_records[0]
        else:
            return int(
                datetime.datetime(2001, 1, 20, 20, 1, 0)
                .strftime('%s'))

    mock_job_info.side_effect = fake_job_info
    mock_time.side_effect = fake_time

    dest = mock.sentinel.migrate_dest
    migrate_data = mock.sentinel.migrate_data

    fake_post_method = mock.MagicMock()
    fake_recover_method = mock.MagicMock()
    drvr._live_migration_monitor(self.context, instance,
                                 guest, dest,
                                 fake_post_method,
                                 fake_recover_method,
                                 False,
                                 migrate_data,
                                 dom,
                                 finish_event)

    if expect_result == self.EXPECT_SUCCESS:
        self.assertFalse(fake_recover_method.called,
                         'Recover method called when success expected')
        self.assertFalse(mock_abort.called,
                         'abortJob not called when success expected')
        fake_post_method.assert_called_once_with(
            self.context, instance, dest, False, migrate_data)
    else:
        if expect_result == self.EXPECT_ABORT:
            self.assertTrue(mock_abort.called,
                            'abortJob called when abort expected')
        else:
            self.assertFalse(mock_abort.called,
                             'abortJob not called when failure expected')
        self.assertFalse(fake_post_method.called,
                         'Post method called when success not expected')
        fake_recover_method.assert_called_once_with(
            self.context, instance, dest, False, migrate_data)
def test_live_migration_monitor_success(self):
    # Full happy path: NONE, a run of UNBOUNDED polls, thread finish,
    # domain stop, then COMPLETED -> post method invoked.
    records = [host.DomainJobInfo(type=fakelibvirt.VIR_DOMAIN_JOB_NONE)]
    records += [
        host.DomainJobInfo(type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED)
        for _ in range(3)]
    records += [
        "thread-finish",
        "domain-stop",
        host.DomainJobInfo(type=fakelibvirt.VIR_DOMAIN_JOB_COMPLETED),
    ]
    self._test_live_migration_monitoring(records, [],
                                         self.EXPECT_SUCCESS)
def test_live_migration_monitor_success_race(self):
    # Happy path where polling is too slow to observe COMPLETED: the
    # final poll sees JOB_NONE after the domain stopped, which still
    # counts as success.
    records = [host.DomainJobInfo(type=fakelibvirt.VIR_DOMAIN_JOB_NONE)]
    records += [
        host.DomainJobInfo(type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED)
        for _ in range(3)]
    records += [
        "thread-finish",
        "domain-stop",
        host.DomainJobInfo(type=fakelibvirt.VIR_DOMAIN_JOB_NONE),
    ]
    self._test_live_migration_monitoring(records, [],
                                         self.EXPECT_SUCCESS)
def test_live_migration_monitor_failed(self):
    # Failure path where the final poll observes JOB_FAILED; the
    # domain keeps running, so no "domain-stop" record.
    records = [host.DomainJobInfo(type=fakelibvirt.VIR_DOMAIN_JOB_NONE)]
    records += [
        host.DomainJobInfo(type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED)
        for _ in range(3)]
    records += [
        "thread-finish",
        host.DomainJobInfo(type=fakelibvirt.VIR_DOMAIN_JOB_FAILED),
    ]
    self._test_live_migration_monitoring(records, [],
                                         self.EXPECT_FAILURE)
def test_live_migration_monitor_failed_race(self):
    # Failure path where polling misses JOB_FAILED: the last poll sees
    # JOB_NONE while the domain is still running, which must be
    # treated as a failure.
    records = [host.DomainJobInfo(type=fakelibvirt.VIR_DOMAIN_JOB_NONE)]
    records += [
        host.DomainJobInfo(type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED)
        for _ in range(3)]
    records += [
        "thread-finish",
        host.DomainJobInfo(type=fakelibvirt.VIR_DOMAIN_JOB_NONE),
    ]
    self._test_live_migration_monitoring(records, [],
                                         self.EXPECT_FAILURE)
def test_live_migration_monitor_cancelled(self):
    # Cancelled migration: the domain stops and the final poll sees
    # JOB_CANCELLED, which is reported as a failure (recover called).
    records = [host.DomainJobInfo(type=fakelibvirt.VIR_DOMAIN_JOB_NONE)]
    records += [
        host.DomainJobInfo(type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED)
        for _ in range(3)]
    records += [
        "thread-finish",
        "domain-stop",
        host.DomainJobInfo(type=fakelibvirt.VIR_DOMAIN_JOB_CANCELLED),
    ]
    self._test_live_migration_monitoring(records, [],
                                         self.EXPECT_FAILURE)
@mock.patch.object(fakelibvirt.virDomain, "migrateSetMaxDowntime")
@mock.patch.object(libvirt_driver.LibvirtDriver,
                   "_migration_downtime_steps")
def test_live_migration_monitor_downtime(self, mock_downtime_steps,
                                         mock_set_downtime):
    # Verifies the monitor applies downtime steps as scripted time
    # passes; both timeouts are set huge so they cannot interfere.
    self.flags(live_migration_completion_timeout=1000000,
               live_migration_progress_timeout=1000000,
               group='libvirt')
    # We've setup 4 fake downtime steps - first value is the
    # time delay, second is the downtime value
    downtime_steps = [
        (90, 10),
        (180, 50),
        (270, 200),
        (500, 300),
    ]
    mock_downtime_steps.return_value = downtime_steps

    # Each one of these fake times is used for time.time()
    # when a new domain_info_records entry is consumed.
    # Times are chosen so that only the first 3 downtime
    # steps are needed.
    fake_times = [0, 1, 30, 95, 150, 200, 300]

    # A normal sequence where see all the normal job states
    domain_info_records = [
        host.DomainJobInfo(
            type=fakelibvirt.VIR_DOMAIN_JOB_NONE),
        host.DomainJobInfo(
            type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
        host.DomainJobInfo(
            type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
        host.DomainJobInfo(
            type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
        host.DomainJobInfo(
            type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
        host.DomainJobInfo(
            type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
        "thread-finish",
        "domain-stop",
        host.DomainJobInfo(
            type=fakelibvirt.VIR_DOMAIN_JOB_COMPLETED),
    ]

    self._test_live_migration_monitoring(domain_info_records,
                                         fake_times, self.EXPECT_SUCCESS)

    # Only the first three steps' downtime values fit in fake_times.
    mock_set_downtime.assert_has_calls([mock.call(10),
                                        mock.call(50),
                                        mock.call(200)])
def test_live_migration_monitor_completion(self):
    """The monitor aborts the migration once the (short) completion
    timeout is exceeded by the fake timeline.
    """
    self.flags(live_migration_completion_timeout=100,
               live_migration_progress_timeout=1000000,
               group='libvirt')
    # Each one of these fake times is used for time.time()
    # when a new domain_info_records entry is consumed.
    fake_times = [0, 40, 80, 120, 160, 200, 240, 280, 320]

    # A sequence that overruns the completion timeout: the job stays
    # UNBOUNDED long enough that the monitor cancels it, so the final
    # job state seen is CANCELLED.
    domain_info_records = [
        host.DomainJobInfo(
            type=fakelibvirt.VIR_DOMAIN_JOB_NONE),
        host.DomainJobInfo(
            type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
        host.DomainJobInfo(
            type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
        host.DomainJobInfo(
            type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
        host.DomainJobInfo(
            type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
        host.DomainJobInfo(
            type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
        "thread-finish",
        "domain-stop",
        host.DomainJobInfo(
            type=fakelibvirt.VIR_DOMAIN_JOB_CANCELLED),
    ]

    self._test_live_migration_monitoring(domain_info_records,
                                         fake_times, self.EXPECT_ABORT)
def test_live_migration_monitor_progress(self):
    """The monitor aborts the migration once the (short) progress
    timeout is exceeded by the fake timeline.
    """
    self.flags(live_migration_completion_timeout=1000000,
               live_migration_progress_timeout=150,
               group='libvirt')
    # Each one of these fake times is used for time.time()
    # when a new domain_info_records entry is consumed.
    fake_times = [0, 40, 80, 120, 160, 200, 240, 280, 320]

    # A sequence that overruns the progress timeout: the job stays
    # UNBOUNDED past 150s, so the monitor cancels it and the final
    # job state seen is CANCELLED.
    domain_info_records = [
        host.DomainJobInfo(
            type=fakelibvirt.VIR_DOMAIN_JOB_NONE),
        host.DomainJobInfo(
            type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
        host.DomainJobInfo(
            type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
        host.DomainJobInfo(
            type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
        host.DomainJobInfo(
            type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
        host.DomainJobInfo(
            type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
        "thread-finish",
        "domain-stop",
        host.DomainJobInfo(
            type=fakelibvirt.VIR_DOMAIN_JOB_CANCELLED),
    ]

    self._test_live_migration_monitoring(domain_info_records,
                                         fake_times, self.EXPECT_ABORT)
def test_live_migration_downtime_steps(self):
    """_migration_downtime_steps yields (delay, downtime) pairs that
    ramp up to the configured maximum downtime over the configured
    number of steps.
    """
    self.flags(live_migration_downtime=400,
               live_migration_downtime_steps=10,
               live_migration_downtime_delay=30,
               group='libvirt')

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

    actual = list(drvr._migration_downtime_steps(3.0))

    expected = [
        (0, 37),
        (90, 38),
        (180, 39),
        (270, 42),
        (360, 46),
        (450, 55),
        (540, 70),
        (630, 98),
        (720, 148),
        (810, 238),
        (900, 400),
    ]
    self.assertEqual(expected, actual)
@mock.patch.object(utils, "spawn")
@mock.patch.object(libvirt_driver.LibvirtDriver, "_live_migration_monitor")
@mock.patch.object(host.Host, "get_guest")
@mock.patch.object(fakelibvirt.Connection, "_mark_running")
def test_live_migration_main(self, mock_running, mock_guest,
                             mock_monitor, mock_thread):
    """_live_migration spawns the operation thread and then invokes the
    monitor with the same arguments plus a fresh eventlet Event.
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(**self.test_instance)
    dom = fakelibvirt.Domain(drvr._get_connection(),
                             "<domain><name>demo</name></domain>", True)
    guest = libvirt_guest.Guest(dom)
    migrate_data = {}
    mock_guest.return_value = guest

    def fake_post():
        pass

    def fake_recover():
        pass

    drvr._live_migration(self.context, instance, "fakehost",
                         fake_post, fake_recover, False,
                         migrate_data)

    class AnyEventletEvent(object):
        # Equality matcher: accepts any eventlet Event instance, since
        # the real event object is created inside _live_migration.
        def __eq__(self, other):
            return type(other) == eventlet.event.Event

    mock_thread.assert_called_once_with(
        drvr._live_migration_operation,
        self.context, instance, "fakehost", False,
        migrate_data, dom)
    mock_monitor.assert_called_once_with(
        self.context, instance, guest, "fakehost",
        fake_post, fake_recover, False,
        migrate_data, dom, AnyEventletEvent())
def _do_test_create_images_and_backing(self, disk_type):
    """Shared helper: a missing disk of the given type is created via
    create_image and kernel/ramdisk fetching is attempted.
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.mox.StubOutWithMock(drvr, '_fetch_instance_kernel_ramdisk')
    self.mox.StubOutWithMock(libvirt_driver.libvirt_utils, 'create_image')

    disk_info = {'path': 'foo', 'type': disk_type,
                 'disk_size': 1 * 1024 ** 3,
                 'virt_disk_size': 20 * 1024 ** 3,
                 'backing_file': None}

    libvirt_driver.libvirt_utils.create_image(
        disk_info['type'], mox.IgnoreArg(), disk_info['virt_disk_size'])
    drvr._fetch_instance_kernel_ramdisk(self.context, self.test_instance,
                                        fallback_from_host=None)
    self.mox.ReplayAll()

    # Pretend no file exists so the create_image path is taken.
    self.stubs.Set(os.path, 'exists', lambda *args: False)
    drvr._create_images_and_backing(self.context, self.test_instance,
                                    "/fake/instance/dir", [disk_info])
def test_create_images_and_backing_qcow2(self):
    # Exercise the shared helper with a qcow2-format disk.
    self._do_test_create_images_and_backing('qcow2')
def test_create_images_and_backing_raw(self):
    # Exercise the shared helper with a raw-format disk.
    self._do_test_create_images_and_backing('raw')
def test_create_images_and_backing_images_not_exist_no_fallback(self):
    """ImageNotFound propagates when the backing image cannot be
    fetched and no fallback host is provided.
    """
    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    disk_info = [
        {u'backing_file': u'fake_image_backing_file',
         u'disk_size': 10747904,
         u'path': u'disk_path',
         u'type': u'qcow2',
         u'virt_disk_size': 25165824}]

    self.test_instance.update({'user_id': 'fake-user',
                               'os_type': None,
                               'project_id': 'fake-project'})
    instance = objects.Instance(**self.test_instance)
    # fetch_image fails as if the image were deleted from glance.
    with mock.patch.object(libvirt_driver.libvirt_utils, 'fetch_image',
                           side_effect=exception.ImageNotFound(
                               image_id="fake_id")):
        self.assertRaises(exception.ImageNotFound,
                          conn._create_images_and_backing,
                          self.context, instance,
                          "/fake/instance/dir", disk_info)
def test_create_images_and_backing_images_not_exist_fallback(self):
    """When images cannot be fetched from glance, backing file and
    kernel/ramdisk are copied over from the fallback (source) host.
    """
    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    disk_info = [
        {u'backing_file': u'fake_image_backing_file',
         u'disk_size': 10747904,
         u'path': u'disk_path',
         u'type': u'qcow2',
         u'virt_disk_size': 25165824}]

    base_dir = os.path.join(CONF.instances_path,
                            CONF.image_cache_subdirectory_name)
    self.test_instance.update({'user_id': 'fake-user',
                               'os_type': None,
                               'kernel_id': 'fake_kernel_id',
                               'ramdisk_id': 'fake_ramdisk_id',
                               'project_id': 'fake-project'})
    instance = objects.Instance(**self.test_instance)
    with contextlib.nested(
        mock.patch.object(libvirt_driver.libvirt_utils, 'copy_image'),
        mock.patch.object(libvirt_driver.libvirt_utils, 'fetch_image',
                          side_effect=exception.ImageNotFound(
                              image_id="fake_id")),
    ) as (copy_image_mock, fetch_image_mock):
        conn._create_images_and_backing(self.context, instance,
                                        "/fake/instance/dir", disk_info,
                                        fallback_from_host="fake_host")
        backfile_path = os.path.join(base_dir, 'fake_image_backing_file')
        kernel_path = os.path.join(CONF.instances_path,
                                   self.test_instance['uuid'],
                                   'kernel')
        ramdisk_path = os.path.join(CONF.instances_path,
                                    self.test_instance['uuid'],
                                    'ramdisk')
        # Every missing file is pulled from fake_host via copy_image.
        copy_image_mock.assert_has_calls([
            mock.call(dest=backfile_path, src=backfile_path,
                      host='fake_host', receive=True),
            mock.call(dest=kernel_path, src=kernel_path,
                      host='fake_host', receive=True),
            mock.call(dest=ramdisk_path, src=ramdisk_path,
                      host='fake_host', receive=True)
        ])
        # fetch_image was still attempted for each file before the
        # host-to-host copy fallback kicked in.
        fetch_image_mock.assert_has_calls([
            mock.call(context=self.context,
                      target=backfile_path,
                      image_id=self.test_instance['image_ref'],
                      user_id=self.test_instance['user_id'],
                      project_id=self.test_instance['project_id'],
                      max_size=25165824),
            mock.call(self.context, kernel_path,
                      self.test_instance['kernel_id'],
                      self.test_instance['user_id'],
                      self.test_instance['project_id']),
            mock.call(self.context, ramdisk_path,
                      self.test_instance['ramdisk_id'],
                      self.test_instance['user_id'],
                      self.test_instance['project_id']),
        ])
@mock.patch.object(libvirt_driver.libvirt_utils, 'fetch_image')
@mock.patch.object(os.path, 'exists', return_value=True)
def test_create_images_and_backing_images_exist(self, mock_exists,
                                                mock_fetch_image):
    """No image is fetched when the backing files already exist on
    disk (os.path.exists is mocked to always return True).
    """
    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    disk_info = [
        {u'backing_file': u'fake_image_backing_file',
         u'disk_size': 10747904,
         u'path': u'disk_path',
         u'type': u'qcow2',
         u'virt_disk_size': 25165824}]

    self.test_instance.update({'user_id': 'fake-user',
                               'os_type': None,
                               'kernel_id': 'fake_kernel_id',
                               'ramdisk_id': 'fake_ramdisk_id',
                               'project_id': 'fake-project'})
    instance = objects.Instance(**self.test_instance)
    conn._create_images_and_backing(self.context, instance,
                                    '/fake/instance/dir', disk_info)
    self.assertFalse(mock_fetch_image.called)
def test_create_images_and_backing_ephemeral_gets_created(self):
    """Ephemeral backing files are generated locally through
    _create_ephemeral while image-backed disks are fetched from
    glance; both get their base size verified.
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    disk_info = [
        {u'backing_file': u'fake_image_backing_file',
         u'disk_size': 10747904,
         u'path': u'disk_path',
         u'type': u'qcow2',
         u'virt_disk_size': 25165824},
        {u'backing_file': u'ephemeral_1_default',
         u'disk_size': 393216,
         u'over_committed_disk_size': 1073348608,
         u'path': u'disk_eph_path',
         u'type': u'qcow2',
         u'virt_disk_size': 1073741824}]

    base_dir = os.path.join(CONF.instances_path,
                            CONF.image_cache_subdirectory_name)
    instance = objects.Instance(**self.test_instance)
    with contextlib.nested(
        mock.patch.object(drvr, '_fetch_instance_kernel_ramdisk'),
        mock.patch.object(libvirt_driver.libvirt_utils, 'fetch_image'),
        mock.patch.object(drvr, '_create_ephemeral'),
        mock.patch.object(imagebackend.Image, 'verify_base_size')
    ) as (fetch_kernel_ramdisk_mock, fetch_image_mock,
          create_ephemeral_mock, verify_base_size_mock):
        drvr._create_images_and_backing(self.context, instance,
                                        "/fake/instance/dir",
                                        disk_info)
        # Exactly one ephemeral disk created, targeting the cache dir.
        self.assertEqual(len(create_ephemeral_mock.call_args_list), 1)
        m_args, m_kwargs = create_ephemeral_mock.call_args_list[0]
        self.assertEqual(
            os.path.join(base_dir, 'ephemeral_1_default'),
            m_kwargs['target'])
        # Exactly one glance fetch, for the image-backed disk.
        self.assertEqual(len(fetch_image_mock.call_args_list), 1)
        m_args, m_kwargs = fetch_image_mock.call_args_list[0]
        self.assertEqual(
            os.path.join(base_dir, 'fake_image_backing_file'),
            m_kwargs['target'])
        # Both cached files have their virtual sizes validated.
        verify_base_size_mock.assert_has_calls([
            mock.call(os.path.join(base_dir, 'fake_image_backing_file'),
                      25165824),
            mock.call(os.path.join(base_dir, 'ephemeral_1_default'),
                      1073741824)
        ])
def test_create_images_and_backing_disk_info_none(self):
    """With disk_info=None only the kernel/ramdisk fetch happens; no
    disk images are created (only the recorded mox call is expected).
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.mox.StubOutWithMock(drvr, '_fetch_instance_kernel_ramdisk')

    drvr._fetch_instance_kernel_ramdisk(self.context, self.test_instance,
                                        fallback_from_host=None)
    self.mox.ReplayAll()

    drvr._create_images_and_backing(self.context, self.test_instance,
                                    "/fake/instance/dir", None)
def test_pre_live_migration_works_correctly_mocked(self):
    """pre_live_migration connects each mapped volume and returns the
    per-volume disk info plus graphics/serial listen addresses.
    """
    # Creating testdata
    vol = {'block_device_mapping': [
        {'connection_info': {'serial': '12345', u'data':
            {'device_path':
             u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.abc.12345.opst-lun-X'}},
         'mount_device': '/dev/sda'},
        {'connection_info': {'serial': '67890', u'data':
            {'device_path':
             u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z'}},
         'mount_device': '/dev/sdb'}]}
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

    class FakeNetworkInfo(object):
        def fixed_ips(self):
            return ["test_ip_addr"]

    def fake_none(*args, **kwargs):
        return

    # Skip image creation; this test only cares about volume handling.
    self.stubs.Set(drvr, '_create_images_and_backing', fake_none)

    instance = objects.Instance(**self.test_instance)
    c = context.get_admin_context()
    nw_info = FakeNetworkInfo()

    # Creating mocks
    self.mox.StubOutWithMock(driver, "block_device_info_get_mapping")
    driver.block_device_info_get_mapping(vol
        ).AndReturn(vol['block_device_mapping'])
    self.mox.StubOutWithMock(drvr, "_connect_volume")
    # Expect one _connect_volume per mapped device, with a scsi disk
    # info derived from the mount device name.
    for v in vol['block_device_mapping']:
        disk_info = {
            'bus': "scsi",
            'dev': v['mount_device'].rpartition("/")[2],
            'type': "disk"
            }
        drvr._connect_volume(v['connection_info'],
                             disk_info)
    self.mox.StubOutWithMock(drvr, 'plug_vifs')
    drvr.plug_vifs(mox.IsA(instance), nw_info)

    self.mox.ReplayAll()
    result = drvr.pre_live_migration(
        c, instance, vol, nw_info, None,
        migrate_data={"block_migration": False})

    target_ret = {
        'graphics_listen_addrs': {'spice': '127.0.0.1', 'vnc': '127.0.0.1'},
        'serial_listen_addr': '127.0.0.1',
        'volume': {
            '12345': {'connection_info': {u'data': {'device_path':
                      u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.abc.12345.opst-lun-X'},
                      'serial': '12345'},
                      'disk_info': {'bus': 'scsi',
                                    'dev': 'sda',
                                    'type': 'disk'}},
            '67890': {'connection_info': {u'data': {'device_path':
                      u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z'},
                      'serial': '67890'},
                      'disk_info': {'bus': 'scsi',
                                    'dev': 'sdb',
                                    'type': 'disk'}}}}
    self.assertEqual(result, target_ret)
def test_pre_live_migration_block_with_config_drive_mocked(self):
    """pre_live_migration is refused when the instance requires a
    config drive and neither the instance path nor block storage is
    shared.
    """
    # Creating testdata
    vol = {'block_device_mapping': [
        {'connection_info': 'dummy', 'mount_device': '/dev/sda'},
        {'connection_info': 'dummy', 'mount_device': '/dev/sdb'}]}
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

    def fake_true(*args, **kwargs):
        return True

    # Force "a config drive is required" for this instance.
    self.stubs.Set(configdrive, 'required_by', fake_true)

    instance = objects.Instance(**self.test_instance)
    c = context.get_admin_context()

    self.assertRaises(exception.NoLiveMigrationForConfigDriveInLibVirt,
                      drvr.pre_live_migration, c, instance, vol, None,
                      None, {'is_shared_instance_path': False,
                             'is_shared_block_storage': False})
@mock.patch('nova.virt.driver.block_device_info_get_mapping',
            return_value=())
@mock.patch('nova.virt.configdrive.required_by',
            return_value=True)
def test_pre_live_migration_block_with_config_drive_mocked_with_vfat(
        self, mock_required_by, block_device_info_get_mapping):
    """A vfat-format config drive does not prevent pre_live_migration
    from succeeding (contrast with the non-vfat test above).
    """
    self.flags(config_drive_format='vfat')
    # Creating testdata
    vol = {'block_device_mapping': [
        {'connection_info': 'dummy', 'mount_device': '/dev/sda'},
        {'connection_info': 'dummy', 'mount_device': '/dev/sdb'}]}
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(**self.test_instance)

    res_data = drvr.pre_live_migration(
        self.context, instance, vol, [], None,
        {'is_shared_instance_path': False,
         'is_shared_block_storage': False})

    block_device_info_get_mapping.assert_called_once_with(
        {'block_device_mapping': [
            {'connection_info': 'dummy', 'mount_device': '/dev/sda'},
            {'connection_info': 'dummy', 'mount_device': '/dev/sdb'}
        ]}
    )
    # No volumes connected (mapping mocked to ()); only the listen
    # addresses are reported back.
    self.assertEqual({'graphics_listen_addrs': {'spice': '127.0.0.1',
                                                'vnc': '127.0.0.1'},
                      'serial_listen_addr': '127.0.0.1',
                      'volume': {}}, res_data)
def test_pre_live_migration_vol_backed_works_correctly_mocked(self):
    """Volume-backed pre_live_migration connects volumes, creates the
    instance directory on disk, and returns per-volume disk info.
    """
    # Creating testdata, using temp dir.
    with utils.tempdir() as tmpdir:
        self.flags(instances_path=tmpdir)
        vol = {'block_device_mapping': [
            {'connection_info': {'serial': '12345', u'data':
                {'device_path':
                 u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.abc.12345.opst-lun-X'}},
             'mount_device': '/dev/sda'},
            {'connection_info': {'serial': '67890', u'data':
                {'device_path':
                 u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z'}},
             'mount_device': '/dev/sdb'}]}
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        def fake_none(*args, **kwargs):
            return
        self.stubs.Set(drvr, '_create_images_and_backing', fake_none)

        class FakeNetworkInfo(object):
            def fixed_ips(self):
                return ["test_ip_addr"]
        inst_ref = objects.Instance(**self.test_instance)
        c = context.get_admin_context()
        nw_info = FakeNetworkInfo()

        # Creating mocks
        self.mox.StubOutWithMock(drvr, "_connect_volume")
        # One _connect_volume expectation per mapped device.
        for v in vol['block_device_mapping']:
            disk_info = {
                'bus': "scsi",
                'dev': v['mount_device'].rpartition("/")[2],
                'type': "disk"
                }
            drvr._connect_volume(v['connection_info'],
                                 disk_info)
        self.mox.StubOutWithMock(drvr, 'plug_vifs')
        drvr.plug_vifs(mox.IsA(inst_ref), nw_info)
        self.mox.ReplayAll()

        migrate_data = {'is_shared_instance_path': False,
                        'is_volume_backed': True,
                        'block_migration': False,
                        'instance_relative_path': inst_ref['name']
                        }
        ret = drvr.pre_live_migration(c, inst_ref, vol, nw_info, None,
                                      migrate_data)
        target_ret = {
            'graphics_listen_addrs': {'spice': '127.0.0.1',
                                      'vnc': '127.0.0.1'},
            'serial_listen_addr': '127.0.0.1',
            'volume': {
                '12345': {'connection_info': {u'data': {'device_path':
                          u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.abc.12345.opst-lun-X'},
                          'serial': '12345'},
                          'disk_info': {'bus': 'scsi',
                                        'dev': 'sda',
                                        'type': 'disk'}},
                '67890': {'connection_info': {u'data': {'device_path':
                          u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z'},
                          'serial': '67890'},
                          'disk_info': {'bus': 'scsi',
                                        'dev': 'sdb',
                                        'type': 'disk'}}}}
        self.assertEqual(ret, target_ret)
        # The instance directory must have been created under the
        # temporary instances_path.
        self.assertTrue(os.path.exists('%s/%s/' % (tmpdir,
                                                   inst_ref['name'])))
def test_pre_live_migration_plug_vifs_retry_fails(self):
    """pre_live_migration re-raises when plug_vifs fails on every
    allowed retry.
    """
    self.flags(live_migration_retry_count=3)
    inst = objects.Instance(**self.test_instance)

    def always_failing_plug(instance, network_info):
        # Fail on every attempt so the retry loop is exhausted.
        raise processutils.ProcessExecutionError()

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.stubs.Set(drvr, 'plug_vifs', always_failing_plug)
    # Avoid real delays between retries.
    self.stubs.Set(eventlet.greenthread, 'sleep',
                   lambda x: eventlet.sleep(0))

    empty_disk_info = jsonutils.dumps({})
    self.assertRaises(processutils.ProcessExecutionError,
                      drvr.pre_live_migration,
                      self.context, inst, block_device_info=None,
                      network_info=[], disk_info=empty_disk_info)
def test_pre_live_migration_plug_vifs_retry_works(self):
    """pre_live_migration succeeds when plug_vifs only works on the
    final allowed retry attempt.
    """
    self.flags(live_migration_retry_count=3)
    attempts = {'count': 0}
    inst = objects.Instance(**self.test_instance)

    def flaky_plug_vifs(instance, network_info):
        # Fail until the final allowed attempt, then succeed.
        attempts['count'] += 1
        if attempts['count'] >= CONF.live_migration_retry_count:
            return
        raise processutils.ProcessExecutionError()

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.stubs.Set(drvr, 'plug_vifs', flaky_plug_vifs)
    # Avoid real delays between retries.
    self.stubs.Set(eventlet.greenthread, 'sleep',
                   lambda x: eventlet.sleep(0))

    drvr.pre_live_migration(self.context, inst, block_device_info=None,
                            network_info=[],
                            disk_info=jsonutils.dumps({}))
def test_pre_live_migration_image_not_created_with_shared_storage(self):
    """For each of these migrate_data combinations pre_live_migration
    must not create images/backing files.
    """
    migrate_data_set = [{'is_shared_block_storage': False,
                         'block_migration': False},
                        {'is_shared_block_storage': True,
                         'block_migration': False},
                        {'is_shared_block_storage': False,
                         'block_migration': True}]

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(**self.test_instance)
    # creating mocks
    with contextlib.nested(
        mock.patch.object(drvr,
                          '_create_images_and_backing'),
        mock.patch.object(drvr,
                          'ensure_filtering_rules_for_instance'),
        mock.patch.object(drvr, 'plug_vifs'),
    ) as (
        create_image_mock,
        rules_mock,
        plug_mock,
    ):
        disk_info_json = jsonutils.dumps({})
        for migrate_data in migrate_data_set:
            res = drvr.pre_live_migration(self.context, instance,
                                          block_device_info=None,
                                          network_info=[],
                                          disk_info=disk_info_json,
                                          migrate_data=migrate_data)
            self.assertFalse(create_image_mock.called)
            self.assertIsInstance(res, dict)
def test_pre_live_migration_with_not_shared_instance_path(self):
    """Without a shared instance path, images/backing are created into
    a real instance dir using the instance's host as fetch fallback.
    """
    migrate_data = {'is_shared_block_storage': False,
                    'is_shared_instance_path': False}

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(**self.test_instance)

    def check_instance_dir(context, instance,
                           instance_dir, disk_info,
                           fallback_from_host=False):
        # A non-empty instance directory must be passed in.
        self.assertTrue(instance_dir)
    # creating mocks
    with contextlib.nested(
        mock.patch.object(drvr,
                          '_create_images_and_backing',
                          side_effect=check_instance_dir),
        mock.patch.object(drvr,
                          'ensure_filtering_rules_for_instance'),
        mock.patch.object(drvr, 'plug_vifs'),
    ) as (
        create_image_mock,
        rules_mock,
        plug_mock,
    ):
        disk_info_json = jsonutils.dumps({})
        res = drvr.pre_live_migration(self.context, instance,
                                      block_device_info=None,
                                      network_info=[],
                                      disk_info=disk_info_json,
                                      migrate_data=migrate_data)
        create_image_mock.assert_has_calls(
            [mock.call(self.context, instance, mock.ANY, {},
                       fallback_from_host=instance.host)])
        self.assertIsInstance(res, dict)
def test_pre_live_migration_block_migrate_fails(self):
    """pre_live_migration raises MigrationError when block device
    mappings are present with an empty migrate_data.

    NOTE(review): this appears to exercise the block-migration-with-
    attached-volumes rejection path — confirm against
    pre_live_migration's handling of empty migrate_data.
    """
    bdms = [{
        'connection_info': {
            'serial': '12345',
            u'data': {
                'device_path':
                u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.abc.12345.t-lun-X'
            }
        },
        'mount_device': '/dev/sda'}]

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(**self.test_instance)
    with contextlib.nested(
        mock.patch.object(drvr, '_create_images_and_backing'),
        mock.patch.object(drvr, 'ensure_filtering_rules_for_instance'),
        mock.patch.object(drvr, 'plug_vifs'),
        mock.patch.object(drvr, '_connect_volume'),
        mock.patch.object(driver, 'block_device_info_get_mapping',
                          return_value=bdms)):
        disk_info_json = jsonutils.dumps({})
        self.assertRaises(exception.MigrationError,
                          drvr.pre_live_migration,
                          self.context, instance, block_device_info=None,
                          network_info=[], disk_info=disk_info_json,
                          migrate_data={})
def test_get_instance_disk_info_works_correctly(self):
    """get_instance_disk_info reports size/backing-file data for each
    local disk found in the domain XML.
    """
    # Test data
    instance = objects.Instance(**self.test_instance)
    dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                "<devices>"
                "<disk type='file'><driver name='qemu' type='raw'/>"
                "<source file='/test/disk'/>"
                "<target dev='vda' bus='virtio'/></disk>"
                "<disk type='file'><driver name='qemu' type='qcow2'/>"
                "<source file='/test/disk.local'/>"
                "<target dev='vdb' bus='virtio'/></disk>"
                "</devices></domain>")

    # Preparing mocks
    vdmock = self.mox.CreateMock(fakelibvirt.virDomain)
    self.mox.StubOutWithMock(vdmock, "XMLDesc")
    vdmock.XMLDesc(flags=0).AndReturn(dummyxml)

    def fake_lookup(instance_name):
        if instance_name == instance.name:
            return vdmock
    self.create_fake_libvirt_mock(lookupByName=fake_lookup)

    fake_libvirt_utils.disk_sizes['/test/disk'] = 10 * units.Gi
    fake_libvirt_utils.disk_sizes['/test/disk.local'] = 20 * units.Gi
    fake_libvirt_utils.disk_backing_files['/test/disk.local'] = 'file'

    self.mox.StubOutWithMock(os.path, "getsize")
    os.path.getsize('/test/disk').AndReturn((10737418240))
    os.path.getsize('/test/disk.local').AndReturn((3328599655))

    # Canned qemu-img output: 20G virtual size but only 3.1G on disk,
    # so the qcow2 disk is over-committed.
    ret = ("image: /test/disk\n"
           "file format: raw\n"
           "virtual size: 20G (21474836480 bytes)\n"
           "disk size: 3.1G\n"
           "cluster_size: 2097152\n"
           "backing file: /test/dummy (actual path: /backing/file)\n")

    self.mox.StubOutWithMock(os.path, "exists")
    os.path.exists('/test/disk.local').AndReturn(True)

    self.mox.StubOutWithMock(utils, "execute")
    utils.execute('env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info',
                  '/test/disk.local').AndReturn((ret, ''))

    self.mox.ReplayAll()
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    info = drvr.get_instance_disk_info(instance)
    info = jsonutils.loads(info)
    self.assertEqual(info[0]['type'], 'raw')
    self.assertEqual(info[0]['path'], '/test/disk')
    self.assertEqual(info[0]['disk_size'], 10737418240)
    self.assertEqual(info[0]['backing_file'], "")
    self.assertEqual(info[0]['over_committed_disk_size'], 0)
    self.assertEqual(info[1]['type'], 'qcow2')
    self.assertEqual(info[1]['path'], '/test/disk.local')
    self.assertEqual(info[1]['virt_disk_size'], 21474836480)
    self.assertEqual(info[1]['backing_file'], "file")
    # over_committed = virtual size - actual size on disk.
    self.assertEqual(info[1]['over_committed_disk_size'], 18146236825)
def test_post_live_migration(self):
    """post_live_migration disconnects every mapped volume using the
    connection info re-initialized through the volume API, preserving
    any multipath_id from the original connection info.
    """
    vol = {'block_device_mapping': [
        {'connection_info': {
            'data': {'multipath_id': 'dummy1'},
            'serial': 'fake_serial1'},
            'mount_device': '/dev/sda',
        },
        {'connection_info': {
            'data': {},
            'serial': 'fake_serial2'},
            'mount_device': '/dev/sdb', }]}

    def fake_initialize_connection(context, volume_id, connector):
        # Fresh connection info from the volume API has empty data.
        return {'data': {}}

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

    fake_connector = {'host': 'fake'}
    inst_ref = {'id': 'foo'}
    cntx = context.get_admin_context()

    # Set up the mock expectations
    with contextlib.nested(
        mock.patch.object(driver, 'block_device_info_get_mapping',
                          return_value=vol['block_device_mapping']),
        mock.patch.object(drvr, "get_volume_connector",
                          return_value=fake_connector),
        mock.patch.object(drvr._volume_api, "initialize_connection",
                          side_effect=fake_initialize_connection),
        mock.patch.object(drvr, '_disconnect_volume')
    ) as (block_device_info_get_mapping, get_volume_connector,
          initialize_connection, _disconnect_volume):
        drvr.post_live_migration(cntx, inst_ref, vol)

        block_device_info_get_mapping.assert_has_calls([
            mock.call(vol)])
        get_volume_connector.assert_has_calls([
            mock.call(inst_ref)])
        # The first volume's multipath_id is carried over into the
        # freshly initialized connection info; the second has none.
        _disconnect_volume.assert_has_calls([
            mock.call({'data': {'multipath_id': 'dummy1'}}, 'sda'),
            mock.call({'data': {}}, 'sdb')])
def test_get_instance_disk_info_excludes_volumes(self):
    """Disks that appear in the block device mapping (attached
    volumes) are excluded from the reported disk info.
    """
    # Test data
    instance = objects.Instance(**self.test_instance)
    dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                "<devices>"
                "<disk type='file'><driver name='qemu' type='raw'/>"
                "<source file='/test/disk'/>"
                "<target dev='vda' bus='virtio'/></disk>"
                "<disk type='file'><driver name='qemu' type='qcow2'/>"
                "<source file='/test/disk.local'/>"
                "<target dev='vdb' bus='virtio'/></disk>"
                "<disk type='file'><driver name='qemu' type='qcow2'/>"
                "<source file='/fake/path/to/volume1'/>"
                "<target dev='vdc' bus='virtio'/></disk>"
                "<disk type='file'><driver name='qemu' type='qcow2'/>"
                "<source file='/fake/path/to/volume2'/>"
                "<target dev='vdd' bus='virtio'/></disk>"
                "</devices></domain>")

    # Preparing mocks
    vdmock = self.mox.CreateMock(fakelibvirt.virDomain)
    self.mox.StubOutWithMock(vdmock, "XMLDesc")
    vdmock.XMLDesc(flags=0).AndReturn(dummyxml)

    def fake_lookup(instance_name):
        if instance_name == instance.name:
            return vdmock
    self.create_fake_libvirt_mock(lookupByName=fake_lookup)

    fake_libvirt_utils.disk_sizes['/test/disk'] = 10 * units.Gi
    fake_libvirt_utils.disk_sizes['/test/disk.local'] = 20 * units.Gi
    fake_libvirt_utils.disk_backing_files['/test/disk.local'] = 'file'

    self.mox.StubOutWithMock(os.path, "getsize")
    os.path.getsize('/test/disk').AndReturn((10737418240))
    os.path.getsize('/test/disk.local').AndReturn((3328599655))

    # Canned qemu-img output for the qcow2 disk.
    ret = ("image: /test/disk\n"
           "file format: raw\n"
           "virtual size: 20G (21474836480 bytes)\n"
           "disk size: 3.1G\n"
           "cluster_size: 2097152\n"
           "backing file: /test/dummy (actual path: /backing/file)\n")

    self.mox.StubOutWithMock(os.path, "exists")
    os.path.exists('/test/disk.local').AndReturn(True)

    self.mox.StubOutWithMock(utils, "execute")
    utils.execute('env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info',
                  '/test/disk.local').AndReturn((ret, ''))

    self.mox.ReplayAll()
    # vdc and vdd are mapped volumes and must not be reported.
    conn_info = {'driver_volume_type': 'fake'}
    info = {'block_device_mapping': [
              {'connection_info': conn_info, 'mount_device': '/dev/vdc'},
              {'connection_info': conn_info, 'mount_device': '/dev/vdd'}]}
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    info = drvr.get_instance_disk_info(instance,
                                       block_device_info=info)
    info = jsonutils.loads(info)
    self.assertEqual(info[0]['type'], 'raw')
    self.assertEqual(info[0]['path'], '/test/disk')
    self.assertEqual(info[0]['disk_size'], 10737418240)
    self.assertEqual(info[0]['backing_file'], "")
    self.assertEqual(info[0]['over_committed_disk_size'], 0)
    self.assertEqual(info[1]['type'], 'qcow2')
    self.assertEqual(info[1]['path'], '/test/disk.local')
    self.assertEqual(info[1]['virt_disk_size'], 21474836480)
    self.assertEqual(info[1]['backing_file'], "file")
    self.assertEqual(info[1]['over_committed_disk_size'], 18146236825)
def test_get_instance_disk_info_no_bdinfo_passed(self):
    # NOTE(ndipanov): _get_disk_overcomitted_size_total calls this method
    # without access to Nova's block device information. We want to make
    # sure that we guess volumes mostly correctly in that case as well
    instance = objects.Instance(**self.test_instance)
    dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                "<devices>"
                "<disk type='file'><driver name='qemu' type='raw'/>"
                "<source file='/test/disk'/>"
                "<target dev='vda' bus='virtio'/></disk>"
                "<disk type='block'><driver name='qemu' type='raw'/>"
                "<source file='/fake/path/to/volume1'/>"
                "<target dev='vdb' bus='virtio'/></disk>"
                "</devices></domain>")

    # Preparing mocks
    vdmock = self.mox.CreateMock(fakelibvirt.virDomain)
    self.mox.StubOutWithMock(vdmock, "XMLDesc")
    vdmock.XMLDesc(flags=0).AndReturn(dummyxml)

    def fake_lookup(instance_name):
        if instance_name == instance.name:
            return vdmock
    self.create_fake_libvirt_mock(lookupByName=fake_lookup)

    fake_libvirt_utils.disk_sizes['/test/disk'] = 10 * units.Gi

    self.mox.StubOutWithMock(os.path, "getsize")
    os.path.getsize('/test/disk').AndReturn((10737418240))

    self.mox.ReplayAll()
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    info = drvr.get_instance_disk_info(instance)

    info = jsonutils.loads(info)
    # Only the file-backed disk is reported; the type='block' device
    # is guessed to be a volume and skipped.
    self.assertEqual(1, len(info))
    self.assertEqual(info[0]['type'], 'raw')
    self.assertEqual(info[0]['path'], '/test/disk')
    self.assertEqual(info[0]['disk_size'], 10737418240)
    self.assertEqual(info[0]['backing_file'], "")
    self.assertEqual(info[0]['over_committed_disk_size'], 0)
def test_spawn_with_network_info(self):
    """spawn() succeeds with network info when the libvirt module
    lacks VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES (it is deleted
    from the mocked module below).
    """
    # Preparing mocks
    def fake_none(*args, **kwargs):
        return

    def fake_getLibVersion():
        return 9011

    def fake_getCapabilities():
        return """
        <capabilities>
            <host>
                <uuid>cef19ce0-0ca2-11df-855d-b19fbce37686</uuid>
                <cpu>
                  <arch>x86_64</arch>
                  <model>Penryn</model>
                  <vendor>Intel</vendor>
                  <topology sockets='1' cores='2' threads='1'/>
                  <feature name='xtpr'/>
                </cpu>
            </host>
        </capabilities>
        """

    def fake_baselineCPU(cpu, flag):
        return """<cpu mode='custom' match='exact'>
                    <model fallback='allow'>Penryn</model>
                    <vendor>Intel</vendor>
                    <feature policy='require' name='xtpr'/>
                  </cpu>
               """

    # _fake_network_info must be called before create_fake_libvirt_mock(),
    # as _fake_network_info calls importutils.import_class() and
    # create_fake_libvirt_mock() mocks importutils.import_class().
    network_info = _fake_network_info(self.stubs, 1)
    self.create_fake_libvirt_mock(getLibVersion=fake_getLibVersion,
                                  getCapabilities=fake_getCapabilities,
                                  getVersion=lambda: 1005001,
                                  baselineCPU=fake_baselineCPU)

    instance_ref = self.test_instance
    instance_ref['image_ref'] = 123456  # we send an int to test sha1 call
    instance = objects.Instance(**instance_ref)
    image_meta = self.test_image_meta

    # Mock out the get_info method of the LibvirtDriver so that the polling
    # in the spawn method of the LibvirtDriver returns immediately
    self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, 'get_info')
    libvirt_driver.LibvirtDriver.get_info(instance
        ).AndReturn(hardware.InstanceInfo(state=power_state.RUNNING))

    # Start test
    self.mox.ReplayAll()

    with mock.patch('nova.virt.libvirt.driver.libvirt') as old_virt:
        del old_virt.VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.stubs.Set(drvr.firewall_driver,
                       'setup_basic_filtering',
                       fake_none)
        self.stubs.Set(drvr.firewall_driver,
                       'prepare_instance_filter',
                       fake_none)
        self.stubs.Set(imagebackend.Image,
                       'cache',
                       fake_none)

        drvr.spawn(self.context, instance, image_meta, [], 'herp',
                   network_info=network_info)

    # Clean up any instance/image-cache directories spawn created.
    path = os.path.join(CONF.instances_path, instance['name'])
    if os.path.isdir(path):
        shutil.rmtree(path)

    path = os.path.join(CONF.instances_path,
                        CONF.image_cache_subdirectory_name)
    if os.path.isdir(path):
        shutil.rmtree(os.path.join(CONF.instances_path,
                                   CONF.image_cache_subdirectory_name))
def test_spawn_without_image_meta(self):
    """spawn() always routes through _create_image, on first boot and
    on a subsequent spawn alike.
    """
    self.create_image_called = False

    def record_create_image(*args, **kwargs):
        # Flag that the image-creation path was exercised.
        self.create_image_called = True

    def noop(*args, **kwargs):
        return

    def running_info(instance):
        return hardware.InstanceInfo(state=power_state.RUNNING)

    instance_ref = self.test_instance
    instance_ref['image_ref'] = 1
    instance = objects.Instance(**instance_ref)

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.stubs.Set(drvr, '_get_guest_xml', noop)
    self.stubs.Set(drvr, '_create_image', record_create_image)
    self.stubs.Set(drvr, '_create_domain_and_network', noop)
    self.stubs.Set(drvr, 'get_info', running_info)

    drvr.spawn(self.context, instance,
               self.test_image_meta, [], None)
    self.assertTrue(self.create_image_called)

    drvr.spawn(self.context, instance,
               self.test_image_meta, [], None)
    self.assertTrue(self.create_image_called)
def test_spawn_from_volume_calls_cache(self):
    """Image.cache is invoked for the root disk only when the instance
    boots from an image rather than a volume.
    """
    self.cache_called_for_disk = False

    def fake_none(*args, **kwargs):
        return

    def fake_cache(*args, **kwargs):
        # Record a cache() call targeting the root disk image.
        if kwargs.get('image_id') == 'my_fake_image':
            self.cache_called_for_disk = True

    def fake_get_info(instance):
        return hardware.InstanceInfo(state=power_state.RUNNING)

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

    self.stubs.Set(drvr, '_get_guest_xml', fake_none)
    self.stubs.Set(imagebackend.Image, 'cache', fake_cache)
    self.stubs.Set(drvr, '_create_domain_and_network', fake_none)
    self.stubs.Set(drvr, 'get_info', fake_get_info)

    block_device_info = {'root_device_name': '/dev/vda',
                         'block_device_mapping': [
                            {'mount_device': 'vda',
                             'boot_index': 0}
                            ]
                        }

    # Volume-backed instance created without image
    instance_ref = self.test_instance
    instance_ref['image_ref'] = ''
    instance_ref['root_device_name'] = '/dev/vda'
    instance_ref['uuid'] = uuidutils.generate_uuid()
    instance = objects.Instance(**instance_ref)

    drvr.spawn(self.context, instance,
               self.test_image_meta, [], None,
               block_device_info=block_device_info)
    self.assertFalse(self.cache_called_for_disk)

    # Booted from volume but with placeholder image
    instance_ref = self.test_instance
    instance_ref['image_ref'] = 'my_fake_image'
    instance_ref['root_device_name'] = '/dev/vda'
    instance_ref['uuid'] = uuidutils.generate_uuid()
    instance = objects.Instance(**instance_ref)

    drvr.spawn(self.context, instance,
               self.test_image_meta, [], None,
               block_device_info=block_device_info)
    self.assertFalse(self.cache_called_for_disk)

    # Booted from an image
    instance_ref['image_ref'] = 'my_fake_image'
    instance_ref['uuid'] = uuidutils.generate_uuid()
    instance = objects.Instance(**instance_ref)
    drvr.spawn(self.context, instance,
               self.test_image_meta, [], None)
    self.assertTrue(self.cache_called_for_disk)
    def test_start_lxc_from_volume(self):
        """Boot an LXC guest whose rootfs lives on an iSCSI volume.

        setup_container must receive the connected volume's block device
        (not an image file), and the device name it returns must be stored
        in the instance's system metadata as 'rootfs_device_name'.
        """
        self.flags(virt_type="lxc",
                   group='libvirt')

        def check_setup_container(image, container_dir=None):
            # The driver must hand the volume's device path to
            # setup_container as a block-device image model.
            self.assertIsInstance(image, imgmodel.LocalBlockImage)
            self.assertEqual(image.path, '/dev/path/to/dev')
            return '/dev/nbd1'

        bdm = {
            'guest_format': None,
            'boot_index': 0,
            'mount_device': '/dev/sda',
            'connection_info': {
                'driver_volume_type': 'iscsi',
                'serial': 'afc1',
                'data': {
                    'access_mode': 'rw',
                    'target_discovered': False,
                    'encrypted': False,
                    'qos_specs': None,
                    'target_iqn': 'iqn: volume-afc1',
                    'target_portal': 'ip: 3260',
                    'volume_id': 'afc1',
                    'target_lun': 1,
                    'auth_password': 'uj',
                    'auth_username': '47',
                    'auth_method': 'CHAP'
                }
            },
            'disk_bus': 'scsi',
            'device_type': 'disk',
            'delete_on_termination': False
        }

        def _connect_volume_side_effect(connection_info, disk_info):
            # Simulate the volume driver filling in the attached device.
            bdm['connection_info']['data']['device_path'] = '/dev/path/to/dev'

        def _get(key, opt=None):
            return bdm.get(key, opt)

        def getitem(key):
            return bdm[key]

        def setitem(key, val):
            bdm[key] = val

        # Wrap the plain dict in a MagicMock so the driver can treat it
        # like a BlockDeviceMapping object while we keep dict storage.
        bdm_mock = mock.MagicMock()
        bdm_mock.__getitem__.side_effect = getitem
        bdm_mock.__setitem__.side_effect = setitem
        bdm_mock.get = _get

        disk_mock = mock.MagicMock()
        disk_mock.source_path = '/dev/path/to/dev'

        block_device_info = {'block_device_mapping': [bdm_mock],
                             'root_device_name': '/dev/sda'}

        # Volume-backed instance created without image
        instance_ref = self.test_instance
        instance_ref['image_ref'] = ''
        instance_ref['root_device_name'] = '/dev/sda'
        instance_ref['ephemeral_gb'] = 0
        instance_ref['uuid'] = uuidutils.generate_uuid()
        inst_obj = objects.Instance(**instance_ref)
        image_meta = {}

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        with contextlib.nested(
            mock.patch.object(drvr, '_create_images_and_backing'),
            mock.patch.object(drvr, 'plug_vifs'),
            mock.patch.object(drvr.firewall_driver, 'setup_basic_filtering'),
            mock.patch.object(drvr.firewall_driver, 'prepare_instance_filter'),
            mock.patch.object(drvr.firewall_driver, 'apply_instance_filter'),
            mock.patch.object(drvr, '_create_domain'),
            mock.patch.object(drvr, '_connect_volume',
                              side_effect=_connect_volume_side_effect),
            mock.patch.object(drvr, '_get_volume_config',
                              return_value=disk_mock),
            mock.patch.object(drvr, 'get_info',
                              return_value=hardware.InstanceInfo(
                                  state=power_state.RUNNING)),
            mock.patch('nova.virt.disk.api.setup_container',
                       side_effect=check_setup_container),
            mock.patch('nova.virt.disk.api.teardown_container'),
            mock.patch.object(objects.Instance, 'save')):

            drvr.spawn(self.context, inst_obj, image_meta, [], None,
                       network_info=[],
                       block_device_info=block_device_info)
            self.assertEqual('/dev/nbd1',
                             inst_obj.system_metadata.get(
                                 'rootfs_device_name'))
def test_spawn_with_pci_devices(self):
def fake_none(*args, **kwargs):
return None
def fake_get_info(instance):
return hardware.InstanceInfo(state=power_state.RUNNING)
class FakeLibvirtPciDevice(object):
def dettach(self):
return None
def reset(self):
return None
def fake_node_device_lookup_by_name(address):
pattern = ("pci_%(hex)s{4}_%(hex)s{2}_%(hex)s{2}_%(oct)s{1}"
% dict(hex='[\da-f]', oct='[0-8]'))
pattern = re.compile(pattern)
if pattern.match(address) is None:
raise fakelibvirt.libvirtError()
return FakeLibvirtPciDevice()
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(drvr, '_get_guest_xml', fake_none)
self.stubs.Set(drvr, '_create_image', fake_none)
self.stubs.Set(drvr, '_create_domain_and_network', fake_none)
self.stubs.Set(drvr, 'get_info', fake_get_info)
drvr._conn.nodeDeviceLookupByName = \
fake_node_device_lookup_by_name
instance_ref = self.test_instance
instance_ref['image_ref'] = 'my_fake_image'
instance = objects.Instance(**instance_ref)
instance['pci_devices'] = objects.PciDeviceList(
objects=[objects.PciDevice(address='0000:00:00.0')])
drvr.spawn(self.context, instance,
self.test_image_meta, [], None)
    def test_chown_disk_config_for_instance(self):
        """disk.config is chown'd to the current uid when it exists."""
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        # mox record phase: calls below must replay in this exact order.
        self.mox.StubOutWithMock(fake_libvirt_utils, 'get_instance_path')
        self.mox.StubOutWithMock(os.path, 'exists')
        self.mox.StubOutWithMock(fake_libvirt_utils, 'chown')
        fake_libvirt_utils.get_instance_path(instance).AndReturn('/tmp/uuid')
        os.path.exists('/tmp/uuid/disk.config').AndReturn(True)
        fake_libvirt_utils.chown('/tmp/uuid/disk.config', os.getuid())

        self.mox.ReplayAll()
        drvr._chown_disk_config_for_instance(instance)
    def _test_create_image_plain(self, os_type='', filename='', mkfs=False):
        """Drive _create_image and compare which backing files get cached.

        :param os_type: os_type set on the instance; affects the ephemeral
                        disk's cache filename when a mkfs command is
                        registered for it
        :param filename: expected cache filename of the 20G ephemeral disk
        :param mkfs: when True, register a mkfs command for ``os_type``
        """
        gotFiles = []

        def fake_image(self, instance, name, image_type=''):
            class FakeImage(imagebackend.Image):
                def __init__(self, instance, name, is_block_dev=False):
                    self.path = os.path.join(instance['name'], name)
                    self.is_block_dev = is_block_dev

                def create_image(self, prepare_template, base,
                                 size, *args, **kwargs):
                    pass

                def cache(self, fetch_func, filename, size=None,
                          *args, **kwargs):
                    # Record every cache request so the caller can compare
                    # against its expected list.
                    gotFiles.append({'filename': filename,
                                     'size': size})

                def snapshot(self, name):
                    pass

            return FakeImage(instance, name)

        def fake_none(*args, **kwargs):
            return

        def fake_get_info(instance):
            return hardware.InstanceInfo(state=power_state.RUNNING)

        # Stop 'libvirt_driver._create_image' touching filesystem
        self.stubs.Set(nova.virt.libvirt.imagebackend.Backend, "image",
                       fake_image)

        instance_ref = self.test_instance
        instance_ref['image_ref'] = 1
        instance = objects.Instance(**instance_ref)
        instance['os_type'] = os_type

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.stubs.Set(drvr, '_get_guest_xml', fake_none)
        self.stubs.Set(drvr, '_create_domain_and_network', fake_none)
        self.stubs.Set(drvr, 'get_info', fake_get_info)
        if mkfs:
            self.stubs.Set(nova.virt.disk.api, '_MKFS_COMMAND',
                           {os_type: 'mkfs.ext4 --label %(fs_label)s %(target)s'})

        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance,
                                            image_meta)
        # NOTE(review): this passes the nova 'context' module, not
        # self.context; it appears unused by _create_image here, but
        # confirm before relying on it.
        drvr._create_image(context, instance, disk_info['mapping'])
        drvr._get_guest_xml(self.context, instance, None,
                            disk_info, image_meta)

        wantFiles = [
            {'filename': '356a192b7913b04c54574d18c28d46e6395428ab',
             'size': 10 * units.Gi},
            {'filename': filename,
             'size': 20 * units.Gi},
            ]
        self.assertEqual(gotFiles, wantFiles)
def test_create_image_plain_os_type_blank(self):
self._test_create_image_plain(os_type='',
filename=self._EPHEMERAL_20_DEFAULT,
mkfs=False)
def test_create_image_plain_os_type_none(self):
self._test_create_image_plain(os_type=None,
filename=self._EPHEMERAL_20_DEFAULT,
mkfs=False)
def test_create_image_plain_os_type_set_no_fs(self):
self._test_create_image_plain(os_type='test',
filename=self._EPHEMERAL_20_DEFAULT,
mkfs=False)
def test_create_image_plain_os_type_set_with_fs(self):
ephemeral_file_name = ('ephemeral_20_%s' % utils.get_hash_str(
'mkfs.ext4 --label %(fs_label)s %(target)s')[:7])
self._test_create_image_plain(os_type='test',
filename=ephemeral_file_name,
mkfs=True)
    def _create_image_helper(self, callback, suffix=''):
        """Run _create_image and capture cached and imported files.

        :param callback: hook that mutates the instance dict before the
                         Instance object is built
        :param suffix: suffix forwarded to _create_image (e.g. '.rescue')
        :returns: tuple of (list of cached-file dicts, list of imported
                  (local_filename, remote_filename) pairs)
        """
        gotFiles = []
        imported_files = []

        def fake_image(self, instance, name, image_type=''):
            class FakeImage(imagebackend.Image):
                def __init__(self, instance, name, is_block_dev=False):
                    self.path = os.path.join(instance['name'], name)
                    self.is_block_dev = is_block_dev

                def create_image(self, prepare_template, base,
                                 size, *args, **kwargs):
                    pass

                def cache(self, fetch_func, filename, size=None,
                          *args, **kwargs):
                    # Record every cache request for the caller.
                    gotFiles.append({'filename': filename,
                                     'size': size})

                def import_file(self, instance, local_filename,
                                remote_filename):
                    # Record config-drive style imports for the caller.
                    imported_files.append((local_filename, remote_filename))

                def snapshot(self, name):
                    pass

            return FakeImage(instance, name)

        def fake_none(*args, **kwargs):
            return

        def fake_get_info(instance):
            return hardware.InstanceInfo(state=power_state.RUNNING)

        # Stop 'libvirt_driver._create_image' touching filesystem
        self.stubs.Set(nova.virt.libvirt.imagebackend.Backend, "image",
                       fake_image)

        instance_ref = self.test_instance
        instance_ref['image_ref'] = 1
        # NOTE(mikal): use this callback to tweak the instance to match
        # what you're trying to test
        callback(instance_ref)
        instance = objects.Instance(**instance_ref)
        # Turn on some swap to exercise that codepath in _create_image
        instance.flavor.swap = 500

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.stubs.Set(drvr, '_get_guest_xml', fake_none)
        self.stubs.Set(drvr, '_create_domain_and_network', fake_none)
        self.stubs.Set(drvr, 'get_info', fake_get_info)
        self.stubs.Set(instance_metadata, 'InstanceMetadata', fake_none)
        self.stubs.Set(nova.virt.configdrive.ConfigDriveBuilder,
                       'make_drive', fake_none)

        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance,
                                            image_meta)
        # NOTE(review): passes the 'context' module rather than
        # self.context — see _test_create_image_plain.
        drvr._create_image(context, instance, disk_info['mapping'],
                           suffix=suffix)
        drvr._get_guest_xml(self.context, instance, None,
                            disk_info, image_meta)

        return gotFiles, imported_files
def test_create_image_with_swap(self):
def enable_swap(instance_ref):
# Turn on some swap to exercise that codepath in _create_image
instance_ref['system_metadata']['instance_type_swap'] = 500
gotFiles, _ = self._create_image_helper(enable_swap)
wantFiles = [
{'filename': '356a192b7913b04c54574d18c28d46e6395428ab',
'size': 10 * units.Gi},
{'filename': self._EPHEMERAL_20_DEFAULT,
'size': 20 * units.Gi},
{'filename': 'swap_500',
'size': 500 * units.Mi},
]
self.assertEqual(gotFiles, wantFiles)
def test_create_image_with_configdrive(self):
def enable_configdrive(instance_ref):
instance_ref['config_drive'] = 'true'
# Ensure that we create a config drive and then import it into the
# image backend store
_, imported_files = self._create_image_helper(enable_configdrive)
self.assertTrue(imported_files[0][0].endswith('/disk.config'))
self.assertEqual('disk.config', imported_files[0][1])
def test_create_image_with_configdrive_rescue(self):
def enable_configdrive(instance_ref):
instance_ref['config_drive'] = 'true'
# Ensure that we create a config drive and then import it into the
# image backend store
_, imported_files = self._create_image_helper(enable_configdrive,
suffix='.rescue')
self.assertTrue(imported_files[0][0].endswith('/disk.config.rescue'))
self.assertEqual('disk.config.rescue', imported_files[0][1])
@mock.patch.object(nova.virt.libvirt.imagebackend.Image, 'cache',
side_effect=exception.ImageNotFound(image_id='fake-id'))
def test_create_image_not_exist_no_fallback(self, mock_cache):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instance = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance,
image_meta)
self.assertRaises(exception.ImageNotFound,
drvr._create_image,
self.context, instance, disk_info['mapping'])
    @mock.patch.object(nova.virt.libvirt.imagebackend.Image, 'cache')
    def test_create_image_not_exist_fallback(self, mock_cache):
        """A missing image is copied from fallback_from_host.

        The first cache() call raises ImageNotFound; the second simulates
        the retry by invoking the fetch function (which should be the
        copy-from-host path) itself.
        """
        def side_effect(fetch_func, filename, size=None, *args, **kwargs):
            def second_call(fetch_func, filename, size=None, *args, **kwargs):
                # call copy_from_host ourselves because we mocked image.cache()
                fetch_func('fake-target', 'fake-max-size')
                # further calls have no side effect
                mock_cache.side_effect = None
            mock_cache.side_effect = second_call
            # raise an error only the first call
            raise exception.ImageNotFound(image_id='fake-id')

        mock_cache.side_effect = side_effect
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance,
                                            image_meta)

        with mock.patch.object(libvirt_driver.libvirt_utils,
                               'copy_image') as mock_copy:
            drvr._create_image(self.context, instance, disk_info['mapping'],
                               fallback_from_host='fake-source-host')
            mock_copy.assert_called_once_with(src='fake-target',
                                              dest='fake-target',
                                              host='fake-source-host',
                                              receive=True)
@mock.patch.object(utils, 'execute')
def test_create_ephemeral_specified_fs(self, mock_exec):
self.flags(default_ephemeral_format='ext3')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
drvr._create_ephemeral('/dev/something', 20, 'myVol', 'linux',
is_block_dev=True, max_size=20,
specified_fs='ext4')
mock_exec.assert_called_once_with('mkfs', '-t', 'ext4', '-F', '-L',
'myVol', '/dev/something',
run_as_root=True)
    def test_create_ephemeral_specified_fs_not_valid(self):
        """An unsupported guest_format on an ephemeral BDM is rejected."""
        CONF.set_override('default_ephemeral_format', 'ext4')
        ephemerals = [{'device_type': 'disk',
                       'disk_bus': 'virtio',
                       'device_name': '/dev/vdb',
                       'guest_format': 'dummy',
                       'size': 1}]
        block_device_info = {
            'ephemerals': ephemerals}
        instance_ref = self.test_instance
        instance_ref['image_ref'] = 1
        instance = objects.Instance(**instance_ref)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        image_meta = objects.ImageMeta.from_dict({'disk_format': 'raw'})
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance,
                                            image_meta)
        # Drop the default ephemeral so only the bad BDM one is processed.
        disk_info['mapping'].pop('disk.local')

        with contextlib.nested(
                mock.patch.object(utils, 'execute'),
                mock.patch.object(drvr, 'get_info'),
                mock.patch.object(drvr, '_create_domain_and_network'),
                mock.patch.object(imagebackend.Image, 'verify_base_size')):
            self.assertRaises(exception.InvalidBDMFormat, drvr._create_image,
                              context, instance, disk_info['mapping'],
                              block_device_info=block_device_info)
    def test_create_ephemeral_default(self):
        """With no configured format, ephemerals are formatted ext4."""
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.mox.StubOutWithMock(utils, 'execute')
        utils.execute('mkfs', '-t', 'ext4', '-F', '-L', 'myVol',
                      '/dev/something', run_as_root=True)
        self.mox.ReplayAll()

        drvr._create_ephemeral('/dev/something', 20, 'myVol', 'linux',
                               is_block_dev=True, max_size=20)
    def test_create_ephemeral_with_conf(self):
        """default_ephemeral_format drives the mkfs filesystem choice."""
        CONF.set_override('default_ephemeral_format', 'ext4')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.mox.StubOutWithMock(utils, 'execute')
        utils.execute('mkfs', '-t', 'ext4', '-F', '-L', 'myVol',
                      '/dev/something', run_as_root=True)
        self.mox.ReplayAll()

        drvr._create_ephemeral('/dev/something', 20, 'myVol', 'linux',
                               is_block_dev=True)
    def test_create_ephemeral_with_arbitrary(self):
        """A per-os_type _MKFS_COMMAND template is expanded and run."""
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.stubs.Set(nova.virt.disk.api, '_MKFS_COMMAND',
                       {'linux': 'mkfs.ext4 --label %(fs_label)s %(target)s'})
        self.mox.StubOutWithMock(utils, 'execute')
        utils.execute('mkfs.ext4', '--label', 'myVol', '/dev/something',
                      run_as_root=True)
        self.mox.ReplayAll()

        drvr._create_ephemeral('/dev/something', 20, 'myVol', 'linux',
                               is_block_dev=True)
    def test_create_ephemeral_with_ext3(self):
        """An ext3 _MKFS_COMMAND template is expanded and run verbatim."""
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.stubs.Set(nova.virt.disk.api, '_MKFS_COMMAND',
                       {'linux': 'mkfs.ext3 --label %(fs_label)s %(target)s'})
        self.mox.StubOutWithMock(utils, 'execute')
        utils.execute('mkfs.ext3', '--label', 'myVol', '/dev/something',
                      run_as_root=True)
        self.mox.ReplayAll()

        drvr._create_ephemeral('/dev/something', 20, 'myVol', 'linux',
                               is_block_dev=True)
    def test_create_swap_default(self):
        """_create_swap runs mkswap on the target (not as root)."""
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.mox.StubOutWithMock(utils, 'execute')
        utils.execute('mkswap', '/dev/something', run_as_root=False)
        self.mox.ReplayAll()

        drvr._create_swap('/dev/something', 1, max_size=20)
    def test_get_console_output_file(self):
        """File console output is truncated to MAX_CONSOLE_BYTES,
        keeping the tail of the log.
        """
        fake_libvirt_utils.files['console.log'] = '01234567890'

        with utils.tempdir() as tmpdir:
            self.flags(instances_path=tmpdir)
            instance_ref = self.test_instance
            instance_ref['image_ref'] = 123456
            instance = objects.Instance(**instance_ref)

            console_dir = (os.path.join(tmpdir, instance['name']))
            console_log = '%s/console.log' % (console_dir)
            fake_dom_xml = """
                <domain type='kvm'>
                    <devices>
                        <disk type='file'>
                            <source file='filename'/>
                        </disk>
                        <console type='file'>
                            <source path='%s'/>
                            <target port='0'/>
                        </console>
                    </devices>
                </domain>
            """ % console_log

            def fake_lookup(id):
                return FakeVirtDomain(fake_dom_xml)

            self.create_fake_libvirt_mock()
            libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup

            drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

            try:
                # Shrink the truncation limit for the test, restore after.
                prev_max = libvirt_driver.MAX_CONSOLE_BYTES
                libvirt_driver.MAX_CONSOLE_BYTES = 5
                with mock.patch('os.path.exists', return_value=True):
                    output = drvr.get_console_output(self.context, instance)
            finally:
                libvirt_driver.MAX_CONSOLE_BYTES = prev_max

            # Only the last 5 bytes of the 11-byte log survive.
            self.assertEqual('67890', output)
    def test_get_console_output_file_missing(self):
        """A missing console log yields empty output, not an error."""
        with utils.tempdir() as tmpdir:
            self.flags(instances_path=tmpdir)
            instance_ref = self.test_instance
            instance_ref['image_ref'] = 123456
            instance = objects.Instance(**instance_ref)

            console_log = os.path.join(tmpdir, instance['name'],
                                       'non-existent.log')
            fake_dom_xml = """
                <domain type='kvm'>
                    <devices>
                        <disk type='file'>
                            <source file='filename'/>
                        </disk>
                        <console type='file'>
                            <source path='%s'/>
                            <target port='0'/>
                        </console>
                    </devices>
                </domain>
            """ % console_log

            def fake_lookup(id):
                return FakeVirtDomain(fake_dom_xml)

            self.create_fake_libvirt_mock()
            libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup

            drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

            with mock.patch('os.path.exists', return_value=False):
                output = drvr.get_console_output(self.context, instance)

            self.assertEqual('', output)
    def test_get_console_output_pty(self):
        """PTY console output is flushed to a file and truncated to
        MAX_CONSOLE_BYTES.
        """
        fake_libvirt_utils.files['pty'] = '01234567890'

        with utils.tempdir() as tmpdir:
            self.flags(instances_path=tmpdir)
            instance_ref = self.test_instance
            instance_ref['image_ref'] = 123456
            instance = objects.Instance(**instance_ref)

            console_dir = (os.path.join(tmpdir, instance['name']))
            pty_file = '%s/fake_pty' % (console_dir)
            fake_dom_xml = """
                <domain type='kvm'>
                    <devices>
                        <disk type='file'>
                            <source file='filename'/>
                        </disk>
                        <console type='pty'>
                            <source path='%s'/>
                            <target port='0'/>
                        </console>
                    </devices>
                </domain>
            """ % pty_file

            def fake_lookup(id):
                return FakeVirtDomain(fake_dom_xml)

            def _fake_flush(self, fake_pty):
                return 'foo'

            def _fake_append_to_file(self, data, fpath):
                return 'pty'

            self.create_fake_libvirt_mock()
            libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup
            # NOTE(review): these class-level monkeypatches are never
            # restored; confirm no later test relies on the real
            # _flush_libvirt_console/_append_to_file implementations.
            libvirt_driver.LibvirtDriver._flush_libvirt_console = _fake_flush
            libvirt_driver.LibvirtDriver._append_to_file = _fake_append_to_file

            drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

            try:
                prev_max = libvirt_driver.MAX_CONSOLE_BYTES
                libvirt_driver.MAX_CONSOLE_BYTES = 5
                output = drvr.get_console_output(self.context, instance)
            finally:
                libvirt_driver.MAX_CONSOLE_BYTES = prev_max

            self.assertEqual('67890', output)
def test_get_host_ip_addr(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
ip = drvr.get_host_ip_addr()
self.assertEqual(ip, CONF.my_ip)
    @mock.patch.object(libvirt_driver.LOG, 'warn')
    @mock.patch('nova.compute.utils.get_machine_ips')
    def test_get_host_ip_addr_failure(self, mock_ips, mock_log):
        """A warning is logged when my_ip is on none of the host NICs."""
        mock_ips.return_value = ['8.8.8.8', '75.75.75.75']
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        drvr.get_host_ip_addr()
        # The exact format string is part of the assertion; keep in sync
        # with the driver's warning message.
        mock_log.assert_called_once_with(u'my_ip address (%(my_ip)s) was '
                                         u'not found on any of the '
                                         u'interfaces: %(ifaces)s',
                                         {'ifaces': '8.8.8.8, 75.75.75.75',
                                          'my_ip': mock.ANY})
    def test_conn_event_handler(self):
        """init_host raises HypervisorUnavailable when the initial libvirt
        connection fails, and the compute service gets disabled.
        """
        self.mox.UnsetStubs()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        service_mock = mock.MagicMock()
        service_mock.disabled.return_value = False

        with contextlib.nested(
            mock.patch.object(drvr._host, "_connect",
                              side_effect=fakelibvirt.make_libvirtError(
                                  fakelibvirt.libvirtError,
                                  "Failed to connect to host",
                                  error_code=
                                  fakelibvirt.VIR_ERR_INTERNAL_ERROR)),
            mock.patch.object(drvr._host, "_init_events",
                              return_value=None),
            mock.patch.object(objects.Service, "get_by_compute_host",
                              return_value=service_mock)):

            # verify that the driver registers for the close callback
            # and re-connects after receiving the callback
            self.assertRaises(exception.HypervisorUnavailable,
                              drvr.init_host,
                              "wibble")
            # NOTE(review): a Mock attribute is always truthy, so this
            # only proves 'disabled' exists; consider asserting the value
            # the driver assigned.
            self.assertTrue(service_mock.disabled)
    def test_command_with_broken_connection(self):
        """Driver calls raise HypervisorUnavailable once the libvirt
        connection is broken, and the service is marked disabled.
        """
        self.mox.UnsetStubs()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        service_mock = mock.MagicMock()
        service_mock.disabled.return_value = False

        with contextlib.nested(
            mock.patch.object(drvr._host, "_connect",
                              side_effect=fakelibvirt.make_libvirtError(
                                  fakelibvirt.libvirtError,
                                  "Failed to connect to host",
                                  error_code=
                                  fakelibvirt.VIR_ERR_INTERNAL_ERROR)),
            mock.patch.object(drvr._host, "_init_events",
                              return_value=None),
            mock.patch.object(host.Host, "has_min_version",
                              return_value=True),
            mock.patch.object(drvr, "_do_quality_warnings",
                              return_value=None),
            mock.patch.object(objects.Service, "get_by_compute_host",
                              return_value=service_mock)):

            drvr.init_host("wibble")
            self.assertRaises(exception.HypervisorUnavailable,
                              drvr.get_num_instances)
            # NOTE(review): Mock attributes are always truthy; see
            # test_conn_event_handler.
            self.assertTrue(service_mock.disabled)
    def test_service_resume_after_broken_connection(self):
        """A previously-disabled service is re-enabled once the libvirt
        connection succeeds again.
        """
        self.mox.UnsetStubs()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        service_mock = mock.MagicMock()
        # Start from the disabled state.
        service_mock.disabled.return_value = True

        with contextlib.nested(
            mock.patch.object(drvr._host, "_connect",
                              return_value=mock.MagicMock()),
            mock.patch.object(drvr._host, "_init_events",
                              return_value=None),
            mock.patch.object(host.Host, "has_min_version",
                              return_value=True),
            mock.patch.object(drvr, "_do_quality_warnings",
                              return_value=None),
            mock.patch.object(objects.Service, "get_by_compute_host",
                              return_value=service_mock)):

            drvr.init_host("wibble")
            drvr.get_num_instances()
            # The driver is expected to have set disabled=False and
            # cleared the disabled_reason.
            self.assertTrue(not service_mock.disabled and
                            service_mock.disabled_reason is None)
@mock.patch.object(objects.Instance, 'save')
def test_immediate_delete(self, mock_save):
def fake_get_domain(instance):
raise exception.InstanceNotFound(instance_id=instance.uuid)
def fake_delete_instance_files(instance):
pass
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(drvr._host, 'get_domain', fake_get_domain)
self.stubs.Set(drvr, 'delete_instance_files',
fake_delete_instance_files)
instance = objects.Instance(self.context, **self.test_instance)
drvr.destroy(self.context, instance, {})
mock_save.assert_called_once_with()
    @mock.patch.object(objects.Instance, 'get_by_uuid')
    @mock.patch.object(objects.Instance, 'obj_load_attr', autospec=True)
    @mock.patch.object(objects.Instance, 'save', autospec=True)
    @mock.patch.object(libvirt_driver.LibvirtDriver, '_destroy')
    @mock.patch.object(libvirt_driver.LibvirtDriver, 'delete_instance_files')
    @mock.patch.object(libvirt_driver.LibvirtDriver, '_disconnect_volume')
    @mock.patch.object(driver, 'block_device_info_get_mapping')
    @mock.patch.object(libvirt_driver.LibvirtDriver, '_undefine_domain')
    def _test_destroy_removes_disk(self, mock_undefine_domain, mock_mapping,
                                   mock_disconnect_volume,
                                   mock_delete_instance_files, mock_destroy,
                                   mock_inst_save, mock_inst_obj_load_attr,
                                   mock_get_by_uuid, volume_fail=False):
        """destroy() disconnects volumes and removes instance files.

        :param volume_fail: when True, simulate the volume disconnect
                            "failing" with VolumeNotFound
        """
        instance = objects.Instance(self.context, **self.test_instance)
        vol = {'block_device_mapping': [
                  {'connection_info': 'dummy', 'mount_device': '/dev/sdb'}]}

        mock_mapping.return_value = vol['block_device_mapping']
        mock_delete_instance_files.return_value = True
        mock_get_by_uuid.return_value = instance
        if volume_fail:
            # NOTE(review): return_value does not make the mock raise, so
            # the failure path is not actually exercised; side_effect is
            # likely what was intended — confirm before changing.
            mock_disconnect_volume.return_value = (
                exception.VolumeNotFound('vol'))

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        drvr.destroy(self.context, instance, [], vol)
def test_destroy_removes_disk(self):
self._test_destroy_removes_disk(volume_fail=False)
def test_destroy_removes_disk_volume_fails(self):
self._test_destroy_removes_disk(volume_fail=True)
@mock.patch.object(libvirt_driver.LibvirtDriver, 'unplug_vifs')
@mock.patch.object(libvirt_driver.LibvirtDriver, '_destroy')
@mock.patch.object(libvirt_driver.LibvirtDriver, '_undefine_domain')
def test_destroy_not_removes_disk(self, mock_undefine_domain, mock_destroy,
mock_unplug_vifs):
instance = fake_instance.fake_instance_obj(
None, name='instancename', id=1,
uuid='875a8070-d0b9-4949-8b31-104d125c9a64')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
drvr.destroy(self.context, instance, [], None, False)
    @mock.patch.object(libvirt_driver.LibvirtDriver, 'cleanup')
    @mock.patch.object(libvirt_driver.LibvirtDriver, '_teardown_container')
    @mock.patch.object(host.Host, 'get_domain')
    def test_destroy_lxc_calls_teardown_container(self, mock_get_domain,
                                                  mock_teardown_container,
                                                  mock_cleanup):
        """destroy() of an LXC guest tears down the container after the
        libvirt domain is destroyed.
        """
        self.flags(virt_type='lxc', group='libvirt')

        fake_domain = FakeVirtDomain()

        def destroy_side_effect(*args, **kwargs):
            # Flip the fake domain to SHUTDOWN so the driver sees it stop.
            fake_domain._info[0] = power_state.SHUTDOWN

        with mock.patch.object(fake_domain, 'destroy',
               side_effect=destroy_side_effect) as mock_domain_destroy:
            mock_get_domain.return_value = fake_domain
            instance = objects.Instance(**self.test_instance)

            drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
            network_info = []
            drvr.destroy(self.context, instance, network_info, None, False)

            mock_get_domain.assert_has_calls([mock.call(instance),
                                              mock.call(instance)])
            mock_domain_destroy.assert_called_once_with()
            mock_teardown_container.assert_called_once_with(instance)
            mock_cleanup.assert_called_once_with(self.context, instance,
                                                 network_info, None, False,
                                                 None)
    @mock.patch.object(libvirt_driver.LibvirtDriver, 'cleanup')
    @mock.patch.object(libvirt_driver.LibvirtDriver, '_teardown_container')
    @mock.patch.object(host.Host, 'get_domain')
    def test_destroy_lxc_calls_teardown_container_when_no_domain(self,
            mock_get_domain, mock_teardown_container, mock_cleanup):
        """The LXC container is torn down even when the libvirt domain is
        already gone (InstanceNotFound from get_domain).
        """
        self.flags(virt_type='lxc', group='libvirt')
        instance = objects.Instance(**self.test_instance)
        inf_exception = exception.InstanceNotFound(instance_id=instance.uuid)
        mock_get_domain.side_effect = inf_exception

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        network_info = []
        drvr.destroy(self.context, instance, network_info, None, False)

        mock_get_domain.assert_has_calls([mock.call(instance),
                                          mock.call(instance)])
        mock_teardown_container.assert_called_once_with(instance)
        mock_cleanup.assert_called_once_with(self.context, instance,
                                             network_info, None, False,
                                             None)
    def test_reboot_different_ids(self):
        """Soft reboot re-creates the domain when the domain id changes.

        A changed id across the shutdown means the guest restarted, so
        the driver calls _create_domain rather than hard rebooting.
        """
        class FakeLoopingCall(object):
            def start(self, *a, **k):
                return self

            def wait(self):
                return None

        self.flags(wait_soft_reboot_seconds=1, group='libvirt')
        info_tuple = ('fake', 'fake', 'fake', 'also_fake')
        self.reboot_create_called = False

        # Mock domain
        # mox record phase: expectations replay in this exact order.
        mock_domain = self.mox.CreateMock(fakelibvirt.virDomain)
        mock_domain.info().AndReturn(
            (libvirt_guest.VIR_DOMAIN_RUNNING,) + info_tuple)
        mock_domain.ID().AndReturn('some_fake_id')
        mock_domain.ID().AndReturn('some_fake_id')
        mock_domain.shutdown()
        mock_domain.info().AndReturn(
            (libvirt_guest.VIR_DOMAIN_CRASHED,) + info_tuple)
        mock_domain.ID().AndReturn('some_other_fake_id')
        mock_domain.ID().AndReturn('some_other_fake_id')

        self.mox.ReplayAll()

        def fake_get_domain(instance):
            return mock_domain

        def fake_create_domain(**kwargs):
            self.reboot_create_called = True

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        self.stubs.Set(drvr._host, 'get_domain', fake_get_domain)
        self.stubs.Set(drvr, '_create_domain', fake_create_domain)
        self.stubs.Set(loopingcall, 'FixedIntervalLoopingCall',
                       lambda *a, **k: FakeLoopingCall())
        self.stubs.Set(pci_manager, 'get_instance_pci_devs', lambda *a: [])
        drvr.reboot(None, instance, [], 'SOFT')
        self.assertTrue(self.reboot_create_called)
    @mock.patch.object(pci_manager, 'get_instance_pci_devs')
    @mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
    @mock.patch.object(greenthread, 'sleep')
    @mock.patch.object(libvirt_driver.LibvirtDriver, '_hard_reboot')
    @mock.patch.object(host.Host, 'get_domain')
    def test_reboot_same_ids(self, mock_get_domain, mock_hard_reboot,
                             mock_sleep, mock_loopingcall,
                             mock_get_instance_pci_devs):
        """Soft reboot falls back to hard reboot when the domain id stays
        the same after shutdown (the guest never restarted).
        """
        class FakeLoopingCall(object):
            def start(self, *a, **k):
                return self

            def wait(self):
                return None

        self.flags(wait_soft_reboot_seconds=1, group='libvirt')
        info_tuple = ('fake', 'fake', 'fake', 'also_fake')
        self.reboot_hard_reboot_called = False

        # Mock domain: RUNNING then CRASHED, but with an unchanged id.
        mock_domain = mock.Mock(fakelibvirt.virDomain)
        return_values = [(libvirt_guest.VIR_DOMAIN_RUNNING,) + info_tuple,
                         (libvirt_guest.VIR_DOMAIN_CRASHED,) + info_tuple]
        mock_domain.info.side_effect = return_values
        mock_domain.ID.return_value = 'some_fake_id'
        mock_domain.shutdown.side_effect = mock.Mock()

        def fake_hard_reboot(*args, **kwargs):
            self.reboot_hard_reboot_called = True

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        mock_get_domain.return_value = mock_domain
        mock_hard_reboot.side_effect = fake_hard_reboot
        mock_loopingcall.return_value = FakeLoopingCall()
        mock_get_instance_pci_devs.return_value = []
        drvr.reboot(None, instance, [], 'SOFT')
        self.assertTrue(self.reboot_hard_reboot_called)
    @mock.patch.object(libvirt_driver.LibvirtDriver, '_hard_reboot')
    @mock.patch.object(host.Host, 'get_domain')
    def test_soft_reboot_libvirt_exception(self, mock_get_domain,
                                           mock_hard_reboot):
        """A libvirtError during soft reboot falls back to hard reboot."""
        # Tests that a hard reboot is performed when a soft reboot results
        # in raising a libvirtError.
        info_tuple = ('fake', 'fake', 'fake', 'also_fake')

        # setup mocks
        mock_virDomain = mock.Mock(fakelibvirt.virDomain)
        mock_virDomain.info.return_value = (
            (libvirt_guest.VIR_DOMAIN_RUNNING,) + info_tuple)
        mock_virDomain.ID.return_value = 'some_fake_id'
        mock_virDomain.shutdown.side_effect = fakelibvirt.libvirtError('Err')

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        # NOTE: the local 'context' shadows the module-level import here.
        context = None
        instance = objects.Instance(**self.test_instance)
        network_info = []
        mock_get_domain.return_value = mock_virDomain

        drvr.reboot(context, instance, network_info, 'SOFT')
    @mock.patch.object(libvirt_driver.LibvirtDriver, '_hard_reboot')
    @mock.patch.object(host.Host, 'get_domain')
    def _test_resume_state_on_host_boot_with_state(self, state,
                                                   mock_get_domain,
                                                   mock_hard_reboot):
        """Assert that host boot hard-reboots the instance iff its power
        state is not one of RUNNING/SUSPENDED/NOSTATE/PAUSED.
        """
        mock_virDomain = mock.Mock(fakelibvirt.virDomain)
        mock_virDomain.info.return_value = ([state, None, None, None, None])

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        mock_get_domain.return_value = mock_virDomain
        instance = objects.Instance(**self.test_instance)
        network_info = _fake_network_info(self.stubs, 1)

        drvr.resume_state_on_host_boot(self.context, instance, network_info,
                                       block_device_info=None)

        ignored_states = (power_state.RUNNING,
                          power_state.SUSPENDED,
                          power_state.NOSTATE,
                          power_state.PAUSED)
        self.assertEqual(mock_hard_reboot.called, state not in ignored_states)
def test_resume_state_on_host_boot_with_running_state(self):
self._test_resume_state_on_host_boot_with_state(power_state.RUNNING)
def test_resume_state_on_host_boot_with_suspended_state(self):
self._test_resume_state_on_host_boot_with_state(power_state.SUSPENDED)
def test_resume_state_on_host_boot_with_paused_state(self):
self._test_resume_state_on_host_boot_with_state(power_state.PAUSED)
def test_resume_state_on_host_boot_with_nostate(self):
self._test_resume_state_on_host_boot_with_state(power_state.NOSTATE)
def test_resume_state_on_host_boot_with_shutdown_state(self):
self._test_resume_state_on_host_boot_with_state(power_state.RUNNING)
def test_resume_state_on_host_boot_with_crashed_state(self):
self._test_resume_state_on_host_boot_with_state(power_state.CRASHED)
@mock.patch.object(libvirt_driver.LibvirtDriver, '_hard_reboot')
@mock.patch.object(host.Host, 'get_domain')
def test_resume_state_on_host_boot_with_instance_not_found_on_driver(
self, mock_get_domain, mock_hard_reboot):
instance = objects.Instance(**self.test_instance)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
mock_get_domain.side_effect = exception.InstanceNotFound(
instance_id='fake')
drvr.resume_state_on_host_boot(self.context, instance, network_info=[],
block_device_info=None)
mock_hard_reboot.assert_called_once_with(self.context,
instance, [], None)
@mock.patch('nova.virt.libvirt.LibvirtDriver.get_info')
@mock.patch('nova.virt.libvirt.LibvirtDriver._create_domain_and_network')
@mock.patch('nova.virt.libvirt.LibvirtDriver._create_images_and_backing')
@mock.patch('nova.virt.libvirt.LibvirtDriver._get_guest_xml')
@mock.patch('nova.virt.libvirt.LibvirtDriver._get_instance_disk_info')
@mock.patch('nova.virt.libvirt.blockinfo.get_disk_info')
@mock.patch('nova.virt.libvirt.LibvirtDriver._destroy')
def test_hard_reboot(self, mock_destroy, mock_get_disk_info,
                     mock_get_instance_disk_info, mock_get_guest_xml,
                     mock_create_images_and_backing,
                     mock_create_domain_and_network, mock_get_info):
    """_hard_reboot wires the two distinct disk_info values to the
    right collaborators: the backing-file list from
    _get_instance_disk_info goes to _create_images_and_backing, while
    the blockinfo-style disk_info goes to _create_domain_and_network.
    """
    self.context.auth_token = True  # any non-None value will suffice
    instance = objects.Instance(**self.test_instance)
    instance_path = libvirt_utils.get_instance_path(instance)
    network_info = _fake_network_info(self.stubs, 1)
    block_device_info = None
    dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                "<devices>"
                "<disk type='file'><driver name='qemu' type='raw'/>"
                "<source file='/test/disk'/>"
                "<target dev='vda' bus='virtio'/></disk>"
                "<disk type='file'><driver name='qemu' type='qcow2'/>"
                "<source file='/test/disk.local'/>"
                "<target dev='vdb' bus='virtio'/></disk>"
                "</devices></domain>")
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    # First get_info call reports SHUTDOWN (pre-boot), second RUNNING
    # so the wait loop in _hard_reboot terminates.
    return_values = [hardware.InstanceInfo(state=power_state.SHUTDOWN),
                     hardware.InstanceInfo(state=power_state.RUNNING)]
    mock_get_info.side_effect = return_values
    backing_disk_info = [{"virt_disk_size": 2}]
    mock_get_disk_info.return_value = mock.sentinel.disk_info
    mock_get_guest_xml.return_value = dummyxml
    mock_get_instance_disk_info.return_value = backing_disk_info

    drvr._hard_reboot(self.context, instance, network_info,
                      block_device_info)

    # make sure that _create_images_and_backing is passed the disk_info
    # returned from _get_instance_disk_info and not the one that is in
    # scope from blockinfo.get_disk_info
    mock_create_images_and_backing.assert_called_once_with(self.context,
        instance, instance_path, backing_disk_info)

    # make sure that _create_domain_and_network is passed the disk_info
    # returned from blockinfo.get_disk_info and not the one that's
    # returned from _get_instance_disk_info
    mock_create_domain_and_network.assert_called_once_with(self.context,
        dummyxml, instance, network_info, mock.sentinel.disk_info,
        block_device_info=block_device_info,
        reboot=True, vifs_already_plugged=True)
@mock.patch('oslo_utils.fileutils.ensure_tree')
@mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall')
@mock.patch('nova.pci.manager.get_instance_pci_devs')
@mock.patch('nova.virt.libvirt.LibvirtDriver._prepare_pci_devices_for_use')
@mock.patch('nova.virt.libvirt.LibvirtDriver._create_domain_and_network')
@mock.patch('nova.virt.libvirt.LibvirtDriver._create_images_and_backing')
@mock.patch('nova.virt.libvirt.LibvirtDriver._get_instance_disk_info')
@mock.patch('nova.virt.libvirt.utils.write_to_file')
@mock.patch('nova.virt.libvirt.utils.get_instance_path')
@mock.patch('nova.virt.libvirt.LibvirtDriver._get_guest_config')
@mock.patch('nova.virt.libvirt.blockinfo.get_disk_info')
@mock.patch('nova.virt.libvirt.LibvirtDriver._destroy')
def test_hard_reboot_does_not_call_glance_show(self,
        mock_destroy, mock_get_disk_info, mock_get_guest_config,
        mock_get_instance_path, mock_write_to_file,
        mock_get_instance_disk_info, mock_create_images_and_backing,
        mock_create_domand_and_network, mock_prepare_pci_devices_for_use,
        mock_get_instance_pci_devs, mock_looping_call, mock_ensure_tree):
    """For a hard reboot, we shouldn't need an additional call to glance
    to get the image metadata.

    This is important for automatically spinning up instances on a
    host-reboot, since we won't have a user request context that'll allow
    the Glance request to go through. We have to rely on the cached image
    metadata, instead.

    https://bugs.launchpad.net/nova/+bug/1339386
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

    instance = objects.Instance(**self.test_instance)
    network_info = mock.MagicMock()
    block_device_info = mock.MagicMock()
    mock_get_disk_info.return_value = {}
    mock_get_guest_config.return_value = mock.MagicMock()
    mock_get_instance_path.return_value = '/foo'
    mock_looping_call.return_value = mock.MagicMock()
    # Replace the image API with a mock so any Glance access is visible.
    drvr._image_api = mock.MagicMock()

    drvr._hard_reboot(self.context, instance, network_info,
                      block_device_info)

    # The whole point of the test: no Glance "show" happened.
    self.assertFalse(drvr._image_api.get.called)
    mock_ensure_tree.assert_called_once_with('/foo')
@mock.patch.object(time, 'sleep')
@mock.patch.object(libvirt_driver.LibvirtDriver, '_create_domain')
@mock.patch.object(host.Host, 'get_domain')
def _test_clean_shutdown(self, mock_get_domain, mock_create_domain,
                         mock_sleep, seconds_to_shutdown,
                         timeout, retry_interval,
                         shutdown_attempts, succeeds):
    """Drive _clean_shutdown against a scripted domain.

    Builds per-second domain.info() return values: RUNNING for
    ``seconds_to_shutdown`` ticks (bounded by ``timeout``), then
    SHUTDOWN if the guest stops within the timeout.  shutdown() calls
    are counted via ``shutdown_count`` so the number of retries can be
    asserted against ``shutdown_attempts``.
    """
    info_tuple = ('fake', 'fake', 'fake', 'also_fake')
    shutdown_count = []

    # Mock domain
    mock_domain = mock.Mock(fakelibvirt.virDomain)
    # Initial poll always sees the guest running.
    return_infos = [(libvirt_guest.VIR_DOMAIN_RUNNING,) + info_tuple]
    return_shutdowns = [shutdown_count.append("shutdown")]
    retry_countdown = retry_interval
    for x in range(min(seconds_to_shutdown, timeout)):
        return_infos.append(
            (libvirt_guest.VIR_DOMAIN_RUNNING,) + info_tuple)
        if retry_countdown == 0:
            # A retry_interval boundary: _clean_shutdown re-issues
            # shutdown(), so record another counted side effect.
            return_shutdowns.append(shutdown_count.append("shutdown"))
            retry_countdown = retry_interval
        else:
            retry_countdown -= 1

    if seconds_to_shutdown < timeout:
        # Guest made it down in time: final poll reports SHUTDOWN.
        return_infos.append(
            (libvirt_guest.VIR_DOMAIN_SHUTDOWN,) + info_tuple)

    mock_domain.info.side_effect = return_infos
    mock_domain.shutdown.side_effect = return_shutdowns

    def fake_create_domain(**kwargs):
        self.reboot_create_called = True

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(**self.test_instance)
    mock_get_domain.return_value = mock_domain
    mock_create_domain.side_effect = fake_create_domain
    result = drvr._clean_shutdown(instance, timeout, retry_interval)

    self.assertEqual(succeeds, result)
    self.assertEqual(shutdown_attempts, len(shutdown_count))
def test_clean_shutdown_first_time(self):
    # Guest stops before the first retry interval elapses.
    self._test_clean_shutdown(succeeds=True,
                              shutdown_attempts=1,
                              seconds_to_shutdown=2,
                              timeout=5,
                              retry_interval=3)
def test_clean_shutdown_with_retry(self):
    # Guest needs longer than one retry interval, so shutdown is
    # re-issued once before succeeding.
    self._test_clean_shutdown(succeeds=True,
                              shutdown_attempts=2,
                              seconds_to_shutdown=4,
                              timeout=5,
                              retry_interval=3)
def test_clean_shutdown_failure(self):
    # Guest never stops within the timeout: clean shutdown fails.
    self._test_clean_shutdown(succeeds=False,
                              shutdown_attempts=2,
                              seconds_to_shutdown=6,
                              timeout=5,
                              retry_interval=3)
def test_clean_shutdown_no_wait(self):
    # Zero timeout: one shutdown attempt, no waiting, immediate failure.
    self._test_clean_shutdown(succeeds=False,
                              shutdown_attempts=1,
                              seconds_to_shutdown=6,
                              timeout=0,
                              retry_interval=3)
@mock.patch.object(FakeVirtDomain, 'attachDeviceFlags')
@mock.patch.object(FakeVirtDomain, 'ID', return_value=1)
@mock.patch.object(utils, 'get_image_from_system_metadata',
                   return_value=None)
def test_attach_sriov_ports(self,
                            mock_get_image_metadata,
                            mock_ID,
                            mock_attachDevice):
    """A direct-mode VIF in network_info gets attached to the domain."""
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(**self.test_instance)
    guest = libvirt_guest.Guest(FakeVirtDomain())

    network_info = _fake_network_info(self.stubs, 1)
    network_info[0]['vnic_type'] = network_model.VNIC_TYPE_DIRECT

    drvr._attach_sriov_ports(self.context, instance, guest, network_info)

    # Image metadata must come from the instance's cached system
    # metadata, and the SR-IOV device must actually be attached.
    mock_get_image_metadata.assert_called_once_with(
        instance.system_metadata)
    self.assertTrue(mock_attachDevice.called)
@mock.patch.object(FakeVirtDomain, 'attachDeviceFlags')
@mock.patch.object(FakeVirtDomain, 'ID', return_value=1)
@mock.patch.object(utils, 'get_image_from_system_metadata',
                   return_value=None)
def test_attach_sriov_ports_with_info_cache(self,
                                            mock_get_image_metadata,
                                            mock_ID,
                                            mock_attachDevice):
    """With network_info=None, the instance's info cache is used."""
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(**self.test_instance)
    guest = libvirt_guest.Guest(FakeVirtDomain())

    network_info = _fake_network_info(self.stubs, 1)
    network_info[0]['vnic_type'] = network_model.VNIC_TYPE_DIRECT
    # The VIF data is only available via the instance's info cache.
    instance.info_cache = objects.InstanceInfoCache(
        network_info=network_info)

    drvr._attach_sriov_ports(self.context, instance, guest, None)

    mock_get_image_metadata.assert_called_once_with(
        instance.system_metadata)
    self.assertTrue(mock_attachDevice.called)
@mock.patch.object(host.Host,
                   'has_min_version', return_value=True)
@mock.patch.object(FakeVirtDomain, 'detachDeviceFlags')
@mock.patch.object(utils, 'get_image_from_system_metadata',
                   return_value=None)
def test_detach_sriov_ports(self,
                            mock_get_image_metadata,
                            mock_detachDeviceFlags,
                            mock_has_min_version):
    """Direct-mode VIFs from the info cache get detached."""
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(**self.test_instance)

    network_info = _fake_network_info(self.stubs, 1)
    network_info[0]['vnic_type'] = network_model.VNIC_TYPE_DIRECT
    instance.info_cache = objects.InstanceInfoCache(
        network_info=network_info)

    domain = FakeVirtDomain()
    guest = libvirt_guest.Guest(domain)

    drvr._detach_sriov_ports(self.context, instance, guest)

    mock_get_image_metadata.assert_called_once_with(
        instance.system_metadata)
    self.assertTrue(mock_detachDeviceFlags.called)
def test_resume(self):
    """resume() recreates the domain from the saved XML, using disk
    info derived from cached image metadata, and re-attaches any PCI
    devices that belong to the instance.
    """
    dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                "<devices>"
                "<disk type='file'><driver name='qemu' type='raw'/>"
                "<source file='/test/disk'/>"
                "<target dev='vda' bus='virtio'/></disk>"
                "<disk type='file'><driver name='qemu' type='qcow2'/>"
                "<source file='/test/disk.local'/>"
                "<target dev='vdb' bus='virtio'/></disk>"
                "</devices></domain>")
    instance = objects.Instance(**self.test_instance)
    network_info = _fake_network_info(self.stubs, 1)
    block_device_info = None
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    guest = libvirt_guest.Guest('fake_dom')
    # NOTE: contextlib.nested is Python 2 only; the unpacked names
    # below must stay in the same order as the patches above.
    with contextlib.nested(
        mock.patch.object(drvr, '_get_existing_domain_xml',
                          return_value=dummyxml),
        mock.patch.object(drvr, '_create_domain_and_network',
                          return_value=guest),
        mock.patch.object(drvr, '_attach_pci_devices'),
        mock.patch.object(pci_manager, 'get_instance_pci_devs',
                          return_value='fake_pci_devs'),
        mock.patch.object(utils, 'get_image_from_system_metadata'),
        mock.patch.object(blockinfo, 'get_disk_info'),
    ) as (_get_existing_domain_xml, _create_domain_and_network,
          _attach_pci_devices, get_instance_pci_devs, get_image_metadata,
          get_disk_info):
        get_image_metadata.return_value = {'bar': 234}
        disk_info = {'foo': 123}
        get_disk_info.return_value = disk_info
        drvr.resume(self.context, instance, network_info,
                    block_device_info)
        _get_existing_domain_xml.assert_has_calls([mock.call(instance,
                                    network_info, block_device_info)])
        # The saved XML and the freshly computed disk_info must both be
        # fed to the domain creation path.
        _create_domain_and_network.assert_has_calls([mock.call(
                                    self.context, dummyxml,
                                    instance, network_info, disk_info,
                                    block_device_info=block_device_info,
                                    vifs_already_plugged=True)])
        _attach_pci_devices.assert_has_calls([mock.call(guest,
                                             'fake_pci_devs')])
@mock.patch.object(host.Host, 'get_domain')
@mock.patch.object(libvirt_driver.LibvirtDriver, 'get_info')
@mock.patch.object(libvirt_driver.LibvirtDriver, 'delete_instance_files')
@mock.patch.object(objects.Instance, 'save')
def test_destroy_undefines(self, mock_save, mock_delete_instance_files,
                           mock_get_info, mock_get_domain):
    """destroy() undefines the domain and persists the instance."""
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

    dom_mock = mock.MagicMock()
    dom_mock.undefineFlags.return_value = 1
    mock_get_domain.return_value = dom_mock
    mock_get_info.return_value = hardware.InstanceInfo(
        state=power_state.SHUTDOWN, id=-1)
    mock_delete_instance_files.return_value = None

    instance = objects.Instance(self.context, **self.test_instance)
    drvr.destroy(self.context, instance, [])

    mock_save.assert_called_once_with()
@mock.patch.object(rbd_utils, 'RBDDriver')
def test_cleanup_rbd(self, mock_driver):
    """_cleanup_rbd delegates volume cleanup to the RBD driver."""
    rbd = mock_driver.return_value
    rbd.cleanup_volumes = mock.Mock()
    fake_instance = {'uuid': '875a8070-d0b9-4949-8b31-104d125c9a64'}

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    drvr._cleanup_rbd(fake_instance)

    rbd.cleanup_volumes.assert_called_once_with(fake_instance)
@mock.patch.object(objects.Instance, 'save')
def test_destroy_undefines_no_undefine_flags(self, mock_save):
    """If undefineFlags() raises a libvirtError, destroy() falls back
    to plain undefine().
    """
    # NOTE(review): this local deliberately shadows the ``mock`` module
    # for the rest of this method (legacy mox record/replay style).
    mock = self.mox.CreateMock(fakelibvirt.virDomain)
    mock.ID()
    mock.destroy()
    # undefineFlags fails -> fallback path re-reads the ID and calls
    # the legacy undefine().
    mock.undefineFlags(1).AndRaise(fakelibvirt.libvirtError('Err'))
    mock.ID().AndReturn(123)
    mock.undefine()

    self.mox.ReplayAll()

    def fake_get_domain(instance):
        return mock

    def fake_get_info(instance_name):
        return hardware.InstanceInfo(state=power_state.SHUTDOWN, id=-1)

    def fake_delete_instance_files(instance):
        return None

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.stubs.Set(drvr._host, 'get_domain', fake_get_domain)
    self.stubs.Set(drvr, 'get_info', fake_get_info)
    self.stubs.Set(drvr, 'delete_instance_files',
                   fake_delete_instance_files)
    instance = objects.Instance(self.context, **self.test_instance)
    drvr.destroy(self.context, instance, [])
    mock_save.assert_called_once_with()
@mock.patch.object(objects.Instance, 'save')
def test_destroy_undefines_no_attribute_with_managed_save(self, mock_save):
    """On an old libvirt without undefineFlags (AttributeError), the
    managed-save image is removed before the plain undefine().
    """
    # NOTE(review): local ``mock`` shadows the module (mox style).
    mock = self.mox.CreateMock(fakelibvirt.virDomain)
    mock.ID()
    mock.destroy()
    # AttributeError simulates a libvirt binding lacking undefineFlags.
    mock.undefineFlags(1).AndRaise(AttributeError())
    mock.hasManagedSaveImage(0).AndReturn(True)
    mock.managedSaveRemove(0)
    mock.undefine()

    self.mox.ReplayAll()

    def fake_get_domain(instance):
        return mock

    def fake_get_info(instance_name):
        return hardware.InstanceInfo(state=power_state.SHUTDOWN, id=-1)

    def fake_delete_instance_files(instance):
        return None

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.stubs.Set(drvr._host, 'get_domain', fake_get_domain)
    self.stubs.Set(drvr, 'get_info', fake_get_info)
    self.stubs.Set(drvr, 'delete_instance_files',
                   fake_delete_instance_files)
    instance = objects.Instance(self.context, **self.test_instance)
    drvr.destroy(self.context, instance, [])
    mock_save.assert_called_once_with()
@mock.patch.object(objects.Instance, 'save')
def test_destroy_undefines_no_attribute_no_managed_save(self, mock_save):
    """When both undefineFlags and hasManagedSaveImage are missing
    (AttributeError), destroy() still falls through to undefine().
    """
    # NOTE(review): local ``mock`` shadows the module (mox style).
    mock = self.mox.CreateMock(fakelibvirt.virDomain)
    mock.ID()
    mock.destroy()
    mock.undefineFlags(1).AndRaise(AttributeError())
    mock.hasManagedSaveImage(0).AndRaise(AttributeError())
    mock.undefine()

    self.mox.ReplayAll()

    # Stubbed on the host.Host *class*, hence the extra ``self``.
    def fake_get_domain(self, instance):
        return mock

    def fake_get_info(instance_name):
        return hardware.InstanceInfo(state=power_state.SHUTDOWN)

    def fake_delete_instance_files(instance):
        return None

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.stubs.Set(host.Host, 'get_domain', fake_get_domain)
    self.stubs.Set(drvr, 'get_info', fake_get_info)
    self.stubs.Set(drvr, 'delete_instance_files',
                   fake_delete_instance_files)
    instance = objects.Instance(self.context, **self.test_instance)
    drvr.destroy(self.context, instance, [])
    mock_save.assert_called_once_with()
def test_destroy_timed_out(self):
    """A libvirt operation-timeout during destroy surfaces as
    InstancePowerOffFailure.
    """
    # NOTE(review): local ``mock`` shadows the module (mox style).
    mock = self.mox.CreateMock(fakelibvirt.virDomain)
    mock.ID()
    mock.destroy().AndRaise(fakelibvirt.libvirtError("timed out"))
    self.mox.ReplayAll()

    # Stubbed on the host.Host *class*, hence the extra ``self``.
    def fake_get_domain(self, instance):
        return mock

    def fake_get_error_code(self):
        return fakelibvirt.VIR_ERR_OPERATION_TIMEOUT

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.stubs.Set(host.Host, 'get_domain', fake_get_domain)
    self.stubs.Set(fakelibvirt.libvirtError, 'get_error_code',
                   fake_get_error_code)
    instance = objects.Instance(**self.test_instance)
    self.assertRaises(exception.InstancePowerOffFailure,
                      drvr.destroy, self.context, instance, [])
def test_private_destroy_not_found(self):
    """_destroy must not raise if the domain vanishes mid-operation."""
    ex = fakelibvirt.make_libvirtError(
            fakelibvirt.libvirtError,
            "No such domain",
            error_code=fakelibvirt.VIR_ERR_NO_DOMAIN)
    # NOTE(review): local ``mock`` shadows the module (mox style).
    mock = self.mox.CreateMock(fakelibvirt.virDomain)
    mock.ID()
    # Both destroy() and the follow-up info() report NO_DOMAIN.
    mock.destroy().AndRaise(ex)
    mock.info().AndRaise(ex)
    mock.UUIDString()
    self.mox.ReplayAll()

    def fake_get_domain(instance):
        return mock

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.stubs.Set(drvr._host, 'get_domain', fake_get_domain)
    instance = objects.Instance(**self.test_instance)
    # NOTE(vish): verifies destroy doesn't raise if the instance disappears
    drvr._destroy(instance)
def test_private_destroy_lxc_processes_refused_to_die(self):
    """For LXC, 'processes refused to die' is tolerated when the guest
    is already reported as shut down.
    """
    self.flags(virt_type='lxc', group='libvirt')
    ex = fakelibvirt.make_libvirtError(
            fakelibvirt.libvirtError, "",
            error_message="internal error: Some processes refused to die",
            error_code=fakelibvirt.VIR_ERR_INTERNAL_ERROR)

    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    with mock.patch.object(conn._host, 'get_domain') as mock_get_domain, \
         mock.patch.object(conn, 'get_info') as mock_get_info:
        # destroy() blows up with the "refused to die" error ...
        failing_domain = mock.MagicMock()
        failing_domain.ID.return_value = 1
        failing_domain.destroy.side_effect = ex
        mock_get_domain.return_value = failing_domain

        # ... but get_info says the guest is already SHUTDOWN, so
        # _destroy must swallow the error.
        info = mock.MagicMock()
        info.id = 1
        info.state = power_state.SHUTDOWN
        mock_get_info.return_value = info

        instance = objects.Instance(**self.test_instance)
        conn._destroy(instance)
def test_private_destroy_processes_refused_to_die_still_raises(self):
    """Outside the LXC special case, 'processes refused to die' is
    re-raised by _destroy.
    """
    ex = fakelibvirt.make_libvirtError(
            fakelibvirt.libvirtError, "",
            error_message="internal error: Some processes refused to die",
            error_code=fakelibvirt.VIR_ERR_INTERNAL_ERROR)

    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    with mock.patch.object(conn._host, 'get_domain') as mock_get_domain:
        failing_domain = mock.MagicMock()
        failing_domain.ID.return_value = 1
        failing_domain.destroy.side_effect = ex
        mock_get_domain.return_value = failing_domain

        instance = objects.Instance(**self.test_instance)
        self.assertRaises(fakelibvirt.libvirtError, conn._destroy,
                          instance)
def test_private_destroy_ebusy_timeout(self):
    # Tests that _destroy will retry 3 times to destroy the guest when an
    # EBUSY is raised, but eventually times out and raises the libvirtError
    ex = fakelibvirt.make_libvirtError(
            fakelibvirt.libvirtError,
            ("Failed to terminate process 26425 with SIGKILL: "
             "Device or resource busy"),
            error_code=fakelibvirt.VIR_ERR_SYSTEM_ERROR,
            int1=errno.EBUSY)

    # poweroff never stops failing with EBUSY.
    guest = mock.Mock(libvirt_guest.Guest, id=1)
    guest.poweroff = mock.Mock(side_effect=ex)

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(**self.test_instance)
    with mock.patch.object(drvr._host, 'get_guest', return_value=guest):
        self.assertRaises(fakelibvirt.libvirtError, drvr._destroy,
                          instance)

    self.assertEqual(3, guest.poweroff.call_count)
def test_private_destroy_ebusy_multiple_attempt_ok(self):
    # Tests that the _destroy attempt loop is broken when EBUSY is no
    # longer raised.
    ex = fakelibvirt.make_libvirtError(
            fakelibvirt.libvirtError,
            ("Failed to terminate process 26425 with SIGKILL: "
             "Device or resource busy"),
            error_code=fakelibvirt.VIR_ERR_SYSTEM_ERROR,
            int1=errno.EBUSY)

    # First poweroff fails with EBUSY, the retry succeeds.
    guest = mock.Mock(libvirt_guest.Guest, id=1)
    guest.poweroff = mock.Mock(side_effect=[ex, None])
    inst_info = hardware.InstanceInfo(power_state.SHUTDOWN, id=1)

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(**self.test_instance)
    with mock.patch.object(drvr._host, 'get_guest', return_value=guest):
        with mock.patch.object(drvr, 'get_info', return_value=inst_info):
            drvr._destroy(instance)

    self.assertEqual(2, guest.poweroff.call_count)
def test_undefine_domain_with_not_found_instance(self):
    """_undefine_domain tolerates an already-deleted domain."""
    # Stubbed on the host.Host *class*, hence the extra ``self``.
    def fake_get_domain(self, instance):
        raise exception.InstanceNotFound(instance_id=instance.uuid)

    self.stubs.Set(host.Host, 'get_domain', fake_get_domain)
    self.mox.StubOutWithMock(fakelibvirt.libvirtError, "get_error_code")

    self.mox.ReplayAll()
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(**self.test_instance)

    # NOTE(wenjianhn): verifies undefine doesn't raise if the
    # instance disappears
    drvr._undefine_domain(instance)
@mock.patch.object(host.Host, "list_instance_domains")
def test_disk_over_committed_size_total(self, mock_list):
# Ensure destroy calls managedSaveRemove for saved instance.
class DiagFakeDomain(object):
def __init__(self, name):
self._name = name
def ID(self):
return 1
def name(self):
return self._name
def UUIDString(self):
return "19479fee-07a5-49bb-9138-d3738280d63c"
def XMLDesc(self, flags):
return "<domain/>"
mock_list.return_value = [
DiagFakeDomain("instance0000001"),
DiagFakeDomain("instance0000002")]
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
fake_disks = {'instance0000001':
[{'type': 'qcow2', 'path': '/somepath/disk1',
'virt_disk_size': '10737418240',
'backing_file': '/somepath/disk1',
'disk_size': '83886080',
'over_committed_disk_size': '10653532160'}],
'instance0000002':
[{'type': 'raw', 'path': '/somepath/disk2',
'virt_disk_size': '0',
'backing_file': '/somepath/disk2',
'disk_size': '10737418240',
'over_committed_disk_size': '0'}]}
def get_info(instance_name, xml, **kwargs):
return fake_disks.get(instance_name)
with mock.patch.object(drvr,
"_get_instance_disk_info") as mock_info:
mock_info.side_effect = get_info
result = drvr._get_disk_over_committed_size_total()
self.assertEqual(result, 10653532160)
mock_list.assert_called_with()
self.assertTrue(mock_info.called)
@mock.patch.object(host.Host, "list_instance_domains")
def test_disk_over_committed_size_total_eperm(self, mock_list):
# Ensure destroy calls managedSaveRemove for saved instance.
class DiagFakeDomain(object):
def __init__(self, name):
self._name = name
def ID(self):
return 1
def name(self):
return self._name
def UUIDString(self):
return "19479fee-07a5-49bb-9138-d3738280d63c"
def XMLDesc(self, flags):
return "<domain/>"
mock_list.return_value = [
DiagFakeDomain("instance0000001"),
DiagFakeDomain("instance0000002")]
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
fake_disks = {'instance0000001':
[{'type': 'qcow2', 'path': '/somepath/disk1',
'virt_disk_size': '10737418240',
'backing_file': '/somepath/disk1',
'disk_size': '83886080',
'over_committed_disk_size': '10653532160'}],
'instance0000002':
[{'type': 'raw', 'path': '/somepath/disk2',
'virt_disk_size': '0',
'backing_file': '/somepath/disk2',
'disk_size': '10737418240',
'over_committed_disk_size': '21474836480'}]}
def side_effect(name, dom):
if name == 'instance0000001':
raise OSError(errno.EACCES, 'Permission denied')
if name == 'instance0000002':
return fake_disks.get(name)
get_disk_info = mock.Mock()
get_disk_info.side_effect = side_effect
drvr._get_instance_disk_info = get_disk_info
result = drvr._get_disk_over_committed_size_total()
self.assertEqual(21474836480, result)
mock_list.assert_called_with()
@mock.patch.object(host.Host, "list_instance_domains",
return_value=[mock.MagicMock(name='foo')])
@mock.patch.object(libvirt_driver.LibvirtDriver, "_get_instance_disk_info",
side_effect=exception.VolumeBDMPathNotFound(path='bar'))
def test_disk_over_committed_size_total_bdm_not_found(self,
mock_get_disk_info,
mock_list_domains):
# Tests that we handle VolumeBDMPathNotFound gracefully.
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertEqual(0, drvr._get_disk_over_committed_size_total())
def test_cpu_info(self):
    """_get_cpu_info reflects the host CPU model, vendor, arch,
    features and topology reported by libvirt capabilities.
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

    # Build a fake capabilities document: AMD Opteron_G4 host with two
    # CPU features and two kvm guest archs (x86_64 and i686).
    def get_host_capabilities_stub(self):
        cpu = vconfig.LibvirtConfigCPU()
        cpu.model = "Opteron_G4"
        cpu.vendor = "AMD"
        cpu.arch = arch.X86_64

        cpu.cores = 2
        cpu.threads = 1
        cpu.sockets = 4

        cpu.add_feature(vconfig.LibvirtConfigCPUFeature("extapic"))
        cpu.add_feature(vconfig.LibvirtConfigCPUFeature("3dnow"))

        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = cpu

        guest = vconfig.LibvirtConfigGuest()
        guest.ostype = vm_mode.HVM
        guest.arch = arch.X86_64
        guest.domtype = ["kvm"]
        caps.guests.append(guest)

        guest = vconfig.LibvirtConfigGuest()
        guest.ostype = vm_mode.HVM
        guest.arch = arch.I686
        guest.domtype = ["kvm"]
        caps.guests.append(guest)

        return caps

    self.stubs.Set(host.Host, "get_capabilities",
                   get_host_capabilities_stub)

    want = {"vendor": "AMD",
            "features": set(["extapic", "3dnow"]),
            "model": "Opteron_G4",
            "arch": arch.X86_64,
            "topology": {"cores": 2, "threads": 1, "sockets": 4}}
    got = drvr._get_cpu_info()
    self.assertEqual(want, got)
def test_get_pcidev_info(self):
    """_get_pcidev_info parses node-device XML into the PCI dict the
    resource tracker expects (PF vs VF typing, phys_function, NUMA).
    """
    def fake_nodeDeviceLookupByName(self, name):
        return FakeNodeDevice(_fake_NodeDevXml[name])

    self.mox.StubOutWithMock(host.Host, 'device_lookup_by_name')
    host.Host.device_lookup_by_name = fake_nodeDeviceLookupByName
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

    # A physical function with no NUMA information.
    actualvf = drvr._get_pcidev_info("pci_0000_04_00_3")
    expect_vf = {
        "dev_id": "pci_0000_04_00_3",
        "address": "0000:04:00.3",
        "product_id": '1521',
        "numa_node": None,
        "vendor_id": '8086',
        "label": 'label_8086_1521',
        "dev_type": fields.PciDeviceType.SRIOV_PF,
        }

    self.assertEqual(expect_vf, actualvf)

    # A virtual function: carries the parent PF's address.
    actualvf = drvr._get_pcidev_info("pci_0000_04_10_7")
    expect_vf = {
        "dev_id": "pci_0000_04_10_7",
        "address": "0000:04:10.7",
        "product_id": '1520',
        "numa_node": None,
        "vendor_id": '8086',
        "label": 'label_8086_1520',
        "dev_type": fields.PciDeviceType.SRIOV_VF,
        "phys_function": '0000:04:00.3',
        }
    self.assertEqual(expect_vf, actualvf)

    # A virtual function that also reports its NUMA node.
    actualvf = drvr._get_pcidev_info("pci_0000_04_11_7")
    expect_vf = {
        "dev_id": "pci_0000_04_11_7",
        "address": "0000:04:11.7",
        "product_id": '1520',
        "vendor_id": '8086',
        "numa_node": 0,
        "label": 'label_8086_1520',
        "dev_type": fields.PciDeviceType.SRIOV_VF,
        "phys_function": '0000:04:00.3',
        }

    self.assertEqual(expect_vf, actualvf)
def test_list_devices_not_supported(self):
    """VIR_ERR_NO_SUPPORT from listDevices yields '[]'; any other
    libvirt error propagates.
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

    # Handle just the NO_SUPPORT error
    not_supported_exc = fakelibvirt.make_libvirtError(
            fakelibvirt.libvirtError,
            'this function is not supported by the connection driver:'
            ' virNodeNumOfDevices',
            error_code=fakelibvirt.VIR_ERR_NO_SUPPORT)

    with mock.patch.object(drvr._conn, 'listDevices',
                           side_effect=not_supported_exc):
        self.assertEqual('[]', drvr._get_pci_passthrough_devices())

    # We cache not supported status to avoid emitting too many logging
    # messages. Clear this value to test the other exception case.
    del drvr._list_devices_supported

    # Other errors should not be caught
    other_exc = fakelibvirt.make_libvirtError(
        fakelibvirt.libvirtError,
        'other exc',
        error_code=fakelibvirt.VIR_ERR_NO_DOMAIN)

    with mock.patch.object(drvr._conn, 'listDevices',
                           side_effect=other_exc):
        self.assertRaises(fakelibvirt.libvirtError,
                          drvr._get_pci_passthrough_devices)
def test_get_pci_passthrough_devices(self):
    """_get_pci_passthrough_devices returns a JSON list describing one
    PF and two VFs built from the fake node-device XML.
    """
    def fakelistDevices(caps, fakeargs=0):
        return ['pci_0000_04_00_3', 'pci_0000_04_10_7',
                'pci_0000_04_11_7']

    self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
    libvirt_driver.LibvirtDriver._conn.listDevices = fakelistDevices

    def fake_nodeDeviceLookupByName(self, name):
        return FakeNodeDevice(_fake_NodeDevXml[name])

    self.mox.StubOutWithMock(host.Host, 'device_lookup_by_name')
    host.Host.device_lookup_by_name = fake_nodeDeviceLookupByName
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    actjson = drvr._get_pci_passthrough_devices()

    expectvfs = [
        {
            "dev_id": "pci_0000_04_00_3",
            "address": "0000:04:00.3",
            "product_id": '1521',
            "vendor_id": '8086',
            "dev_type": fields.PciDeviceType.SRIOV_PF,
            "phys_function": None,
            "numa_node": None},
        {
            "dev_id": "pci_0000_04_10_7",
            "domain": 0,
            "address": "0000:04:10.7",
            "product_id": '1520',
            "vendor_id": '8086',
            "numa_node": None,
            "dev_type": fields.PciDeviceType.SRIOV_VF,
            "phys_function": [('0x0000', '0x04', '0x00', '0x3')]},
        {
            "dev_id": "pci_0000_04_11_7",
            "domain": 0,
            "address": "0000:04:11.7",
            "product_id": '1520',
            "vendor_id": '8086',
            "numa_node": 0,
            "dev_type": fields.PciDeviceType.SRIOV_VF,
            "phys_function": [('0x0000', '0x04', '0x00', '0x3')],
        }
    ]

    actualvfs = jsonutils.loads(actjson)
    # phys_function / virt_functions / label have representation
    # differences between XML parse and expectation, so skip them here.
    for dev in range(len(actualvfs)):
        for key in actualvfs[dev].keys():
            if key not in ['phys_function', 'virt_functions', 'label']:
                self.assertEqual(expectvfs[dev][key], actualvfs[dev][key])
def _fake_caps_numa_topology(self,
                             cells_per_host=4,
                             sockets_per_cell=1,
                             cores_per_socket=1,
                             threads_per_core=2,
                             kb_mem=1048576):
    """Build a fake libvirt-capabilities NUMA topology.

    Every cell advertises two mempage pools: 4K pages (1024 * cell id
    pages total) and 2048K pages (cell id pages total).
    """
    cell_mempages = []
    for cellid in range(cells_per_host):
        small_pages = vconfig.LibvirtConfigCapsNUMAPages()
        small_pages.size = 4
        small_pages.total = 1024 * cellid

        large_pages = vconfig.LibvirtConfigCapsNUMAPages()
        large_pages.size = 2048
        large_pages.total = cellid

        cell_mempages.append([small_pages, large_pages])

    return fakelibvirt.HostInfo._gen_numa_topology(
        cells_per_host,
        sockets_per_cell,
        cores_per_socket,
        threads_per_core,
        kb_mem=kb_mem,
        numa_mempages_list=cell_mempages)
def _test_get_host_numa_topology(self, mempages):
    """Shared body for host NUMA topology tests.

    Builds fake capabilities, constrains usable CPUs via the vcpu pin
    set and the online-CPU set, and verifies the resulting per-cell
    cpus/mem layout; mempage assertions are gated on ``mempages``.
    """
    caps = vconfig.LibvirtConfigCaps()
    caps.host = vconfig.LibvirtConfigCapsHost()
    caps.host.cpu = vconfig.LibvirtConfigCPU()
    caps.host.cpu.arch = arch.X86_64
    caps.host.topology = self._fake_caps_numa_topology()

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    # Usable cpus per cell = pin set (0,1,3,4,5) intersected with the
    # online set (0,1,2,3,6): cells 2 and 3 end up with no cpus.
    expected_topo_dict = {'cells': [
                            {'cpus': '0,1', 'cpu_usage': 0,
                              'mem': {'total': 256, 'used': 0},
                              'id': 0},
                             {'cpus': '3', 'cpu_usage': 0,
                              'mem': {'total': 256, 'used': 0},
                              'id': 1},
                             {'cpus': '', 'cpu_usage': 0,
                              'mem': {'total': 256, 'used': 0},
                              'id': 2},
                             {'cpus': '', 'cpu_usage': 0,
                              'mem': {'total': 256, 'used': 0},
                              'id': 3}]}
    with contextlib.nested(
            mock.patch.object(host.Host, "get_capabilities",
                              return_value=caps),
            mock.patch.object(
                hardware, 'get_vcpu_pin_set',
                return_value=set([0, 1, 3, 4, 5])),
            mock.patch.object(host.Host, 'get_online_cpus',
                              return_value=set([0, 1, 2, 3, 6])),
            ):
        got_topo = drvr._get_host_numa_topology()
        got_topo_dict = got_topo._to_dict()
        self.assertThat(
                expected_topo_dict, matchers.DictMatches(got_topo_dict))

        if mempages:
            # cells 0
            self.assertEqual(4, got_topo.cells[0].mempages[0].size_kb)
            self.assertEqual(0, got_topo.cells[0].mempages[0].total)
            self.assertEqual(2048, got_topo.cells[0].mempages[1].size_kb)
            self.assertEqual(0, got_topo.cells[0].mempages[1].total)
            # cells 1
            self.assertEqual(4, got_topo.cells[1].mempages[0].size_kb)
            self.assertEqual(1024, got_topo.cells[1].mempages[0].total)
            self.assertEqual(2048, got_topo.cells[1].mempages[1].size_kb)
            self.assertEqual(1, got_topo.cells[1].mempages[1].total)
        else:
            # No hugepage support: mempage lists must be empty.
            self.assertEqual([], got_topo.cells[0].mempages)
            self.assertEqual([], got_topo.cells[1].mempages)

        self.assertEqual(expected_topo_dict, got_topo_dict)
        # Nothing is pinned yet on a fresh topology.
        self.assertEqual(set([]), got_topo.cells[0].pinned_cpus)
        self.assertEqual(set([]), got_topo.cells[1].pinned_cpus)
        self.assertEqual(set([]), got_topo.cells[2].pinned_cpus)
        self.assertEqual(set([]), got_topo.cells[3].pinned_cpus)
        # threads_per_core=2 makes cpus 0 and 1 siblings in cell 0.
        self.assertEqual([set([0, 1])], got_topo.cells[0].siblings)
        self.assertEqual([], got_topo.cells[1].siblings)
@mock.patch.object(host.Host, 'has_min_version', return_value=True)
def test_get_host_numa_topology(self, mock_version):
    # New-enough libvirt: mempages information must be populated.
    self._test_get_host_numa_topology(mempages=True)
@mock.patch.object(fakelibvirt.Connection, 'getType')
@mock.patch.object(fakelibvirt.Connection, 'getVersion')
@mock.patch.object(fakelibvirt.Connection, 'getLibVersion')
def test_get_host_numa_topology_no_mempages(self, mock_lib_version,
                                            mock_version, mock_type):
    """A libvirt just below the hugepage-support minimum still reports
    a NUMA topology, but with empty mempage lists.
    """
    self.flags(virt_type='kvm', group='libvirt')
    # One below the minimum libvirt version for hugepages.
    mock_lib_version.return_value = utils.convert_version_to_int(
            libvirt_driver.MIN_LIBVIRT_HUGEPAGE_VERSION) - 1
    mock_version.return_value = utils.convert_version_to_int(
            libvirt_driver.MIN_QEMU_NUMA_HUGEPAGE_VERSION)
    mock_type.return_value = host.HV_DRIVER_QEMU
    self._test_get_host_numa_topology(mempages=False)
def test_get_host_numa_topology_empty(self):
    """If capabilities report no host topology, the driver returns
    None for the NUMA topology.
    """
    caps = vconfig.LibvirtConfigCaps()
    caps.host = vconfig.LibvirtConfigCapsHost()
    caps.host.cpu = vconfig.LibvirtConfigCPU()
    caps.host.cpu.arch = arch.X86_64
    caps.host.topology = None

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    with contextlib.nested(
        mock.patch.object(host.Host, 'has_min_version', return_value=True),
        mock.patch.object(host.Host, "get_capabilities",
                          return_value=caps)
    ) as (has_min_version, get_caps):
        self.assertIsNone(drvr._get_host_numa_topology())
    # Capabilities are consulted twice on this path.
    self.assertEqual(2, get_caps.call_count)
@mock.patch.object(fakelibvirt.Connection, 'getType')
@mock.patch.object(fakelibvirt.Connection, 'getVersion')
@mock.patch.object(fakelibvirt.Connection, 'getLibVersion')
def test_get_host_numa_topology_old_version(self, mock_lib_version,
                                            mock_version, mock_type):
    """A libvirt below the NUMA minimum version yields no topology."""
    self.flags(virt_type='kvm', group='libvirt')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

    # One below the minimum libvirt version for NUMA support.
    mock_lib_version.return_value = utils.convert_version_to_int(
            libvirt_driver.MIN_LIBVIRT_NUMA_VERSION) - 1
    mock_version.return_value = utils.convert_version_to_int(
            libvirt_driver.MIN_QEMU_NUMA_HUGEPAGE_VERSION)
    mock_type.return_value = host.HV_DRIVER_QEMU
    self.assertIsNone(drvr._get_host_numa_topology())
@mock.patch.object(fakelibvirt.Connection, 'getType')
@mock.patch.object(fakelibvirt.Connection, 'getVersion')
@mock.patch.object(fakelibvirt.Connection, 'getLibVersion')
def test_get_host_numa_topology_xen(self, mock_lib_version,
                                    mock_version, mock_type):
    """The Xen hypervisor driver never reports a NUMA topology, even
    with sufficient libvirt/qemu versions.
    """
    self.flags(virt_type='xen', group='libvirt')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

    mock_lib_version.return_value = utils.convert_version_to_int(
            libvirt_driver.MIN_LIBVIRT_NUMA_VERSION)
    mock_version.return_value = utils.convert_version_to_int(
            libvirt_driver.MIN_QEMU_NUMA_HUGEPAGE_VERSION)
    mock_type.return_value = host.HV_DRIVER_XEN
    self.assertIsNone(drvr._get_host_numa_topology())
    def test_diagnostic_vcpus_exception(self):
        # When the domain's vcpus() call raises libvirtError (e.g. guest
        # starting up or shutting down), get_diagnostics must still return
        # disk/interface/memory stats; the cpuN_time entries are omitted
        # and the v3 diagnostics object gets an empty cpu_details list.
        xml = """
                <domain type='kvm'>
                    <devices>
                        <disk type='file'>
                            <source file='filename'/>
                            <target dev='vda' bus='virtio'/>
                        </disk>
                        <disk type='block'>
                            <source dev='/path/to/dev/1'/>
                            <target dev='vdb' bus='virtio'/>
                        </disk>
                        <interface type='network'>
                            <mac address='52:54:00:a4:38:38'/>
                            <source network='default'/>
                            <target dev='vnet0'/>
                        </interface>
                    </devices>
                </domain>
            """
        class DiagFakeDomain(FakeVirtDomain):
            def __init__(self):
                super(DiagFakeDomain, self).__init__(fake_xml=xml)
            def vcpus(self):
                # Simulates libvirt failing to report vcpu info.
                raise fakelibvirt.libvirtError('vcpus missing')
            def blockStats(self, path):
                return (169, 688640, 0, 0, -1)
            def interfaceStats(self, path):
                return (4408, 82, 0, 0, 0, 0, 0, 0)
            def memoryStats(self):
                return {'actual': 220160, 'rss': 200164}
            def maxMemory(self):
                return 280160
        def fake_get_domain(self, instance):
            return DiagFakeDomain()
        self.stubs.Set(host.Host, "get_domain", fake_get_domain)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        actual = drvr.get_diagnostics(instance)
        # Note no cpuN_time keys: vcpus() failed above.
        expect = {'vda_read': 688640,
                  'vda_read_req': 169,
                  'vda_write': 0,
                  'vda_write_req': 0,
                  'vda_errors': -1,
                  'vdb_read': 688640,
                  'vdb_read_req': 169,
                  'vdb_write': 0,
                  'vdb_write_req': 0,
                  'vdb_errors': -1,
                  'memory': 280160,
                  'memory-actual': 220160,
                  'memory-rss': 200164,
                  'vnet0_rx': 4408,
                  'vnet0_rx_drop': 0,
                  'vnet0_rx_errors': 0,
                  'vnet0_rx_packets': 82,
                  'vnet0_tx': 0,
                  'vnet0_tx_drop': 0,
                  'vnet0_tx_errors': 0,
                  'vnet0_tx_packets': 0,
                  }
        self.assertEqual(actual, expect)
        # uptime = diags_time - launched_at = 10 seconds.
        lt = datetime.datetime(2012, 11, 22, 12, 00, 00)
        diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10)
        timeutils.set_time_override(diags_time)
        instance.launched_at = lt
        actual = drvr.get_instance_diagnostics(instance)
        expected = {'config_drive': False,
                    'cpu_details': [],
                    'disk_details': [{'errors_count': 0,
                                      'id': '',
                                      'read_bytes': 688640,
                                      'read_requests': 169,
                                      'write_bytes': 0,
                                      'write_requests': 0},
                                     {'errors_count': 0,
                                      'id': '',
                                      'read_bytes': 688640,
                                      'read_requests': 169,
                                      'write_bytes': 0,
                                      'write_requests': 0}],
                    'driver': 'libvirt',
                    'hypervisor_os': 'linux',
                    'memory_details': {'maximum': 2048, 'used': 1234},
                    'nic_details': [{'mac_address': '52:54:00:a4:38:38',
                                     'rx_drop': 0,
                                     'rx_errors': 0,
                                     'rx_octets': 4408,
                                     'rx_packets': 82,
                                     'tx_drop': 0,
                                     'tx_errors': 0,
                                     'tx_octets': 0,
                                     'tx_packets': 0}],
                    'state': 'running',
                    'uptime': 10,
                    'version': '1.0'}
        self.assertEqual(expected, actual.serialize())
    def test_diagnostic_blockstats_exception(self):
        # blockStats() raising libvirtError: the vda/vdb entries are
        # dropped from get_diagnostics and disk_details is empty in the
        # v3 diagnostics object; cpu, memory and interface stats survive.
        xml = """
                <domain type='kvm'>
                    <devices>
                        <disk type='file'>
                            <source file='filename'/>
                            <target dev='vda' bus='virtio'/>
                        </disk>
                        <disk type='block'>
                            <source dev='/path/to/dev/1'/>
                            <target dev='vdb' bus='virtio'/>
                        </disk>
                        <interface type='network'>
                            <mac address='52:54:00:a4:38:38'/>
                            <source network='default'/>
                            <target dev='vnet0'/>
                        </interface>
                    </devices>
                </domain>
            """
        class DiagFakeDomain(FakeVirtDomain):
            def __init__(self):
                super(DiagFakeDomain, self).__init__(fake_xml=xml)
            def vcpus(self):
                return ([(0, 1, 15340000000, 0),
                         (1, 1, 1640000000, 0),
                         (2, 1, 3040000000, 0),
                         (3, 1, 1420000000, 0)],
                        [(True, False),
                         (True, False),
                         (True, False),
                         (True, False)])
            def blockStats(self, path):
                # Simulates libvirt failing to report disk stats.
                raise fakelibvirt.libvirtError('blockStats missing')
            def interfaceStats(self, path):
                return (4408, 82, 0, 0, 0, 0, 0, 0)
            def memoryStats(self):
                return {'actual': 220160, 'rss': 200164}
            def maxMemory(self):
                return 280160
        def fake_get_domain(self, instance):
            return DiagFakeDomain()
        self.stubs.Set(host.Host, "get_domain", fake_get_domain)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        actual = drvr.get_diagnostics(instance)
        # Note no vda_*/vdb_* keys: blockStats() failed above.
        expect = {'cpu0_time': 15340000000,
                  'cpu1_time': 1640000000,
                  'cpu2_time': 3040000000,
                  'cpu3_time': 1420000000,
                  'memory': 280160,
                  'memory-actual': 220160,
                  'memory-rss': 200164,
                  'vnet0_rx': 4408,
                  'vnet0_rx_drop': 0,
                  'vnet0_rx_errors': 0,
                  'vnet0_rx_packets': 82,
                  'vnet0_tx': 0,
                  'vnet0_tx_drop': 0,
                  'vnet0_tx_errors': 0,
                  'vnet0_tx_packets': 0,
                  }
        self.assertEqual(actual, expect)
        # uptime = diags_time - launched_at = 10 seconds.
        lt = datetime.datetime(2012, 11, 22, 12, 00, 00)
        diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10)
        timeutils.set_time_override(diags_time)
        instance.launched_at = lt
        actual = drvr.get_instance_diagnostics(instance)
        expected = {'config_drive': False,
                    'cpu_details': [{'time': 15340000000},
                                    {'time': 1640000000},
                                    {'time': 3040000000},
                                    {'time': 1420000000}],
                    'disk_details': [],
                    'driver': 'libvirt',
                    'hypervisor_os': 'linux',
                    'memory_details': {'maximum': 2048, 'used': 1234},
                    'nic_details': [{'mac_address': '52:54:00:a4:38:38',
                                     'rx_drop': 0,
                                     'rx_errors': 0,
                                     'rx_octets': 4408,
                                     'rx_packets': 82,
                                     'tx_drop': 0,
                                     'tx_errors': 0,
                                     'tx_octets': 0,
                                     'tx_packets': 0}],
                    'state': 'running',
                    'uptime': 10,
                    'version': '1.0'}
        self.assertEqual(expected, actual.serialize())
    def test_diagnostic_interfacestats_exception(self):
        # interfaceStats() raising libvirtError: vnet0_* entries are
        # dropped from get_diagnostics and nic_details is empty in the v3
        # diagnostics object; cpu, disk and memory stats survive.
        xml = """
                <domain type='kvm'>
                    <devices>
                        <disk type='file'>
                            <source file='filename'/>
                            <target dev='vda' bus='virtio'/>
                        </disk>
                        <disk type='block'>
                            <source dev='/path/to/dev/1'/>
                            <target dev='vdb' bus='virtio'/>
                        </disk>
                        <interface type='network'>
                            <mac address='52:54:00:a4:38:38'/>
                            <source network='default'/>
                            <target dev='vnet0'/>
                        </interface>
                    </devices>
                </domain>
            """
        class DiagFakeDomain(FakeVirtDomain):
            def __init__(self):
                super(DiagFakeDomain, self).__init__(fake_xml=xml)
            def vcpus(self):
                return ([(0, 1, 15340000000, 0),
                         (1, 1, 1640000000, 0),
                         (2, 1, 3040000000, 0),
                         (3, 1, 1420000000, 0)],
                        [(True, False),
                         (True, False),
                         (True, False),
                         (True, False)])
            def blockStats(self, path):
                return (169, 688640, 0, 0, -1)
            def interfaceStats(self, path):
                # Simulates libvirt failing to report interface stats.
                raise fakelibvirt.libvirtError('interfaceStat missing')
            def memoryStats(self):
                return {'actual': 220160, 'rss': 200164}
            def maxMemory(self):
                return 280160
        def fake_get_domain(self, instance):
            return DiagFakeDomain()
        self.stubs.Set(host.Host, "get_domain", fake_get_domain)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        actual = drvr.get_diagnostics(instance)
        # Note no vnet0_* keys: interfaceStats() failed above.
        expect = {'cpu0_time': 15340000000,
                  'cpu1_time': 1640000000,
                  'cpu2_time': 3040000000,
                  'cpu3_time': 1420000000,
                  'vda_read': 688640,
                  'vda_read_req': 169,
                  'vda_write': 0,
                  'vda_write_req': 0,
                  'vda_errors': -1,
                  'vdb_read': 688640,
                  'vdb_read_req': 169,
                  'vdb_write': 0,
                  'vdb_write_req': 0,
                  'vdb_errors': -1,
                  'memory': 280160,
                  'memory-actual': 220160,
                  'memory-rss': 200164,
                  }
        self.assertEqual(actual, expect)
        # uptime = diags_time - launched_at = 10 seconds.
        lt = datetime.datetime(2012, 11, 22, 12, 00, 00)
        diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10)
        timeutils.set_time_override(diags_time)
        instance.launched_at = lt
        actual = drvr.get_instance_diagnostics(instance)
        expected = {'config_drive': False,
                    'cpu_details': [{'time': 15340000000},
                                    {'time': 1640000000},
                                    {'time': 3040000000},
                                    {'time': 1420000000}],
                    'disk_details': [{'errors_count': 0,
                                      'id': '',
                                      'read_bytes': 688640,
                                      'read_requests': 169,
                                      'write_bytes': 0,
                                      'write_requests': 0},
                                     {'errors_count': 0,
                                      'id': '',
                                      'read_bytes': 688640,
                                      'read_requests': 169,
                                      'write_bytes': 0,
                                      'write_requests': 0}],
                    'driver': 'libvirt',
                    'hypervisor_os': 'linux',
                    'memory_details': {'maximum': 2048, 'used': 1234},
                    'nic_details': [],
                    'state': 'running',
                    'uptime': 10,
                    'version': '1.0'}
        self.assertEqual(expected, actual.serialize())
    def test_diagnostic_memorystats_exception(self):
        # memoryStats() raising libvirtError: the 'memory-actual' and
        # 'memory-rss' entries are dropped (only maxMemory-based 'memory'
        # remains); everything else is still reported.
        xml = """
                <domain type='kvm'>
                    <devices>
                        <disk type='file'>
                            <source file='filename'/>
                            <target dev='vda' bus='virtio'/>
                        </disk>
                        <disk type='block'>
                            <source dev='/path/to/dev/1'/>
                            <target dev='vdb' bus='virtio'/>
                        </disk>
                        <interface type='network'>
                            <mac address='52:54:00:a4:38:38'/>
                            <source network='default'/>
                            <target dev='vnet0'/>
                        </interface>
                    </devices>
                </domain>
            """
        class DiagFakeDomain(FakeVirtDomain):
            def __init__(self):
                super(DiagFakeDomain, self).__init__(fake_xml=xml)
            def vcpus(self):
                return ([(0, 1, 15340000000, 0),
                         (1, 1, 1640000000, 0),
                         (2, 1, 3040000000, 0),
                         (3, 1, 1420000000, 0)],
                        [(True, False),
                         (True, False),
                         (True, False),
                         (True, False)])
            def blockStats(self, path):
                return (169, 688640, 0, 0, -1)
            def interfaceStats(self, path):
                return (4408, 82, 0, 0, 0, 0, 0, 0)
            def memoryStats(self):
                # Simulates libvirt failing to report memory stats.
                raise fakelibvirt.libvirtError('memoryStats missing')
            def maxMemory(self):
                return 280160
        def fake_get_domain(self, instance):
            return DiagFakeDomain()
        self.stubs.Set(host.Host, "get_domain", fake_get_domain)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        actual = drvr.get_diagnostics(instance)
        # Note no memory-actual/memory-rss keys: memoryStats() failed.
        expect = {'cpu0_time': 15340000000,
                  'cpu1_time': 1640000000,
                  'cpu2_time': 3040000000,
                  'cpu3_time': 1420000000,
                  'vda_read': 688640,
                  'vda_read_req': 169,
                  'vda_write': 0,
                  'vda_write_req': 0,
                  'vda_errors': -1,
                  'vdb_read': 688640,
                  'vdb_read_req': 169,
                  'vdb_write': 0,
                  'vdb_write_req': 0,
                  'vdb_errors': -1,
                  'memory': 280160,
                  'vnet0_rx': 4408,
                  'vnet0_rx_drop': 0,
                  'vnet0_rx_errors': 0,
                  'vnet0_rx_packets': 82,
                  'vnet0_tx': 0,
                  'vnet0_tx_drop': 0,
                  'vnet0_tx_errors': 0,
                  'vnet0_tx_packets': 0,
                  }
        self.assertEqual(actual, expect)
        # uptime = diags_time - launched_at = 10 seconds.
        lt = datetime.datetime(2012, 11, 22, 12, 00, 00)
        diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10)
        timeutils.set_time_override(diags_time)
        instance.launched_at = lt
        actual = drvr.get_instance_diagnostics(instance)
        expected = {'config_drive': False,
                    'cpu_details': [{'time': 15340000000},
                                    {'time': 1640000000},
                                    {'time': 3040000000},
                                    {'time': 1420000000}],
                    'disk_details': [{'errors_count': 0,
                                      'id': '',
                                      'read_bytes': 688640,
                                      'read_requests': 169,
                                      'write_bytes': 0,
                                      'write_requests': 0},
                                     {'errors_count': 0,
                                      'id': '',
                                      'read_bytes': 688640,
                                      'read_requests': 169,
                                      'write_bytes': 0,
                                      'write_requests': 0}],
                    'driver': 'libvirt',
                    'hypervisor_os': 'linux',
                    'memory_details': {'maximum': 2048, 'used': 1234},
                    'nic_details': [{'mac_address': '52:54:00:a4:38:38',
                                     'rx_drop': 0,
                                     'rx_errors': 0,
                                     'rx_octets': 4408,
                                     'rx_packets': 82,
                                     'tx_drop': 0,
                                     'tx_errors': 0,
                                     'tx_octets': 0,
                                     'tx_packets': 0}],
                    'state': 'running',
                    'uptime': 10,
                    'version': '1.0'}
        self.assertEqual(expected, actual.serialize())
    def test_diagnostic_full(self):
        # Happy path: every libvirt stats call succeeds, so both the
        # legacy get_diagnostics dict and the v3 diagnostics object
        # contain cpu, disk, memory and interface data.
        xml = """
                <domain type='kvm'>
                    <devices>
                        <disk type='file'>
                            <source file='filename'/>
                            <target dev='vda' bus='virtio'/>
                        </disk>
                        <disk type='block'>
                            <source dev='/path/to/dev/1'/>
                            <target dev='vdb' bus='virtio'/>
                        </disk>
                        <interface type='network'>
                            <mac address='52:54:00:a4:38:38'/>
                            <source network='default'/>
                            <target dev='vnet0'/>
                        </interface>
                    </devices>
                </domain>
            """
        class DiagFakeDomain(FakeVirtDomain):
            def __init__(self):
                super(DiagFakeDomain, self).__init__(fake_xml=xml)
            def vcpus(self):
                return ([(0, 1, 15340000000, 0),
                         (1, 1, 1640000000, 0),
                         (2, 1, 3040000000, 0),
                         (3, 1, 1420000000, 0)],
                        [(True, False),
                         (True, False),
                         (True, False),
                         (True, False)])
            def blockStats(self, path):
                return (169, 688640, 0, 0, -1)
            def interfaceStats(self, path):
                return (4408, 82, 0, 0, 0, 0, 0, 0)
            def memoryStats(self):
                return {'actual': 220160, 'rss': 200164}
            def maxMemory(self):
                return 280160
        def fake_get_domain(self, instance):
            return DiagFakeDomain()
        self.stubs.Set(host.Host, "get_domain", fake_get_domain)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        actual = drvr.get_diagnostics(instance)
        expect = {'cpu0_time': 15340000000,
                  'cpu1_time': 1640000000,
                  'cpu2_time': 3040000000,
                  'cpu3_time': 1420000000,
                  'vda_read': 688640,
                  'vda_read_req': 169,
                  'vda_write': 0,
                  'vda_write_req': 0,
                  'vda_errors': -1,
                  'vdb_read': 688640,
                  'vdb_read_req': 169,
                  'vdb_write': 0,
                  'vdb_write_req': 0,
                  'vdb_errors': -1,
                  'memory': 280160,
                  'memory-actual': 220160,
                  'memory-rss': 200164,
                  'vnet0_rx': 4408,
                  'vnet0_rx_drop': 0,
                  'vnet0_rx_errors': 0,
                  'vnet0_rx_packets': 82,
                  'vnet0_tx': 0,
                  'vnet0_tx_drop': 0,
                  'vnet0_tx_errors': 0,
                  'vnet0_tx_packets': 0,
                  }
        self.assertEqual(actual, expect)
        # uptime = diags_time - launched_at = 10 seconds.
        lt = datetime.datetime(2012, 11, 22, 12, 00, 00)
        diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10)
        timeutils.set_time_override(diags_time)
        instance.launched_at = lt
        actual = drvr.get_instance_diagnostics(instance)
        expected = {'config_drive': False,
                    'cpu_details': [{'time': 15340000000},
                                    {'time': 1640000000},
                                    {'time': 3040000000},
                                    {'time': 1420000000}],
                    'disk_details': [{'errors_count': 0,
                                      'id': '',
                                      'read_bytes': 688640,
                                      'read_requests': 169,
                                      'write_bytes': 0,
                                      'write_requests': 0},
                                     {'errors_count': 0,
                                      'id': '',
                                      'read_bytes': 688640,
                                      'read_requests': 169,
                                      'write_bytes': 0,
                                      'write_requests': 0}],
                    'driver': 'libvirt',
                    'hypervisor_os': 'linux',
                    'memory_details': {'maximum': 2048, 'used': 1234},
                    'nic_details': [{'mac_address': '52:54:00:a4:38:38',
                                     'rx_drop': 0,
                                     'rx_errors': 0,
                                     'rx_octets': 4408,
                                     'rx_packets': 82,
                                     'tx_drop': 0,
                                     'tx_errors': 0,
                                     'tx_octets': 0,
                                     'tx_packets': 0}],
                    'state': 'running',
                    'uptime': 10,
                    'version': '1.0'}
        self.assertEqual(expected, actual.serialize())
    @mock.patch.object(timeutils, 'utcnow')
    @mock.patch.object(host.Host, 'get_domain')
    def test_diagnostic_full_with_multiple_interfaces(self, mock_get_domain,
                                                      mock_utcnow):
        # Like test_diagnostic_full, but the domain XML declares a second
        # interface (bridge 'br0'): both NICs must appear in the legacy
        # dict (vnet0_*/br0_* keys) and in nic_details.
        xml = """
                <domain type='kvm'>
                    <devices>
                        <disk type='file'>
                            <source file='filename'/>
                            <target dev='vda' bus='virtio'/>
                        </disk>
                        <disk type='block'>
                            <source dev='/path/to/dev/1'/>
                            <target dev='vdb' bus='virtio'/>
                        </disk>
                        <interface type='network'>
                            <mac address='52:54:00:a4:38:38'/>
                            <source network='default'/>
                            <target dev='vnet0'/>
                        </interface>
                        <interface type="bridge">
                            <mac address="53:55:00:a5:39:39"/>
                            <model type="virtio"/>
                            <target dev="br0"/>
                        </interface>
                    </devices>
                </domain>
            """
        class DiagFakeDomain(FakeVirtDomain):
            def __init__(self):
                super(DiagFakeDomain, self).__init__(fake_xml=xml)
            def vcpus(self):
                return ([(0, 1, 15340000000, 0),
                         (1, 1, 1640000000, 0),
                         (2, 1, 3040000000, 0),
                         (3, 1, 1420000000, 0)],
                        [(True, False),
                         (True, False),
                         (True, False),
                         (True, False)])
            def blockStats(self, path):
                return (169, 688640, 0, 0, -1)
            def interfaceStats(self, path):
                return (4408, 82, 0, 0, 0, 0, 0, 0)
            def memoryStats(self):
                return {'actual': 220160, 'rss': 200164}
            def maxMemory(self):
                return 280160
        # side_effect target: called with get_domain's (instance,) args,
        # so the parameter named 'self' actually receives the instance.
        def fake_get_domain(self):
            return DiagFakeDomain()
        mock_get_domain.side_effect = fake_get_domain
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        actual = drvr.get_diagnostics(instance)
        expect = {'cpu0_time': 15340000000,
                  'cpu1_time': 1640000000,
                  'cpu2_time': 3040000000,
                  'cpu3_time': 1420000000,
                  'vda_read': 688640,
                  'vda_read_req': 169,
                  'vda_write': 0,
                  'vda_write_req': 0,
                  'vda_errors': -1,
                  'vdb_read': 688640,
                  'vdb_read_req': 169,
                  'vdb_write': 0,
                  'vdb_write_req': 0,
                  'vdb_errors': -1,
                  'memory': 280160,
                  'memory-actual': 220160,
                  'memory-rss': 200164,
                  'vnet0_rx': 4408,
                  'vnet0_rx_drop': 0,
                  'vnet0_rx_errors': 0,
                  'vnet0_rx_packets': 82,
                  'vnet0_tx': 0,
                  'vnet0_tx_drop': 0,
                  'vnet0_tx_errors': 0,
                  'vnet0_tx_packets': 0,
                  'br0_rx': 4408,
                  'br0_rx_drop': 0,
                  'br0_rx_errors': 0,
                  'br0_rx_packets': 82,
                  'br0_tx': 0,
                  'br0_tx_drop': 0,
                  'br0_tx_errors': 0,
                  'br0_tx_packets': 0,
                  }
        self.assertEqual(actual, expect)
        # uptime = diags_time - launched_at = 10 seconds (float here,
        # via the mocked utcnow path).
        lt = datetime.datetime(2012, 11, 22, 12, 00, 00)
        diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10)
        mock_utcnow.return_value = diags_time
        instance.launched_at = lt
        actual = drvr.get_instance_diagnostics(instance)
        expected = {'config_drive': False,
                    'cpu_details': [{'time': 15340000000},
                                    {'time': 1640000000},
                                    {'time': 3040000000},
                                    {'time': 1420000000}],
                    'disk_details': [{'errors_count': 0,
                                      'id': '',
                                      'read_bytes': 688640,
                                      'read_requests': 169,
                                      'write_bytes': 0,
                                      'write_requests': 0},
                                     {'errors_count': 0,
                                      'id': '',
                                      'read_bytes': 688640,
                                      'read_requests': 169,
                                      'write_bytes': 0,
                                      'write_requests': 0}],
                    'driver': 'libvirt',
                    'hypervisor_os': 'linux',
                    'memory_details': {'maximum': 2048, 'used': 1234},
                    'nic_details': [{'mac_address': '52:54:00:a4:38:38',
                                     'rx_drop': 0,
                                     'rx_errors': 0,
                                     'rx_octets': 4408,
                                     'rx_packets': 82,
                                     'tx_drop': 0,
                                     'tx_errors': 0,
                                     'tx_octets': 0,
                                     'tx_packets': 0},
                                    {'mac_address': '53:55:00:a5:39:39',
                                     'rx_drop': 0,
                                     'rx_errors': 0,
                                     'rx_octets': 4408,
                                     'rx_packets': 82,
                                     'tx_drop': 0,
                                     'tx_errors': 0,
                                     'tx_octets': 0,
                                     'tx_packets': 0}],
                    'state': 'running',
                    'uptime': 10.,
                    'version': '1.0'}
        self.assertEqual(expected, actual.serialize())
@mock.patch.object(host.Host, "list_instance_domains")
def test_failing_vcpu_count(self, mock_list):
"""Domain can fail to return the vcpu description in case it's
just starting up or shutting down. Make sure None is handled
gracefully.
"""
class DiagFakeDomain(object):
def __init__(self, vcpus):
self._vcpus = vcpus
def vcpus(self):
if self._vcpus is None:
raise fakelibvirt.libvirtError("fake-error")
else:
return ([[1, 2, 3, 4]] * self._vcpus, [True] * self._vcpus)
def ID(self):
return 1
def name(self):
return "instance000001"
def UUIDString(self):
return "19479fee-07a5-49bb-9138-d3738280d63c"
mock_list.return_value = [
DiagFakeDomain(None), DiagFakeDomain(5)]
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertEqual(5, drvr._get_vcpu_used())
mock_list.assert_called_with()
@mock.patch.object(host.Host, "list_instance_domains")
def test_failing_vcpu_count_none(self, mock_list):
"""Domain will return zero if the current number of vcpus used
is None. This is in case of VM state starting up or shutting
down. None type returned is counted as zero.
"""
class DiagFakeDomain(object):
def __init__(self):
pass
def vcpus(self):
return None
def ID(self):
return 1
def name(self):
return "instance000001"
mock_list.return_value = [DiagFakeDomain()]
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertEqual(0, drvr._get_vcpu_used())
mock_list.assert_called_with()
def test_get_instance_capabilities(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
def get_host_capabilities_stub(self):
caps = vconfig.LibvirtConfigCaps()
guest = vconfig.LibvirtConfigGuest()
guest.ostype = 'hvm'
guest.arch = arch.X86_64
guest.domtype = ['kvm', 'qemu']
caps.guests.append(guest)
guest = vconfig.LibvirtConfigGuest()
guest.ostype = 'hvm'
guest.arch = arch.I686
guest.domtype = ['kvm']
caps.guests.append(guest)
return caps
self.stubs.Set(host.Host, "get_capabilities",
get_host_capabilities_stub)
want = [(arch.X86_64, 'kvm', 'hvm'),
(arch.X86_64, 'qemu', 'hvm'),
(arch.I686, 'kvm', 'hvm')]
got = drvr._get_instance_capabilities()
self.assertEqual(want, got)
def test_set_cache_mode(self):
self.flags(disk_cachemodes=['file=directsync'], group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
fake_conf = FakeConfigGuestDisk()
fake_conf.source_type = 'file'
drvr._set_cache_mode(fake_conf)
self.assertEqual(fake_conf.driver_cache, 'directsync')
def test_set_cache_mode_invalid_mode(self):
self.flags(disk_cachemodes=['file=FAKE'], group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
fake_conf = FakeConfigGuestDisk()
fake_conf.source_type = 'file'
drvr._set_cache_mode(fake_conf)
self.assertIsNone(fake_conf.driver_cache)
def test_set_cache_mode_invalid_object(self):
self.flags(disk_cachemodes=['file=directsync'], group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
fake_conf = FakeConfigGuest()
fake_conf.driver_cache = 'fake'
drvr._set_cache_mode(fake_conf)
self.assertEqual(fake_conf.driver_cache, 'fake')
    @mock.patch('os.unlink')
    @mock.patch.object(os.path, 'exists')
    def _test_shared_storage_detection(self, is_same,
                                       mock_exists, mock_unlink):
        # Helper for the shared-storage tests: the driver writes a probe
        # file on the remote host; os.path.exists (forced to is_same)
        # decides whether that file is visible locally.  Shared storage
        # -> local os.unlink cleanup; not shared -> remote remove_file.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        drvr.get_host_ip_addr = mock.MagicMock(return_value='bar')
        mock_exists.return_value = is_same
        with contextlib.nested(
            mock.patch.object(drvr._remotefs, 'create_file'),
            mock.patch.object(drvr._remotefs, 'remove_file')
        ) as (mock_rem_fs_create, mock_rem_fs_remove):
            result = drvr._is_storage_shared_with('host', '/path')
        mock_rem_fs_create.assert_any_call('host', mock.ANY)
        create_args, create_kwargs = mock_rem_fs_create.call_args
        # The probe file must live under the directory being checked.
        self.assertTrue(create_args[1].startswith('/path'))
        if is_same:
            mock_unlink.assert_called_once_with(mock.ANY)
        else:
            mock_rem_fs_remove.assert_called_with('host', mock.ANY)
            remove_args, remove_kwargs = mock_rem_fs_remove.call_args
            self.assertTrue(remove_args[1].startswith('/path'))
        return result
def test_shared_storage_detection_same_host(self):
self.assertTrue(self._test_shared_storage_detection(True))
def test_shared_storage_detection_different_host(self):
self.assertFalse(self._test_shared_storage_detection(False))
    def test_shared_storage_detection_easy(self):
        # Same host (get_host_ip_addr returns the name we pass in):
        # storage is trivially shared and no filesystem probing happens,
        # so the stubbed execute/exists/unlink must never be called --
        # mox verifies no unrecorded calls occur.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.mox.StubOutWithMock(drvr, 'get_host_ip_addr')
        self.mox.StubOutWithMock(utils, 'execute')
        self.mox.StubOutWithMock(os.path, 'exists')
        self.mox.StubOutWithMock(os, 'unlink')
        drvr.get_host_ip_addr().AndReturn('foo')
        self.mox.ReplayAll()
        self.assertTrue(drvr._is_storage_shared_with('foo', '/path'))
def test_store_pid_remove_pid(self):
instance = objects.Instance(**self.test_instance)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
popen = mock.Mock(pid=3)
drvr.job_tracker.add_job(instance, popen.pid)
self.assertIn(3, drvr.job_tracker.jobs[instance.uuid])
drvr.job_tracker.remove_job(instance, popen.pid)
self.assertNotIn(instance.uuid, drvr.job_tracker.jobs)
@mock.patch('nova.virt.libvirt.host.Host.get_domain')
def test_get_domain_info_with_more_return(self, mock_get_domain):
instance = objects.Instance(**self.test_instance)
dom_mock = mock.MagicMock()
dom_mock.info.return_value = [
1, 2048, 737, 8, 12345, 888888
]
dom_mock.ID.return_value = mock.sentinel.instance_id
mock_get_domain.return_value = dom_mock
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
info = drvr.get_info(instance)
self.assertEqual(1, info.state)
self.assertEqual(2048, info.max_mem_kb)
self.assertEqual(737, info.mem_kb)
self.assertEqual(8, info.num_cpu)
self.assertEqual(12345, info.cpu_time_ns)
self.assertEqual(mock.sentinel.instance_id, info.id)
dom_mock.info.assert_called_once_with()
dom_mock.ID.assert_called_once_with()
mock_get_domain.assert_called_once_with(instance)
def test_create_domain(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
mock_domain = mock.MagicMock()
guest = drvr._create_domain(domain=mock_domain)
self.assertEqual(mock_domain, guest._domain)
mock_domain.createWithFlags.assert_has_calls([mock.call(0)])
    @mock.patch('nova.virt.disk.api.clean_lxc_namespace')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.get_info')
    @mock.patch('nova.virt.disk.api.setup_container')
    @mock.patch('oslo_utils.fileutils.ensure_tree')
    @mock.patch.object(fake_libvirt_utils, 'get_instance_path')
    def test_create_domain_lxc(self, mock_get_inst_path, mock_ensure_tree,
                               mock_setup_container, mock_get_info, mock_clean):
        # LXC boot: the rootfs is mounted via setup_container and, because
        # the guest comes up RUNNING, the namespace is cleaned afterwards
        # and the container device is recorded in system_metadata.
        self.flags(virt_type='lxc', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        mock_instance = mock.MagicMock()
        inst_sys_meta = dict()
        mock_instance.system_metadata = inst_sys_meta
        mock_get_inst_path.return_value = '/tmp/'
        mock_image_backend = mock.MagicMock()
        drvr.image_backend = mock_image_backend
        mock_image = mock.MagicMock()
        mock_image.path = '/tmp/test.img'
        drvr.image_backend.image.return_value = mock_image
        mock_setup_container.return_value = '/dev/nbd0'
        mock_get_info.return_value = hardware.InstanceInfo(
            state=power_state.RUNNING)
        with contextlib.nested(
            mock.patch.object(drvr, '_create_images_and_backing'),
            mock.patch.object(drvr, '_is_booted_from_volume',
                              return_value=False),
            mock.patch.object(drvr, '_create_domain'),
            mock.patch.object(drvr, 'plug_vifs'),
            mock.patch.object(drvr.firewall_driver, 'setup_basic_filtering'),
            mock.patch.object(drvr.firewall_driver, 'prepare_instance_filter'),
            mock.patch.object(drvr.firewall_driver, 'apply_instance_filter')):
            drvr._create_domain_and_network(self.context, 'xml',
                                            mock_instance, [], None)
        self.assertEqual('/dev/nbd0', inst_sys_meta['rootfs_device_name'])
        # The instance mock itself must never be invoked as a callable.
        self.assertFalse(mock_instance.called)
        mock_get_inst_path.assert_has_calls([mock.call(mock_instance)])
        mock_ensure_tree.assert_has_calls([mock.call('/tmp/rootfs')])
        drvr.image_backend.image.assert_has_calls([mock.call(mock_instance,
                                                             'disk')])
        setup_container_call = mock.call(
            mock_image.get_model(),
            container_dir='/tmp/rootfs')
        mock_setup_container.assert_has_calls([setup_container_call])
        mock_get_info.assert_has_calls([mock.call(mock_instance)])
        mock_clean.assert_has_calls([mock.call(container_dir='/tmp/rootfs')])
    @mock.patch('nova.virt.disk.api.clean_lxc_namespace')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.get_info')
    @mock.patch.object(fake_libvirt_utils, 'chown_for_id_maps')
    @mock.patch('nova.virt.disk.api.setup_container')
    @mock.patch('oslo_utils.fileutils.ensure_tree')
    @mock.patch.object(fake_libvirt_utils, 'get_instance_path')
    def test_create_domain_lxc_id_maps(self, mock_get_inst_path,
                                       mock_ensure_tree, mock_setup_container,
                                       mock_chown, mock_get_info, mock_clean):
        # LXC boot with uid/gid maps configured: the rootfs must be
        # re-chowned via chown_for_id_maps with one UID map and one GID
        # map matching the "0:1000:100" flag values.
        self.flags(virt_type='lxc', uid_maps=["0:1000:100"],
                   gid_maps=["0:1000:100"], group='libvirt')
        def chown_side_effect(path, id_maps):
            # Verifies the maps passed in: start:target:count == 0:1000:100.
            self.assertEqual('/tmp/rootfs', path)
            self.assertIsInstance(id_maps[0], vconfig.LibvirtConfigGuestUIDMap)
            self.assertEqual(0, id_maps[0].start)
            self.assertEqual(1000, id_maps[0].target)
            self.assertEqual(100, id_maps[0].count)
            self.assertIsInstance(id_maps[1], vconfig.LibvirtConfigGuestGIDMap)
            self.assertEqual(0, id_maps[1].start)
            self.assertEqual(1000, id_maps[1].target)
            self.assertEqual(100, id_maps[1].count)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        mock_instance = mock.MagicMock()
        inst_sys_meta = dict()
        mock_instance.system_metadata = inst_sys_meta
        mock_get_inst_path.return_value = '/tmp/'
        mock_image_backend = mock.MagicMock()
        drvr.image_backend = mock_image_backend
        mock_image = mock.MagicMock()
        mock_image.path = '/tmp/test.img'
        drvr.image_backend.image.return_value = mock_image
        mock_setup_container.return_value = '/dev/nbd0'
        mock_chown.side_effect = chown_side_effect
        mock_get_info.return_value = hardware.InstanceInfo(
            state=power_state.RUNNING)
        with contextlib.nested(
            mock.patch.object(drvr, '_create_images_and_backing'),
            mock.patch.object(drvr, '_is_booted_from_volume',
                              return_value=False),
            mock.patch.object(drvr, '_create_domain'),
            mock.patch.object(drvr, 'plug_vifs'),
            mock.patch.object(drvr.firewall_driver, 'setup_basic_filtering'),
            mock.patch.object(drvr.firewall_driver, 'prepare_instance_filter'),
            mock.patch.object(drvr.firewall_driver, 'apply_instance_filter')
        ) as (
            mock_create_images_and_backing, mock_is_booted_from_volume,
            mock_create_domain, mock_plug_vifs, mock_setup_basic_filtering,
            mock_prepare_instance_filter, mock_apply_instance_filter
        ):
            drvr._create_domain_and_network(self.context, 'xml',
                                            mock_instance, [], None)
        self.assertEqual('/dev/nbd0', inst_sys_meta['rootfs_device_name'])
        # The instance mock itself must never be invoked as a callable.
        self.assertFalse(mock_instance.called)
        mock_get_inst_path.assert_has_calls([mock.call(mock_instance)])
        mock_is_booted_from_volume.assert_called_once_with(mock_instance, {})
        mock_ensure_tree.assert_has_calls([mock.call('/tmp/rootfs')])
        drvr.image_backend.image.assert_has_calls([mock.call(mock_instance,
                                                             'disk')])
        setup_container_call = mock.call(
            mock_image.get_model(),
            container_dir='/tmp/rootfs')
        mock_setup_container.assert_has_calls([setup_container_call])
        mock_get_info.assert_has_calls([mock.call(mock_instance)])
        mock_clean.assert_has_calls([mock.call(container_dir='/tmp/rootfs')])
    @mock.patch('nova.virt.disk.api.teardown_container')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.get_info')
    @mock.patch('nova.virt.disk.api.setup_container')
    @mock.patch('oslo_utils.fileutils.ensure_tree')
    @mock.patch.object(fake_libvirt_utils, 'get_instance_path')
    def test_create_domain_lxc_not_running(self, mock_get_inst_path,
                                           mock_ensure_tree,
                                           mock_setup_container,
                                           mock_get_info, mock_teardown):
        # LXC boot where the guest reports SHUTDOWN instead of RUNNING:
        # the container is torn down (teardown_container) rather than
        # having its namespace cleaned.
        self.flags(virt_type='lxc', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        mock_instance = mock.MagicMock()
        inst_sys_meta = dict()
        mock_instance.system_metadata = inst_sys_meta
        mock_get_inst_path.return_value = '/tmp/'
        mock_image_backend = mock.MagicMock()
        drvr.image_backend = mock_image_backend
        mock_image = mock.MagicMock()
        mock_image.path = '/tmp/test.img'
        drvr.image_backend.image.return_value = mock_image
        mock_setup_container.return_value = '/dev/nbd0'
        mock_get_info.return_value = hardware.InstanceInfo(
            state=power_state.SHUTDOWN)
        with contextlib.nested(
            mock.patch.object(drvr, '_create_images_and_backing'),
            mock.patch.object(drvr, '_is_booted_from_volume',
                              return_value=False),
            mock.patch.object(drvr, '_create_domain'),
            mock.patch.object(drvr, 'plug_vifs'),
            mock.patch.object(drvr.firewall_driver, 'setup_basic_filtering'),
            mock.patch.object(drvr.firewall_driver, 'prepare_instance_filter'),
            mock.patch.object(drvr.firewall_driver, 'apply_instance_filter')):
            drvr._create_domain_and_network(self.context, 'xml',
                                            mock_instance, [], None)
        self.assertEqual('/dev/nbd0', inst_sys_meta['rootfs_device_name'])
        # The instance mock itself must never be invoked as a callable.
        self.assertFalse(mock_instance.called)
        mock_get_inst_path.assert_has_calls([mock.call(mock_instance)])
        mock_ensure_tree.assert_has_calls([mock.call('/tmp/rootfs')])
        drvr.image_backend.image.assert_has_calls([mock.call(mock_instance,
                                                             'disk')])
        setup_container_call = mock.call(
            mock_image.get_model(),
            container_dir='/tmp/rootfs')
        mock_setup_container.assert_has_calls([setup_container_call])
        mock_get_info.assert_has_calls([mock.call(mock_instance)])
        teardown_call = mock.call(container_dir='/tmp/rootfs')
        mock_teardown.assert_has_calls([teardown_call])
    def test_create_domain_define_xml_fails(self):
        """Tests that the xml is logged when defining the domain fails."""
        fake_xml = "<test>this is a test</test>"
        def fake_defineXML(xml):
            # The driver must pass the exact XML through to libvirt.
            self.assertEqual(fake_xml, xml)
            raise fakelibvirt.libvirtError('virDomainDefineXML() failed')
        def fake_safe_decode(text, *args, **kwargs):
            # Marker suffix proves the error path runs safe_decode on
            # the XML before logging it.
            return text + 'safe decoded'
        self.log_error_called = False
        def fake_error(msg, *args, **kwargs):
            self.log_error_called = True
            self.assertIn(fake_xml, msg % args)
            self.assertIn('safe decoded', msg % args)
        self.stubs.Set(encodeutils, 'safe_decode', fake_safe_decode)
        self.stubs.Set(nova.virt.libvirt.guest.LOG, 'error', fake_error)
        self.create_fake_libvirt_mock(defineXML=fake_defineXML)
        self.mox.ReplayAll()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        # The libvirt error propagates, but only after being logged.
        self.assertRaises(fakelibvirt.libvirtError, drvr._create_domain,
                          fake_xml)
        self.assertTrue(self.log_error_called)
    def test_create_domain_with_flags_fails(self):
        """Tests that the xml is logged when creating the domain with flags
        fails
        """
        fake_xml = "<test>this is a test</test>"
        fake_domain = FakeVirtDomain(fake_xml)
        def fake_createWithFlags(launch_flags):
            raise fakelibvirt.libvirtError('virDomainCreateWithFlags() failed')
        self.log_error_called = False
        def fake_error(msg, *args, **kwargs):
            # The error log must include the domain's XML.
            self.log_error_called = True
            self.assertIn(fake_xml, msg % args)
        self.stubs.Set(fake_domain, 'createWithFlags', fake_createWithFlags)
        self.stubs.Set(nova.virt.libvirt.guest.LOG, 'error', fake_error)
        self.create_fake_libvirt_mock()
        self.mox.ReplayAll()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        # The libvirt error propagates, but only after being logged.
        self.assertRaises(fakelibvirt.libvirtError, drvr._create_domain,
                          domain=fake_domain)
        self.assertTrue(self.log_error_called)
    def test_create_domain_enable_hairpin_fails(self):
        """Tests that the xml is logged when enabling hairpin mode for the
        domain fails.
        """
        fake_xml = "<test>this is a test</test>"
        fake_domain = FakeVirtDomain(fake_xml)
        def fake_execute(*args, **kwargs):
            # Makes the hairpin-enabling command fail.
            raise processutils.ProcessExecutionError('error')
        def fake_get_interfaces(*args):
            return ["dev"]
        self.log_error_called = False
        def fake_error(msg, *args, **kwargs):
            # The error log must include the domain's XML.
            self.log_error_called = True
            self.assertIn(fake_xml, msg % args)
        self.stubs.Set(nova.virt.libvirt.guest.LOG, 'error', fake_error)
        self.create_fake_libvirt_mock()
        self.mox.ReplayAll()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.stubs.Set(nova.utils, 'execute', fake_execute)
        self.stubs.Set(
            nova.virt.libvirt.guest.Guest, 'get_interfaces',
            fake_get_interfaces)
        # power_on=False takes the path that enables hairpin mode.
        self.assertRaises(processutils.ProcessExecutionError,
                          drvr._create_domain,
                          domain=fake_domain,
                          power_on=False)
        self.assertTrue(self.log_error_called)
def test_get_vnc_console(self):
    """get_vnc_console() parses the VNC port out of the domain XML."""
    instance = objects.Instance(**self.test_instance)
    dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                "<devices>"
                "<graphics type='vnc' port='5900'/>"
                "</devices></domain>")
    vdmock = self.mox.CreateMock(fakelibvirt.virDomain)
    self.mox.StubOutWithMock(vdmock, "XMLDesc")
    vdmock.XMLDesc(flags=0).AndReturn(dummyxml)
    # lookupByName only yields the mocked domain for this instance's name.
    def fake_lookup(instance_name):
        if instance_name == instance['name']:
            return vdmock
    self.create_fake_libvirt_mock(lookupByName=fake_lookup)
    self.mox.ReplayAll()
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    vnc_dict = drvr.get_vnc_console(self.context, instance)
    self.assertEqual(vnc_dict.port, '5900')
def test_get_vnc_console_unavailable(self):
    """With no <graphics> element, get_vnc_console() must raise
    ConsoleTypeUnavailable."""
    instance = objects.Instance(**self.test_instance)
    dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                "<devices></devices></domain>")
    vdmock = self.mox.CreateMock(fakelibvirt.virDomain)
    self.mox.StubOutWithMock(vdmock, "XMLDesc")
    vdmock.XMLDesc(flags=0).AndReturn(dummyxml)
    # lookupByName only yields the mocked domain for this instance's name.
    def fake_lookup(instance_name):
        if instance_name == instance['name']:
            return vdmock
    self.create_fake_libvirt_mock(lookupByName=fake_lookup)
    self.mox.ReplayAll()
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.assertRaises(exception.ConsoleTypeUnavailable,
                      drvr.get_vnc_console, self.context, instance)
def test_get_spice_console(self):
    """get_spice_console() parses the SPICE port out of the domain XML."""
    instance = objects.Instance(**self.test_instance)
    dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                "<devices>"
                "<graphics type='spice' port='5950'/>"
                "</devices></domain>")
    vdmock = self.mox.CreateMock(fakelibvirt.virDomain)
    self.mox.StubOutWithMock(vdmock, "XMLDesc")
    vdmock.XMLDesc(flags=0).AndReturn(dummyxml)
    # lookupByName only yields the mocked domain for this instance's name.
    def fake_lookup(instance_name):
        if instance_name == instance['name']:
            return vdmock
    self.create_fake_libvirt_mock(lookupByName=fake_lookup)
    self.mox.ReplayAll()
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    spice_dict = drvr.get_spice_console(self.context, instance)
    self.assertEqual(spice_dict.port, '5950')
def test_get_spice_console_unavailable(self):
    """With no <graphics> element, get_spice_console() must raise
    ConsoleTypeUnavailable."""
    instance = objects.Instance(**self.test_instance)
    dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                "<devices></devices></domain>")
    vdmock = self.mox.CreateMock(fakelibvirt.virDomain)
    self.mox.StubOutWithMock(vdmock, "XMLDesc")
    vdmock.XMLDesc(flags=0).AndReturn(dummyxml)
    # lookupByName only yields the mocked domain for this instance's name.
    def fake_lookup(instance_name):
        if instance_name == instance['name']:
            return vdmock
    self.create_fake_libvirt_mock(lookupByName=fake_lookup)
    self.mox.ReplayAll()
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.assertRaises(exception.ConsoleTypeUnavailable,
                      drvr.get_spice_console, self.context, instance)
def test_detach_volume_with_instance_not_found(self):
    # Test that detach_volume() method does not raise exception,
    # if the instance does not exist.
    instance = objects.Instance(**self.test_instance)
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    with contextlib.nested(
            mock.patch.object(host.Host, 'get_domain',
                              side_effect=exception.InstanceNotFound(
                                  instance_id=instance.uuid)),
            mock.patch.object(drvr, '_disconnect_volume')
    ) as (_get_domain, _disconnect_volume):
        connection_info = {'driver_volume_type': 'fake'}
        drvr.detach_volume(connection_info, instance, '/dev/sda')
        _get_domain.assert_called_once_with(instance)
        # The volume is still disconnected on the host even though the
        # guest is gone; 'sda' is the mount device minus '/dev/'.
        _disconnect_volume.assert_called_once_with(connection_info,
                                                   'sda')
def _test_attach_detach_interface_get_config(self, method_name):
    """Tests that the get_config() method is properly called in
    attach_interface() and detach_interface().
    method_name: either \"attach_interface\" or \"detach_interface\"
    depending on the method to test.
    """
    self.stubs.Set(host.Host, "get_domain", lambda a, b: FakeVirtDomain())
    instance = objects.Instance(**self.test_instance)
    network_info = _fake_network_info(self.stubs, 1)
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    fake_image_meta = {'id': instance['image_ref']}
    fake_image_meta_obj = objects.ImageMeta.from_dict(
        fake_image_meta)
    # attach_interface() additionally performs basic filtering setup.
    if method_name == "attach_interface":
        self.mox.StubOutWithMock(drvr.firewall_driver,
                                 'setup_basic_filtering')
        drvr.firewall_driver.setup_basic_filtering(instance, network_info)
    # Compute the expected config with the real vif driver *before*
    # stubbing get_config() out with mox below.
    expected = drvr.vif_driver.get_config(instance, network_info[0],
                                          fake_image_meta_obj,
                                          instance.get_flavor(),
                                          CONF.libvirt.virt_type,
                                          drvr._host)
    self.mox.StubOutWithMock(drvr.vif_driver, 'get_config')
    drvr.vif_driver.get_config(instance, network_info[0],
                               mox.IsA(objects.ImageMeta),
                               mox.IsA(objects.Flavor),
                               CONF.libvirt.virt_type,
                               drvr._host).\
        AndReturn(expected)
    self.mox.ReplayAll()
    if method_name == "attach_interface":
        drvr.attach_interface(instance, fake_image_meta,
                              network_info[0])
    elif method_name == "detach_interface":
        drvr.detach_interface(instance, network_info[0])
    else:
        raise ValueError("Unhandled method %s" % method_name)
@mock.patch.object(lockutils, "external_lock")
def test_attach_interface_get_config(self, mock_lock):
    """Tests that the get_config() method is properly called in
    attach_interface().
    """
    # Replace the external lock with a plain semaphore so no lock files
    # are touched during the test.
    mock_lock.return_value = threading.Semaphore()
    self._test_attach_detach_interface_get_config("attach_interface")
def test_detach_interface_get_config(self):
    """Tests that the get_config() method is properly called in
    detach_interface().
    """
    self._test_attach_detach_interface_get_config("detach_interface")
def test_default_root_device_name(self):
    """default_root_device_name() consults blockinfo for the disk and
    cdrom buses and returns the root info's device with a '/dev/' prefix.
    """
    instance = {'uuid': 'fake_instance'}
    image_meta = {'id': 'fake'}
    # NOTE: key was previously misspelled 'detination_type'; the dict is
    # passed through opaquely to the mocked get_root_info(), so fixing
    # the spelling keeps behavior while making the test data realistic.
    root_bdm = {'source_type': 'image',
                'destination_type': 'volume',
                'image_id': 'fake_id'}
    self.flags(virt_type='fake_libvirt_type', group='libvirt')
    self.mox.StubOutWithMock(blockinfo, 'get_disk_bus_for_device_type')
    self.mox.StubOutWithMock(blockinfo, 'get_root_info')
    # The disk/cdrom bus lookups may happen in either order.
    blockinfo.get_disk_bus_for_device_type(instance,
                                           'fake_libvirt_type',
                                           mox.IsA(objects.ImageMeta),
                                           'disk').InAnyOrder().\
        AndReturn('virtio')
    blockinfo.get_disk_bus_for_device_type(instance,
                                           'fake_libvirt_type',
                                           mox.IsA(objects.ImageMeta),
                                           'cdrom').InAnyOrder().\
        AndReturn('ide')
    blockinfo.get_root_info(instance, 'fake_libvirt_type',
                            mox.IsA(objects.ImageMeta), root_bdm,
                            'virtio', 'ide').AndReturn({'dev': 'vda'})
    self.mox.ReplayAll()
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    # Expected first, actual second (assertEqual convention).
    self.assertEqual('/dev/vda',
                     drvr.default_root_device_name(instance, image_meta,
                                                   root_bdm))
@mock.patch.object(objects.BlockDeviceMapping, "save")
def test_default_device_names_for_instance(self, save_mock):
    """default_device_names_for_instance() renames conflicting or
    bus-inappropriate device names and saves every BDM."""
    instance = objects.Instance(**self.test_instance)
    instance.root_device_name = '/dev/vda'
    ephemerals = [objects.BlockDeviceMapping(
        **fake_block_device.AnonFakeDbBlockDeviceDict(
            {'device_name': 'vdb',
             'source_type': 'blank',
             'volume_size': 2,
             'destination_type': 'local'}))]
    swap = [objects.BlockDeviceMapping(
        **fake_block_device.AnonFakeDbBlockDeviceDict(
            {'device_name': 'vdg',
             'source_type': 'blank',
             'volume_size': 512,
             'guest_format': 'swap',
             'destination_type': 'local'}))]
    block_device_mapping = [
        objects.BlockDeviceMapping(
            **fake_block_device.AnonFakeDbBlockDeviceDict(
                {'source_type': 'volume',
                 'destination_type': 'volume',
                 'volume_id': 'fake-image-id',
                 'device_name': '/dev/vdxx',
                 'disk_bus': 'scsi'}))]
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    drvr.default_device_names_for_instance(instance,
                                           instance.root_device_name,
                                           ephemerals, swap,
                                           block_device_mapping)
    # Ephemeral device name was correct so no changes
    self.assertEqual('/dev/vdb', ephemerals[0].device_name)
    # Swap device name was incorrect so it was changed
    self.assertEqual('/dev/vdc', swap[0].device_name)
    # Volume device name was changed too, taking the bus into account
    self.assertEqual('/dev/sda', block_device_mapping[0].device_name)
    # One save per BDM: ephemeral, swap and volume.
    self.assertEqual(3, save_mock.call_count)
def _test_get_device_name_for_instance(self, new_bdm, expected_dev):
    """Assert get_device_name_for_instance() picks expected_dev for
    new_bdm on an instance whose root device is /dev/vda and that has no
    ephemeral disks or other BDMs."""
    instance = objects.Instance(**self.test_instance)
    instance.root_device_name = '/dev/vda'
    instance.ephemeral_gb = 0
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    got_dev = drvr.get_device_name_for_instance(
        instance, [], new_bdm)
    self.assertEqual(expected_dev, got_dev)
def test_get_device_name_for_instance_simple(self):
    """A volume BDM with no name/bus/type hints lands on the first free
    virtio disk after the root device: /dev/vdb."""
    bdm_kwargs = dict(context=context,
                      source_type='volume', destination_type='volume',
                      boot_index=-1, volume_id='fake-id',
                      device_name=None, guest_format=None,
                      disk_bus=None, device_type=None)
    self._test_get_device_name_for_instance(
        objects.BlockDeviceMapping(**bdm_kwargs), '/dev/vdb')
def test_get_device_name_for_instance_suggested(self):
    """A suggested device name is ignored; the driver still assigns the
    first free device, /dev/vdb."""
    bdm_kwargs = dict(context=context,
                      source_type='volume', destination_type='volume',
                      boot_index=-1, volume_id='fake-id',
                      device_name='/dev/vdg', guest_format=None,
                      disk_bus=None, device_type=None)
    self._test_get_device_name_for_instance(
        objects.BlockDeviceMapping(**bdm_kwargs), '/dev/vdb')
def test_get_device_name_for_instance_bus(self):
    """A scsi disk bus yields an 'sd' prefixed name: /dev/sda."""
    bdm_kwargs = dict(context=context,
                      source_type='volume', destination_type='volume',
                      boot_index=-1, volume_id='fake-id',
                      device_name=None, guest_format=None,
                      disk_bus='scsi', device_type=None)
    self._test_get_device_name_for_instance(
        objects.BlockDeviceMapping(**bdm_kwargs), '/dev/sda')
def test_get_device_name_for_instance_device_type(self):
    """A floppy device type yields an 'fd' prefixed name: /dev/fda."""
    bdm_kwargs = dict(context=context,
                      source_type='volume', destination_type='volume',
                      boot_index=-1, volume_id='fake-id',
                      device_name=None, guest_format=None,
                      disk_bus=None, device_type='floppy')
    self._test_get_device_name_for_instance(
        objects.BlockDeviceMapping(**bdm_kwargs), '/dev/fda')
def test_is_supported_fs_format(self):
    """Known filesystem formats are accepted; unknown ones rejected."""
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    for fs in (disk.FS_FORMAT_EXT2, disk.FS_FORMAT_EXT3,
               disk.FS_FORMAT_EXT4, disk.FS_FORMAT_XFS):
        self.assertTrue(drvr.is_supported_fs_format(fs))
    # A fresh driver instance, as in the original test, for the
    # negative cases.
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    for fs in ('', 'dummy'):
        self.assertFalse(drvr.is_supported_fs_format(fs))
def test_post_live_migration_at_destination_with_block_device_info(self):
    """post_live_migration_at_destination() regenerates the guest XML
    from the supplied block device info and persists the volume BDM."""
    # Preparing mocks
    mock_domain = self.mox.CreateMock(fakelibvirt.virDomain)
    self.resultXML = None
    def fake_getLibVersion():
        return 9011
    def fake_getCapabilities():
        return """
        <capabilities>
            <host>
                 <uuid>cef19ce0-0ca2-11df-855d-b19fbce37686</uuid>
                 <cpu>
                   <arch>x86_64</arch>
                   <model>Penryn</model>
                   <vendor>Intel</vendor>
                   <topology sockets='1' cores='2' threads='1'/>
                   <feature name='xtpr'/>
                 </cpu>
            </host>
        </capabilities>
        """
    # Build the guest config for real and capture the generated XML so
    # it can be inspected after the migration call.
    def fake_to_xml(context, instance, network_info, disk_info,
                    image_meta=None, rescue=None,
                    block_device_info=None, write_to_disk=False):
        if image_meta is None:
            image_meta = {}
        conf = drvr._get_guest_config(instance, network_info, image_meta,
                                      disk_info, rescue, block_device_info)
        self.resultXML = conf.to_xml()
        return self.resultXML
    def fake_get_domain(instance):
        return mock_domain
    def fake_baselineCPU(cpu, flag):
        return """<cpu mode='custom' match='exact'>
                    <model fallback='allow'>Westmere</model>
                    <vendor>Intel</vendor>
                    <feature policy='require' name='aes'/>
                  </cpu>
               """
    network_info = _fake_network_info(self.stubs, 1)
    self.create_fake_libvirt_mock(getLibVersion=fake_getLibVersion,
                                  getCapabilities=fake_getCapabilities,
                                  getVersion=lambda: 1005001,
                                  listDefinedDomains=lambda: [],
                                  numOfDomains=lambda: 0,
                                  baselineCPU=fake_baselineCPU)
    instance_ref = self.test_instance
    instance_ref['image_ref'] = 123456  # we send an int to test sha1 call
    instance = objects.Instance(**instance_ref)
    self.mox.ReplayAll()
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.stubs.Set(drvr,
                   '_get_guest_xml',
                   fake_to_xml)
    self.stubs.Set(host.Host,
                   'get_domain',
                   fake_get_domain)
    # A single bootable iSCSI volume attached as the root disk.
    block_device_info = {'block_device_mapping':
            driver_block_device.convert_volumes([
                fake_block_device.FakeDbBlockDeviceDict(
                    {'id': 1, 'guest_format': None,
                     'boot_index': 0,
                     'source_type': 'volume',
                     'destination_type': 'volume',
                     'device_name': '/dev/vda',
                     'disk_bus': 'virtio',
                     'device_type': 'disk',
                     'delete_on_termination': False}),
            ])}
    block_device_info['block_device_mapping'][0]['connection_info'] = (
        {'driver_volume_type': 'iscsi'})
    with contextlib.nested(
            mock.patch.object(
                driver_block_device.DriverVolumeBlockDevice, 'save'),
            mock.patch.object(objects.Instance, 'save')
    ) as (mock_volume_save, mock_instance_save):
        drvr.post_live_migration_at_destination(
            self.context, instance, network_info, True,
            block_device_info=block_device_info)
        # The regenerated XML includes the fake volume and the BDM was
        # persisted exactly once.
        self.assertIn('fake', self.resultXML)
        mock_volume_save.assert_called_once_with()
def test_create_propagates_exceptions(self):
    """Errors raised while creating an LXC domain must bubble up from
    _create_domain_and_network."""
    self.flags(virt_type='lxc', group='libvirt')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(id=1, uuid='fake-uuid',
                                image_ref='my_fake_image')
    with contextlib.nested(
          mock.patch.object(drvr, '_create_domain_setup_lxc'),
          mock.patch.object(drvr, '_create_domain_cleanup_lxc'),
          mock.patch.object(drvr, '_is_booted_from_volume',
                            return_value=False),
          mock.patch.object(drvr, 'plug_vifs'),
          mock.patch.object(drvr, 'firewall_driver'),
          # The failure injected into domain creation itself.
          mock.patch.object(drvr, '_create_domain',
                            side_effect=exception.NovaException),
          mock.patch.object(drvr, 'cleanup')):
        self.assertRaises(exception.NovaException,
                          drvr._create_domain_and_network,
                          self.context,
                          'xml',
                          instance, None, None)
def test_create_without_pause(self):
    """LXC domains are created without pausing: the pause flag passed to
    _create_domain is falsey and resume() is never invoked.
    """
    self.flags(virt_type='lxc', group='libvirt')
    @contextlib.contextmanager
    def fake_lxc_disk_handler(*args, **kwargs):
        yield
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(**self.test_instance)
    # NOTE: the 'as' unpacking must follow the patch order. Previously
    # the 'cleanup' and 'plug_vifs' names were bound to each other's
    # mocks -- harmless since only 'create' is asserted, but misleading.
    with contextlib.nested(
        mock.patch.object(drvr, '_lxc_disk_handler',
                          side_effect=fake_lxc_disk_handler),
        mock.patch.object(drvr, 'plug_vifs'),
        mock.patch.object(drvr, 'firewall_driver'),
        mock.patch.object(drvr, '_create_domain'),
        mock.patch.object(drvr, 'cleanup')) as (
        _handler, plug_vifs, firewall_driver, create, cleanup):
        domain = drvr._create_domain_and_network(self.context, 'xml',
                                                 instance, None, None)
        self.assertEqual(0, create.call_args_list[0][1]['pause'])
        self.assertEqual(0, domain.resume.call_count)
def _test_create_with_network_events(self, neutron_failure=None,
                                     power_on=True):
    """Drive _create_domain_and_network while faking the neutron
    network-vif-plugged event machinery.
    neutron_failure: None, 'timeout' or 'error' -- how event waiting
    should misbehave.
    power_on: events are only awaited when powering the instance on.
    """
    generated_events = []
    # Fake event.wait(): times out, reports failure, or completes,
    # depending on neutron_failure.
    def wait_timeout():
        event = mock.MagicMock()
        if neutron_failure == 'timeout':
            raise eventlet.timeout.Timeout()
        elif neutron_failure == 'error':
            event.status = 'failed'
        else:
            event.status = 'completed'
        return event
    # Record every prepared event so the expectations below can check
    # which events were created and waited on.
    def fake_prepare(instance, event_name):
        m = mock.MagicMock()
        m.instance = instance
        m.event_name = event_name
        m.wait.side_effect = wait_timeout
        generated_events.append(m)
        return m
    virtapi = manager.ComputeVirtAPI(mock.MagicMock())
    prepare = virtapi._compute.instance_events.prepare_for_instance_event
    prepare.side_effect = fake_prepare
    drvr = libvirt_driver.LibvirtDriver(virtapi, False)
    instance = objects.Instance(**self.test_instance)
    vifs = [{'id': 'vif1', 'active': False},
            {'id': 'vif2', 'active': False}]
    @mock.patch.object(drvr, 'plug_vifs')
    @mock.patch.object(drvr, 'firewall_driver')
    @mock.patch.object(drvr, '_create_domain')
    @mock.patch.object(drvr, 'cleanup')
    def test_create(cleanup, create, fw_driver, plug_vifs):
        domain = drvr._create_domain_and_network(self.context, 'xml',
                                                 instance, vifs, None,
                                                 power_on=power_on)
        plug_vifs.assert_called_with(instance, vifs)
        pause = self._get_pause_flag(drvr, vifs, power_on=power_on)
        self.assertEqual(pause,
                         create.call_args_list[0][1]['pause'])
        if pause:
            domain.resume.assert_called_once_with()
        # A fatal neutron failure must trigger a full cleanup.
        if neutron_failure and CONF.vif_plugging_is_fatal:
            cleanup.assert_called_once_with(self.context,
                                            instance, network_info=vifs,
                                            block_device_info=None)
    test_create()
    if utils.is_neutron() and CONF.vif_plugging_timeout and power_on:
        prepare.assert_has_calls([
            mock.call(instance, 'network-vif-plugged-vif1'),
            mock.call(instance, 'network-vif-plugged-vif2')])
        for event in generated_events:
            if neutron_failure and generated_events.index(event) != 0:
                # NOTE(review): this asserts the event mock itself was
                # never *called*, not that .wait() was skipped; it is
                # vacuously true for a MagicMock -- confirm intent.
                self.assertEqual(0, event.call_count)
            elif (neutron_failure == 'error' and
                      not CONF.vif_plugging_is_fatal):
                event.wait.assert_called_once_with()
    else:
        self.assertEqual(0, prepare.call_count)
@mock.patch('nova.utils.is_neutron', return_value=True)
def test_create_with_network_events_neutron(self, is_neutron):
    """Happy path: neutron enabled, all vif-plugged events complete."""
    self._test_create_with_network_events()
@mock.patch('nova.utils.is_neutron', return_value=True)
def test_create_with_network_events_neutron_power_off(self,
                                                      is_neutron):
    # Tests that we don't wait for events if we don't start the instance.
    self._test_create_with_network_events(power_on=False)
@mock.patch('nova.utils.is_neutron', return_value=True)
def test_create_with_network_events_neutron_nowait(self, is_neutron):
    """A zero vif_plugging_timeout disables event waiting entirely."""
    self.flags(vif_plugging_timeout=0)
    self._test_create_with_network_events()
@mock.patch('nova.utils.is_neutron', return_value=True)
def test_create_with_network_events_neutron_failed_nonfatal_timeout(
        self, is_neutron):
    """A wait timeout with vif_plugging_is_fatal=False does not raise."""
    self.flags(vif_plugging_is_fatal=False)
    self._test_create_with_network_events(neutron_failure='timeout')
@mock.patch('nova.utils.is_neutron', return_value=True)
def test_create_with_network_events_neutron_failed_fatal_timeout(
        self, is_neutron):
    """With the default fatal setting a wait timeout raises
    VirtualInterfaceCreateException."""
    self.assertRaises(exception.VirtualInterfaceCreateException,
                      self._test_create_with_network_events,
                      neutron_failure='timeout')
@mock.patch('nova.utils.is_neutron', return_value=True)
def test_create_with_network_events_neutron_failed_nonfatal_error(
        self, is_neutron):
    """A failed event with vif_plugging_is_fatal=False does not raise."""
    self.flags(vif_plugging_is_fatal=False)
    self._test_create_with_network_events(neutron_failure='error')
@mock.patch('nova.utils.is_neutron', return_value=True)
def test_create_with_network_events_neutron_failed_fatal_error(
        self, is_neutron):
    """With the default fatal setting a failed event raises
    VirtualInterfaceCreateException."""
    self.assertRaises(exception.VirtualInterfaceCreateException,
                      self._test_create_with_network_events,
                      neutron_failure='error')
@mock.patch('nova.utils.is_neutron', return_value=False)
def test_create_with_network_events_non_neutron(self, is_neutron):
    """Without neutron, no vif-plugged events are prepared or awaited."""
    self._test_create_with_network_events()
@mock.patch('nova.volume.encryptors.get_encryption_metadata')
@mock.patch('nova.virt.libvirt.blockinfo.get_info_from_bdm')
def test_create_with_bdm(self, get_info_from_bdm, get_encryption_metadata):
    """_create_domain_and_network with an attached volume fetches its
    encryption metadata, sets up filtering and creates the domain."""
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(**self.test_instance)
    mock_dom = mock.MagicMock()
    mock_encryption_meta = mock.MagicMock()
    get_encryption_metadata.return_value = mock_encryption_meta
    fake_xml = """
        <domain>
            <name>instance-00000001</name>
            <memory>1048576</memory>
            <vcpu>1</vcpu>
            <devices>
                <disk type='file' device='disk'>
                    <driver name='qemu' type='raw' cache='none'/>
                    <source file='/path/fake-volume1'/>
                    <target dev='vda' bus='virtio'/>
                </disk>
            </devices>
        </domain>
    """
    fake_volume_id = "fake-volume-id"
    connection_info = {"driver_volume_type": "fake",
                       "data": {"access_mode": "rw",
                                "volume_id": fake_volume_id}}
    # Make the mock BDM answer dict-style lookups for the two keys the
    # driver reads.
    def fake_getitem(*args, **kwargs):
        fake_bdm = {'connection_info': connection_info,
                    'mount_device': '/dev/vda'}
        return fake_bdm.get(args[0])
    mock_volume = mock.MagicMock()
    mock_volume.__getitem__.side_effect = fake_getitem
    block_device_info = {'block_device_mapping': [mock_volume]}
    network_info = [network_model.VIF(id='1'),
                    network_model.VIF(id='2', active=True)]
    with contextlib.nested(
        mock.patch.object(drvr, '_get_volume_encryptor'),
        mock.patch.object(drvr, 'plug_vifs'),
        mock.patch.object(drvr.firewall_driver, 'setup_basic_filtering'),
        mock.patch.object(drvr.firewall_driver,
                          'prepare_instance_filter'),
        mock.patch.object(drvr, '_create_domain'),
        mock.patch.object(drvr.firewall_driver, 'apply_instance_filter'),
    ) as (get_volume_encryptor, plug_vifs, setup_basic_filtering,
          prepare_instance_filter, create_domain, apply_instance_filter):
        create_domain.return_value = libvirt_guest.Guest(mock_dom)
        guest = drvr._create_domain_and_network(
                self.context, fake_xml, instance, network_info, None,
                block_device_info=block_device_info)
        get_encryption_metadata.assert_called_once_with(self.context,
            drvr._volume_api, fake_volume_id, connection_info)
        get_volume_encryptor.assert_called_once_with(connection_info,
                                                     mock_encryption_meta)
        plug_vifs.assert_called_once_with(instance, network_info)
        setup_basic_filtering.assert_called_once_with(instance,
                                                      network_info)
        prepare_instance_filter.assert_called_once_with(instance,
                                                        network_info)
        # pause depends on whether any vif is inactive (vif '1' is).
        pause = self._get_pause_flag(drvr, network_info)
        create_domain.assert_called_once_with(
            fake_xml, pause=pause, power_on=True)
        self.assertEqual(mock_dom, guest._domain)
def test_get_guest_storage_config(self):
    """_get_guest_storage_config connects the volume, builds its config
    and assigns default ephemeral/swap devices on the instance."""
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    test_instance = copy.deepcopy(self.test_instance)
    test_instance["default_swap_device"] = None
    instance = objects.Instance(**test_instance)
    image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
    flavor = instance.get_flavor()
    conn_info = {'driver_volume_type': 'fake', 'data': {}}
    # One attached volume at /dev/vdc.
    bdi = {'block_device_mapping':
           driver_block_device.convert_volumes([
               fake_block_device.FakeDbBlockDeviceDict({
                   'id': 1,
                   'source_type': 'volume',
                   'destination_type': 'volume',
                   'device_name': '/dev/vdc'})
            ])}
    bdm = bdi['block_device_mapping'][0]
    bdm['connection_info'] = conn_info
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance,
                                        image_meta,
                                        bdi)
    mock_conf = mock.MagicMock(source_path='fake')
    with contextlib.nested(
        mock.patch.object(driver_block_device.DriverVolumeBlockDevice,
                          'save'),
        mock.patch.object(drvr, '_connect_volume'),
        mock.patch.object(drvr, '_get_volume_config',
                          return_value=mock_conf),
        mock.patch.object(drvr, '_set_cache_mode')
    ) as (volume_save, connect_volume, get_volume_config, set_cache_mode):
        devices = drvr._get_guest_storage_config(instance, image_meta,
            disk_info, False, bdi, flavor, "hvm")
        # Root disk, ephemeral disk and the volume.
        self.assertEqual(3, len(devices))
        self.assertEqual('/dev/vdb', instance.default_ephemeral_device)
        self.assertIsNone(instance.default_swap_device)
        connect_volume.assert_called_with(bdm['connection_info'],
            {'bus': 'virtio', 'type': 'disk', 'dev': 'vdc'})
        get_volume_config.assert_called_with(bdm['connection_info'],
            {'bus': 'virtio', 'type': 'disk', 'dev': 'vdc'})
        volume_save.assert_called_once_with()
        # Cache mode is set once per returned device.
        self.assertEqual(3, set_cache_mode.call_count)
def test_get_neutron_events(self):
    """Only inactive VIFs yield network-vif-plugged events."""
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    vifs = [network_model.VIF(id='1'),
            network_model.VIF(id='2', active=True)]
    self.assertEqual([('network-vif-plugged', '1')],
                     drvr._get_neutron_events(vifs))
def test_unplug_vifs_ignores_errors(self):
    """_unplug_vifs(ignore_errors=True) swallows vif unplug failures."""
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
    with mock.patch.object(drvr, 'vif_driver') as mock_vif_driver:
        err = exception.AgentError(method='unplug')
        mock_vif_driver.unplug.side_effect = err
        # Must complete without raising despite the unplug failure.
        drvr._unplug_vifs('inst', [1], ignore_errors=True)
        mock_vif_driver.unplug.assert_called_once_with('inst', 1)
def test_unplug_vifs_reports_errors(self):
    """The public unplug_vifs() propagates vif unplug failures."""
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
    with mock.patch.object(drvr, 'vif_driver') as mock_vif_driver:
        err = exception.AgentError(method='unplug')
        mock_vif_driver.unplug.side_effect = err
        self.assertRaises(exception.AgentError,
                          drvr.unplug_vifs, 'inst', [1])
        mock_vif_driver.unplug.assert_called_once_with('inst', 1)
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._unplug_vifs')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._undefine_domain')
def test_cleanup_pass_with_no_mount_device(self, undefine, unplug):
    """cleanup() disconnects volumes even when a BDM has no mount
    device recorded."""
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
    drvr.firewall_driver = mock.Mock()
    drvr._disconnect_volume = mock.Mock()
    fake_inst = {'name': 'foo'}
    fake_bdms = [{'connection_info': 'foo',
                  'mount_device': None}]
    with mock.patch('nova.virt.driver'
                    '.block_device_info_get_mapping',
                    return_value=fake_bdms):
        drvr.cleanup('ctxt', fake_inst, 'netinfo', destroy_disks=False)
    self.assertTrue(drvr._disconnect_volume.called)
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._unplug_vifs')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._undefine_domain')
def test_cleanup_wants_vif_errors_ignored(self, undefine, unplug):
    """cleanup() must ask _unplug_vifs to ignore errors (third arg
    True)."""
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
    fake_inst = {'name': 'foo'}
    with mock.patch.object(drvr._conn, 'lookupByName') as lookup:
        lookup.return_value = fake_inst
        # NOTE(danms): Make unplug cause us to bail early, since
        # we only care about how it was called
        unplug.side_effect = test.TestingException
        self.assertRaises(test.TestingException,
                          drvr.cleanup, 'ctxt', fake_inst, 'netinfo')
        unplug.assert_called_once_with(fake_inst, 'netinfo', True)
@mock.patch.object(driver, 'block_device_info_get_mapping')
@mock.patch.object(host.Host, "get_guest")
@mock.patch.object(libvirt_driver.LibvirtDriver,
                   '_get_serial_ports_from_guest')
@mock.patch.object(libvirt_driver.LibvirtDriver, '_undefine_domain')
def test_cleanup_serial_console_enabled(
        self, undefine, get_ports, get_guest,
        block_device_info_get_mapping):
    """With serial consoles enabled, cleanup() releases the guest's
    serial ports before undefining the domain."""
    self.flags(enabled="True", group='serial_console')
    instance = 'i1'
    network_info = {}
    bdm_info = {}
    firewall_driver = mock.MagicMock()
    guest = mock.Mock(spec=libvirt_guest.Guest)
    get_guest.return_value = guest
    get_ports.return_value = iter([('127.0.0.1', 10000)])
    block_device_info_get_mapping.return_value = ()
    # We want to ensure undefine_domain is called after
    # lookup_domain.
    def undefine_domain(instance):
        get_ports.side_effect = Exception("domain undefined")
    undefine.side_effect = undefine_domain
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
    drvr.firewall_driver = firewall_driver
    drvr.cleanup(
        'ctx', instance, network_info,
        block_device_info=bdm_info,
        destroy_disks=False, destroy_vifs=False)
    get_ports.assert_called_once_with(guest)
    undefine.assert_called_once_with(instance)
    firewall_driver.unfilter_instance.assert_called_once_with(
        instance, network_info=network_info)
    block_device_info_get_mapping.assert_called_once_with(bdm_info)
@mock.patch.object(driver, 'block_device_info_get_mapping')
@mock.patch.object(host.Host, "get_guest")
@mock.patch.object(libvirt_driver.LibvirtDriver, '_undefine_domain')
def test_cleanup_serial_console_domain_gone(
        self, undefine, get_guest, block_device_info_get_mapping):
    """cleanup() proceeds (undefine, unfilter) even when the domain has
    already vanished while serial consoles are enabled."""
    self.flags(enabled="True", group='serial_console')
    instance = {'name': 'i1'}
    network_info = {}
    bdm_info = {}
    firewall_driver = mock.MagicMock()
    block_device_info_get_mapping.return_value = ()
    # Ensure get_guest raises same exception that would have occurred
    # if domain was gone.
    get_guest.side_effect = exception.InstanceNotFound("domain undefined")
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
    drvr.firewall_driver = firewall_driver
    drvr.cleanup(
        'ctx', instance, network_info,
        block_device_info=bdm_info,
        destroy_disks=False, destroy_vifs=False)
    get_guest.assert_called_once_with(instance)
    undefine.assert_called_once_with(instance)
    firewall_driver.unfilter_instance.assert_called_once_with(
        instance, network_info=network_info)
    block_device_info_get_mapping.assert_called_once_with(bdm_info)
def test_swap_volume(self):
    """_swap_volume block-rebases onto the new path, resizes the device
    and redefines the (persistent) domain."""
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
    mock_dom = mock.MagicMock()
    guest = libvirt_guest.Guest(mock_dom)
    with mock.patch.object(drvr._conn, 'defineXML',
                           create=True) as mock_define:
        xmldoc = "<domain/>"
        srcfile = "/first/path"
        dstfile = "/second/path"
        mock_dom.XMLDesc.return_value = xmldoc
        mock_dom.isPersistent.return_value = True
        # Empty job info: the rebase is reported as complete right away.
        mock_dom.blockJobInfo.return_value = {}
        drvr._swap_volume(guest, srcfile, dstfile, 1)
        mock_dom.XMLDesc.assert_called_once_with(
            flags=(fakelibvirt.VIR_DOMAIN_XML_INACTIVE |
                   fakelibvirt.VIR_DOMAIN_XML_SECURE))
        mock_dom.blockRebase.assert_called_once_with(
            srcfile, dstfile, 0, flags=(
                fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_COPY |
                fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT))
        # 1 GiB resize, expressed in KiB as libvirt expects.
        mock_dom.blockResize.assert_called_once_with(
            srcfile, 1 * units.Gi / units.Ki)
        mock_define.assert_called_once_with(xmldoc)
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._disconnect_volume')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._swap_volume')
@mock.patch('nova.virt.block_device.DriverVolumeBlockDevice.save')
@mock.patch('nova.objects.block_device.BlockDeviceMapping.'
            'get_by_volume_id')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_volume_config')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._connect_volume')
@mock.patch('nova.virt.libvirt.host.Host.get_guest')
def test_swap_volume_driver_bdm_save(self, get_guest,
                                     connect_volume, get_volume_config,
                                     get_by_volume_id, volume_save,
                                     swap_volume, disconnect_volume):
    """swap_volume() connects the new volume, swaps, disconnects the old
    one and saves the updated BDM."""
    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
    instance = objects.Instance(**self.test_instance)
    old_connection_info = {'driver_volume_type': 'fake',
                           'serial': 'old-volume-id',
                           'data': {'device_path': '/fake-old-volume',
                                    'access_mode': 'rw'}}
    new_connection_info = {'driver_volume_type': 'fake',
                           'serial': 'new-volume-id',
                           'data': {'device_path': '/fake-new-volume',
                                    'access_mode': 'rw'}}
    mock_dom = mock.MagicMock()
    guest = libvirt_guest.Guest(mock_dom)
    # Domain XML holding the old volume attached at vdb; the driver
    # locates the disk to swap from this.
    mock_dom.XMLDesc.return_value = """<domain>
      <devices>
        <disk type='file'>
            <source file='/fake-old-volume'/>
            <target dev='vdb' bus='virtio'/>
        </disk>
      </devices>
    </domain>
    """
    mock_dom.name.return_value = 'inst'
    mock_dom.UUIDString.return_value = 'uuid'
    get_guest.return_value = guest
    disk_info = {'bus': 'virtio', 'type': 'disk', 'dev': 'vdb'}
    get_volume_config.return_value = mock.MagicMock(
        source_path='/fake-new-volume')
    bdm = objects.BlockDeviceMapping(self.context,
        **fake_block_device.FakeDbBlockDeviceDict(
            {'id': 2, 'instance_uuid': 'fake-instance',
             'device_name': '/dev/vdb',
             'source_type': 'volume',
             'destination_type': 'volume',
             'volume_id': 'fake-volume-id-2',
             'boot_index': 0}))
    get_by_volume_id.return_value = bdm
    conn.swap_volume(old_connection_info, new_connection_info, instance,
                     '/dev/vdb', 1)
    get_guest.assert_called_once_with(instance)
    connect_volume.assert_called_once_with(new_connection_info, disk_info)
    swap_volume.assert_called_once_with(guest, 'vdb',
                                        '/fake-new-volume', 1)
    disconnect_volume.assert_called_once_with(old_connection_info, 'vdb')
    volume_save.assert_called_once_with()
def test_live_snapshot(self):
    """_live_snapshot shallow-rebases into a delta file, extracts the
    snapshot from it and redefines the (persistent) domain."""
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
    mock_dom = mock.MagicMock()
    with contextlib.nested(
            mock.patch.object(drvr._conn, 'defineXML', create=True),
            mock.patch.object(fake_libvirt_utils, 'get_disk_size'),
            mock.patch.object(fake_libvirt_utils, 'get_disk_backing_file'),
            mock.patch.object(fake_libvirt_utils, 'create_cow_image'),
            mock.patch.object(fake_libvirt_utils, 'chown'),
            mock.patch.object(fake_libvirt_utils, 'extract_snapshot'),
    ) as (mock_define, mock_size, mock_backing, mock_create_cow,
          mock_chown, mock_snapshot):
        xmldoc = "<domain/>"
        srcfile = "/first/path"
        dstfile = "/second/path"
        bckfile = "/other/path"
        # The rebase target is a temporary delta file next to dstfile.
        dltfile = dstfile + ".delta"
        mock_dom.XMLDesc.return_value = xmldoc
        mock_dom.isPersistent.return_value = True
        mock_size.return_value = 1004009
        mock_backing.return_value = bckfile
        guest = libvirt_guest.Guest(mock_dom)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        drvr._live_snapshot(self.context, self.test_instance, guest,
                            srcfile, dstfile, "qcow2", image_meta)
        mock_dom.XMLDesc.assert_called_once_with(flags=(
            fakelibvirt.VIR_DOMAIN_XML_INACTIVE |
            fakelibvirt.VIR_DOMAIN_XML_SECURE))
        mock_dom.blockRebase.assert_called_once_with(
            srcfile, dltfile, 0, flags=(
                fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_COPY |
                fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT |
                fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_SHALLOW))
        mock_size.assert_called_once_with(srcfile)
        mock_backing.assert_called_once_with(srcfile, basename=False)
        mock_create_cow.assert_called_once_with(bckfile, dltfile, 1004009)
        mock_chown.assert_called_once_with(dltfile, os.getuid())
        mock_snapshot.assert_called_once_with(dltfile, "qcow2",
                                              dstfile, "qcow2")
        mock_define.assert_called_once_with(xmldoc)
@mock.patch.object(libvirt_driver.LibvirtDriver, "_live_migration")
def test_live_migration_hostname_valid(self, mock_lm):
    """A well-formed destination hostname reaches _live_migration."""
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    drvr.live_migration(self.context, self.test_instance,
                        "host1.example.com",
                        lambda x: x,
                        lambda x: x)
    self.assertEqual(1, mock_lm.call_count)
    @mock.patch.object(libvirt_driver.LibvirtDriver, "_live_migration")
    @mock.patch.object(fake_libvirt_utils, "is_valid_hostname")
    def test_live_migration_hostname_invalid(self, mock_hostname, mock_lm):
        # A hostname rejected by is_valid_hostname must abort the migration
        # with InvalidHostname before _live_migration is ever invoked.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        mock_hostname.return_value = False
        self.assertRaises(exception.InvalidHostname,
                          drvr.live_migration,
                          self.context, self.test_instance,
                          "foo/?com=/bin/sh",
                          lambda x: x,
                          lambda x: x)
    @mock.patch('os.path.exists', return_value=True)
    @mock.patch('tempfile.mkstemp')
    @mock.patch('os.close', return_value=None)
    def test_check_instance_shared_storage_local_raw(self,
                                                     mock_close,
                                                     mock_mkstemp,
                                                     mock_exists):
        # With raw file-backed images the driver creates a probe temp file
        # and returns its path so the peer host can look for it.
        instance_uuid = str(uuid.uuid4())
        self.flags(images_type='raw', group='libvirt')
        self.flags(instances_path='/tmp')
        mock_mkstemp.return_value = (-1,
                                     '/tmp/{0}/file'.format(instance_uuid))
        driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        temp_file = driver.check_instance_shared_storage_local(self.context,
                                                               instance)
        self.assertEqual('/tmp/{0}/file'.format(instance_uuid),
                         temp_file['filename'])
def test_check_instance_shared_storage_local_rbd(self):
self.flags(images_type='rbd', group='libvirt')
driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instance = objects.Instance(**self.test_instance)
self.assertIsNone(driver.
check_instance_shared_storage_local(self.context,
instance))
def test_version_to_string(self):
driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
string_ver = driver._version_to_string((4, 33, 173))
self.assertEqual("4.33.173", string_ver)
    def test_parallels_min_version_fail(self):
        # A libvirt version just below the parallels minimum (1.2.12)
        # must make init_host refuse to start.
        self.flags(virt_type='parallels', group='libvirt')
        driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        with mock.patch.object(driver._conn, 'getLibVersion',
                               return_value=1002011):
            self.assertRaises(exception.NovaException,
                              driver.init_host, 'wibble')
    def test_parallels_min_version_ok(self):
        # Exactly the minimum parallels libvirt version must be accepted.
        self.flags(virt_type='parallels', group='libvirt')
        driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        with mock.patch.object(driver._conn, 'getLibVersion',
                               return_value=1002012):
            driver.init_host('wibble')
    def test_get_guest_config_parallels_vm(self):
        """A parallels HVM guest with ploop images gets the expected
        device layout: two ploop disks, an interface, input, graphics
        and video devices.
        """
        self.flags(virt_type='parallels', group='libvirt')
        self.flags(images_type='ploop', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref,
                                     _fake_network_info(self.stubs, 1),
                                     image_meta, disk_info)
        self.assertEqual("parallels", cfg.virt_type)
        self.assertEqual(instance_ref["uuid"], cfg.uuid)
        self.assertEqual(2 * units.Mi, cfg.memory)
        self.assertEqual(1, cfg.vcpus)
        self.assertEqual(vm_mode.HVM, cfg.os_type)
        self.assertIsNone(cfg.os_root)
        self.assertEqual(6, len(cfg.devices))
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        # Root disk must use the ploop driver format configured above.
        self.assertEqual(cfg.devices[0].driver_format, "ploop")
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestInterface)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestInput)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigGuestVideo)
    def test_get_guest_config_parallels_ct(self):
        """A parallels container (EXE mode) guest is rooted on a ploop
        filesystem device instead of a disk, and boots via /sbin/init.
        """
        self.flags(virt_type='parallels', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        ct_instance = self.test_instance.copy()
        ct_instance["vm_mode"] = vm_mode.EXE
        instance_ref = objects.Instance(**ct_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        cfg = drvr._get_guest_config(instance_ref,
                                     _fake_network_info(self.stubs, 1),
                                     image_meta, {'mapping': {'disk': {}}})
        self.assertEqual("parallels", cfg.virt_type)
        self.assertEqual(instance_ref["uuid"], cfg.uuid)
        self.assertEqual(2 * units.Mi, cfg.memory)
        self.assertEqual(1, cfg.vcpus)
        self.assertEqual(vm_mode.EXE, cfg.os_type)
        self.assertEqual("/sbin/init", cfg.os_init_path)
        self.assertIsNone(cfg.os_root)
        self.assertEqual(4, len(cfg.devices))
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestFilesys)
        fs = cfg.devices[0]
        self.assertEqual(fs.source_type, "file")
        self.assertEqual(fs.driver_type, "ploop")
        self.assertEqual(fs.target_dir, "/")
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestInterface)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestVideo)
    def _test_get_guest_config_parallels_volume(self, vmmode, devices):
        """Common helper: boot a parallels guest from a volume and check
        that the volume appears as a 'sda' disk (never a filesystem) and
        that the expected total device count is produced.

        :param vmmode: vm_mode constant to run the guest in (EXE or HVM)
        :param devices: expected number of devices in the generated config
        """
        self.flags(virt_type='parallels', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        ct_instance = self.test_instance.copy()
        ct_instance["vm_mode"] = vmmode
        instance_ref = objects.Instance(**ct_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        conn_info = {'driver_volume_type': 'fake'}
        info = {'block_device_mapping': driver_block_device.convert_volumes([
                    fake_block_device.FakeDbBlockDeviceDict(
                        {'id': 0,
                         'source_type': 'volume', 'destination_type': 'volume',
                         'device_name': '/dev/sda'}),
                ])}
        info['block_device_mapping'][0]['connection_info'] = conn_info

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta,
                                            info)

        with mock.patch.object(
                        driver_block_device.DriverVolumeBlockDevice, 'save'
        ) as mock_save:
            cfg = drvr._get_guest_config(instance_ref,
                                         _fake_network_info(self.stubs, 1),
                                         image_meta, disk_info, None, info)
            mock_save.assert_called_once_with()

        self.assertEqual("parallels", cfg.virt_type)
        self.assertEqual(instance_ref["uuid"], cfg.uuid)
        self.assertEqual(2 * units.Mi, cfg.memory)
        self.assertEqual(1, cfg.vcpus)
        self.assertEqual(vmmode, cfg.os_type)
        self.assertIsNone(cfg.os_root)
        self.assertEqual(devices, len(cfg.devices))

        disk_found = False

        for dev in cfg.devices:
            # Volumes must never be exposed as filesystem devices.
            result = isinstance(dev, vconfig.LibvirtConfigGuestFilesys)
            self.assertFalse(result)
            # Skip the ephemeral 'disk.local'; match the boot volume only.
            if (isinstance(dev, vconfig.LibvirtConfigGuestDisk) and
                (dev.source_path is None or
               'disk.local' not in dev.source_path)):
                self.assertEqual("disk", dev.source_device)
                self.assertEqual("sda", dev.target_dev)
                disk_found = True

        self.assertTrue(disk_found)
    def test_get_guest_config_parallels_volume(self):
        # Containers (EXE) produce 4 devices, full VMs (HVM) produce 6.
        self._test_get_guest_config_parallels_volume(vm_mode.EXE, 4)
        self._test_get_guest_config_parallels_volume(vm_mode.HVM, 6)
class HostStateTestCase(test.NoDBTestCase):
    """Verify get_available_resource() aggregates the host statistics
    reported by the individual LibvirtDriver helper methods.
    """

    # Canned host data returned by the FakeConnection helpers below.
    cpu_info = {"vendor": "Intel", "model": "pentium", "arch": "i686",
                "features": ["ssse3", "monitor", "pni", "sse2", "sse",
                             "fxsr", "clflush", "pse36", "pat", "cmov", "mca", "pge",
                             "mtrr", "sep", "apic"],
                "topology": {"cores": "1", "threads": "1", "sockets": "1"}}
    instance_caps = [(arch.X86_64, "kvm", "hvm"),
                     (arch.I686, "kvm", "hvm")]
    pci_devices = [{
        "dev_id": "pci_0000_04_00_3",
        "address": "0000:04:10.3",
        "product_id": '1521',
        "vendor_id": '8086',
        "dev_type": fields.PciDeviceType.SRIOV_PF,
        "phys_function": None}]
    numa_topology = objects.NUMATopology(
                        cells=[objects.NUMACell(
                            id=1, cpuset=set([1, 2]), memory=1024,
                            cpu_usage=0, memory_usage=0,
                            mempages=[], siblings=[],
                            pinned_cpus=set([])),
                               objects.NUMACell(
                            id=2, cpuset=set([3, 4]), memory=1024,
                            cpu_usage=0, memory_usage=0,
                            mempages=[], siblings=[],
                            pinned_cpus=set([]))])

    class FakeConnection(libvirt_driver.LibvirtDriver):
        """Fake connection object."""
        def __init__(self):
            super(HostStateTestCase.FakeConnection,
                  self).__init__(fake.FakeVirtAPI(), True)

            self._host = host.Host("qemu:///system")

            def _get_memory_mb_total():
                return 497

            def _get_memory_mb_used():
                return 88

            # Patch the Host object directly: memory figures come from it,
            # not from the driver.
            self._host.get_memory_mb_total = _get_memory_mb_total
            self._host.get_memory_mb_used = _get_memory_mb_used

        def _get_vcpu_total(self):
            return 1

        def _get_vcpu_used(self):
            return 0

        def _get_cpu_info(self):
            return HostStateTestCase.cpu_info

        def _get_disk_over_committed_size_total(self):
            return 0

        def _get_local_gb_info(self):
            return {'total': 100, 'used': 20, 'free': 80}

        def get_host_uptime(self):
            return ('10:01:16 up  1:36,  6 users,  '
                    'load average: 0.21, 0.16, 0.19')

        def _get_disk_available_least(self):
            return 13091

        def _get_instance_capabilities(self):
            return HostStateTestCase.instance_caps

        def _get_pci_passthrough_devices(self):
            return jsonutils.dumps(HostStateTestCase.pci_devices)

        def _get_host_numa_topology(self):
            return HostStateTestCase.numa_topology

    @mock.patch.object(fakelibvirt, "openAuth")
    def test_update_status(self, mock_open):
        mock_open.return_value = fakelibvirt.Connection("qemu:///system")

        drvr = HostStateTestCase.FakeConnection()

        stats = drvr.get_available_resource("compute1")
        self.assertEqual(stats["vcpus"], 1)
        self.assertEqual(stats["memory_mb"], 497)
        self.assertEqual(stats["local_gb"], 100)
        self.assertEqual(stats["vcpus_used"], 0)
        self.assertEqual(stats["memory_mb_used"], 88)
        self.assertEqual(stats["local_gb_used"], 20)
        self.assertEqual(stats["hypervisor_type"], 'QEMU')
        self.assertEqual(stats["hypervisor_version"], 1001000)
        self.assertEqual(stats["hypervisor_hostname"], 'compute1')
        # cpu_info travels as JSON; decode before comparing.
        cpu_info = jsonutils.loads(stats["cpu_info"])
        self.assertEqual(cpu_info,
                {"vendor": "Intel", "model": "pentium",
                 "arch": arch.I686,
                 "features": ["ssse3", "monitor", "pni", "sse2", "sse",
                              "fxsr", "clflush", "pse36", "pat", "cmov",
                              "mca", "pge", "mtrr", "sep", "apic"],
                 "topology": {"cores": "1", "threads": "1", "sockets": "1"}
                })
        self.assertEqual(stats["disk_available_least"], 80)
        self.assertEqual(jsonutils.loads(stats["pci_passthrough_devices"]),
                         HostStateTestCase.pci_devices)
        self.assertThat(objects.NUMATopology.obj_from_db_obj(
                            stats['numa_topology'])._to_dict(),
                        matchers.DictMatches(
                            HostStateTestCase.numa_topology._to_dict()))
class LibvirtDriverTestCase(test.NoDBTestCase):
"""Test for nova.virt.libvirt.libvirt_driver.LibvirtDriver."""
    def setUp(self):
        super(LibvirtDriverTestCase, self).setUp()
        # Read-only driver is sufficient: these tests stub out everything
        # that would talk to a real libvirt daemon.
        self.drvr = libvirt_driver.LibvirtDriver(
            fake.FakeVirtAPI(), read_only=True)
        self.context = context.get_admin_context()
        self.test_image_meta = {
            "disk_format": "raw",
        }
    def _create_instance(self, params=None):
        """Create a test instance.

        :param params: optional dict of instance attributes overriding the
                       defaults below (e.g. {'host': ..., 'root_gb': ...})
        :returns: an objects.Instance with an m1.tiny-like flavor attached
        """
        if not params:
            params = {}

        flavor = objects.Flavor(memory_mb=512,
                                swap=0,
                                vcpu_weight=None,
                                root_gb=10,
                                id=2,
                                name=u'm1.tiny',
                                ephemeral_gb=20,
                                rxtx_factor=1.0,
                                flavorid=u'1',
                                vcpus=1)

        inst = {}
        inst['id'] = 1
        inst['uuid'] = '52d3b512-1152-431f-a8f7-28f0288a622b'
        inst['os_type'] = 'linux'
        inst['image_ref'] = '1'
        inst['reservation_id'] = 'r-fakeres'
        inst['user_id'] = 'fake'
        inst['project_id'] = 'fake'
        inst['instance_type_id'] = 2
        inst['ami_launch_index'] = 0
        inst['host'] = 'host1'
        # Disk sizes mirror the flavor so resize tests start consistent.
        inst['root_gb'] = flavor.root_gb
        inst['ephemeral_gb'] = flavor.ephemeral_gb
        inst['config_drive'] = True
        inst['kernel_id'] = 2
        inst['ramdisk_id'] = 3
        inst['key_data'] = 'ABCDEFG'
        inst['system_metadata'] = {}
        inst['metadata'] = {}

        inst.update(params)

        return objects.Instance(flavor=flavor,
                                old_flavor=None, new_flavor=None,
                                **inst)
    @staticmethod
    def _disk_info():
        """Return a JSON-encoded disk-info list for a 10G root disk and a
        512M swap disk, as get_instance_disk_info would produce.
        """
        # 10G root and 512M swap disk
        disk_info = [{'disk_size': 1, 'type': 'qcow2',
                      'virt_disk_size': 10737418240, 'path': '/test/disk',
                      'backing_file': '/base/disk'},
                     {'disk_size': 1, 'type': 'qcow2',
                      'virt_disk_size': 536870912, 'path': '/test/disk.swap',
                      'backing_file': '/base/swap_512'}]
        return jsonutils.dumps(disk_info)
def test_migrate_disk_and_power_off_exception(self):
"""Test for nova.virt.libvirt.libvirt_driver.LivirtConnection
.migrate_disk_and_power_off.
"""
self.counter = 0
self.checked_shared_storage = False
def fake_get_instance_disk_info(instance,
block_device_info=None):
return '[]'
def fake_destroy(instance):
pass
def fake_get_host_ip_addr():
return '10.0.0.1'
def fake_execute(*args, **kwargs):
self.counter += 1
if self.counter == 1:
assert False, "intentional failure"
def fake_os_path_exists(path):
return True
def fake_is_storage_shared(dest, inst_base):
self.checked_shared_storage = True
return False
self.stubs.Set(self.drvr, 'get_instance_disk_info',
fake_get_instance_disk_info)
self.stubs.Set(self.drvr, '_destroy', fake_destroy)
self.stubs.Set(self.drvr, 'get_host_ip_addr',
fake_get_host_ip_addr)
self.stubs.Set(self.drvr, '_is_storage_shared_with',
fake_is_storage_shared)
self.stubs.Set(utils, 'execute', fake_execute)
self.stubs.Set(os.path, 'exists', fake_os_path_exists)
ins_ref = self._create_instance()
flavor = {'root_gb': 10, 'ephemeral_gb': 20}
flavor_obj = objects.Flavor(**flavor)
self.assertRaises(AssertionError,
self.drvr.migrate_disk_and_power_off,
context.get_admin_context(), ins_ref, '10.0.0.2',
flavor_obj, None)
    def _test_migrate_disk_and_power_off(self, flavor_obj,
                                         block_device_info=None,
                                         params_for_instance=None):
        """Test for nova.virt.libvirt.libvirt_driver.LibvirtConnection
        .migrate_disk_and_power_off.

        Runs the migration twice — once to a different host and once to
        the local host — and checks the disk info is returned unchanged.
        """

        disk_info = self._disk_info()

        def fake_get_instance_disk_info(instance,
                                        block_device_info=None):
            return disk_info

        def fake_destroy(instance):
            pass

        def fake_get_host_ip_addr():
            return '10.0.0.1'

        def fake_execute(*args, **kwargs):
            pass

        def fake_copy_image(src, dest, host=None, receive=False,
                            on_execute=None, on_completion=None,
                            compression=True):
            # The driver must supply rollback/progress callbacks.
            self.assertIsNotNone(on_execute)
            self.assertIsNotNone(on_completion)

        self.stubs.Set(self.drvr, 'get_instance_disk_info',
                       fake_get_instance_disk_info)
        self.stubs.Set(self.drvr, '_destroy', fake_destroy)
        self.stubs.Set(self.drvr, 'get_host_ip_addr',
                       fake_get_host_ip_addr)
        self.stubs.Set(utils, 'execute', fake_execute)
        self.stubs.Set(libvirt_utils, 'copy_image', fake_copy_image)

        ins_ref = self._create_instance(params=params_for_instance)

        # dest is different host case
        out = self.drvr.migrate_disk_and_power_off(
               context.get_admin_context(), ins_ref, '10.0.0.2',
               flavor_obj, None, block_device_info=block_device_info)
        self.assertEqual(out, disk_info)

        # dest is same host case
        out = self.drvr.migrate_disk_and_power_off(
               context.get_admin_context(), ins_ref, '10.0.0.1',
               flavor_obj, None, block_device_info=block_device_info)
        self.assertEqual(out, disk_info)
def test_migrate_disk_and_power_off(self):
flavor = {'root_gb': 10, 'ephemeral_gb': 20}
flavor_obj = objects.Flavor(**flavor)
self._test_migrate_disk_and_power_off(flavor_obj)
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._disconnect_volume')
    def test_migrate_disk_and_power_off_boot_from_volume(self,
                                                         disconnect_volume):
        # A boot-from-volume instance (boot_index 0) must have its boot
        # volume disconnected during the migration.
        info = {'block_device_mapping': [{'boot_index': None,
                                          'mount_device': '/dev/vdd',
                                          'connection_info': None},
                                         {'boot_index': 0,
                                          'mount_device': '/dev/vda',
                                          'connection_info': None}]}
        flavor = {'root_gb': 1, 'ephemeral_gb': 0}
        flavor_obj = objects.Flavor(**flavor)
        # Note(Mike_D): The size of instance's ephemeral_gb is 0 gb.
        self._test_migrate_disk_and_power_off(
            flavor_obj, block_device_info=info,
            params_for_instance={'image_ref': None, 'ephemeral_gb': 0})
        disconnect_volume.assert_called_with(
            info['block_device_mapping'][1]['connection_info'], 'vda')
    @mock.patch('nova.utils.execute')
    @mock.patch('nova.virt.libvirt.utils.copy_image')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._destroy')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.get_host_ip_addr')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver'
                '.get_instance_disk_info')
    def test_migrate_disk_and_power_off_swap(self, mock_get_disk_info,
                                             get_host_ip_addr,
                                             mock_destroy,
                                             mock_copy_image,
                                             mock_execute):
        """Test for nova.virt.libvirt.libvirt_driver.LibvirtConnection
        .migrate_disk_and_power_off.

        The swap disk must be skipped (neither copied nor moved) when the
        flavor's swap size changes during a resize.
        """
        self.copy_or_move_swap_called = False
        disk_info = self._disk_info()
        mock_get_disk_info.return_value = disk_info
        get_host_ip_addr.return_value = '10.0.0.1'

        def fake_copy_image(*args, **kwargs):
            # disk.swap should not be touched since it is skipped over
            if '/test/disk.swap' in list(args):
                self.copy_or_move_swap_called = True

        def fake_execute(*args, **kwargs):
            # disk.swap should not be touched since it is skipped over
            if set(['mv', '/test/disk.swap']).issubset(list(args)):
                self.copy_or_move_swap_called = True

        mock_copy_image.side_effect = fake_copy_image
        mock_execute.side_effect = fake_execute

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        # Original instance config
        instance = self._create_instance({'root_gb': 10,
                                          'ephemeral_gb': 0})

        # Re-size fake instance to 20G root and 1024M swap disk
        flavor = {'root_gb': 20, 'ephemeral_gb': 0, 'swap': 1024}
        flavor_obj = objects.Flavor(**flavor)

        # Destination is same host
        out = drvr.migrate_disk_and_power_off(context.get_admin_context(),
                                              instance, '10.0.0.1',
                                              flavor_obj, None)

        mock_get_disk_info.assert_called_once_with(instance,
                                                   block_device_info=None)
        self.assertTrue(get_host_ip_addr.called)
        mock_destroy.assert_called_once_with(instance)
        self.assertFalse(self.copy_or_move_swap_called)
        self.assertEqual(disk_info, out)
    def _test_migrate_disk_and_power_off_resize_check(self, expected_exc):
        """Test for nova.virt.libvirt.libvirt_driver.LibvirtConnection
        .migrate_disk_and_power_off.

        :param expected_exc: exception class the resize pre-check is
                             expected to raise
        """

        def fake_get_instance_disk_info(instance, xml=None,
                                        block_device_info=None):
            return self._disk_info()

        def fake_destroy(instance):
            pass

        def fake_get_host_ip_addr():
            return '10.0.0.1'

        self.stubs.Set(self.drvr, 'get_instance_disk_info',
                       fake_get_instance_disk_info)
        self.stubs.Set(self.drvr, '_destroy', fake_destroy)
        self.stubs.Set(self.drvr, 'get_host_ip_addr',
                       fake_get_host_ip_addr)

        ins_ref = self._create_instance()
        flavor = {'root_gb': 10, 'ephemeral_gb': 20}
        flavor_obj = objects.Flavor(**flavor)

        # Migration is not implemented for LVM backed instances
        self.assertRaises(expected_exc,
                          self.drvr.migrate_disk_and_power_off,
                          None, ins_ref, '10.0.0.1', flavor_obj, None)
    @mock.patch('nova.utils.execute')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._destroy')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver'
                '.get_instance_disk_info')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver'
                '._is_storage_shared_with')
    def _test_migrate_disk_and_power_off_backing_file(self,
                                                      shared_storage,
                                                      mock_is_shared_storage,
                                                      mock_get_disk_info,
                                                      mock_destroy,
                                                      mock_execute):
        """A qcow2 disk with a backing file must be migrated without being
        flattened via 'qemu-img convert', whether or not storage is shared.
        """
        self.convert_file_called = False
        flavor = {'root_gb': 20, 'ephemeral_gb': 30, 'swap': 0}
        flavor_obj = objects.Flavor(**flavor)
        disk_info = [{'type': 'qcow2', 'path': '/test/disk',
                      'virt_disk_size': '10737418240',
                      'backing_file': '/base/disk',
                      'disk_size': '83886080'}]
        disk_info_text = jsonutils.dumps(disk_info)
        mock_get_disk_info.return_value = disk_info_text
        mock_is_shared_storage.return_value = shared_storage

        def fake_execute(*args, **kwargs):
            # Fail the test if the disk is flattened with qemu-img convert.
            self.assertNotEqual(args[0:2], ['qemu-img', 'convert'])

        mock_execute.side_effect = fake_execute

        instance = self._create_instance()

        out = self.drvr.migrate_disk_and_power_off(
               context.get_admin_context(), instance, '10.0.0.2',
               flavor_obj, None)

        self.assertTrue(mock_is_shared_storage.called)
        mock_destroy.assert_called_once_with(instance)
        self.assertEqual(out, disk_info_text)
    def test_migrate_disk_and_power_off_shared_storage(self):
        # shared_storage=True variant of the backing-file migration check.
        self._test_migrate_disk_and_power_off_backing_file(True)
    def test_migrate_disk_and_power_off_non_shared_storage(self):
        # shared_storage=False variant of the backing-file migration check.
        self._test_migrate_disk_and_power_off_backing_file(False)
    def test_migrate_disk_and_power_off_lvm(self):
        # LVM-backed instances cannot be migrated this way; the driver
        # must roll the instance fault back.
        self.flags(images_type='lvm', group='libvirt')

        def fake_execute(*args, **kwargs):
            pass

        self.stubs.Set(utils, 'execute', fake_execute)

        expected_exc = exception.InstanceFaultRollback
        self._test_migrate_disk_and_power_off_resize_check(expected_exc)
    def test_migrate_disk_and_power_off_resize_cannot_ssh(self):
        # If the destination is unreachable (ssh/exec failure) on
        # non-shared storage, the fault must be rolled back.
        def fake_execute(*args, **kwargs):
            raise processutils.ProcessExecutionError()

        def fake_is_storage_shared(dest, inst_base):
            self.checked_shared_storage = True
            return False

        self.stubs.Set(self.drvr, '_is_storage_shared_with',
                       fake_is_storage_shared)
        self.stubs.Set(utils, 'execute', fake_execute)

        expected_exc = exception.InstanceFaultRollback
        self._test_migrate_disk_and_power_off_resize_check(expected_exc)
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver'
                '.get_instance_disk_info')
    def test_migrate_disk_and_power_off_resize_error(self, mock_get_disk_info):
        # Shrinking the root disk (10G -> 5G) is not allowed.
        instance = self._create_instance()
        flavor = {'root_gb': 5, 'ephemeral_gb': 10}
        flavor_obj = objects.Flavor(**flavor)
        mock_get_disk_info.return_value = self._disk_info()

        self.assertRaises(
            exception.InstanceFaultRollback,
            self.drvr.migrate_disk_and_power_off,
            'ctx', instance, '10.0.0.1', flavor_obj, None)
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver'
                '.get_instance_disk_info')
    def test_migrate_disk_and_power_off_resize_error_default_ephemeral(
            self, mock_get_disk_info):
        # Note(Mike_D): The size of this instance's ephemeral_gb is 20 gb.
        # Shrinking ephemeral to 0 must be rejected.
        instance = self._create_instance()
        flavor = {'root_gb': 10, 'ephemeral_gb': 0}
        flavor_obj = objects.Flavor(**flavor)
        mock_get_disk_info.return_value = self._disk_info()

        self.assertRaises(exception.InstanceFaultRollback,
                          self.drvr.migrate_disk_and_power_off,
                          'ctx', instance, '10.0.0.1', flavor_obj, None)
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver'
                '.get_instance_disk_info')
    @mock.patch('nova.virt.driver.block_device_info_get_ephemerals')
    def test_migrate_disk_and_power_off_resize_error_eph(self, mock_get,
                                                         mock_get_disk_info):
        """Resizing must be rejected when the target flavor's ephemeral
        size is smaller than the real ephemeral disk usage, and allowed
        once the target is large enough.
        """
        # Mixed block-device mappings: only the 'blank'/'local' entries
        # count towards ephemeral usage (swap + 3G blank).
        mappings = [
            {
                 'device_name': '/dev/sdb4',
                 'source_type': 'blank',
                 'destination_type': 'local',
                 'device_type': 'disk',
                 'guest_format': 'swap',
                 'boot_index': -1,
                 'volume_size': 1
             },
             {
                 'device_name': '/dev/sda1',
                 'source_type': 'volume',
                 'destination_type': 'volume',
                 'device_type': 'disk',
                 'volume_id': 1,
                 'guest_format': None,
                 'boot_index': 1,
                 'volume_size': 6
             },
             {
                 'device_name': '/dev/sda2',
                 'source_type': 'snapshot',
                 'destination_type': 'volume',
                 'snapshot_id': 1,
                 'device_type': 'disk',
                 'guest_format': None,
                 'boot_index': 0,
                 'volume_size': 4
             },
             {
                 'device_name': '/dev/sda3',
                 'source_type': 'blank',
                 'destination_type': 'local',
                 'device_type': 'disk',
                 'guest_format': None,
                 'boot_index': -1,
                 'volume_size': 3
             }
        ]
        mock_get.return_value = mappings
        instance = self._create_instance()

        # Old flavor, eph is 20, real disk is 3, target is 2, fail
        flavor = {'root_gb': 10, 'ephemeral_gb': 2}
        flavor_obj = objects.Flavor(**flavor)
        mock_get_disk_info.return_value = self._disk_info()

        self.assertRaises(
            exception.InstanceFaultRollback,
            self.drvr.migrate_disk_and_power_off,
            'ctx', instance, '10.0.0.1', flavor_obj, None)

        # Old flavor, eph is 20, real disk is 3, target is 4
        flavor = {'root_gb': 10, 'ephemeral_gb': 4}
        flavor_obj = objects.Flavor(**flavor)
        self._test_migrate_disk_and_power_off(flavor_obj)
    def test_wait_for_running(self):
        """_wait_for_running: propagate InstanceNotFound, signal completion
        via LoopingCallDone when RUNNING, and keep polling otherwise.
        """
        def fake_get_info(instance):
            if instance['name'] == "not_found":
                raise exception.InstanceNotFound(instance_id=instance['uuid'])
            elif instance['name'] == "running":
                return hardware.InstanceInfo(state=power_state.RUNNING)
            else:
                return hardware.InstanceInfo(state=power_state.SHUTDOWN)

        self.stubs.Set(self.drvr, 'get_info',
                       fake_get_info)

        # instance not found case
        self.assertRaises(exception.InstanceNotFound,
                          self.drvr._wait_for_running,
                          {'name': 'not_found',
                           'uuid': 'not_found_uuid'})

        # instance is running case
        self.assertRaises(loopingcall.LoopingCallDone,
                          self.drvr._wait_for_running,
                          {'name': 'running',
                           'uuid': 'running_uuid'})

        # else case
        self.drvr._wait_for_running({'name': 'else',
                                     'uuid': 'other_uuid'})
    def test_disk_size_from_instance_disk_info(self):
        # Root and ephemeral ('.local') sizes come from the instance;
        # swap disks report 0 so they are never resized.
        instance_data = {'root_gb': 10, 'ephemeral_gb': 20, 'swap_gb': 30}
        inst = objects.Instance(**instance_data)
        info = {'path': '/path/disk'}
        self.assertEqual(10 * units.Gi,
                         self.drvr._disk_size_from_instance(inst, info))

        info = {'path': '/path/disk.local'}
        self.assertEqual(20 * units.Gi,
                         self.drvr._disk_size_from_instance(inst, info))

        info = {'path': '/path/disk.swap'}
        self.assertEqual(0,
                         self.drvr._disk_size_from_instance(inst, info))
    @mock.patch('nova.utils.execute')
    def test_disk_raw_to_qcow2(self, mock_execute):
        # Conversion goes via a temp '_qcow' file that is moved in place.
        path = '/test/disk'
        _path_qcow = path + '_qcow'

        self.drvr._disk_raw_to_qcow2(path)
        mock_execute.assert_has_calls([
            mock.call('qemu-img', 'convert', '-f', 'raw',
                      '-O', 'qcow2', path, _path_qcow),
            mock.call('mv', _path_qcow, path)])
    @mock.patch('nova.utils.execute')
    def test_disk_qcow2_to_raw(self, mock_execute):
        # Conversion goes via a temp '_raw' file that is moved in place.
        path = '/test/disk'
        _path_raw = path + '_raw'

        self.drvr._disk_qcow2_to_raw(path)
        mock_execute.assert_has_calls([
            mock.call('qemu-img', 'convert', '-f', 'qcow2',
                      '-O', 'raw', path, _path_raw),
            mock.call('mv', _path_raw, path)])
    @mock.patch('nova.virt.disk.api.extend')
    def test_disk_resize_raw(self, mock_extend):
        # Raw images are extended directly, no format conversion needed.
        image = imgmodel.LocalFileImage("/test/disk",
                                        imgmodel.FORMAT_RAW)

        self.drvr._disk_resize(image, 50)
        mock_extend.assert_called_once_with(image, 50)
    @mock.patch('nova.virt.disk.api.can_resize_image')
    @mock.patch('nova.virt.disk.api.is_image_extendable')
    @mock.patch('nova.virt.disk.api.extend')
    def test_disk_resize_qcow2(
            self, mock_extend, mock_can_resize, mock_is_image_extendable):
        # qcow2 images are converted to raw, extended, then converted back.
        with contextlib.nested(
                mock.patch.object(
                    self.drvr, '_disk_qcow2_to_raw'),
                mock.patch.object(
                    self.drvr, '_disk_raw_to_qcow2'))\
        as (mock_disk_qcow2_to_raw, mock_disk_raw_to_qcow2):

            mock_can_resize.return_value = True
            mock_is_image_extendable.return_value = True

            imageqcow2 = imgmodel.LocalFileImage("/test/disk",
                                                 imgmodel.FORMAT_QCOW2)
            imageraw = imgmodel.LocalFileImage("/test/disk",
                                               imgmodel.FORMAT_RAW)
            self.drvr._disk_resize(imageqcow2, 50)

            mock_disk_qcow2_to_raw.assert_called_once_with(imageqcow2.path)
            mock_extend.assert_called_once_with(imageraw, 50)
            mock_disk_raw_to_qcow2.assert_called_once_with(imageqcow2.path)
    def _test_finish_migration(self, power_on, resize_instance=False):
        """Test for nova.virt.libvirt.libvirt_driver.LibvirtConnection
        .finish_migration.

        :param power_on: whether the domain should be powered on afterwards
        :param resize_instance: whether _disk_resize is expected to run
        """
        powered_on = power_on
        self.fake_create_domain_called = False
        self.fake_disk_resize_called = False

        def fake_to_xml(context, instance, network_info, disk_info,
                        image_meta=None, rescue=None,
                        block_device_info=None, write_to_disk=False):
            return ""

        def fake_plug_vifs(instance, network_info):
            pass

        def fake_create_image(context, inst,
                              disk_mapping, suffix='',
                              disk_images=None, network_info=None,
                              block_device_info=None, inject_files=True,
                              fallback_from_host=None):
            # finish_migration must never re-inject files into the image.
            self.assertFalse(inject_files)

        def fake_create_domain_and_network(
            context, xml, instance, network_info, disk_info,
            block_device_info=None, power_on=True, reboot=False,
            vifs_already_plugged=False):
            self.fake_create_domain_called = True
            self.assertEqual(powered_on, power_on)
            self.assertTrue(vifs_already_plugged)

        def fake_enable_hairpin():
            pass

        def fake_execute(*args, **kwargs):
            pass

        def fake_get_info(instance):
            if powered_on:
                return hardware.InstanceInfo(state=power_state.RUNNING)
            else:
                return hardware.InstanceInfo(state=power_state.SHUTDOWN)

        def fake_disk_resize(image, size):
            self.fake_disk_resize_called = True

        self.flags(use_cow_images=True)
        self.stubs.Set(self.drvr, '_disk_resize',
                       fake_disk_resize)
        self.stubs.Set(self.drvr, '_get_guest_xml', fake_to_xml)
        self.stubs.Set(self.drvr, 'plug_vifs', fake_plug_vifs)
        self.stubs.Set(self.drvr, '_create_image',
                       fake_create_image)
        self.stubs.Set(self.drvr, '_create_domain_and_network',
                       fake_create_domain_and_network)
        self.stubs.Set(nova.virt.libvirt.guest.Guest, 'enable_hairpin',
                       fake_enable_hairpin)
        self.stubs.Set(utils, 'execute', fake_execute)
        fw = base_firewall.NoopFirewallDriver()
        self.stubs.Set(self.drvr, 'firewall_driver', fw)
        self.stubs.Set(self.drvr, 'get_info',
                       fake_get_info)

        ins_ref = self._create_instance()

        migration = objects.Migration()
        migration.source_compute = 'fake-source-compute'
        migration.dest_compute = 'fake-dest-compute'
        migration.source_node = 'fake-source-node'
        migration.dest_node = 'fake-dest-node'
        self.drvr.finish_migration(
                      context.get_admin_context(), migration, ins_ref,
                      self._disk_info(), [], self.test_image_meta,
                      resize_instance, None, power_on)
        self.assertTrue(self.fake_create_domain_called)
        self.assertEqual(
            resize_instance, self.fake_disk_resize_called)
    def test_finish_migration_resize(self):
        # Powered on, with a disk resize expected.
        self._test_finish_migration(True, resize_instance=True)
    def test_finish_migration_power_on(self):
        # Powered on, no resize.
        self._test_finish_migration(True)
    def test_finish_migration_power_off(self):
        # Powered off, no resize.
        self._test_finish_migration(False)
def _test_finish_revert_migration(self, power_on):
"""Test for nova.virt.libvirt.libvirt_driver.LivirtConnection
.finish_revert_migration.
"""
powered_on = power_on
self.fake_create_domain_called = False
def fake_execute(*args, **kwargs):
pass
def fake_plug_vifs(instance, network_info):
pass
def fake_create_domain(context, xml, instance, network_info,
disk_info, block_device_info=None,
power_on=None,
vifs_already_plugged=None):
self.fake_create_domain_called = True
self.assertEqual(powered_on, power_on)
self.assertTrue(vifs_already_plugged)
return mock.MagicMock()
def fake_enable_hairpin():
pass
def fake_get_info(instance):
if powered_on:
return hardware.InstanceInfo(state=power_state.RUNNING)
else:
return hardware.InstanceInfo(state=power_state.SHUTDOWN)
def fake_to_xml(context, instance, network_info, disk_info,
image_meta=None, rescue=None,
block_device_info=None):
return ""
self.stubs.Set(self.drvr, '_get_guest_xml', fake_to_xml)
self.stubs.Set(self.drvr, 'plug_vifs', fake_plug_vifs)
self.stubs.Set(utils, 'execute', fake_execute)
fw = base_firewall.NoopFirewallDriver()
self.stubs.Set(self.drvr, 'firewall_driver', fw)
self.stubs.Set(self.drvr, '_create_domain_and_network',
fake_create_domain)
self.stubs.Set(nova.virt.libvirt.guest.Guest, 'enable_hairpin',
fake_enable_hairpin)
self.stubs.Set(self.drvr, 'get_info',
fake_get_info)
self.stubs.Set(utils, 'get_image_from_system_metadata',
lambda *a: self.test_image_meta)
with utils.tempdir() as tmpdir:
self.flags(instances_path=tmpdir)
ins_ref = self._create_instance()
os.mkdir(os.path.join(tmpdir, ins_ref['name']))
libvirt_xml_path = os.path.join(tmpdir,
ins_ref['name'],
'libvirt.xml')
f = open(libvirt_xml_path, 'w')
f.close()
self.drvr.finish_revert_migration(
context.get_admin_context(), ins_ref,
[], None, power_on)
self.assertTrue(self.fake_create_domain_called)
    def test_finish_revert_migration_power_on(self):
        # Reverted domain comes back powered on.
        self._test_finish_revert_migration(True)
    def test_finish_revert_migration_power_off(self):
        # Reverted domain stays powered off.
        self._test_finish_revert_migration(False)
    def _test_finish_revert_migration_after_crash(self, backup_made=True,
                                                  del_inst_failed=False):
        """Revert after a crash: if a '_resize' backup directory exists,
        the current instance dir is removed (failures tolerated) and the
        backup is moved back into place.

        NOTE: this uses mox record/replay — expectation order below must
        mirror the production call order exactly.
        """
        class FakeLoopingCall(object):
            def start(self, *a, **k):
                return self

            def wait(self):
                return None
        context = 'fake_context'

        instance = self._create_instance()
        self.mox.StubOutWithMock(libvirt_utils, 'get_instance_path')
        self.mox.StubOutWithMock(os.path, 'exists')
        self.mox.StubOutWithMock(shutil, 'rmtree')
        self.mox.StubOutWithMock(utils, 'execute')

        self.stubs.Set(blockinfo, 'get_disk_info', lambda *a: None)
        self.stubs.Set(self.drvr, '_get_guest_xml',
                       lambda *a, **k: None)
        self.stubs.Set(self.drvr, '_create_domain_and_network',
                       lambda *a, **kw: None)
        self.stubs.Set(loopingcall, 'FixedIntervalLoopingCall',
                       lambda *a, **k: FakeLoopingCall())

        libvirt_utils.get_instance_path(instance).AndReturn('/fake/foo')
        os.path.exists('/fake/foo_resize').AndReturn(backup_made)
        if backup_made:
            if del_inst_failed:
                # Deleting the instance dir may legitimately fail (already
                # gone); the revert must carry on regardless.
                os_error = OSError(errno.ENOENT, 'No such file or directory')
                shutil.rmtree('/fake/foo').AndRaise(os_error)
            else:
                shutil.rmtree('/fake/foo')
            utils.execute('mv', '/fake/foo_resize', '/fake/foo')

        self.mox.ReplayAll()

        self.drvr.finish_revert_migration(context, instance, [])
    def test_finish_revert_migration_after_crash(self):
        # Backup dir present: it is restored.
        self._test_finish_revert_migration_after_crash(backup_made=True)
    def test_finish_revert_migration_after_crash_before_new(self):
        # Crash happened before the new dir replaced the backup.
        self._test_finish_revert_migration_after_crash(backup_made=True)
    def test_finish_revert_migration_after_crash_before_backup(self):
        # No backup dir yet: nothing to restore.
        self._test_finish_revert_migration_after_crash(backup_made=False)
    def test_finish_revert_migration_after_crash_delete_failed(self):
        # rmtree failure on the stale dir must not abort the revert.
        self._test_finish_revert_migration_after_crash(backup_made=True,
                                                       del_inst_failed=True)
    def test_finish_revert_migration_preserves_disk_bus(self):
        # The hw_disk_bus image property must survive the revert.
        def fake_get_guest_xml(context, instance, network_info, disk_info,
                               image_meta, block_device_info=None):
            self.assertEqual('ide', disk_info['disk_bus'])

        image_meta = {"disk_format": "raw",
                      "properties": {"hw_disk_bus": "ide"}}
        instance = self._create_instance()

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        with contextlib.nested(
                mock.patch.object(drvr, '_create_domain_and_network'),
                mock.patch.object(utils, 'get_image_from_system_metadata',
                                  return_value=image_meta),
                mock.patch.object(drvr, '_get_guest_xml',
                                  side_effect=fake_get_guest_xml)):
            drvr.finish_revert_migration('', instance, None, power_on=False)
def test_cleanup_failed_migration(self):
    """_cleanup_failed_migration simply removes the instance directory."""
    self.mox.StubOutWithMock(shutil, 'rmtree')
    shutil.rmtree('/fake/inst')
    self.mox.ReplayAll()
    self.drvr._cleanup_failed_migration('/fake/inst')
def test_confirm_migration(self):
    """confirm_migration delegates to _cleanup_resize."""
    ins_ref = self._create_instance()

    self.mox.StubOutWithMock(self.drvr, "_cleanup_resize")
    self.drvr._cleanup_resize(ins_ref,
                              _fake_network_info(self.stubs, 1))

    self.mox.ReplayAll()
    self.drvr.confirm_migration("migration_ref", ins_ref,
                                _fake_network_info(self.stubs, 1))
def test_cleanup_resize_same_host(self):
    """On the same host only the '<inst>_resize' leftovers are removed."""
    CONF.set_override('policy_dirs', [])
    ins_ref = self._create_instance({'host': CONF.host})

    def fake_os_path_exists(path):
        return True

    self.stubs.Set(os.path, 'exists', fake_os_path_exists)
    self.mox.StubOutWithMock(libvirt_utils, 'get_instance_path')
    self.mox.StubOutWithMock(utils, 'execute')

    # forceold=True resolves the pre-resize directory location.
    libvirt_utils.get_instance_path(ins_ref,
                                    forceold=True).AndReturn('/fake/inst')
    utils.execute('rm', '-rf', '/fake/inst_resize', delay_on_retry=True,
                  attempts=5)

    self.mox.ReplayAll()
    self.drvr._cleanup_resize(ins_ref,
                              _fake_network_info(self.stubs, 1))
def test_cleanup_resize_not_same_host(self):
    """When the instance moved hosts, cleanup also undefines the domain,
    unplugs VIFs and unfilters the instance (stubbed here as no-ops)."""
    CONF.set_override('policy_dirs', [])
    host = 'not' + CONF.host
    ins_ref = self._create_instance({'host': host})

    def fake_os_path_exists(path):
        return True

    def fake_undefine_domain(instance):
        pass

    def fake_unplug_vifs(instance, network_info, ignore_errors=False):
        pass

    def fake_unfilter_instance(instance, network_info):
        pass

    self.stubs.Set(os.path, 'exists', fake_os_path_exists)
    self.stubs.Set(self.drvr, '_undefine_domain',
                   fake_undefine_domain)
    self.stubs.Set(self.drvr, 'unplug_vifs',
                   fake_unplug_vifs)
    self.stubs.Set(self.drvr.firewall_driver,
                   'unfilter_instance', fake_unfilter_instance)
    self.mox.StubOutWithMock(libvirt_utils, 'get_instance_path')
    self.mox.StubOutWithMock(utils, 'execute')

    libvirt_utils.get_instance_path(ins_ref,
                                    forceold=True).AndReturn('/fake/inst')
    utils.execute('rm', '-rf', '/fake/inst_resize', delay_on_retry=True,
                  attempts=5)

    self.mox.ReplayAll()
    self.drvr._cleanup_resize(ins_ref,
                              _fake_network_info(self.stubs, 1))
def test_get_instance_disk_info_exception(self):
    """A libvirtError from XMLDesc is surfaced as InstanceNotFound."""
    instance = self._create_instance()

    class FakeExceptionDomain(FakeVirtDomain):
        def __init__(self):
            super(FakeExceptionDomain, self).__init__()

        def XMLDesc(self, flags):
            # Any libvirt failure while fetching the domain XML.
            raise fakelibvirt.libvirtError("Libvirt error")

    def fake_get_domain(self, instance):
        return FakeExceptionDomain()

    self.stubs.Set(host.Host, 'get_domain',
                   fake_get_domain)
    self.assertRaises(exception.InstanceNotFound,
                      self.drvr.get_instance_disk_info,
                      instance)
@mock.patch('os.path.exists')
@mock.patch.object(lvm, 'list_volumes')
def test_lvm_disks(self, listlvs, exists):
    """_lvm_disks returns only the volumes prefixed with this instance's
    uuid, as full paths inside the configured images volume group."""
    instance = objects.Instance(uuid='fake-uuid', id=1)
    self.flags(images_volume_group='vols', group='libvirt')
    exists.return_value = True
    # 'other-uuid_foo' belongs to a different instance and is filtered out.
    listlvs.return_value = ['fake-uuid_foo',
                            'other-uuid_foo']
    disks = self.drvr._lvm_disks(instance)
    self.assertEqual(['/dev/vols/fake-uuid_foo'], disks)
def test_is_booted_from_volume(self):
    """_is_booted_from_volume is true unless the instance has an image_ref
    (i.e. it was booted from an image rather than a volume)."""
    func = libvirt_driver.LibvirtDriver._is_booted_from_volume
    instance, disk_mapping = {}, {}

    # No image_ref at all -> booted from volume.
    self.assertTrue(func(instance, disk_mapping))
    disk_mapping['disk'] = 'map'
    self.assertTrue(func(instance, disk_mapping))

    instance['image_ref'] = 'uuid'
    self.assertFalse(func(instance, disk_mapping))
@mock.patch('nova.virt.netutils.get_injected_network_template')
@mock.patch('nova.virt.disk.api.inject_data')
@mock.patch.object(libvirt_driver.LibvirtDriver, "_conn")
def _test_inject_data(self, driver_params, path, disk_params,
                      mock_conn, disk_inject_data, inj_network,
                      called=True):
    """Run LibvirtDriver._inject_data and check what reaches disk.api.

    :param driver_params: kwargs forwarded to _inject_data.
    :param path: fake image path; '/fail/path' makes the image appear
        missing, so injection must be skipped.
    :param disk_params: positional args expected in the inject_data call.
    :param called: whether disk.api.inject_data is expected to be invoked.
    """
    class ImageBackend(object):
        # Fake backend: its path decides whether the image "exists".
        path = '/path'

        def check_image_exists(self):
            if self.path == '/fail/path':
                return False
            return True

        def get_model(self, connection):
            return imgmodel.LocalFileImage(self.path,
                                           imgmodel.FORMAT_RAW)

    def fake_inj_network(*args, **kwds):
        # Pass the network_info through unchanged (None stays None).
        return args[0] or None

    inj_network.side_effect = fake_inj_network
    image_backend = ImageBackend()
    image_backend.path = path

    with mock.patch.object(
            self.drvr.image_backend,
            'image',
            return_value=image_backend):
        self.flags(inject_partition=0, group='libvirt')

        self.drvr._inject_data(**driver_params)

        if called:
            disk_inject_data.assert_called_once_with(
                mock.ANY,
                *disk_params,
                partition=None, mandatory=('files',))

        self.assertEqual(disk_inject_data.called, called)
def _test_inject_data_default_driver_params(self, **params):
    """Build the baseline kwargs for _test_inject_data; extra keyword
    arguments are forwarded to _create_instance."""
    driver_params = dict.fromkeys(
        ('network_info', 'admin_pass', 'files'), None)
    driver_params['instance'] = self._create_instance(params=params)
    driver_params['suffix'] = ''
    return driver_params
def test_inject_data_adminpass(self):
    """The admin password is injected only when inject_password is on."""
    self.flags(inject_password=True, group='libvirt')
    driver_params = self._test_inject_data_default_driver_params()
    driver_params['admin_pass'] = 'foobar'
    disk_params = [
        None,  # key
        None,  # net
        {},  # metadata
        'foobar',  # admin_pass
        None,  # files
    ]
    self._test_inject_data(driver_params, "/path", disk_params)

    # Test with the configuration set to false.
    self.flags(inject_password=False, group='libvirt')
    self._test_inject_data(driver_params, "/path",
                           disk_params, called=False)
def test_inject_data_key(self):
    """The SSH key is injected only when inject_key is on."""
    driver_params = self._test_inject_data_default_driver_params()
    driver_params['instance']['key_data'] = 'key-content'

    self.flags(inject_key=True, group='libvirt')
    disk_params = [
        'key-content',  # key
        None,  # net
        {},  # metadata
        None,  # admin_pass
        None,  # files
    ]
    self._test_inject_data(driver_params, "/path", disk_params)

    # Test with the configuration set to false.
    self.flags(inject_key=False, group='libvirt')
    self._test_inject_data(driver_params, "/path",
                           disk_params, called=False)
def test_inject_data_metadata(self):
    """Instance metadata is passed through to disk.api.inject_data."""
    instance_metadata = {'metadata': {'data': 'foo'}}
    driver_params = self._test_inject_data_default_driver_params(
        **instance_metadata
    )
    disk_params = [
        None,  # key
        None,  # net
        {'data': 'foo'},  # metadata
        None,  # admin_pass
        None,  # files
    ]
    self._test_inject_data(driver_params, "/path", disk_params)
def test_inject_data_files(self):
    """Personality files are passed through to disk.api.inject_data."""
    driver_params = self._test_inject_data_default_driver_params()
    driver_params['files'] = ['file1', 'file2']
    disk_params = [
        None,  # key
        None,  # net
        {},  # metadata
        None,  # admin_pass
        ['file1', 'file2'],  # files
    ]
    self._test_inject_data(driver_params, "/path", disk_params)
def test_inject_data_net(self):
    """Network info is passed through to disk.api.inject_data."""
    driver_params = self._test_inject_data_default_driver_params()
    driver_params['network_info'] = {'net': 'eno1'}
    disk_params = [
        None,  # key
        {'net': 'eno1'},  # net
        {},  # metadata
        None,  # admin_pass
        None,  # files
    ]
    self._test_inject_data(driver_params, "/path", disk_params)
def test_inject_not_exist_image(self):
    """No injection is attempted when the image path does not exist
    ('/fail/path' makes the fake backend report the image missing)."""
    driver_params = self._test_inject_data_default_driver_params()
    disk_params = [
        'key-content',  # key
        None,  # net
        None,  # metadata
        None,  # admin_pass
        None,  # files
    ]
    self._test_inject_data(driver_params, "/fail/path",
                           disk_params, called=False)
def _test_attach_detach_interface(self, method, power_state,
                                  expected_flags):
    """Common driver for attach_interface/detach_interface tests.

    :param method: 'attach_interface' or 'detach_interface'.
    :param power_state: domain power state returned by domain.info();
        drives which libvirt affect flags the driver must use.
    :param expected_flags: flags expected on the device flags call.

    NOTE: mox record/replay — expectations below mirror driver call order.
    """
    instance = self._create_instance()
    network_info = _fake_network_info(self.stubs, 1)
    domain = FakeVirtDomain()
    self.mox.StubOutWithMock(host.Host, 'get_domain')
    self.mox.StubOutWithMock(self.drvr.firewall_driver,
                             'setup_basic_filtering')
    self.mox.StubOutWithMock(domain, 'attachDeviceFlags')
    self.mox.StubOutWithMock(domain, 'info')

    host.Host.get_domain(instance).AndReturn(domain)
    if method == 'attach_interface':
        # Only attach sets up firewall filtering for the new VIF.
        self.drvr.firewall_driver.setup_basic_filtering(
            instance, [network_info[0]])

    fake_image_meta = {'id': instance.image_ref}
    fake_image_meta_obj = objects.ImageMeta.from_dict(
        fake_image_meta)

    # Capture the expected VIF config before stubbing get_config.
    expected = self.drvr.vif_driver.get_config(
        instance, network_info[0], fake_image_meta_obj, instance.flavor,
        CONF.libvirt.virt_type, self.drvr._host)
    self.mox.StubOutWithMock(self.drvr.vif_driver,
                             'get_config')
    self.drvr.vif_driver.get_config(
        instance, network_info[0],
        mox.IsA(objects.ImageMeta),
        mox.IsA(objects.Flavor),
        CONF.libvirt.virt_type,
        self.drvr._host).AndReturn(expected)
    domain.info().AndReturn([power_state, 1, 2, 3, 4])

    if method == 'attach_interface':
        domain.attachDeviceFlags(expected.to_xml(), flags=expected_flags)
    elif method == 'detach_interface':
        domain.detachDeviceFlags(expected.to_xml(), expected_flags)

    self.mox.ReplayAll()
    if method == 'attach_interface':
        self.drvr.attach_interface(
            instance, fake_image_meta, network_info[0])
    elif method == 'detach_interface':
        self.drvr.detach_interface(
            instance, network_info[0])
    self.mox.VerifyAll()
def test_attach_interface_with_running_instance(self):
    """Running domain: attach must affect both config and live state."""
    self._test_attach_detach_interface(
        'attach_interface', power_state.RUNNING,
        expected_flags=(fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG |
                        fakelibvirt.VIR_DOMAIN_AFFECT_LIVE))

def test_attach_interface_with_pause_instance(self):
    """Paused domain: still uses CONFIG|LIVE flags."""
    self._test_attach_detach_interface(
        'attach_interface', power_state.PAUSED,
        expected_flags=(fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG |
                        fakelibvirt.VIR_DOMAIN_AFFECT_LIVE))

def test_attach_interface_with_shutdown_instance(self):
    """Shut-down domain: only the persistent config is touched."""
    self._test_attach_detach_interface(
        'attach_interface', power_state.SHUTDOWN,
        expected_flags=(fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG))

def test_detach_interface_with_running_instance(self):
    """Running domain: detach must affect both config and live state."""
    self._test_attach_detach_interface(
        'detach_interface', power_state.RUNNING,
        expected_flags=(fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG |
                        fakelibvirt.VIR_DOMAIN_AFFECT_LIVE))

def test_detach_interface_with_pause_instance(self):
    """Paused domain: still uses CONFIG|LIVE flags."""
    self._test_attach_detach_interface(
        'detach_interface', power_state.PAUSED,
        expected_flags=(fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG |
                        fakelibvirt.VIR_DOMAIN_AFFECT_LIVE))

def test_detach_interface_with_shutdown_instance(self):
    """Shut-down domain: only the persistent config is touched."""
    self._test_attach_detach_interface(
        'detach_interface', power_state.SHUTDOWN,
        expected_flags=(fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG))
def test_rescue(self):
    """rescue() without a config drive: writes unrescue XML, caches the
    rescue kernel/ramdisk/disk images, destroys the domain and recreates
    it from the rescue XML.

    NOTE: mox record/replay — expectation order matches the driver.
    """
    instance = self._create_instance({'config_drive': None})
    dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                "<devices>"
                "<disk type='file'><driver name='qemu' type='raw'/>"
                "<source file='/test/disk'/>"
                "<target dev='vda' bus='virtio'/></disk>"
                "<disk type='file'><driver name='qemu' type='qcow2'/>"
                "<source file='/test/disk.local'/>"
                "<target dev='vdb' bus='virtio'/></disk>"
                "</devices></domain>")
    network_info = _fake_network_info(self.stubs, 1)

    self.mox.StubOutWithMock(self.drvr,
                             '_get_existing_domain_xml')
    self.mox.StubOutWithMock(libvirt_utils, 'write_to_file')
    self.mox.StubOutWithMock(imagebackend.Backend, 'image')
    self.mox.StubOutWithMock(imagebackend.Image, 'cache')
    self.mox.StubOutWithMock(self.drvr, '_get_guest_xml')
    self.mox.StubOutWithMock(self.drvr, '_destroy')
    self.mox.StubOutWithMock(self.drvr, '_create_domain')

    self.drvr._get_existing_domain_xml(mox.IgnoreArg(),
                mox.IgnoreArg()).MultipleTimes().AndReturn(dummyxml)
    # Two writes: the saved unrescue XML plus the rescue domain XML.
    libvirt_utils.write_to_file(mox.IgnoreArg(), mox.IgnoreArg())
    libvirt_utils.write_to_file(mox.IgnoreArg(), mox.IgnoreArg(),
                                mox.IgnoreArg())
    imagebackend.Backend.image(instance, 'kernel.rescue', 'raw'
                               ).AndReturn(fake_imagebackend.Raw())
    imagebackend.Backend.image(instance, 'ramdisk.rescue', 'raw'
                               ).AndReturn(fake_imagebackend.Raw())
    imagebackend.Backend.image(instance, 'disk.rescue', 'default'
                               ).AndReturn(fake_imagebackend.Raw())
    imagebackend.Image.cache(context=mox.IgnoreArg(),
                             fetch_func=mox.IgnoreArg(),
                             filename=mox.IgnoreArg(),
                             image_id=mox.IgnoreArg(),
                             project_id=mox.IgnoreArg(),
                             user_id=mox.IgnoreArg()).MultipleTimes()
    imagebackend.Image.cache(context=mox.IgnoreArg(),
                             fetch_func=mox.IgnoreArg(),
                             filename=mox.IgnoreArg(),
                             image_id=mox.IgnoreArg(),
                             project_id=mox.IgnoreArg(),
                             size=None, user_id=mox.IgnoreArg())
    image_meta = {'id': 'fake', 'name': 'fake'}
    self.drvr._get_guest_xml(mox.IgnoreArg(), instance,
                             network_info, mox.IgnoreArg(),
                             mox.IsA(objects.ImageMeta),
                             rescue=mox.IgnoreArg(),
                             write_to_disk=mox.IgnoreArg()
                             ).AndReturn(dummyxml)
    self.drvr._destroy(instance)
    self.drvr._create_domain(mox.IgnoreArg())

    self.mox.ReplayAll()

    rescue_password = 'fake_password'

    self.drvr.rescue(self.context, instance,
                     network_info, image_meta, rescue_password)
    self.mox.VerifyAll()
@mock.patch.object(libvirt_utils, 'get_instance_path')
@mock.patch.object(libvirt_utils, 'load_file')
@mock.patch.object(host.Host, "get_domain")
def test_unrescue(self, mock_get_domain, mock_load_file,
                  mock_get_instance_path):
    """unrescue() restores the saved XML, deletes unrescue.xml and all
    '.rescue' files, and removes any LVM rescue volumes."""
    dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                "<devices>"
                "<disk type='block' device='disk'>"
                "<source dev='/dev/some-vg/some-lv'/>"
                "<target dev='vda' bus='virtio'/></disk>"
                "</devices></domain>")

    mock_get_instance_path.return_value = '/path'
    # NOTE(review): 'fake=uuid' looks like a typo for 'fake-uuid' used in
    # the sibling tests; harmless here, but confirm before relying on it.
    instance = objects.Instance(uuid='fake=uuid', id=1)
    fake_dom = FakeVirtDomain(fake_xml=dummyxml)
    mock_get_domain.return_value = fake_dom
    mock_load_file.return_value = "fake_unrescue_xml"
    unrescue_xml_path = os.path.join('/path', 'unrescue.xml')
    rescue_file = os.path.join('/path', 'rescue.file')

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    with contextlib.nested(
            mock.patch.object(drvr, '_destroy'),
            mock.patch.object(drvr, '_create_domain'),
            mock.patch.object(libvirt_utils, 'file_delete'),
            mock.patch.object(drvr, '_lvm_disks',
                              return_value=['lvm.rescue']),
            mock.patch.object(lvm, 'remove_volumes'),
            mock.patch.object(glob, 'iglob', return_value=[rescue_file])
            ) as (mock_destroy, mock_create, mock_del, mock_lvm_disks,
                  mock_remove_volumes, mock_glob):
        drvr.unrescue(instance, None)
        mock_destroy.assert_called_once_with(instance)
        mock_create.assert_called_once_with("fake_unrescue_xml",
                                            fake_dom)
        # Two deletions: the unrescue XML, then the globbed rescue file.
        self.assertEqual(2, mock_del.call_count)
        self.assertEqual(unrescue_xml_path,
                         mock_del.call_args_list[0][0][0])
        self.assertEqual(rescue_file, mock_del.call_args_list[1][0][0])
        mock_remove_volumes.assert_called_once_with(['lvm.rescue'])
@mock.patch(
    'nova.virt.configdrive.ConfigDriveBuilder.add_instance_metadata')
@mock.patch('nova.virt.configdrive.ConfigDriveBuilder.make_drive')
def test_rescue_config_drive(self, mock_make, mock_add):
    """rescue() with a config drive also builds 'disk.config.rescue'
    under the instance path via the ConfigDriveBuilder.

    NOTE: mox record/replay — expectation order matches the driver.
    """
    instance = self._create_instance()
    uuid = instance.uuid
    configdrive_path = uuid + '/disk.config.rescue'
    dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                "<devices>"
                "<disk type='file'><driver name='qemu' type='raw'/>"
                "<source file='/test/disk'/>"
                "<target dev='vda' bus='virtio'/></disk>"
                "<disk type='file'><driver name='qemu' type='qcow2'/>"
                "<source file='/test/disk.local'/>"
                "<target dev='vdb' bus='virtio'/></disk>"
                "</devices></domain>")
    network_info = _fake_network_info(self.stubs, 1)

    self.mox.StubOutWithMock(self.drvr,
                             '_get_existing_domain_xml')
    self.mox.StubOutWithMock(libvirt_utils, 'write_to_file')
    self.mox.StubOutWithMock(imagebackend.Backend, 'image')
    self.mox.StubOutWithMock(imagebackend.Image, 'cache')
    self.mox.StubOutWithMock(instance_metadata.InstanceMetadata,
                             '__init__')
    self.mox.StubOutWithMock(self.drvr, '_get_guest_xml')
    self.mox.StubOutWithMock(self.drvr, '_destroy')
    self.mox.StubOutWithMock(self.drvr, '_create_domain')

    self.drvr._get_existing_domain_xml(mox.IgnoreArg(),
                mox.IgnoreArg()).MultipleTimes().AndReturn(dummyxml)
    libvirt_utils.write_to_file(mox.IgnoreArg(), mox.IgnoreArg())
    libvirt_utils.write_to_file(mox.IgnoreArg(), mox.IgnoreArg(),
                                mox.IgnoreArg())

    imagebackend.Backend.image(instance, 'kernel.rescue', 'raw'
                               ).AndReturn(fake_imagebackend.Raw())
    imagebackend.Backend.image(instance, 'ramdisk.rescue', 'raw'
                               ).AndReturn(fake_imagebackend.Raw())
    imagebackend.Backend.image(instance, 'disk.rescue', 'default'
                               ).AndReturn(fake_imagebackend.Raw())
    # Extra image vs. test_rescue: the rescue config drive itself.
    imagebackend.Backend.image(instance, 'disk.config.rescue', 'raw'
                               ).AndReturn(fake_imagebackend.Raw())

    imagebackend.Image.cache(context=mox.IgnoreArg(),
                             fetch_func=mox.IgnoreArg(),
                             filename=mox.IgnoreArg(),
                             image_id=mox.IgnoreArg(),
                             project_id=mox.IgnoreArg(),
                             user_id=mox.IgnoreArg()).MultipleTimes()

    imagebackend.Image.cache(context=mox.IgnoreArg(),
                             fetch_func=mox.IgnoreArg(),
                             filename=mox.IgnoreArg(),
                             image_id=mox.IgnoreArg(),
                             project_id=mox.IgnoreArg(),
                             size=None, user_id=mox.IgnoreArg())

    instance_metadata.InstanceMetadata.__init__(mox.IgnoreArg(),
                                                content=mox.IgnoreArg(),
                                                extra_md=mox.IgnoreArg(),
                                                network_info=mox.IgnoreArg())
    image_meta = {'id': 'fake', 'name': 'fake'}
    self.drvr._get_guest_xml(mox.IgnoreArg(), instance,
                             network_info, mox.IgnoreArg(),
                             mox.IsA(objects.ImageMeta),
                             rescue=mox.IgnoreArg(),
                             write_to_disk=mox.IgnoreArg()
                             ).AndReturn(dummyxml)
    self.drvr._destroy(instance)
    self.drvr._create_domain(mox.IgnoreArg())

    self.mox.ReplayAll()

    rescue_password = 'fake_password'

    self.drvr.rescue(self.context, instance, network_info,
                     image_meta, rescue_password)
    self.mox.VerifyAll()

    mock_add.assert_any_call(mock.ANY)
    expected_call = [mock.call(os.path.join(CONF.instances_path,
                                            configdrive_path))]
    mock_make.assert_has_calls(expected_call)
@mock.patch('shutil.rmtree')
@mock.patch('nova.utils.execute')
@mock.patch('os.path.exists')
@mock.patch('nova.virt.libvirt.utils.get_instance_path')
def test_delete_instance_files(self, get_instance_path, exists, exe,
                               shutil):
    """Happy path: dir is renamed to '/path_del', removed, and the final
    exists() check reporting False marks the deletion successful."""
    get_instance_path.return_value = '/path'
    instance = objects.Instance(uuid='fake-uuid', id=1)

    # exists() call sequence consumed by the driver; the final False
    # means everything is gone.
    exists.side_effect = [False, False, True, False]

    result = self.drvr.delete_instance_files(instance)
    get_instance_path.assert_called_with(instance)
    exe.assert_called_with('mv', '/path', '/path_del')
    shutil.assert_called_with('/path_del')
    self.assertTrue(result)
@mock.patch('shutil.rmtree')
@mock.patch('nova.utils.execute')
@mock.patch('os.path.exists')
@mock.patch('os.kill')
@mock.patch('nova.virt.libvirt.utils.get_instance_path')
def test_delete_instance_files_kill_running(
        self, get_instance_path, kill, exists, exe, shutil):
    """Outstanding jobs (pids 3 and 4) are SIGKILLed — each followed by a
    kill(pid, 0) liveness probe — and the job tracker entry is cleared."""
    get_instance_path.return_value = '/path'
    instance = objects.Instance(uuid='fake-uuid', id=1)
    self.drvr.job_tracker.jobs[instance.uuid] = [3, 4]

    exists.side_effect = [False, False, True, False]

    result = self.drvr.delete_instance_files(instance)
    get_instance_path.assert_called_with(instance)
    exe.assert_called_with('mv', '/path', '/path_del')
    kill.assert_has_calls([mock.call(3, signal.SIGKILL), mock.call(3, 0),
                           mock.call(4, signal.SIGKILL), mock.call(4, 0)])
    shutil.assert_called_with('/path_del')
    self.assertTrue(result)
    self.assertNotIn(instance.uuid, self.drvr.job_tracker.jobs)
@mock.patch('shutil.rmtree')
@mock.patch('nova.utils.execute')
@mock.patch('os.path.exists')
@mock.patch('nova.virt.libvirt.utils.get_instance_path')
def test_delete_instance_files_resize(self, get_instance_path, exists,
                                      exe, shutil):
    """If renaming '/path' fails, the driver falls back to renaming the
    '/path_resize' directory, which succeeds here."""
    get_instance_path.return_value = '/path'
    instance = objects.Instance(uuid='fake-uuid', id=1)

    # First mv raises, second (the _resize fallback) succeeds.
    nova.utils.execute.side_effect = [Exception(), None]
    exists.side_effect = [False, False, True, False]

    result = self.drvr.delete_instance_files(instance)
    get_instance_path.assert_called_with(instance)
    expected = [mock.call('mv', '/path', '/path_del'),
                mock.call('mv', '/path_resize', '/path_del')]
    self.assertEqual(expected, exe.mock_calls)
    shutil.assert_called_with('/path_del')
    self.assertTrue(result)
@mock.patch('shutil.rmtree')
@mock.patch('nova.utils.execute')
@mock.patch('os.path.exists')
@mock.patch('nova.virt.libvirt.utils.get_instance_path')
def test_delete_instance_files_failed(self, get_instance_path, exists, exe,
                                      shutil):
    """If '/path_del' still exists after rmtree (final exists() -> True),
    the deletion is reported as failed."""
    get_instance_path.return_value = '/path'
    instance = objects.Instance(uuid='fake-uuid', id=1)

    exists.side_effect = [False, False, True, True]

    result = self.drvr.delete_instance_files(instance)
    get_instance_path.assert_called_with(instance)
    exe.assert_called_with('mv', '/path', '/path_del')
    shutil.assert_called_with('/path_del')
    self.assertFalse(result)
@mock.patch('shutil.rmtree')
@mock.patch('nova.utils.execute')
@mock.patch('os.path.exists')
@mock.patch('nova.virt.libvirt.utils.get_instance_path')
def test_delete_instance_files_mv_failed(self, get_instance_path, exists,
                                         exe, shutil):
    """Both mv attempts keep failing and the directories still exist, so
    the whole mv sequence is retried once and deletion reports failure."""
    get_instance_path.return_value = '/path'
    instance = objects.Instance(uuid='fake-uuid', id=1)

    nova.utils.execute.side_effect = Exception()
    exists.side_effect = [True, True]

    result = self.drvr.delete_instance_files(instance)
    get_instance_path.assert_called_with(instance)
    # Two full passes of (mv path, mv path_resize).
    expected = [mock.call('mv', '/path', '/path_del'),
                mock.call('mv', '/path_resize', '/path_del')] * 2
    self.assertEqual(expected, exe.mock_calls)
    self.assertFalse(result)
@mock.patch('shutil.rmtree')
@mock.patch('nova.utils.execute')
@mock.patch('os.path.exists')
@mock.patch('nova.virt.libvirt.utils.get_instance_path')
def test_delete_instance_files_resume(self, get_instance_path, exists,
                                      exe, shutil):
    """mv keeps failing but '/path_del' turns out to exist anyway (e.g. a
    previous partial run), so rmtree proceeds and deletion succeeds."""
    get_instance_path.return_value = '/path'
    instance = objects.Instance(uuid='fake-uuid', id=1)

    nova.utils.execute.side_effect = Exception()
    exists.side_effect = [False, False, True, False]

    result = self.drvr.delete_instance_files(instance)
    get_instance_path.assert_called_with(instance)
    expected = [mock.call('mv', '/path', '/path_del'),
                mock.call('mv', '/path_resize', '/path_del')] * 2
    self.assertEqual(expected, exe.mock_calls)
    self.assertTrue(result)
@mock.patch('shutil.rmtree')
@mock.patch('nova.utils.execute')
@mock.patch('os.path.exists')
@mock.patch('nova.virt.libvirt.utils.get_instance_path')
def test_delete_instance_files_none(self, get_instance_path, exists,
                                    exe, shutil):
    """Nothing exists at all: mv fails everywhere, no rmtree is ever
    attempted, and the deletion still counts as successful."""
    get_instance_path.return_value = '/path'
    instance = objects.Instance(uuid='fake-uuid', id=1)

    nova.utils.execute.side_effect = Exception()
    exists.side_effect = [False, False, False, False]

    result = self.drvr.delete_instance_files(instance)
    get_instance_path.assert_called_with(instance)
    expected = [mock.call('mv', '/path', '/path_del'),
                mock.call('mv', '/path_resize', '/path_del')] * 2
    self.assertEqual(expected, exe.mock_calls)
    self.assertEqual(0, len(shutil.mock_calls))
    self.assertTrue(result)
@mock.patch('shutil.rmtree')
@mock.patch('nova.utils.execute')
@mock.patch('os.path.exists')
@mock.patch('nova.virt.libvirt.utils.get_instance_path')
def test_delete_instance_files_concurrent(self, get_instance_path, exists,
                                          exe, shutil):
    """Concurrent deletion: the first pass fails both mv attempts, the
    retried 'mv /path' then succeeds, and cleanup completes."""
    get_instance_path.return_value = '/path'
    instance = objects.Instance(uuid='fake-uuid', id=1)

    nova.utils.execute.side_effect = [Exception(), Exception(), None]
    exists.side_effect = [False, False, True, False]

    result = self.drvr.delete_instance_files(instance)
    get_instance_path.assert_called_with(instance)
    expected = [mock.call('mv', '/path', '/path_del'),
                mock.call('mv', '/path_resize', '/path_del')]
    # The retry repeats only the first mv before succeeding.
    expected.append(expected[0])
    self.assertEqual(expected, exe.mock_calls)
    shutil.assert_called_with('/path_del')
    self.assertTrue(result)
def _assert_on_id_map(self, idmap, klass, start, target, count):
self.assertIsInstance(idmap, klass)
self.assertEqual(start, idmap.start)
self.assertEqual(target, idmap.target)
self.assertEqual(count, idmap.count)
def test_get_id_maps(self):
    """LXC with both uid and gid maps yields UID maps then GID maps,
    preserving the configured order within each kind."""
    self.flags(virt_type="lxc", group="libvirt")
    CONF.libvirt.virt_type = "lxc"
    # Map format is "start:target:count".
    CONF.libvirt.uid_maps = ["0:10000:1", "1:20000:10"]
    CONF.libvirt.gid_maps = ["0:10000:1", "1:20000:10"]

    idmaps = self.drvr._get_guest_idmaps()

    self.assertEqual(len(idmaps), 4)
    self._assert_on_id_map(idmaps[0],
                           vconfig.LibvirtConfigGuestUIDMap,
                           0, 10000, 1)
    self._assert_on_id_map(idmaps[1],
                           vconfig.LibvirtConfigGuestUIDMap,
                           1, 20000, 10)
    self._assert_on_id_map(idmaps[2],
                           vconfig.LibvirtConfigGuestGIDMap,
                           0, 10000, 1)
    self._assert_on_id_map(idmaps[3],
                           vconfig.LibvirtConfigGuestGIDMap,
                           1, 20000, 10)
def test_get_id_maps_not_lxc(self):
    """id maps are only generated for LXC; other virt types get none."""
    CONF.libvirt.uid_maps = ["0:10000:1", "1:20000:10"]
    CONF.libvirt.gid_maps = ["0:10000:1", "1:20000:10"]

    idmaps = self.drvr._get_guest_idmaps()

    self.assertEqual(0, len(idmaps))
def test_get_id_maps_only_uid(self):
    """Only uid maps configured -> only UID map objects are produced."""
    self.flags(virt_type="lxc", group="libvirt")
    CONF.libvirt.uid_maps = ["0:10000:1", "1:20000:10"]
    CONF.libvirt.gid_maps = []

    idmaps = self.drvr._get_guest_idmaps()

    self.assertEqual(2, len(idmaps))
    self._assert_on_id_map(idmaps[0],
                           vconfig.LibvirtConfigGuestUIDMap,
                           0, 10000, 1)
    self._assert_on_id_map(idmaps[1],
                           vconfig.LibvirtConfigGuestUIDMap,
                           1, 20000, 10)
def test_get_id_maps_only_gid(self):
    """Only gid maps configured -> only GID map objects are produced."""
    self.flags(virt_type="lxc", group="libvirt")
    CONF.libvirt.uid_maps = []
    CONF.libvirt.gid_maps = ["0:10000:1", "1:20000:10"]

    idmaps = self.drvr._get_guest_idmaps()

    self.assertEqual(2, len(idmaps))
    self._assert_on_id_map(idmaps[0],
                           vconfig.LibvirtConfigGuestGIDMap,
                           0, 10000, 1)
    self._assert_on_id_map(idmaps[1],
                           vconfig.LibvirtConfigGuestGIDMap,
                           1, 20000, 10)
def test_instance_on_disk(self):
    """Default (local file) storage is not shared -> instance_on_disk is
    False."""
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(uuid='fake-uuid', id=1)
    self.assertFalse(drvr.instance_on_disk(instance))

def test_instance_on_disk_rbd(self):
    """RBD-backed storage is shared -> instance_on_disk is True."""
    self.flags(images_type='rbd', group='libvirt')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(uuid='fake-uuid', id=1)
    self.assertTrue(drvr.instance_on_disk(instance))
def test_get_interfaces(self):
    """_get_interfaces collects the <target dev=.../> names of all
    <interface> elements from the domain XML, in document order."""
    dom_xml = """
        <domain type="qemu">
            <devices>
                <interface type="ethernet">
                    <mac address="fe:eb:da:ed:ef:ac"/>
                    <model type="virtio"/>
                    <target dev="eth0"/>
                </interface>
                <interface type="bridge">
                    <mac address="ca:fe:de:ad:be:ef"/>
                    <model type="virtio"/>
                    <target dev="br0"/>
                </interface>
            </devices>
        </domain>"""

    list_interfaces = ['eth0', 'br0']

    drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    self.assertEqual(list_interfaces, drv._get_interfaces(dom_xml))
def test_get_disk_xml(self):
    """Guest.get_disk looks a disk up by target dev name and returns its
    config object (None for a device that is not present)."""
    dom_xml = """
          <domain type="kvm">
            <devices>
              <disk type="file">
                 <source file="disk1_file"/>
                 <target dev="vda" bus="virtio"/>
                 <serial>0e38683e-f0af-418f-a3f1-6b67ea0f919d</serial>
              </disk>
              <disk type="block">
                <source dev="/path/to/dev/1"/>
                <target dev="vdb" bus="virtio" serial="1234"/>
              </disk>
            </devices>
          </domain>
          """

    diska_xml = """<disk type="file" device="disk">
  <source file="disk1_file"/>
  <target bus="virtio" dev="vda"/>
  <serial>0e38683e-f0af-418f-a3f1-6b67ea0f919d</serial>
</disk>"""

    diskb_xml = """<disk type="block" device="disk">
  <source dev="/path/to/dev/1"/>
  <target bus="virtio" dev="vdb"/>
</disk>"""

    dom = mock.MagicMock()
    dom.XMLDesc.return_value = dom_xml
    guest = libvirt_guest.Guest(dom)

    # NOTE(gcb): etree.tostring(node) returns an extra line with
    # some white spaces, need to strip it.
    actual_diska_xml = guest.get_disk('vda').to_xml()
    self.assertEqual(diska_xml.strip(), actual_diska_xml.strip())

    actual_diskb_xml = guest.get_disk('vdb').to_xml()
    self.assertEqual(diskb_xml.strip(), actual_diskb_xml.strip())

    self.assertIsNone(guest.get_disk('vdc'))
def test_vcpu_model_from_config(self):
    """_cpu_config_to_vcpu_model maps a libvirt CPU config to a
    VirtCPUModel (None in, None out) and updates an existing model in
    place when one is passed as the second argument."""
    drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    vcpu_model = drv._cpu_config_to_vcpu_model(None, None)
    self.assertIsNone(vcpu_model)

    cpu = vconfig.LibvirtConfigGuestCPU()
    feature1 = vconfig.LibvirtConfigGuestCPUFeature()
    feature2 = vconfig.LibvirtConfigGuestCPUFeature()
    feature1.name = 'sse'
    feature1.policy = cpumodel.POLICY_REQUIRE
    feature2.name = 'aes'
    feature2.policy = cpumodel.POLICY_REQUIRE

    cpu.features = set([feature1, feature2])
    cpu.mode = cpumodel.MODE_CUSTOM
    cpu.sockets = 1
    cpu.cores = 2
    cpu.threads = 4
    vcpu_model = drv._cpu_config_to_vcpu_model(cpu, None)
    self.assertEqual(cpumodel.MATCH_EXACT, vcpu_model.match)
    self.assertEqual(cpumodel.MODE_CUSTOM, vcpu_model.mode)
    self.assertEqual(4, vcpu_model.topology.threads)
    self.assertEqual(set(['sse', 'aes']),
                     set([f.name for f in vcpu_model.features]))

    # Passing the existing model mutates it and returns the same object.
    cpu.mode = cpumodel.MODE_HOST_MODEL
    vcpu_model_1 = drv._cpu_config_to_vcpu_model(cpu, vcpu_model)
    self.assertEqual(cpumodel.MODE_HOST_MODEL, vcpu_model.mode)
    self.assertEqual(vcpu_model, vcpu_model_1)
@mock.patch.object(lvm, 'get_volume_size', return_value=10)
@mock.patch.object(host.Host, "get_guest")
@mock.patch.object(dmcrypt, 'delete_volume')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.unfilter_instance')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._undefine_domain')
@mock.patch.object(objects.Instance, 'save')
def test_cleanup_lvm_encrypted(self, mock_save, mock_undefine_domain,
                               mock_unfilter, mock_delete_volume,
                               mock_get_guest, mock_get_size):
    """Full cleanup() of an LVM-backed instance with an ephemeral
    encryption key removes the dm-crypt mapping for its disk."""
    drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance = objects.Instance(uuid='fake-uuid', id=1,
                                ephemeral_key_uuid='000-000-000')
    instance.system_metadata = {}
    block_device_info = {'root_device_name': '/dev/vda',
                         'ephemerals': [],
                         'block_device_mapping': []}
    self.flags(images_type="lvm",
               group='libvirt')
    # The '-dmcrypt' source suffix marks the disk as encrypted.
    dom_xml = """
          <domain type="kvm">
            <devices>
              <disk type="block">
                <driver name='qemu' type='raw' cache='none'/>
                <source dev="/dev/mapper/fake-dmcrypt"/>
                <target dev="vda" bus="virtio" serial="1234"/>
              </disk>
            </devices>
          </domain>
          """
    dom = mock.MagicMock()
    dom.XMLDesc.return_value = dom_xml
    guest = libvirt_guest.Guest(dom)
    mock_get_guest.return_value = guest
    drv.cleanup(self.context, instance, 'fake_network', destroy_vifs=False,
                block_device_info=block_device_info)
    mock_delete_volume.assert_called_once_with('/dev/mapper/fake-dmcrypt')
@mock.patch.object(lvm, 'get_volume_size', return_value=10)
@mock.patch.object(host.Host, "get_guest")
@mock.patch.object(dmcrypt, 'delete_volume')
def _test_cleanup_lvm(self, mock_delete_volume, mock_get_guest, mock_size,
                      encrypted=False):
    """_cleanup_lvm deletes the dm-crypt mapping only when the disk's
    source device carries the '-dmcrypt' suffix."""
    drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance = objects.Instance(uuid='fake-uuid', id=1,
                                ephemeral_key_uuid='000-000-000')
    block_device_info = {'root_device_name': '/dev/vda',
                         'ephemerals': [],
                         'block_device_mapping': []}
    dev_name = 'fake-dmcrypt' if encrypted else 'fake'
    dom_xml = """
          <domain type="kvm">
            <devices>
              <disk type="block">
                <driver name='qemu' type='raw' cache='none'/>
                <source dev="/dev/mapper/%s"/>
                <target dev="vda" bus="virtio" serial="1234"/>
              </disk>
            </devices>
          </domain>
          """ % dev_name
    dom = mock.MagicMock()
    dom.XMLDesc.return_value = dom_xml
    guest = libvirt_guest.Guest(dom)
    mock_get_guest.return_value = guest
    drv._cleanup_lvm(instance, block_device_info)

    if encrypted:
        mock_delete_volume.assert_called_once_with(
            '/dev/mapper/fake-dmcrypt')
    else:
        self.assertFalse(mock_delete_volume.called)
def test_cleanup_lvm(self):
    """Unencrypted LVM disk: no dm-crypt volume is deleted."""
    self._test_cleanup_lvm()

def test_cleanup_encrypted_lvm(self):
    """Encrypted LVM disk: its dm-crypt volume is deleted."""
    self._test_cleanup_lvm(encrypted=True)
def test_vcpu_model_to_config(self):
    """_vcpu_model_to_cpu_config converts a VirtCPUModel (mode, topology,
    features with per-feature policies) back to a libvirt CPU config."""
    drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

    feature = objects.VirtCPUFeature(policy=cpumodel.POLICY_REQUIRE,
                                     name='sse')
    feature_1 = objects.VirtCPUFeature(policy=cpumodel.POLICY_FORBID,
                                       name='aes')
    topo = objects.VirtCPUTopology(sockets=1, cores=2, threads=4)
    vcpu_model = objects.VirtCPUModel(mode=cpumodel.MODE_HOST_MODEL,
                                      features=[feature, feature_1],
                                      topology=topo)

    cpu = drv._vcpu_model_to_cpu_config(vcpu_model)
    self.assertEqual(cpumodel.MODE_HOST_MODEL, cpu.mode)
    self.assertEqual(1, cpu.sockets)
    self.assertEqual(4, cpu.threads)
    self.assertEqual(2, len(cpu.features))
    self.assertEqual(set(['sse', 'aes']),
                     set([f.name for f in cpu.features]))
    self.assertEqual(set([cpumodel.POLICY_REQUIRE,
                          cpumodel.POLICY_FORBID]),
                     set([f.policy for f in cpu.features]))
class LibvirtVolumeUsageTestCase(test.NoDBTestCase):
    """Test for LibvirtDriver.get_all_volume_usage."""

    def setUp(self):
        super(LibvirtVolumeUsageTestCase, self).setUp()
        self.drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.c = context.get_admin_context()
        self.ins_ref = objects.Instance(
            id=1729,
            uuid='875a8070-d0b9-4949-8b31-104d125c9a64'
        )

        # verify bootable volume device path also
        self.bdms = [{'volume_id': 1,
                      'device_name': '/dev/vde'},
                     {'volume_id': 2,
                      'device_name': 'vda'}]

    def test_get_all_volume_usage(self):
        """Per-volume usage is assembled from block_stats for each BDM;
        the fake stats tuple is (rd_req, rd_bytes, wr_req, wr_bytes, errs).
        """
        def fake_block_stats(instance_name, disk):
            return (169, 688640, 0, 0, -1)

        self.stubs.Set(self.drvr, 'block_stats', fake_block_stats)
        vol_usage = self.drvr.get_all_volume_usage(self.c,
              [dict(instance=self.ins_ref, instance_bdms=self.bdms)])

        expected_usage = [{'volume': 1,
                           'instance': self.ins_ref,
                           'rd_bytes': 688640, 'wr_req': 0,
                           'rd_req': 169, 'wr_bytes': 0},
                          {'volume': 2,
                           'instance': self.ins_ref,
                           'rd_bytes': 688640, 'wr_req': 0,
                           'rd_req': 169, 'wr_bytes': 0}]
        self.assertEqual(vol_usage, expected_usage)

    def test_get_all_volume_usage_device_not_found(self):
        """A missing domain yields an empty usage list, not an error."""
        def fake_get_domain(self, instance):
            raise exception.InstanceNotFound(instance_id="fakedom")

        self.stubs.Set(host.Host, 'get_domain', fake_get_domain)
        vol_usage = self.drvr.get_all_volume_usage(self.c,
              [dict(instance=self.ins_ref, instance_bdms=self.bdms)])
        self.assertEqual(vol_usage, [])
class LibvirtNonblockingTestCase(test.NoDBTestCase):
    """Test libvirtd calls are nonblocking."""

    def setUp(self):
        super(LibvirtNonblockingTestCase, self).setUp()
        self.flags(connection_uri="test:///default",
                   group='libvirt')

    def test_connection_to_primitive(self):
        # Test bug 962840.
        # Serializing the driver must not choke on the libvirt connection.
        import nova.virt.libvirt.driver as libvirt_driver
        drvr = libvirt_driver.LibvirtDriver('')
        drvr.set_host_enabled = mock.Mock()
        jsonutils.to_primitive(drvr._conn, convert_instances=True)

    def test_tpool_execute_calls_libvirt(self):
        """All libvirt calls must be routed through eventlet.tpool.execute
        so they cannot block the event loop (mox record/replay verifies
        the exact call sequence).
        """
        conn = fakelibvirt.virConnect()
        conn.is_expected = True

        self.mox.StubOutWithMock(eventlet.tpool, 'execute')
        eventlet.tpool.execute(
            fakelibvirt.openAuth,
            'test:///default',
            mox.IgnoreArg(),
            mox.IgnoreArg()).AndReturn(conn)
        eventlet.tpool.execute(
            conn.domainEventRegisterAny,
            None,
            fakelibvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE,
            mox.IgnoreArg(),
            mox.IgnoreArg())
        # registerCloseCallback only exists on newer libvirt bindings.
        if hasattr(fakelibvirt.virConnect, 'registerCloseCallback'):
            eventlet.tpool.execute(
                conn.registerCloseCallback,
                mox.IgnoreArg(),
                mox.IgnoreArg())
        self.mox.ReplayAll()

        driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        c = driver._get_connection()
        self.assertEqual(True, c.is_expected)
class LibvirtVolumeSnapshotTestCase(test.NoDBTestCase):
"""Tests for libvirtDriver.volume_snapshot_create/delete."""
    def setUp(self):
        super(LibvirtVolumeSnapshotTestCase, self).setUp()

        self.drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.c = context.get_admin_context()

        self.flags(instance_name_template='instance-%s')
        self.flags(qemu_allowed_storage_drivers=[], group='libvirt')

        # creating instance
        self.inst = {}
        self.inst['uuid'] = uuidutils.generate_uuid()
        self.inst['id'] = '1'

        # create domain info
        # Domain with one file-backed disk (vda) carrying the volume serial
        # and one block-backed disk (vdb).
        self.dom_xml = """
              <domain type='kvm'>
                <devices>
                  <disk type='file'>
                     <source file='disk1_file'/>
                     <target dev='vda' bus='virtio'/>
                     <serial>0e38683e-f0af-418f-a3f1-6b67ea0f919d</serial>
                  </disk>
                  <disk type='block'>
                    <source dev='/path/to/dev/1'/>
                    <target dev='vdb' bus='virtio' serial='1234'/>
                  </disk>
                </devices>
              </domain>"""

        # alternate domain info with network-backed snapshot chain
        # (gluster disk vdb with a two-level backingStore chain).
        self.dom_netdisk_xml = """
              <domain type='kvm'>
                <devices>
                  <disk type='file'>
                    <source file='disk1_file'/>
                    <target dev='vda' bus='virtio'/>
                    <serial>0e38683e-f0af-418f-a3f1-6b67eaffffff</serial>
                  </disk>
                  <disk type='network' device='disk'>
                    <driver name='qemu' type='qcow2'/>
                    <source protocol='gluster' name='vol1/root.img'>
                      <host name='server1' port='24007'/>
                    </source>
                    <backingStore type='network' index='1'>
                      <driver name='qemu' type='qcow2'/>
                      <source protocol='gluster' name='vol1/snap.img'>
                        <host name='server1' port='24007'/>
                      </source>
                      <backingStore type='network' index='2'>
                        <driver name='qemu' type='qcow2'/>
                        <source protocol='gluster' name='vol1/snap-b.img'>
                          <host name='server1' port='24007'/>
                        </source>
                        <backingStore/>
                      </backingStore>
                    </backingStore>
                    <target dev='vdb' bus='virtio'/>
                    <serial>0e38683e-f0af-418f-a3f1-6b67ea0f919d</serial>
                  </disk>
                </devices>
              </domain>
              """

        # XML with netdisk attached, and 1 snapshot taken
        self.dom_netdisk_xml_2 = """
              <domain type='kvm'>
                <devices>
                  <disk type='file'>
                    <source file='disk1_file'/>
                    <target dev='vda' bus='virtio'/>
                    <serial>0e38683e-f0af-418f-a3f1-6b67eaffffff</serial>
                  </disk>
                  <disk type='network' device='disk'>
                    <driver name='qemu' type='qcow2'/>
                    <source protocol='gluster' name='vol1/snap.img'>
                      <host name='server1' port='24007'/>
                    </source>
                    <backingStore type='network' index='1'>
                      <driver name='qemu' type='qcow2'/>
                      <source protocol='gluster' name='vol1/root.img'>
                        <host name='server1' port='24007'/>
                      </source>
                      <backingStore/>
                    </backingStore>
                    <target dev='vdb' bus='virtio'/>
                    <serial>0e38683e-f0af-418f-a3f1-6b67ea0f919d</serial>
                  </disk>
                </devices>
              </domain>
              """

        # Fixtures mimicking the create/delete info dicts passed by cinder
        # to volume_snapshot_create/volume_snapshot_delete.
        self.create_info = {'type': 'qcow2',
                            'snapshot_id': '1234-5678',
                            'new_file': 'new-file'}

        self.volume_uuid = '0e38683e-f0af-418f-a3f1-6b67ea0f919d'
        self.snapshot_id = '9c3ca9f4-9f4e-4dba-bedd-5c5e4b52b162'

        # delete_info_1: merge newest file, no explicit target (blockRebase).
        self.delete_info_1 = {'type': 'qcow2',
                              'file_to_merge': 'snap.img',
                              'merge_target_file': None}

        # delete_info_2: merge into an older file (blockCommit).
        self.delete_info_2 = {'type': 'qcow2',
                              'file_to_merge': 'snap.img',
                              'merge_target_file': 'other-snap.img'}

        # delete_info_3: nothing to merge (null base rebase).
        self.delete_info_3 = {'type': 'qcow2',
                              'file_to_merge': None,
                              'merge_target_file': None}

        self.delete_info_netdisk = {'type': 'qcow2',
                                    'file_to_merge': 'snap.img',
                                    'merge_target_file': 'root.img'}

        self.delete_info_invalid_type = {'type': 'made_up_type',
                                         'file_to_merge': 'some_file',
                                         'merge_target_file':
                                             'some_other_file'}
    def tearDown(self):
        # No extra teardown needed; kept as an explicit no-op override.
        super(LibvirtVolumeSnapshotTestCase, self).tearDown()
    @mock.patch('nova.virt.block_device.DriverVolumeBlockDevice.'
                'refresh_connection_info')
    @mock.patch('nova.objects.block_device.BlockDeviceMapping.'
                'get_by_volume_id')
    def test_volume_refresh_connection_info(self, mock_get_by_volume_id,
                                            mock_refresh_connection_info):
        """_volume_refresh_connection_info looks up the BDM by volume id
        and refreshes its connection info through the volume API.
        """
        fake_bdm = fake_block_device.FakeDbBlockDeviceDict({
            'id': 123,
            'instance_uuid': 'fake-instance',
            'device_name': '/dev/sdb',
            'source_type': 'volume',
            'destination_type': 'volume',
            'volume_id': 'fake-volume-id-1',
            'connection_info': '{"fake": "connection_info"}'})
        mock_get_by_volume_id.return_value = fake_bdm

        self.drvr._volume_refresh_connection_info(self.c, self.inst,
                                                  self.volume_uuid)

        mock_get_by_volume_id.assert_called_once_with(self.c, self.volume_uuid)
        mock_refresh_connection_info.assert_called_once_with(self.c, self.inst,
            self.drvr._volume_api, self.drvr)
    def test_volume_snapshot_create(self, quiesce=True):
        """Test snapshot creation with file-based disk.

        With quiesce=False, the first (quiesced) snapshotCreateXML call is
        expected to fail and the driver must retry without the QUIESCE flag.
        """
        self.flags(instance_name_template='instance-%s')
        self.mox.StubOutWithMock(self.drvr._host, 'get_domain')
        self.mox.StubOutWithMock(self.drvr, '_volume_api')

        instance = objects.Instance(**self.inst)

        new_file = 'new-file'

        domain = FakeVirtDomain(fake_xml=self.dom_xml)
        self.mox.StubOutWithMock(domain, 'XMLDesc')
        self.mox.StubOutWithMock(domain, 'snapshotCreateXML')
        domain.XMLDesc(flags=0).AndReturn(self.dom_xml)

        # Only the disk matching the volume serial is snapshotted; vdb is
        # explicitly excluded with snapshot="no".
        snap_xml_src = (
           '<domainsnapshot>\n'
           '  <disks>\n'
           '    <disk name="disk1_file" snapshot="external" type="file">\n'
           '      <source file="new-file"/>\n'
           '    </disk>\n'
           '    <disk name="vdb" snapshot="no"/>\n'
           '  </disks>\n'
           '</domainsnapshot>\n')

        # Older versions of libvirt may be missing these.
        fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT = 32
        fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE = 64

        snap_flags = (fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY |
                      fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA |
                      fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT)

        snap_flags_q = (snap_flags |
                        fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE)

        if quiesce:
            domain.snapshotCreateXML(snap_xml_src, snap_flags_q)
        else:
            domain.snapshotCreateXML(snap_xml_src, snap_flags_q).\
                AndRaise(fakelibvirt.libvirtError(
                    'quiescing failed, no qemu-ga'))
            domain.snapshotCreateXML(snap_xml_src, snap_flags).AndReturn(0)

        self.mox.ReplayAll()

        self.drvr._volume_snapshot_create(self.c, instance, domain,
                                          self.volume_uuid, new_file)

        self.mox.VerifyAll()
def test_volume_snapshot_create_libgfapi(self, quiesce=True):
"""Test snapshot creation with libgfapi network disk."""
self.flags(instance_name_template = 'instance-%s')
self.flags(qemu_allowed_storage_drivers = ['gluster'], group='libvirt')
self.mox.StubOutWithMock(self.drvr._host, 'get_domain')
self.mox.StubOutWithMock(self.drvr, '_volume_api')
self.dom_xml = """
<domain type='kvm'>
<devices>
<disk type='file'>
<source file='disk1_file'/>
<target dev='vda' bus='virtio'/>
<serial>0e38683e-f0af-418f-a3f1-6b67ea0f919d</serial>
</disk>
<disk type='block'>
<source protocol='gluster' name='gluster1/volume-1234'>
<host name='127.3.4.5' port='24007'/>
</source>
<target dev='vdb' bus='virtio' serial='1234'/>
</disk>
</devices>
</domain>"""
instance = objects.Instance(**self.inst)
new_file = 'new-file'
domain = FakeVirtDomain(fake_xml=self.dom_xml)
self.mox.StubOutWithMock(domain, 'XMLDesc')
self.mox.StubOutWithMock(domain, 'snapshotCreateXML')
domain.XMLDesc(flags=0).AndReturn(self.dom_xml)
snap_xml_src = (
'<domainsnapshot>\n'
' <disks>\n'
' <disk name="disk1_file" snapshot="external" type="file">\n'
' <source file="new-file"/>\n'
' </disk>\n'
' <disk name="vdb" snapshot="no"/>\n'
' </disks>\n'
'</domainsnapshot>\n')
# Older versions of libvirt may be missing these.
fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT = 32
fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE = 64
snap_flags = (fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY |
fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA |
fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT)
snap_flags_q = (snap_flags |
fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE)
if quiesce:
domain.snapshotCreateXML(snap_xml_src, snap_flags_q)
else:
domain.snapshotCreateXML(snap_xml_src, snap_flags_q).\
AndRaise(fakelibvirt.libvirtError(
'quiescing failed, no qemu-ga'))
domain.snapshotCreateXML(snap_xml_src, snap_flags).AndReturn(0)
self.mox.ReplayAll()
self.drvr._volume_snapshot_create(self.c, instance, domain,
self.volume_uuid, new_file)
self.mox.VerifyAll()
    def test_volume_snapshot_create_noquiesce(self):
        """Exercise the quiesce-failure retry path of snapshot creation."""
        self.test_volume_snapshot_create(quiesce=False)
@mock.patch.object(host.Host,
'has_min_version', return_value=True)
def test_can_quiesce(self, ver):
self.flags(virt_type='kvm', group='libvirt')
instance = objects.Instance(**self.inst)
image_meta = objects.ImageMeta.from_dict(
{"properties": {
"hw_qemu_guest_agent": "yes"}})
self.assertIsNone(self.drvr._can_quiesce(instance, image_meta))
    @mock.patch.object(host.Host,
                       'has_min_version', return_value=True)
    def test_can_quiesce_bad_hyp(self, ver):
        """Quiesce is rejected for hypervisors other than kvm/qemu."""
        self.flags(virt_type='xxx', group='libvirt')
        instance = objects.Instance(**self.inst)
        image_meta = objects.ImageMeta.from_dict(
            {"properties": {
                "hw_qemu_guest_agent": "yes"}})
        self.assertRaises(exception.InstanceQuiesceNotSupported,
                          self.drvr._can_quiesce, instance, image_meta)
@mock.patch.object(host.Host,
'has_min_version', return_value=False)
def test_can_quiesce_bad_ver(self, ver):
self.flags(virt_type='kvm', group='libvirt')
instance = objects.Instance(**self.inst)
image_meta = {"properties": {
"hw_qemu_guest_agent": "yes"}}
self.assertRaises(exception.InstanceQuiesceNotSupported,
self.drvr._can_quiesce, instance, image_meta)
    @mock.patch.object(host.Host,
                       'has_min_version', return_value=True)
    def test_can_quiesce_agent_not_enable(self, ver):
        """Quiesce is rejected when the image lacks hw_qemu_guest_agent."""
        self.flags(virt_type='kvm', group='libvirt')
        instance = objects.Instance(**self.inst)
        image_meta = objects.ImageMeta.from_dict({})
        self.assertRaises(exception.QemuGuestAgentNotEnabled,
                          self.drvr._can_quiesce, instance, image_meta)
    def test_volume_snapshot_create_outer_success(self):
        """volume_snapshot_create: success marks the snapshot 'creating'
        and refreshes the volume connection info.
        """
        instance = objects.Instance(**self.inst)

        domain = FakeVirtDomain(fake_xml=self.dom_xml)

        self.mox.StubOutWithMock(self.drvr._host, 'get_domain')
        self.mox.StubOutWithMock(self.drvr, '_volume_api')
        self.mox.StubOutWithMock(self.drvr, '_volume_snapshot_create')

        self.drvr._host.get_domain(instance).AndReturn(domain)
        self.drvr._volume_snapshot_create(self.c,
                                          instance,
                                          domain,
                                          self.volume_uuid,
                                          self.create_info['new_file'])
        self.drvr._volume_api.update_snapshot_status(
            self.c, self.create_info['snapshot_id'], 'creating')

        self.mox.StubOutWithMock(self.drvr._volume_api, 'get_snapshot')
        self.drvr._volume_api.get_snapshot(self.c,
            self.create_info['snapshot_id']).AndReturn({'status': 'available'})
        self.mox.StubOutWithMock(self.drvr, '_volume_refresh_connection_info')
        self.drvr._volume_refresh_connection_info(self.c, instance,
                                                  self.volume_uuid)

        self.mox.ReplayAll()

        self.drvr.volume_snapshot_create(self.c, instance, self.volume_uuid,
                                         self.create_info)
    def test_volume_snapshot_create_outer_failure(self):
        """volume_snapshot_create: failure marks the snapshot 'error'
        and re-raises the inner exception.
        """
        instance = objects.Instance(**self.inst)

        domain = FakeVirtDomain(fake_xml=self.dom_xml)

        self.mox.StubOutWithMock(self.drvr._host, 'get_domain')
        self.mox.StubOutWithMock(self.drvr, '_volume_api')
        self.mox.StubOutWithMock(self.drvr, '_volume_snapshot_create')

        self.drvr._host.get_domain(instance).AndReturn(domain)
        self.drvr._volume_snapshot_create(self.c,
                                          instance,
                                          domain,
                                          self.volume_uuid,
                                          self.create_info['new_file']).\
            AndRaise(exception.NovaException('oops'))
        self.drvr._volume_api.update_snapshot_status(
            self.c, self.create_info['snapshot_id'], 'error')

        self.mox.ReplayAll()

        self.assertRaises(exception.NovaException,
                          self.drvr.volume_snapshot_create,
                          self.c,
                          instance,
                          self.volume_uuid,
                          self.create_info)
def test_volume_snapshot_delete_1(self):
"""Deleting newest snapshot -- blockRebase."""
# libvirt lib doesn't have VIR_DOMAIN_BLOCK_REBASE_RELATIVE flag
fakelibvirt.__dict__.pop('VIR_DOMAIN_BLOCK_REBASE_RELATIVE')
self.stubs.Set(libvirt_driver, 'libvirt', fakelibvirt)
instance = objects.Instance(**self.inst)
snapshot_id = 'snapshot-1234'
domain = FakeVirtDomain(fake_xml=self.dom_xml)
self.mox.StubOutWithMock(domain, 'XMLDesc')
domain.XMLDesc(flags=0).AndReturn(self.dom_xml)
self.mox.StubOutWithMock(self.drvr._host, 'get_domain')
self.mox.StubOutWithMock(self.drvr._host, 'has_min_version')
self.mox.StubOutWithMock(domain, 'blockRebase')
self.mox.StubOutWithMock(domain, 'blockCommit')
self.mox.StubOutWithMock(domain, 'blockJobInfo')
self.drvr._host.get_domain(instance).AndReturn(domain)
self.drvr._host.has_min_version(mox.IgnoreArg()).AndReturn(True)
domain.blockRebase('vda', 'snap.img', 0, flags=0)
domain.blockJobInfo('vda', flags=0).AndReturn({'cur': 1, 'end': 1000})
domain.blockJobInfo('vda', flags=0).AndReturn(
{'cur': 1000, 'end': 1000})
self.mox.ReplayAll()
self.drvr._volume_snapshot_delete(self.c, instance, self.volume_uuid,
snapshot_id, self.delete_info_1)
self.mox.VerifyAll()
fakelibvirt.__dict__.update({'VIR_DOMAIN_BLOCK_REBASE_RELATIVE': 8})
    def test_volume_snapshot_delete_relative_1(self):
        """Deleting newest snapshot -- blockRebase using relative flag"""
        self.stubs.Set(libvirt_driver, 'libvirt', fakelibvirt)
        instance = objects.Instance(**self.inst)
        snapshot_id = 'snapshot-1234'

        domain = FakeVirtDomain(fake_xml=self.dom_xml)
        guest = libvirt_guest.Guest(domain)
        self.mox.StubOutWithMock(domain, 'XMLDesc')
        domain.XMLDesc(flags=0).AndReturn(self.dom_xml)

        self.mox.StubOutWithMock(self.drvr._host, 'get_guest')
        self.mox.StubOutWithMock(self.drvr._host, 'has_min_version')
        self.mox.StubOutWithMock(domain, 'blockRebase')
        self.mox.StubOutWithMock(domain, 'blockCommit')
        self.mox.StubOutWithMock(domain, 'blockJobInfo')

        self.drvr._host.get_guest(instance).AndReturn(guest)
        self.drvr._host.has_min_version(mox.IgnoreArg()).AndReturn(True)

        # With the binding supporting it, the RELATIVE flag must be passed.
        domain.blockRebase('vda', 'snap.img', 0,
                           flags=fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_RELATIVE)

        domain.blockJobInfo('vda', flags=0).AndReturn({'cur': 1, 'end': 1000})
        domain.blockJobInfo('vda', flags=0).AndReturn(
            {'cur': 1000, 'end': 1000})

        self.mox.ReplayAll()

        self.drvr._volume_snapshot_delete(self.c, instance, self.volume_uuid,
                                          snapshot_id, self.delete_info_1)

        self.mox.VerifyAll()
def test_volume_snapshot_delete_2(self):
"""Deleting older snapshot -- blockCommit."""
# libvirt lib doesn't have VIR_DOMAIN_BLOCK_COMMIT_RELATIVE
fakelibvirt.__dict__.pop('VIR_DOMAIN_BLOCK_COMMIT_RELATIVE')
self.stubs.Set(libvirt_driver, 'libvirt', fakelibvirt)
instance = objects.Instance(**self.inst)
snapshot_id = 'snapshot-1234'
domain = FakeVirtDomain(fake_xml=self.dom_xml)
self.mox.StubOutWithMock(domain, 'XMLDesc')
domain.XMLDesc(flags=0).AndReturn(self.dom_xml)
self.mox.StubOutWithMock(self.drvr._host, 'get_domain')
self.mox.StubOutWithMock(self.drvr._host, 'has_min_version')
self.mox.StubOutWithMock(domain, 'blockRebase')
self.mox.StubOutWithMock(domain, 'blockCommit')
self.mox.StubOutWithMock(domain, 'blockJobInfo')
self.drvr._host.get_domain(instance).AndReturn(domain)
self.drvr._host.has_min_version(mox.IgnoreArg()).AndReturn(True)
self.mox.ReplayAll()
self.assertRaises(exception.Invalid,
self.drvr._volume_snapshot_delete,
self.c,
instance,
self.volume_uuid,
snapshot_id,
self.delete_info_2)
fakelibvirt.__dict__.update({'VIR_DOMAIN_BLOCK_COMMIT_RELATIVE': 4})
    def test_volume_snapshot_delete_relative_2(self):
        """Deleting older snapshot -- blockCommit using relative flag"""
        self.stubs.Set(libvirt_driver, 'libvirt', fakelibvirt)
        instance = objects.Instance(**self.inst)
        snapshot_id = 'snapshot-1234'

        domain = FakeVirtDomain(fake_xml=self.dom_xml)
        self.mox.StubOutWithMock(domain, 'XMLDesc')
        domain.XMLDesc(flags=0).AndReturn(self.dom_xml)

        self.mox.StubOutWithMock(self.drvr._host, 'get_domain')
        self.mox.StubOutWithMock(self.drvr._host, 'has_min_version')
        self.mox.StubOutWithMock(domain, 'blockRebase')
        self.mox.StubOutWithMock(domain, 'blockCommit')
        self.mox.StubOutWithMock(domain, 'blockJobInfo')

        self.drvr._host.get_domain(instance).AndReturn(domain)
        self.drvr._host.has_min_version(mox.IgnoreArg()).AndReturn(True)

        # Commit snap.img into other-snap.img using the RELATIVE flag.
        domain.blockCommit('vda', 'other-snap.img', 'snap.img', 0,
                           flags=fakelibvirt.VIR_DOMAIN_BLOCK_COMMIT_RELATIVE)

        # Second poll returns {} meaning the block job has finished.
        domain.blockJobInfo('vda', flags=0).AndReturn({'cur': 1, 'end': 1000})
        domain.blockJobInfo('vda', flags=0).AndReturn({})

        self.mox.ReplayAll()

        self.drvr._volume_snapshot_delete(self.c, instance, self.volume_uuid,
                                          snapshot_id, self.delete_info_2)

        self.mox.VerifyAll()
    def test_volume_snapshot_delete_nonrelative_null_base(self):
        # Deleting newest and last snapshot of a volume
        # with blockRebase. So base of the new image will be null.

        instance = objects.Instance(**self.inst)
        snapshot_id = 'snapshot-1234'

        domain = FakeVirtDomain(fake_xml=self.dom_xml)
        guest = libvirt_guest.Guest(domain)

        with contextlib.nested(
            mock.patch.object(domain, 'XMLDesc', return_value=self.dom_xml),
            mock.patch.object(self.drvr._host, 'get_guest',
                              return_value=guest),
            mock.patch.object(self.drvr._host, 'has_min_version',
                              return_value=True),
            mock.patch.object(domain, 'blockRebase'),
            mock.patch.object(domain, 'blockJobInfo',
                              return_value={'cur': 1000, 'end': 1000})
        ) as (mock_xmldesc, mock_get_guest, mock_has_min_version,
              mock_rebase, mock_job_info):

            self.drvr._volume_snapshot_delete(self.c, instance,
                                              self.volume_uuid, snapshot_id,
                                              self.delete_info_3)

            # Rebase against a null base (None) flattens the image.
            mock_xmldesc.assert_called_once_with(flags=0)
            mock_get_guest.assert_called_once_with(instance)
            mock_has_min_version.assert_called_once_with((1, 1, 1,))
            mock_rebase.assert_called_once_with('vda', None, 0, flags=0)
            mock_job_info.assert_called_once_with('vda', flags=0)
    def test_volume_snapshot_delete_netdisk_nonrelative_null_base(self):
        # Deleting newest and last snapshot of a network attached volume
        # with blockRebase. So base of the new image will be null.

        instance = objects.Instance(**self.inst)
        snapshot_id = 'snapshot-1234'

        domain = FakeVirtDomain(fake_xml=self.dom_netdisk_xml_2)
        guest = libvirt_guest.Guest(domain)

        with contextlib.nested(
            mock.patch.object(domain, 'XMLDesc',
                              return_value=self.dom_netdisk_xml_2),
            mock.patch.object(self.drvr._host, 'get_guest',
                              return_value=guest),
            mock.patch.object(self.drvr._host, 'has_min_version',
                              return_value=True),
            mock.patch.object(domain, 'blockRebase'),
            mock.patch.object(domain, 'blockJobInfo',
                              return_value={'cur': 1000, 'end': 1000})
        ) as (mock_xmldesc, mock_get_guest, mock_has_min_version,
              mock_rebase, mock_job_info):
            self.drvr._volume_snapshot_delete(self.c, instance,
                                              self.volume_uuid, snapshot_id,
                                              self.delete_info_3)

            # The network disk is vdb; null base flattens it in place.
            mock_xmldesc.assert_called_once_with(flags=0)
            mock_get_guest.assert_called_once_with(instance)
            mock_has_min_version.assert_called_once_with((1, 1, 1,))
            mock_rebase.assert_called_once_with('vdb', None, 0, flags=0)
            mock_job_info.assert_called_once_with('vdb', flags=0)
    def test_volume_snapshot_delete_outer_success(self):
        """volume_snapshot_delete: success marks the snapshot 'deleting'
        and refreshes the volume connection info.
        """
        instance = objects.Instance(**self.inst)
        snapshot_id = 'snapshot-1234'

        FakeVirtDomain(fake_xml=self.dom_xml)

        self.mox.StubOutWithMock(self.drvr._host, 'get_domain')
        self.mox.StubOutWithMock(self.drvr, '_volume_api')
        self.mox.StubOutWithMock(self.drvr, '_volume_snapshot_delete')

        self.drvr._volume_snapshot_delete(self.c,
                                          instance,
                                          self.volume_uuid,
                                          snapshot_id,
                                          delete_info=self.delete_info_1)
        self.drvr._volume_api.update_snapshot_status(
            self.c, snapshot_id, 'deleting')

        self.mox.StubOutWithMock(self.drvr, '_volume_refresh_connection_info')
        self.drvr._volume_refresh_connection_info(self.c, instance,
                                                  self.volume_uuid)

        self.mox.ReplayAll()

        self.drvr.volume_snapshot_delete(self.c, instance, self.volume_uuid,
                                         snapshot_id,
                                         self.delete_info_1)

        self.mox.VerifyAll()
    def test_volume_snapshot_delete_outer_failure(self):
        """volume_snapshot_delete: failure marks the snapshot
        'error_deleting' and re-raises the inner exception.
        """
        instance = objects.Instance(**self.inst)
        snapshot_id = '1234-9876'

        FakeVirtDomain(fake_xml=self.dom_xml)

        self.mox.StubOutWithMock(self.drvr._host, 'get_domain')
        self.mox.StubOutWithMock(self.drvr, '_volume_api')
        self.mox.StubOutWithMock(self.drvr, '_volume_snapshot_delete')

        self.drvr._volume_snapshot_delete(self.c,
                                          instance,
                                          self.volume_uuid,
                                          snapshot_id,
                                          delete_info=self.delete_info_1).\
            AndRaise(exception.NovaException('oops'))
        self.drvr._volume_api.update_snapshot_status(
            self.c, snapshot_id, 'error_deleting')

        self.mox.ReplayAll()

        self.assertRaises(exception.NovaException,
                          self.drvr.volume_snapshot_delete,
                          self.c,
                          instance,
                          self.volume_uuid,
                          snapshot_id,
                          self.delete_info_1)

        self.mox.VerifyAll()
    def test_volume_snapshot_delete_invalid_type(self):
        """An unrecognized delete_info type raises and marks the snapshot
        'error_deleting'.
        """
        instance = objects.Instance(**self.inst)

        FakeVirtDomain(fake_xml=self.dom_xml)

        self.mox.StubOutWithMock(self.drvr._host, 'get_domain')
        self.mox.StubOutWithMock(self.drvr, '_volume_api')
        self.mox.StubOutWithMock(self.drvr._host, 'has_min_version')

        self.drvr._host.has_min_version(mox.IgnoreArg()).AndReturn(True)

        self.drvr._volume_api.update_snapshot_status(
            self.c, self.snapshot_id, 'error_deleting')

        self.mox.ReplayAll()

        self.assertRaises(exception.NovaException,
                          self.drvr.volume_snapshot_delete,
                          self.c,
                          instance,
                          self.volume_uuid,
                          self.snapshot_id,
                          self.delete_info_invalid_type)
def test_volume_snapshot_delete_netdisk_1(self):
"""Delete newest snapshot -- blockRebase for libgfapi/network disk."""
class FakeNetdiskDomain(FakeVirtDomain):
def __init__(self, *args, **kwargs):
super(FakeNetdiskDomain, self).__init__(*args, **kwargs)
def XMLDesc(self, flags):
return self.dom_netdisk_xml
# libvirt lib doesn't have VIR_DOMAIN_BLOCK_REBASE_RELATIVE
fakelibvirt.__dict__.pop('VIR_DOMAIN_BLOCK_REBASE_RELATIVE')
self.stubs.Set(libvirt_driver, 'libvirt', fakelibvirt)
instance = objects.Instance(**self.inst)
snapshot_id = 'snapshot-1234'
domain = FakeNetdiskDomain(fake_xml=self.dom_netdisk_xml)
self.mox.StubOutWithMock(domain, 'XMLDesc')
domain.XMLDesc(flags=0).AndReturn(self.dom_netdisk_xml)
self.mox.StubOutWithMock(self.drvr._host, 'get_domain')
self.mox.StubOutWithMock(self.drvr._host, 'has_min_version')
self.mox.StubOutWithMock(domain, 'blockRebase')
self.mox.StubOutWithMock(domain, 'blockCommit')
self.mox.StubOutWithMock(domain, 'blockJobInfo')
self.drvr._host.get_domain(instance).AndReturn(domain)
self.drvr._host.has_min_version(mox.IgnoreArg()).AndReturn(True)
domain.blockRebase('vdb', 'vdb[1]', 0, flags=0)
domain.blockJobInfo('vdb', flags=0).AndReturn({'cur': 1, 'end': 1000})
domain.blockJobInfo('vdb', flags=0).AndReturn(
{'cur': 1000, 'end': 1000})
self.mox.ReplayAll()
self.drvr._volume_snapshot_delete(self.c, instance, self.volume_uuid,
snapshot_id, self.delete_info_1)
self.mox.VerifyAll()
fakelibvirt.__dict__.update({'VIR_DOMAIN_BLOCK_REBASE_RELATIVE': 8})
    def test_volume_snapshot_delete_netdisk_relative_1(self):
        """Delete newest snapshot -- blockRebase for libgfapi/network disk."""

        class FakeNetdiskDomain(FakeVirtDomain):
            def __init__(self, *args, **kwargs):
                super(FakeNetdiskDomain, self).__init__(*args, **kwargs)

            def XMLDesc(self, flags):
                return self.dom_netdisk_xml

        self.stubs.Set(libvirt_driver, 'libvirt', fakelibvirt)

        instance = objects.Instance(**self.inst)
        snapshot_id = 'snapshot-1234'

        domain = FakeNetdiskDomain(fake_xml=self.dom_netdisk_xml)
        self.mox.StubOutWithMock(domain, 'XMLDesc')
        domain.XMLDesc(flags=0).AndReturn(self.dom_netdisk_xml)

        self.mox.StubOutWithMock(self.drvr._host, 'get_domain')
        self.mox.StubOutWithMock(self.drvr._host, 'has_min_version')
        self.mox.StubOutWithMock(domain, 'blockRebase')
        self.mox.StubOutWithMock(domain, 'blockCommit')
        self.mox.StubOutWithMock(domain, 'blockJobInfo')

        self.drvr._host.get_domain(instance).AndReturn(domain)
        self.drvr._host.has_min_version(mox.IgnoreArg()).AndReturn(True)

        # Index-addressed rebase ('vdb[1]') with the RELATIVE flag.
        domain.blockRebase('vdb', 'vdb[1]', 0,
                           flags=fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_RELATIVE)

        domain.blockJobInfo('vdb', flags=0).AndReturn({'cur': 1, 'end': 1000})
        domain.blockJobInfo('vdb', flags=0).AndReturn(
            {'cur': 1000, 'end': 1000})

        self.mox.ReplayAll()

        self.drvr._volume_snapshot_delete(self.c, instance, self.volume_uuid,
                                          snapshot_id, self.delete_info_1)

        self.mox.VerifyAll()
def test_volume_snapshot_delete_netdisk_2(self):
"""Delete older snapshot -- blockCommit for libgfapi/network disk."""
class FakeNetdiskDomain(FakeVirtDomain):
def __init__(self, *args, **kwargs):
super(FakeNetdiskDomain, self).__init__(*args, **kwargs)
def XMLDesc(self, flags):
return self.dom_netdisk_xml
# libvirt lib doesn't have VIR_DOMAIN_BLOCK_COMMIT_RELATIVE
fakelibvirt.__dict__.pop('VIR_DOMAIN_BLOCK_COMMIT_RELATIVE')
self.stubs.Set(libvirt_driver, 'libvirt', fakelibvirt)
instance = objects.Instance(**self.inst)
snapshot_id = 'snapshot-1234'
domain = FakeNetdiskDomain(fake_xml=self.dom_netdisk_xml)
self.mox.StubOutWithMock(domain, 'XMLDesc')
domain.XMLDesc(flags=0).AndReturn(self.dom_netdisk_xml)
self.mox.StubOutWithMock(self.drvr._host, 'get_domain')
self.mox.StubOutWithMock(self.drvr._host, 'has_min_version')
self.mox.StubOutWithMock(domain, 'blockRebase')
self.mox.StubOutWithMock(domain, 'blockCommit')
self.mox.StubOutWithMock(domain, 'blockJobInfo')
self.drvr._host.get_domain(instance).AndReturn(domain)
self.drvr._host.has_min_version(mox.IgnoreArg()).AndReturn(True)
self.mox.ReplayAll()
self.assertRaises(exception.Invalid,
self.drvr._volume_snapshot_delete,
self.c,
instance,
self.volume_uuid,
snapshot_id,
self.delete_info_netdisk)
fakelibvirt.__dict__.update({'VIR_DOMAIN_BLOCK_COMMIT_RELATIVE': 4})
    def test_volume_snapshot_delete_netdisk_relative_2(self):
        """Delete older snapshot -- blockCommit for libgfapi/network disk."""

        class FakeNetdiskDomain(FakeVirtDomain):
            def __init__(self, *args, **kwargs):
                super(FakeNetdiskDomain, self).__init__(*args, **kwargs)

            def XMLDesc(self, flags):
                return self.dom_netdisk_xml

        self.stubs.Set(libvirt_driver, 'libvirt', fakelibvirt)

        instance = objects.Instance(**self.inst)
        snapshot_id = 'snapshot-1234'

        domain = FakeNetdiskDomain(fake_xml=self.dom_netdisk_xml)
        self.mox.StubOutWithMock(domain, 'XMLDesc')
        domain.XMLDesc(flags=0).AndReturn(self.dom_netdisk_xml)

        self.mox.StubOutWithMock(self.drvr._host, 'get_domain')
        self.mox.StubOutWithMock(self.drvr._host, 'has_min_version')
        self.mox.StubOutWithMock(domain, 'blockRebase')
        self.mox.StubOutWithMock(domain, 'blockCommit')
        self.mox.StubOutWithMock(domain, 'blockJobInfo')

        self.drvr._host.get_domain(instance).AndReturn(domain)
        self.drvr._host.has_min_version(mox.IgnoreArg()).AndReturn(True)

        # Commit top ('vdb[0]') into base ('vdb[1]') using index notation
        # and the RELATIVE flag.
        domain.blockCommit('vdb', 'vdb[0]', 'vdb[1]', 0,
                           flags=fakelibvirt.VIR_DOMAIN_BLOCK_COMMIT_RELATIVE)

        domain.blockJobInfo('vdb', flags=0).AndReturn({'cur': 1, 'end': 1000})
        domain.blockJobInfo('vdb', flags=0).AndReturn(
            {'cur': 1000, 'end': 1000})

        self.mox.ReplayAll()

        self.drvr._volume_snapshot_delete(self.c, instance, self.volume_uuid,
                                          snapshot_id,
                                          self.delete_info_netdisk)

        self.mox.VerifyAll()
def _fake_convert_image(source, dest, out_format,
                        run_as_root=True):
    # Stand-in for images.convert_image used as a mock side_effect: just
    # registers ``dest`` in the fake libvirt_utils filesystem instead of
    # performing a real conversion.
    libvirt_driver.libvirt_utils.files[dest] = ''
class _BaseSnapshotTests(test.NoDBTestCase):
    """Shared fixture and helpers for the libvirt snapshot test classes."""

    def setUp(self):
        super(_BaseSnapshotTests, self).setUp()
        self.flags(snapshots_directory='./', group='libvirt')
        self.context = context.get_admin_context()

        # Route all libvirt_utils usage through the in-memory fake.
        self.useFixture(fixtures.MonkeyPatch(
            'nova.virt.libvirt.driver.libvirt_utils',
            fake_libvirt_utils))
        self.useFixture(fixtures.MonkeyPatch(
            'nova.virt.libvirt.imagebackend.libvirt_utils',
            fake_libvirt_utils))

        self.image_service = nova.tests.unit.image.fake.stub_out_image_service(
                self.stubs)

        self.mock_update_task_state = mock.Mock()

        test_instance = _create_test_instance()
        self.instance_ref = objects.Instance(**test_instance)
        self.instance_ref.info_cache = objects.InstanceInfoCache(
            network_info=None)

    def _assert_snapshot(self, snapshot, disk_format,
                         expected_properties=None):
        # The driver must report both upload task states, in order.
        self.mock_update_task_state.assert_has_calls([
            mock.call(task_state=task_states.IMAGE_PENDING_UPLOAD),
            mock.call(task_state=task_states.IMAGE_UPLOADING,
                      expected_state=task_states.IMAGE_PENDING_UPLOAD)])

        props = snapshot['properties']
        self.assertEqual(props['image_state'], 'available')
        self.assertEqual(snapshot['status'], 'active')
        self.assertEqual(snapshot['disk_format'], disk_format)
        self.assertEqual(snapshot['name'], 'test-snap')

        if expected_properties:
            for expected_key, expected_value in \
                    six.iteritems(expected_properties):
                self.assertEqual(expected_value, props[expected_key])

    def _create_image(self, extra_properties=None):
        properties = {'instance_id': self.instance_ref['id'],
                      'user_id': str(self.context.user_id)}
        if extra_properties:
            properties.update(extra_properties)

        sent_meta = {'name': 'test-snap',
                     'is_public': False,
                     'status': 'creating',
                     'properties': properties}

        # Create new image. It will be updated in snapshot method
        # To work with it from snapshot, the single image_service is needed
        recv_meta = self.image_service.create(self.context, sent_meta)
        return recv_meta

    @mock.patch.object(imagebackend.Image, 'resolve_driver_format')
    @mock.patch.object(host.Host, 'get_domain')
    def _snapshot(self, image_id, mock_get_domain, mock_resolve):
        mock_get_domain.return_value = FakeVirtDomain()
        driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        driver.snapshot(self.context, self.instance_ref, image_id,
                        self.mock_update_task_state)
        snapshot = self.image_service.show(self.context, image_id)
        return snapshot

    def _test_snapshot(self, disk_format, extra_properties=None):
        recv_meta = self._create_image(extra_properties=extra_properties)
        snapshot = self._snapshot(recv_meta['id'])
        self._assert_snapshot(snapshot, disk_format=disk_format,
                              expected_properties=extra_properties)
class LibvirtSnapshotTests(_BaseSnapshotTests):
    """Snapshot tests for the default (qemu/kvm) libvirt configuration."""

    def test_ami(self):
        # Assign different image_ref from nova/images/fakes for testing ami
        self.instance_ref.image_ref = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
        self.instance_ref.system_metadata = \
            utils.get_system_metadata_from_image(
                {'disk_format': 'ami'})
        self._test_snapshot(disk_format='ami')

    @mock.patch.object(fake_libvirt_utils, 'disk_type', new='raw')
    @mock.patch.object(libvirt_driver.imagebackend.images,
                       'convert_image',
                       side_effect=_fake_convert_image)
    def test_raw(self, mock_convert_image):
        self._test_snapshot(disk_format='raw')

    def test_qcow2(self):
        self._test_snapshot(disk_format='qcow2')

    def test_no_image_architecture(self):
        # Image without an architecture property still snapshots cleanly.
        self.instance_ref.image_ref = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
        self._test_snapshot(disk_format='qcow2')

    def test_no_original_image(self):
        # Snapshot works even when the base image no longer exists.
        self.instance_ref.image_ref = '661122aa-1234-dede-fefe-babababababa'
        self._test_snapshot(disk_format='qcow2')

    def test_snapshot_metadata_image(self):
        # Assign an image with an architecture defined (x86_64)
        self.instance_ref.image_ref = 'a440c04b-79fa-479c-bed1-0b816eaec379'

        extra_properties = {'architecture': 'fake_arch',
                            'key_a': 'value_a',
                            'key_b': 'value_b',
                            'os_type': 'linux'}

        self._test_snapshot(disk_format='qcow2',
                            extra_properties=extra_properties)
class LXCSnapshotTests(LibvirtSnapshotTests):
    """Repeat all of the Libvirt snapshot tests, but with LXC enabled"""

    def setUp(self):
        super(LXCSnapshotTests, self).setUp()
        self.flags(virt_type='lxc', group='libvirt')
class LVMSnapshotTests(_BaseSnapshotTests):
    """Snapshot tests for LVM-backed instances (images_type='lvm')."""

    @mock.patch.object(fake_libvirt_utils, 'disk_type', new='lvm')
    @mock.patch.object(libvirt_driver.imagebackend.images,
                       'convert_image',
                       side_effect=_fake_convert_image)
    @mock.patch.object(libvirt_driver.imagebackend.lvm, 'volume_info')
    def _test_lvm_snapshot(self, disk_format, mock_volume_info,
                           mock_convert_image):
        self.flags(images_type='lvm',
                   images_volume_group='nova-vg', group='libvirt')

        self._test_snapshot(disk_format=disk_format)

        # The LV must be inspected and then converted to the target format.
        mock_volume_info.assert_has_calls([mock.call('/dev/nova-vg/lv')])
        mock_convert_image.assert_called_once_with(
            '/dev/nova-vg/lv', mock.ANY, disk_format, run_as_root=True)

    def test_raw(self):
        self._test_lvm_snapshot('raw')

    def test_qcow2(self):
        self.flags(snapshot_image_format='qcow2', group='libvirt')
        self._test_lvm_snapshot('qcow2')
| felixma/nova | nova/tests/unit/virt/libvirt/test_driver.py | Python | apache-2.0 | 670,986 |
package com.evolveum.midpoint.schrodinger.component;
import com.codeborne.selenide.SelenideElement;
/**
* Created by Viliam Repan (lazyman).
*/
public abstract class Component<T> {

    // Object (a page or another component) that created this component;
    // handed back to the caller by and().
    private final T owner;

    // Root element of this component in the DOM; may be null.
    private final SelenideElement rootElement;

    public Component(T parent) {
        this(parent, null);
    }

    public Component(T parent, SelenideElement parentElement) {
        owner = parent;
        rootElement = parentElement;
    }

    /** Fluent-API helper: hands control back to the parent object. */
    public T and() {
        return owner;
    }

    public T getParent() {
        return owner;
    }

    public SelenideElement getParentElement() {
        return rootElement;
    }
}
| arnost-starosta/midpoint | tools/schrodinger/src/main/java/com/evolveum/midpoint/schrodinger/component/Component.java | Java | apache-2.0 | 660 |
package org.apache.archiva.scheduler.repository;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.Collection;
import java.util.Collections;
import java.util.Date;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import org.apache.maven.archiva.configuration.ManagedRepositoryConfiguration;
import org.apache.maven.archiva.consumers.AbstractMonitoredConsumer;
import org.apache.maven.archiva.consumers.ConsumerException;
import org.apache.maven.archiva.consumers.KnownRepositoryContentConsumer;
import org.apache.maven.archiva.model.ArtifactReference;
import org.apache.maven.archiva.repository.ManagedRepositoryContent;
import org.apache.maven.archiva.repository.RepositoryContentFactory;
import org.apache.maven.archiva.repository.RepositoryException;
import org.apache.maven.archiva.repository.layout.LayoutException;
public class TestConsumer
extends AbstractMonitoredConsumer
implements KnownRepositoryContentConsumer
{
private Set<ArtifactReference> consumed = new HashSet<ArtifactReference>();
// injected
private RepositoryContentFactory factory;
private ManagedRepositoryContent repository;
public String getId()
{
return "test-consumer";
}
public String getDescription()
{
return null;
}
public boolean isPermanent()
{
return false;
}
public List<String> getIncludes()
{
return Collections.singletonList( "**/**" );
}
public List<String> getExcludes()
{
return null;
}
public void beginScan( ManagedRepositoryConfiguration repository, Date whenGathered )
throws ConsumerException
{
consumed.clear();
try
{
this.repository = factory.getManagedRepositoryContent( repository.getId() );
}
catch ( RepositoryException e )
{
throw new ConsumerException( e.getMessage(), e );
}
}
public void beginScan( ManagedRepositoryConfiguration repository, Date whenGathered, boolean executeOnEntireRepo )
throws ConsumerException
{
beginScan( repository, whenGathered );
}
public void processFile( String path )
throws ConsumerException
{
if ( !path.endsWith( ".sha1" ) && !path.endsWith( ".md5" ) )
{
try
{
consumed.add( repository.toArtifactReference( path ) );
}
catch ( LayoutException e )
{
throw new ConsumerException( e.getMessage(), e );
}
}
}
public void processFile( String path, boolean executeOnEntireRepo )
throws Exception
{
processFile( path );
}
public void completeScan()
{
}
public void completeScan( boolean executeOnEntireRepo )
{
completeScan();
}
public Collection<ArtifactReference> getConsumed()
{
return consumed;
}
} | hiredman/archiva | archiva-modules/archiva-scheduler/archiva-scheduler-repository/src/test/java/org/apache/archiva/scheduler/repository/TestConsumer.java | Java | apache-2.0 | 3,731 |
package com.evilco.configuration.xml.annotation;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
 * Field annotation that supplies the property name to use for the annotated
 * field. NOTE(review): exact (de)serialisation semantics are defined by the
 * XML configuration mapper elsewhere in this project -- confirm there.
 *
 * @author Johannes Donath <johannesd@evil-co.com>
 * @copyright Copyright (C) 2014 Evil-Co <http://www.evil-co.org>
 */
@Retention (RetentionPolicy.RUNTIME)
@Target (ElementType.FIELD)
public @interface Property {

	/**
	 * Defines the property name.
	 * @return the name associated with the annotated field.
	 */
	public String value ();
}
| Evil-Co-Legacy/XMLConfiguration | src/main/java/com/evilco/configuration/xml/annotation/Property.java | Java | apache-2.0 | 533 |
// ------------------------------------------------------------------------------
// Copyright (c) 2000 Cadenza New Zealand Ltd
// Distributed under the Boost Software License, Version 1.0. (See accompany-
// ing file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
// ------------------------------------------------------------------------------
// Boost functional.hpp header file
// See http://www.boost.org/libs/functional for documentation.
// ------------------------------------------------------------------------------
// $Id: functional.hpp 36246 2006-12-02 14:17:26Z andreas_huber69 $
// ------------------------------------------------------------------------------
#ifndef BOOST_FUNCTIONAL_HPP
#define BOOST_FUNCTIONAL_HPP
#include <boost/config.hpp>
#include <boost/call_traits.hpp>
#include <functional>
namespace riakboost{} namespace boost = riakboost; namespace riakboost{
#ifndef BOOST_NO_TEMPLATE_PARTIAL_SPECIALIZATION
// --------------------------------------------------------------------------
// The following traits classes allow us to avoid the need for ptr_fun
// because the types of arguments and the result of a function can be
// deduced.
//
// In addition to the standard types defined in unary_function and
// binary_function, we add
//
// - function_type, the type of the function or function object itself.
//
// - param_type, the type that should be used for passing the function or
// function object as an argument.
// --------------------------------------------------------------------------
namespace detail
{
template <class Operation>
struct unary_traits_imp;
template <class Operation>
struct unary_traits_imp<Operation*>
{
typedef Operation function_type;
typedef const function_type & param_type;
typedef typename Operation::result_type result_type;
typedef typename Operation::argument_type argument_type;
};
template <class R, class A>
struct unary_traits_imp<R(*)(A)>
{
typedef R (*function_type)(A);
typedef R (*param_type)(A);
typedef R result_type;
typedef A argument_type;
};
template <class Operation>
struct binary_traits_imp;
template <class Operation>
struct binary_traits_imp<Operation*>
{
typedef Operation function_type;
typedef const function_type & param_type;
typedef typename Operation::result_type result_type;
typedef typename Operation::first_argument_type first_argument_type;
typedef typename Operation::second_argument_type second_argument_type;
};
template <class R, class A1, class A2>
struct binary_traits_imp<R(*)(A1,A2)>
{
typedef R (*function_type)(A1,A2);
typedef R (*param_type)(A1,A2);
typedef R result_type;
typedef A1 first_argument_type;
typedef A2 second_argument_type;
};
} // namespace detail
    // Public traits: dispatch through detail::*_traits_imp<Operation*> so a
    // single template handles both function objects and function pointers.
    template <class Operation>
    struct unary_traits
    {
        typedef typename detail::unary_traits_imp<Operation*>::function_type function_type;
        typedef typename detail::unary_traits_imp<Operation*>::param_type param_type;
        typedef typename detail::unary_traits_imp<Operation*>::result_type result_type;
        typedef typename detail::unary_traits_imp<Operation*>::argument_type argument_type;
    };

    // Direct specialisation for unary function pointers.
    template <class R, class A>
    struct unary_traits<R(*)(A)>
    {
        typedef R (*function_type)(A);
        typedef R (*param_type)(A);
        typedef R result_type;
        typedef A argument_type;
    };

    template <class Operation>
    struct binary_traits
    {
        typedef typename detail::binary_traits_imp<Operation*>::function_type function_type;
        typedef typename detail::binary_traits_imp<Operation*>::param_type param_type;
        typedef typename detail::binary_traits_imp<Operation*>::result_type result_type;
        typedef typename detail::binary_traits_imp<Operation*>::first_argument_type first_argument_type;
        typedef typename detail::binary_traits_imp<Operation*>::second_argument_type second_argument_type;
    };

    // Direct specialisation for binary function pointers.
    template <class R, class A1, class A2>
    struct binary_traits<R(*)(A1,A2)>
    {
        typedef R (*function_type)(A1,A2);
        typedef R (*param_type)(A1,A2);
        typedef R result_type;
        typedef A1 first_argument_type;
        typedef A2 second_argument_type;
    };
#else // BOOST_NO_TEMPLATE_PARTIAL_SPECIALIZATION
// --------------------------------------------------------------------------
// If we have no partial specialisation available, decay to a situation
// that is no worse than in the Standard, i.e., ptr_fun will be required.
// --------------------------------------------------------------------------
template <class Operation>
struct unary_traits
{
typedef Operation function_type;
typedef const Operation& param_type;
typedef typename Operation::result_type result_type;
typedef typename Operation::argument_type argument_type;
};
template <class Operation>
struct binary_traits
{
typedef Operation function_type;
typedef const Operation & param_type;
typedef typename Operation::result_type result_type;
typedef typename Operation::first_argument_type first_argument_type;
typedef typename Operation::second_argument_type second_argument_type;
};
#endif // BOOST_NO_TEMPLATE_PARTIAL_SPECIALIZATION
// --------------------------------------------------------------------------
// unary_negate, not1
// --------------------------------------------------------------------------
    // Negating adaptor: unary_negate(p)(x) == !p(x).
    template <class Predicate>
    class unary_negate
        : public std::unary_function<typename unary_traits<Predicate>::argument_type,bool>
    {
      public:
        explicit unary_negate(typename unary_traits<Predicate>::param_type x)
            :
            pred(x)
        {}
        bool operator()(typename call_traits<typename unary_traits<Predicate>::argument_type>::param_type x) const
        {
            return !pred(x);
        }
      private:
        typename unary_traits<Predicate>::function_type pred;
    };

    template <class Predicate>
    unary_negate<Predicate> not1(const Predicate &pred)
    {
        // The cast is to placate Borland C++Builder in certain circumstances.
        // I don't think it should be necessary.
        return unary_negate<Predicate>((typename unary_traits<Predicate>::param_type)pred);
    }

    // Non-const overload so function references also bind.
    template <class Predicate>
    unary_negate<Predicate> not1(Predicate &pred)
    {
        return unary_negate<Predicate>(pred);
    }
// --------------------------------------------------------------------------
// binary_negate, not2
// --------------------------------------------------------------------------
    // Negating adaptor: binary_negate(p)(x,y) == !p(x,y).
    template <class Predicate>
    class binary_negate
        : public std::binary_function<typename binary_traits<Predicate>::first_argument_type,
                                      typename binary_traits<Predicate>::second_argument_type,
                                      bool>
    {
      public:
        explicit binary_negate(typename binary_traits<Predicate>::param_type x)
            :
            pred(x)
        {}
        bool operator()(typename call_traits<typename binary_traits<Predicate>::first_argument_type>::param_type x,
                        typename call_traits<typename binary_traits<Predicate>::second_argument_type>::param_type y) const
        {
            return !pred(x,y);
        }
      private:
        typename binary_traits<Predicate>::function_type pred;
    };

    template <class Predicate>
    binary_negate<Predicate> not2(const Predicate &pred)
    {
        // The cast is to placate Borland C++Builder in certain circumstances.
        // I don't think it should be necessary.
        return binary_negate<Predicate>((typename binary_traits<Predicate>::param_type)pred);
    }

    // Non-const overload so function references also bind.
    template <class Predicate>
    binary_negate<Predicate> not2(Predicate &pred)
    {
        return binary_negate<Predicate>(pred);
    }
// --------------------------------------------------------------------------
// binder1st, bind1st
// --------------------------------------------------------------------------
    // Binds the first argument of a binary callable to a fixed value:
    // binder1st(op, v)(x) == op(v, x).
    template <class Operation>
    class binder1st
        : public std::unary_function<typename binary_traits<Operation>::second_argument_type,
                                     typename binary_traits<Operation>::result_type>
    {
      public:
        binder1st(typename binary_traits<Operation>::param_type x,
                  typename call_traits<typename binary_traits<Operation>::first_argument_type>::param_type y)
            :
            op(x), value(y)
        {}

        typename binary_traits<Operation>::result_type
        operator()(typename call_traits<typename binary_traits<Operation>::second_argument_type>::param_type x) const
        {
            return op(value, x);
        }

      protected:
        typename binary_traits<Operation>::function_type op;
        typename binary_traits<Operation>::first_argument_type value;
    };

    template <class Operation>
    inline binder1st<Operation> bind1st(const Operation &op,
                                        typename call_traits<
                                                    typename binary_traits<Operation>::first_argument_type
                                        >::param_type x)
    {
        // The cast is to placate Borland C++Builder in certain circumstances.
        // I don't think it should be necessary.
        return binder1st<Operation>((typename binary_traits<Operation>::param_type)op, x);
    }

    // Non-const overload so function references also bind.
    template <class Operation>
    inline binder1st<Operation> bind1st(Operation &op,
                                        typename call_traits<
                                                    typename binary_traits<Operation>::first_argument_type
                                        >::param_type x)
    {
        return binder1st<Operation>(op, x);
    }
// --------------------------------------------------------------------------
// binder2nd, bind2nd
// --------------------------------------------------------------------------
    // Binds the second argument of a binary callable to a fixed value:
    // binder2nd(op, v)(x) == op(x, v).
    template <class Operation>
    class binder2nd
        : public std::unary_function<typename binary_traits<Operation>::first_argument_type,
                                     typename binary_traits<Operation>::result_type>
    {
      public:
        binder2nd(typename binary_traits<Operation>::param_type x,
                  typename call_traits<typename binary_traits<Operation>::second_argument_type>::param_type y)
            :
            op(x), value(y)
        {}

        typename binary_traits<Operation>::result_type
        operator()(typename call_traits<typename binary_traits<Operation>::first_argument_type>::param_type x) const
        {
            return op(x, value);
        }

      protected:
        typename binary_traits<Operation>::function_type op;
        typename binary_traits<Operation>::second_argument_type value;
    };

    template <class Operation>
    inline binder2nd<Operation> bind2nd(const Operation &op,
                                        typename call_traits<
                                                    typename binary_traits<Operation>::second_argument_type
                                        >::param_type x)
    {
        // The cast is to placate Borland C++Builder in certain circumstances.
        // I don't think it should be necessary.
        return binder2nd<Operation>((typename binary_traits<Operation>::param_type)op, x);
    }

    // Non-const overload so function references also bind.
    template <class Operation>
    inline binder2nd<Operation> bind2nd(Operation &op,
                                        typename call_traits<
                                                    typename binary_traits<Operation>::second_argument_type
                                        >::param_type x)
    {
        return binder2nd<Operation>(op, x);
    }
// --------------------------------------------------------------------------
// mem_fun, etc
// --------------------------------------------------------------------------
    // Adaptors that turn a pointer-to-member-function into a function object
    // callable with an object *pointer*: mem_fun(&T::f)(p) == (p->*f)().
    template <class S, class T>
    class mem_fun_t : public std::unary_function<T*, S>
    {
      public:
        explicit mem_fun_t(S (T::*p)())
            :
            ptr(p)
        {}
        S operator()(T* p) const
        {
            return (p->*ptr)();
        }
      private:
        S (T::*ptr)();
    };

    // One-argument variant.
    template <class S, class T, class A>
    class mem_fun1_t : public std::binary_function<T*, A, S>
    {
      public:
        explicit mem_fun1_t(S (T::*p)(A))
            :
            ptr(p)
        {}
        S operator()(T* p, typename call_traits<A>::param_type x) const
        {
            return (p->*ptr)(x);
        }
      private:
        S (T::*ptr)(A);
    };

    // Const-member variants: accept a pointer-to-const object.
    template <class S, class T>
    class const_mem_fun_t : public std::unary_function<const T*, S>
    {
      public:
        explicit const_mem_fun_t(S (T::*p)() const)
            :
            ptr(p)
        {}
        S operator()(const T* p) const
        {
            return (p->*ptr)();
        }
      private:
        S (T::*ptr)() const;
    };

    template <class S, class T, class A>
    class const_mem_fun1_t : public std::binary_function<const T*, A, S>
    {
      public:
        explicit const_mem_fun1_t(S (T::*p)(A) const)
            :
            ptr(p)
        {}
        S operator()(const T* p, typename call_traits<A>::param_type x) const
        {
            return (p->*ptr)(x);
        }
      private:
        S (T::*ptr)(A) const;
    };

    // Factory functions: deduce S, T (and A) from the member pointer.
    template<class S, class T>
    inline mem_fun_t<S,T> mem_fun(S (T::*f)())
    {
        return mem_fun_t<S,T>(f);
    }

    template<class S, class T, class A>
    inline mem_fun1_t<S,T,A> mem_fun(S (T::*f)(A))
    {
        return mem_fun1_t<S,T,A>(f);
    }

    #ifndef BOOST_NO_POINTER_TO_MEMBER_CONST
    template<class S, class T>
    inline const_mem_fun_t<S,T> mem_fun(S (T::*f)() const)
    {
        return const_mem_fun_t<S,T>(f);
    }

    template<class S, class T, class A>
    inline const_mem_fun1_t<S,T,A> mem_fun(S (T::*f)(A) const)
    {
        return const_mem_fun1_t<S,T,A>(f);
    }
    #endif // BOOST_NO_POINTER_TO_MEMBER_CONST
// --------------------------------------------------------------------------
// mem_fun_ref, etc
// --------------------------------------------------------------------------
    // Same as the mem_fun family, but the adapted object is passed by
    // *reference* instead of by pointer: mem_fun_ref(&T::f)(obj) == (obj.*f)().
    template <class S, class T>
    class mem_fun_ref_t : public std::unary_function<T&, S>
    {
      public:
        explicit mem_fun_ref_t(S (T::*p)())
            :
            ptr(p)
        {}
        S operator()(T& p) const
        {
            return (p.*ptr)();
        }
      private:
        S (T::*ptr)();
    };

    // One-argument variant.
    template <class S, class T, class A>
    class mem_fun1_ref_t : public std::binary_function<T&, A, S>
    {
      public:
        explicit mem_fun1_ref_t(S (T::*p)(A))
            :
            ptr(p)
        {}
        S operator()(T& p, typename call_traits<A>::param_type x) const
        {
            return (p.*ptr)(x);
        }
      private:
        S (T::*ptr)(A);
    };

    // Const-member variants: accept a reference-to-const object.
    template <class S, class T>
    class const_mem_fun_ref_t : public std::unary_function<const T&, S>
    {
      public:
        explicit const_mem_fun_ref_t(S (T::*p)() const)
            :
            ptr(p)
        {}

        S operator()(const T &p) const
        {
            return (p.*ptr)();
        }
      private:
        S (T::*ptr)() const;
    };

    template <class S, class T, class A>
    class const_mem_fun1_ref_t : public std::binary_function<const T&, A, S>
    {
      public:
        explicit const_mem_fun1_ref_t(S (T::*p)(A) const)
            :
            ptr(p)
        {}

        S operator()(const T& p, typename call_traits<A>::param_type x) const
        {
            return (p.*ptr)(x);
        }
      private:
        S (T::*ptr)(A) const;
    };

    // Factory functions: deduce the template arguments from the member pointer.
    template<class S, class T>
    inline mem_fun_ref_t<S,T> mem_fun_ref(S (T::*f)())
    {
        return mem_fun_ref_t<S,T>(f);
    }

    template<class S, class T, class A>
    inline mem_fun1_ref_t<S,T,A> mem_fun_ref(S (T::*f)(A))
    {
        return mem_fun1_ref_t<S,T,A>(f);
    }

    #ifndef BOOST_NO_POINTER_TO_MEMBER_CONST
    template<class S, class T>
    inline const_mem_fun_ref_t<S,T> mem_fun_ref(S (T::*f)() const)
    {
        return const_mem_fun_ref_t<S,T>(f);
    }

    template<class S, class T, class A>
    inline const_mem_fun1_ref_t<S,T,A> mem_fun_ref(S (T::*f)(A) const)
    {
        return const_mem_fun1_ref_t<S,T,A>(f);
    }
    #endif // BOOST_NO_POINTER_TO_MEMBER_CONST
// --------------------------------------------------------------------------
// ptr_fun
// --------------------------------------------------------------------------
    // Adaptors wrapping plain function pointers as adaptable function
    // objects; argument passing is optimised through call_traits.
    template <class Arg, class Result>
    class pointer_to_unary_function : public std::unary_function<Arg,Result>
    {
      public:
        explicit pointer_to_unary_function(Result (*f)(Arg))
            :
            func(f)
        {}

        Result operator()(typename call_traits<Arg>::param_type x) const
        {
            return func(x);
        }

      private:
        Result (*func)(Arg);
    };

    template <class Arg, class Result>
    inline pointer_to_unary_function<Arg,Result> ptr_fun(Result (*f)(Arg))
    {
        return pointer_to_unary_function<Arg,Result>(f);
    }

    // Binary counterpart.
    template <class Arg1, class Arg2, class Result>
    class pointer_to_binary_function : public std::binary_function<Arg1,Arg2,Result>
    {
      public:
        explicit pointer_to_binary_function(Result (*f)(Arg1, Arg2))
            :
            func(f)
        {}

        Result operator()(typename call_traits<Arg1>::param_type x, typename call_traits<Arg2>::param_type y) const
        {
            return func(x,y);
        }

      private:
        Result (*func)(Arg1, Arg2);
    };

    template <class Arg1, class Arg2, class Result>
    inline pointer_to_binary_function<Arg1,Arg2,Result> ptr_fun(Result (*f)(Arg1, Arg2))
    {
        return pointer_to_binary_function<Arg1,Arg2,Result>(f);
    }
} // namespace riakboost
#endif
| basho-labs/riak-cxx-client | deps/boost-1.47.0/boost/functional.hpp | C++ | apache-2.0 | 18,951 |
#coding=utf-8
# Small CLI demo: warns about missing positional arguments and parses an
# optional output-file flag with optparse.
import sys
import optparse

if len(sys.argv) != 3:
    # Warn but deliberately keep running: the hard exit was disabled on
    # purpose (see the commented-out raise below).
    sys.stderr.write("usage: python %s inputfile outputfile\n" % sys.argv[0])
    #raise SystemExit(1)

p = optparse.OptionParser()
# Register -o/--output as a single option storing into opts.outfile;
# registering both aliases together keeps them unified in --help output.
p.add_option("-o", "--output", action="store", dest="outfile")
p.set_defaults(debug=False)

# Parse the command line.
opts, args = p.parse_args()
outfile = opts.outfile
//===--- CSDiag.cpp - Constraint Diagnostics ------------------------------===//
//
// This source file is part of the Swift.org open source project
//
// Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See https://swift.org/LICENSE.txt for license information
// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
//
//===----------------------------------------------------------------------===//
//
// This file implements diagnostics for the type checker.
//
//===----------------------------------------------------------------------===//
#include "ConstraintSystem.h"
#include "CSDiag.h"
#include "CalleeCandidateInfo.h"
#include "MiscDiagnostics.h"
#include "swift/AST/ASTWalker.h"
#include "swift/AST/GenericEnvironment.h"
#include "swift/AST/Initializer.h"
#include "swift/AST/ParameterList.h"
#include "swift/AST/ProtocolConformance.h"
#include "swift/AST/TypeWalker.h"
#include "swift/AST/TypeMatcher.h"
#include "swift/Basic/Defer.h"
#include "swift/Basic/StringExtras.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/SaveAndRestore.h"
using namespace swift;
using namespace constraints;
namespace swift {
  /// Render \c type as a parameter-list style string for diagnostics,
  /// guaranteeing at least one surrounding pair of parentheses.
  std::string getTypeListString(Type type) {
    std::string result;

    // Always make sure to have at least one set of parens
    bool forceParens =
        !type->is<TupleType>() && !isa<ParenType>(type.getPointer());
    if (forceParens)
      result.push_back('(');

    llvm::raw_string_ostream OS(result);
    type->print(OS);
    OS.flush();

    if (forceParens)
      result.push_back(')');

    return result;
  }

  /// Replace archetypes and generic type parameters in \c ty with the
  /// context's UnresolvedType placeholder (used to avoid printing
  /// meaningless generic names in diagnostics).
  Type replaceTypeParametersWithUnresolved(Type ty) {
    if (!ty) return ty;

    // Fast path: nothing to rewrite.
    if (!ty->hasTypeParameter() && !ty->hasArchetype()) return ty;

    auto &ctx = ty->getASTContext();

    return ty.transform([&](Type type) -> Type {
      if (type->is<ArchetypeType>() ||
          type->isTypeParameter())
        return ctx.TheUnresolvedType;
      return type;
    });
  }

  /// Replace type variables (and their members) in \c ty with the
  /// context's UnresolvedType placeholder.
  Type replaceTypeVariablesWithUnresolved(Type ty) {
    if (!ty) return ty;

    // Fast path: nothing to rewrite.
    if (!ty->hasTypeVariable()) return ty;

    auto &ctx = ty->getASTContext();

    return ty.transform([&](Type type) -> Type {
      if (type->isTypeVariableOrMember())
        return ctx.TheUnresolvedType;
      return type;
    });
  }
};
/// True if \p ty is a type variable, a member of one, or the distinguished
/// UnresolvedType placeholder.
static bool isUnresolvedOrTypeVarType(Type ty) {
  return ty->isTypeVariableOrMember() || ty->is<UnresolvedType>();
}
/// Given a subpath of an old locator, compute its summary flags.
/// Given a subpath of an old locator, compute its summary flags.
static unsigned recomputeSummaryFlags(ConstraintLocator *oldLocator,
                                      ArrayRef<LocatorPathElt> path) {
  // Only recompute when the old locator actually carried flags; otherwise
  // the (sub)path is known to be flag-free as well.
  if (oldLocator->getSummaryFlags() != 0)
    return ConstraintLocator::getSummaryFlagsForPath(path);
  return 0;
}
/// Simplify \p locator against its anchor expression, returning a locator
/// registered with \p cs; optionally produces a simplified target locator
/// (e.g. the callee of an application) through \p targetLocator.
ConstraintLocator *
constraints::simplifyLocator(ConstraintSystem &cs, ConstraintLocator *locator,
                             SourceRange &range,
                             ConstraintLocator **targetLocator) {
  // Clear out the target locator result.
  if (targetLocator)
    *targetLocator = nullptr;

  // The path to be tacked on to the target locator to identify the specific
  // target.
  Expr *targetAnchor;
  SmallVector<LocatorPathElt, 4> targetPath;

  auto path = locator->getPath();
  auto anchor = locator->getAnchor();
  simplifyLocator(anchor, path, targetAnchor, targetPath, range);

  // If we have a target anchor, build and simplify the target locator.
  if (targetLocator && targetAnchor) {
    SourceRange targetRange;
    unsigned targetFlags = recomputeSummaryFlags(locator, targetPath);
    auto loc = cs.getConstraintLocator(targetAnchor, targetPath, targetFlags);
    *targetLocator = simplifyLocator(cs, loc, targetRange);
  }

  // If we didn't simplify anything, just return the input.
  if (anchor == locator->getAnchor() &&
      path.size() == locator->getPath().size()) {
    return locator;
  }

  // Recompute the summary flags if we had any to begin with. This is
  // necessary because we might remove e.g. tuple elements from the path.
  unsigned summaryFlags = recomputeSummaryFlags(locator, path);
  return cs.getConstraintLocator(anchor, path, summaryFlags);
}
/// Worker for locator simplification: repeatedly peels leading path elements
/// off \p path, advancing \p anchor into the corresponding subexpression.
/// When a sub-target can be identified (e.g. the callee of a call whose
/// argument is being examined), it is reported via \p targetAnchor /
/// \p targetPath; \p range receives a narrowed source range where available.
/// Stops at the first path element it cannot consume.
void constraints::simplifyLocator(Expr *&anchor,
                                  ArrayRef<LocatorPathElt> &path,
                                  Expr *&targetAnchor,
                                  SmallVectorImpl<LocatorPathElt> &targetPath,
                                  SourceRange &range) {
  range = SourceRange();
  targetAnchor = nullptr;

  while (!path.empty()) {
    switch (path[0].getKind()) {
    case ConstraintLocator::ApplyArgument:
      // Extract application argument.
      if (auto applyExpr = dyn_cast<ApplyExpr>(anchor)) {
        // The target anchor is the function being called.
        targetAnchor = applyExpr->getFn();
        targetPath.push_back(path[0]);

        anchor = applyExpr->getArg();
        path = path.slice(1);
        continue;
      }

      if (auto objectLiteralExpr = dyn_cast<ObjectLiteralExpr>(anchor)) {
        targetAnchor = nullptr;
        targetPath.clear();

        anchor = objectLiteralExpr->getArg();
        path = path.slice(1);
        continue;
      }
      break;

    case ConstraintLocator::ApplyFunction:
      // Extract application function.
      if (auto applyExpr = dyn_cast<ApplyExpr>(anchor)) {
        // No additional target locator information.
        targetAnchor = nullptr;
        targetPath.clear();

        anchor = applyExpr->getFn();
        path = path.slice(1);
        continue;
      }

      // The unresolved member itself is the function.
      if (auto unresolvedMember = dyn_cast<UnresolvedMemberExpr>(anchor)) {
        if (unresolvedMember->getArgument()) {
          // No additional target locator information.
          targetAnchor = nullptr;
          targetPath.clear();

          anchor = unresolvedMember;
          path = path.slice(1);
          continue;
        }
      }
      break;

    case ConstraintLocator::Load:
    case ConstraintLocator::RvalueAdjustment:
    case ConstraintLocator::ScalarToTuple:
    case ConstraintLocator::UnresolvedMember:
      // Loads, rvalue adjustment, and scalar-to-tuple conversions are implicit.
      path = path.slice(1);
      continue;

    case ConstraintLocator::NamedTupleElement:
    case ConstraintLocator::TupleElement:
      // Extract tuple element.
      if (auto tupleExpr = dyn_cast<TupleExpr>(anchor)) {
        unsigned index = path[0].getValue();
        if (index < tupleExpr->getNumElements()) {
          // Append this extraction to the target locator path.
          if (targetAnchor) {
            targetPath.push_back(path[0]);
          }

          anchor = tupleExpr->getElement(index);
          path = path.slice(1);
          continue;
        }
      }
      break;

    case ConstraintLocator::ApplyArgToParam:
      // Extract tuple element.
      if (auto tupleExpr = dyn_cast<TupleExpr>(anchor)) {
        unsigned index = path[0].getValue();
        if (index < tupleExpr->getNumElements()) {
          // Append this extraction to the target locator path.
          if (targetAnchor) {
            targetPath.push_back(path[0]);
          }

          anchor = tupleExpr->getElement(index);
          path = path.slice(1);
          continue;
        }
      }

      // Extract subexpression in parentheses.
      if (auto parenExpr = dyn_cast<ParenExpr>(anchor)) {
        // A paren expr has exactly one element, so the index must be 0.
        assert(path[0].getValue() == 0);

        // Append this extraction to the target locator path.
        if (targetAnchor) {
          targetPath.push_back(path[0]);
        }

        anchor = parenExpr->getSubExpr();
        path = path.slice(1);
        continue;
      }
      break;

    case ConstraintLocator::ConstructorMember:
      if (auto typeExpr = dyn_cast<TypeExpr>(anchor)) {
        // This is really an implicit 'init' MemberRef, so point at the base,
        // i.e. the TypeExpr.
        targetAnchor = nullptr;
        targetPath.clear();
        range = SourceRange();
        anchor = typeExpr;
        path = path.slice(1);
        continue;
      }
      LLVM_FALLTHROUGH;

    case ConstraintLocator::Member:
    case ConstraintLocator::MemberRefBase:
      if (auto UDE = dyn_cast<UnresolvedDotExpr>(anchor)) {
        // No additional target locator information.
        targetAnchor = nullptr;
        targetPath.clear();

        range = UDE->getNameLoc().getSourceRange();
        anchor = UDE->getBase();
        path = path.slice(1);
        continue;
      }
      break;

    case ConstraintLocator::SubscriptIndex:
      if (auto subscript = dyn_cast<SubscriptExpr>(anchor)) {
        targetAnchor = subscript->getBase();
        targetPath.clear();

        anchor = subscript->getIndex();
        path = path.slice(1);
        continue;
      }
      break;

    case ConstraintLocator::SubscriptMember:
      if (isa<SubscriptExpr>(anchor)) {
        targetAnchor = nullptr;
        targetPath.clear();
        path = path.slice(1);
        continue;
      }
      break;

    case ConstraintLocator::ClosureResult:
      if (auto CE = dyn_cast<ClosureExpr>(anchor)) {
        if (CE->hasSingleExpressionBody()) {
          targetAnchor = nullptr;
          targetPath.clear();
          anchor = CE->getSingleExpressionBody();
          path = path.slice(1);
          continue;
        }
      }
      break;

    default:
      // FIXME: Lots of other cases to handle.
      break;
    }

    // If we get here, we couldn't simplify the path further.
    break;
  }
}
/// Simplify the given locator down to a specific anchor expression,
/// if possible.
///
/// \returns the anchor expression if it fully describes the locator, or
/// null otherwise.
static Expr *simplifyLocatorToAnchor(ConstraintSystem &cs,
                                     ConstraintLocator *locator) {
  if (!locator || !locator->getAnchor())
    return nullptr;

  SourceRange range;
  locator = simplifyLocator(cs, locator, range);

  // Only a fully-consumed path (anchor present, no residual elements)
  // describes the locator completely.
  if (!locator->getAnchor() || !locator->getPath().empty())
    return nullptr;

  return locator->getAnchor();
}
/// \brief Determine the number of distinct overload choices in the
/// provided set.
static unsigned countDistinctOverloads(ArrayRef<OverloadChoice> choices) {
  // Deduplicate by each choice's opaque identity pointer.
  llvm::SmallPtrSet<void *, 4> uniqueChoices;
  unsigned result = 0;
  for (auto choice : choices) {
    if (uniqueChoices.insert(choice.getOpaqueChoiceSimple()).second)
      ++result;
  }
  return result;
}
/// \brief Determine the name of the overload in a set of overload choices.
///
/// Returns an empty DeclName when no declaration-based choice is present.
static DeclName getOverloadChoiceName(ArrayRef<OverloadChoice> choices) {
  DeclName name;
  for (auto choice : choices) {
    // Skip non-declaration choices (e.g. keypath/tuple accesses).
    if (!choice.isDecl())
      continue;

    DeclName nextName = choice.getDecl()->getFullName();
    if (!name) {
      name = nextName;
      continue;
    }

    if (name != nextName) {
      // Assume all choices have the same base name and only differ in
      // argument labels. This may not be a great assumption, but we don't
      // really have a way to recover for diagnostics otherwise.
      return name.getBaseName();
    }
  }

  return name;
}
/// Try to diagnose an overload ambiguity caused by a trailing closure whose
/// parameter label would disambiguate the call, offering a fix-it per
/// candidate label. Returns true if any diagnostics were emitted.
static bool
tryDiagnoseTrailingClosureAmbiguity(TypeChecker &tc,
                                    const Expr *expr,
                                    const Expr *anchor,
                                    ArrayRef<OverloadChoice> choices) {
  auto *callExpr = dyn_cast<CallExpr>(expr);
  if (!callExpr)
    return false;
  if (!callExpr->hasTrailingClosure())
    return false;
  if (callExpr->getFn() != anchor)
    return false;

  llvm::SmallMapVector<Identifier, const ValueDecl *, 8> choicesByLabel;
  for (const OverloadChoice &choice : choices) {
    auto *callee = dyn_cast<AbstractFunctionDecl>(choice.getDecl());
    if (!callee)
      return false;

    // The trailing closure binds to the last parameter of the last
    // parameter list.
    const ParameterList *paramList = callee->getParameterLists().back();
    const ParamDecl *param = paramList->getArray().back();

    // Sanity-check that the trailing closure corresponds to this parameter.
    if (!param->getInterfaceType()->is<AnyFunctionType>())
      return false;

    Identifier trailingClosureLabel = param->getArgumentName();
    auto &choiceForLabel = choicesByLabel[trailingClosureLabel];

    // FIXME: Cargo-culted from diagnoseAmbiguity: apparently the same decl can
    // appear more than once?
    if (choiceForLabel == callee)
      continue;

    // If just providing the trailing closure label won't solve the ambiguity,
    // don't bother offering the fix-it.
    if (choiceForLabel != nullptr)
      return false;

    choiceForLabel = callee;
  }

  // If we got here, then all of the choices have unique labels. Offer them in
  // order.
  for (const auto &choicePair : choicesByLabel) {
    auto diag = tc.diagnose(expr->getLoc(),
                            diag::ambiguous_because_of_trailing_closure,
                            choicePair.first.empty(),
                            choicePair.second->getFullName());
    swift::fixItEncloseTrailingClosure(tc, diag, callExpr, choicePair.first);
  }

  return true;
}
/// Emit an "ambiguous use of ..." diagnostic for an expression for which the
/// constraint system produced multiple equally-viable solutions, choosing the
/// most interesting ambiguous overload set from the solution diff and noting
/// its candidates. Returns true if a diagnostic was emitted.
static bool diagnoseAmbiguity(ConstraintSystem &cs,
                              ArrayRef<Solution> solutions,
                              Expr *expr) {
  // Produce a diff of the solutions.
  SolutionDiff diff(solutions);

  // Find the locators which have the largest numbers of distinct overloads.
  Optional<unsigned> bestOverload;
  // Overloads are scored by lexicographical comparison of (# of distinct
  // overloads, depth, *reverse* of the index). N.B. - cannot be used for the
  // reversing: the score version of index == 0 should be > than that of 1, but
  // -0 == 0 < UINT_MAX == -1, whereas ~0 == UINT_MAX > UINT_MAX - 1 == ~1.
  auto score = [](unsigned distinctOverloads, unsigned depth, unsigned index) {
    return std::make_tuple(distinctOverloads, depth, ~index);
  };
  // Start with the lowest possible score so any real candidate beats it.
  auto bestScore = score(0, 0, std::numeric_limits<unsigned>::max());

  // Get a map of expressions to their depths and post-order traversal indices.
  // Heuristically, all other things being equal, we should complain about the
  // ambiguous expression that (1) has the most overloads, (2) is deepest, or
  // (3) comes earliest in the expression.
  auto depthMap = expr->getDepthMap();
  auto indexMap = expr->getPreorderIndexMap();

  for (unsigned i = 0, n = diff.overloads.size(); i != n; ++i) {
    auto &overload = diff.overloads[i];

    // If we can't resolve the locator to an anchor expression with no path,
    // we can't diagnose this well.
    auto *anchor = simplifyLocatorToAnchor(cs, overload.locator);
    if (!anchor)
      continue;
    auto it = indexMap.find(anchor);
    if (it == indexMap.end())
      continue;
    unsigned index = it->second;
    it = depthMap.find(anchor);
    if (it == depthMap.end())
      continue;
    unsigned depth = it->second;

    // If we don't have a name to hang on to, it'll be hard to diagnose this
    // overload.
    if (!getOverloadChoiceName(overload.choices))
      continue;

    unsigned distinctOverloads = countDistinctOverloads(overload.choices);

    // We need at least two overloads to make this interesting.
    if (distinctOverloads < 2)
      continue;

    // If we have more distinct overload choices for this locator than for
    // prior locators, just keep this locator.
    auto thisScore = score(distinctOverloads, depth, index);
    if (thisScore > bestScore) {
      bestScore = thisScore;
      bestOverload = i;
      continue;
    }

    // We have better results. Ignore this one.
  }

  // FIXME: Should be able to pick the best locator, e.g., based on some
  // depth-first numbering of expressions.
  if (bestOverload) {
    auto &overload = diff.overloads[*bestOverload];
    auto name = getOverloadChoiceName(overload.choices);
    auto anchor = simplifyLocatorToAnchor(cs, overload.locator);

    // Emit the ambiguity diagnostic.
    auto &tc = cs.getTypeChecker();
    tc.diagnose(anchor->getLoc(),
                name.isOperator() ? diag::ambiguous_operator_ref
                                  : diag::ambiguous_decl_ref,
                name);

    // Prefer the specialized trailing-closure diagnostic (with fix-its) when
    // the ambiguity can be resolved by labeling the trailing closure.
    if (tryDiagnoseTrailingClosureAmbiguity(tc, expr, anchor, overload.choices))
      return true;

    // Emit candidates. Use a SmallPtrSet to make sure only emit a particular
    // candidate once. FIXME: Why is one candidate getting into the overload
    // set multiple times? (See also tryDiagnoseTrailingClosureAmbiguity.)
    SmallPtrSet<Decl*, 8> EmittedDecls;
    for (auto choice : overload.choices) {
      switch (choice.getKind()) {
      case OverloadChoiceKind::Decl:
      case OverloadChoiceKind::DeclViaDynamic:
      case OverloadChoiceKind::DeclViaBridge:
      case OverloadChoiceKind::DeclViaUnwrappedOptional:
        // FIXME: show deduced types, etc, etc.
        if (EmittedDecls.insert(choice.getDecl()).second)
          tc.diagnose(choice.getDecl(), diag::found_candidate);
        break;

      case OverloadChoiceKind::KeyPathApplication:
        // Skip key path applications, since we don't want them to noise up
        // unrelated subscript diagnostics.
        break;

      case OverloadChoiceKind::BaseType:
      case OverloadChoiceKind::TupleIndex:
        // FIXME: Actually diagnose something here.
        break;
      }
    }

    return true;
  }

  // FIXME: If we inferred different types for literals (for example),
  // could diagnose ambiguity that way as well.
  return false;
}
/// Given an expression that has a non-lvalue type, dig into it until we find
/// the part of the expression that prevents the entire subexpression from being
/// mutable. For example, in a sequence like "x.v.v = 42" we want to complain
/// about "x" being a let property if "v.v" are both mutable.
///
/// This returns the base subexpression that looks immutable (or that can't be
/// analyzed any further) along with a decl extracted from it if we could.
///
static std::pair<Expr*, ValueDecl*>
resolveImmutableBase(Expr *expr, ConstraintSystem &CS) {
  expr = expr->getValueProvidingExpr();

  // Provide specific diagnostics for assignment to subscripts whose base expr
  // is known to be an rvalue.
  if (auto *SE = dyn_cast<SubscriptExpr>(expr)) {
    // If we found a decl for the subscript, check to see if it is a set-only
    // subscript decl.
    SubscriptDecl *member = nullptr;
    if (SE->hasDecl())
      member = dyn_cast_or_null<SubscriptDecl>(SE->getDecl().getDecl());

    // Otherwise, try to recover the subscript decl from the constraint
    // system's resolved member reference.
    if (!member) {
      auto loc = CS.getConstraintLocator(SE,ConstraintLocator::SubscriptMember);
      member = dyn_cast_or_null<SubscriptDecl>(CS.findResolvedMemberRef(loc));
    }

    // If it isn't settable, return it.
    if (member) {
      if (!member->isSettable() ||
          !member->isSetterAccessibleFrom(CS.DC))
        return { expr, member };
    }

    // If it is settable, then the base must be the problem, recurse.
    return resolveImmutableBase(SE->getBase(), CS);
  }

  // Look through property references.
  if (auto *UDE = dyn_cast<UnresolvedDotExpr>(expr)) {
    // If we found a decl for the UDE, check it.
    auto loc = CS.getConstraintLocator(UDE, ConstraintLocator::Member);

    // If we can resolve a member, we can determine whether it is settable in
    // this context.
    if (auto *member = CS.findResolvedMemberRef(loc)) {
      auto *memberVD = dyn_cast<VarDecl>(member);

      // If the member isn't a vardecl (e.g. its a funcdecl), or it isn't
      // settable, then it is the problem: return it.
      if (!memberVD ||
          !member->isSettable(nullptr) ||
          !memberVD->isSetterAccessibleFrom(CS.DC))
        return { expr, member };
    }

    // If we weren't able to resolve a member or if it is mutable, then the
    // problem must be with the base, recurse.
    return resolveImmutableBase(UDE->getBase(), CS);
  }

  // Already-resolved member references: check the storage decl directly.
  if (auto *MRE = dyn_cast<MemberRefExpr>(expr)) {
    // If the member isn't settable, then it is the problem: return it.
    if (auto member = dyn_cast<AbstractStorageDecl>(MRE->getMember().getDecl()))
      if (!member->isSettable(nullptr) ||
          !member->isSetterAccessibleFrom(CS.DC))
        return { expr, member };

    // If we weren't able to resolve a member or if it is mutable, then the
    // problem must be with the base, recurse.
    return resolveImmutableBase(MRE->getBase(), CS);
  }

  // A direct declaration reference is as far down as we can go; hand back the
  // referenced decl for a tailored diagnostic.
  if (auto *DRE = dyn_cast<DeclRefExpr>(expr))
    return { expr, DRE->getDecl() };

  // Look through x!
  if (auto *FVE = dyn_cast<ForceValueExpr>(expr))
    return resolveImmutableBase(FVE->getSubExpr(), CS);

  // Look through x?
  if (auto *BOE = dyn_cast<BindOptionalExpr>(expr))
    return resolveImmutableBase(BOE->getSubExpr(), CS);

  // Look through implicit conversions. A LoadExpr means the base was already
  // turned into an rvalue, so stop there (the caller diagnoses that case).
  if (auto *ICE = dyn_cast<ImplicitConversionExpr>(expr))
    if (!isa<LoadExpr>(ICE->getSubExpr()))
      return resolveImmutableBase(ICE->getSubExpr(), CS);

  // Can't analyze any further: return the expression with no extracted decl.
  return { expr, nullptr };
}
/// Determine whether the given expression is an lvalue that has been loaded
/// into an rvalue, looking through conditional ('? :') expressions whose
/// arms are both loaded lvalues.
static bool isLoadedLValue(Expr *expr) {
  auto *E = expr->getSemanticsProvidingExpr();

  // A load node is, by definition, a loaded lvalue.
  if (isa<LoadExpr>(E))
    return true;

  // A conditional expression qualifies only when both of its arms do.
  auto *condExpr = dyn_cast<IfExpr>(E);
  if (!condExpr)
    return false;
  return isLoadedLValue(condExpr->getThenExpr()) &&
         isLoadedLValue(condExpr->getElseExpr());
}
/// Diagnose why \p destExpr, which has non-lvalue type, cannot be mutated.
/// Emits \p diagID with a tailored message when a specific cause is found,
/// and falls back to \p unknownDiagID (mentioning the destination's type)
/// otherwise.
static void diagnoseSubElementFailure(Expr *destExpr,
                                      SourceLoc loc,
                                      ConstraintSystem &CS,
                                      Diag<StringRef> diagID,
                                      Diag<Type> unknownDiagID) {
  auto &TC = CS.getTypeChecker();

  // Walk through the destination expression, resolving what the problem is. If
  // we find a node in the lvalue path that is problematic, this returns it.
  auto immInfo = resolveImmutableBase(destExpr, CS);

  // Otherwise, we cannot resolve this because the available setter candidates
  // are all mutating and the base must be mutating. If we dug out a
  // problematic decl, we can produce a nice tailored diagnostic.
  if (auto *VD = dyn_cast_or_null<VarDecl>(immInfo.second)) {
    std::string message = "'";
    message += VD->getName().str().str();
    message += "'";

    // Pick the most specific reason the variable can't be mutated.
    if (VD->isCaptureList())
      message += " is an immutable capture";
    else if (VD->isImplicit())
      message += " is immutable";
    else if (VD->isLet())
      message += " is a 'let' constant";
    else if (!VD->isSettable(CS.DC))
      message += " is a get-only property";
    else if (!VD->isSetterAccessibleFrom(CS.DC))
      message += " setter is inaccessible";
    else {
      message += " is immutable";
    }
    TC.diagnose(loc, diagID, message)
      .highlight(immInfo.first->getSourceRange());

    // If this is a simple variable marked with a 'let', emit a note to fixit
    // hint it to 'var'.
    VD->emitLetToVarNoteIfSimple(CS.DC);
    return;
  }

  // If the underlying expression was a read-only subscript, diagnose that.
  if (auto *SD = dyn_cast_or_null<SubscriptDecl>(immInfo.second)) {
    StringRef message;
    if (!SD->isSettable())
      message = "subscript is get-only";
    else if (!SD->isSetterAccessibleFrom(CS.DC))
      message = "subscript setter is inaccessible";
    else
      message = "subscript is immutable";

    TC.diagnose(loc, diagID, message)
      .highlight(immInfo.first->getSourceRange());
    return;
  }

  // If we're trying to set an unapplied method, say that.
  if (auto *VD = dyn_cast_or_null<ValueDecl>(immInfo.second)) {
    std::string message = "'";
    message += VD->getBaseName().getIdentifier().str();
    message += "'";

    if (auto *AFD = dyn_cast<AbstractFunctionDecl>(VD))
      message += AFD->getImplicitSelfDecl() ? " is a method" : " is a function";
    else
      message += " is not settable";

    TC.diagnose(loc, diagID, message)
      .highlight(immInfo.first->getSourceRange());
    return;
  }

  // If the expression is the result of a call, it is an rvalue, not a mutable
  // lvalue.
  if (auto *AE = dyn_cast<ApplyExpr>(immInfo.first)) {
    // Handle literals, which are a call to the conversion function.
    auto argsTuple =
      dyn_cast<TupleExpr>(AE->getArg()->getSemanticsProvidingExpr());
    if (isa<CallExpr>(AE) && AE->isImplicit() && argsTuple &&
        argsTuple->getNumElements() == 1 &&
        isa<LiteralExpr>(argsTuple->getElement(0)->
                         getSemanticsProvidingExpr())) {
      TC.diagnose(loc, diagID, "literals are not mutable");
      return;
    }

    // Describe the kind of apply so the message reads naturally.
    std::string name = "call";
    if (isa<PrefixUnaryExpr>(AE) || isa<PostfixUnaryExpr>(AE))
      name = "unary operator";
    else if (isa<BinaryExpr>(AE))
      name = "binary operator";
    else if (isa<CallExpr>(AE))
      name = "function call";
    else if (isa<DotSyntaxCallExpr>(AE) || isa<DotSyntaxBaseIgnoredExpr>(AE))
      name = "method call";

    // If we can name the callee, quote it instead of the generic kind.
    if (auto *DRE = dyn_cast<DeclRefExpr>(AE->getFn()->getValueProvidingExpr()))
      name = std::string("'") +
             DRE->getDecl()->getBaseName().getIdentifier().str().str() + "'";

    TC.diagnose(loc, diagID, name + " returns immutable value")
      .highlight(AE->getSourceRange());
    return;
  }

  // An lvalue loaded through an implicit conversion produces a temporary
  // rvalue; say so explicitly.
  if (auto *ICE = dyn_cast<ImplicitConversionExpr>(immInfo.first))
    if (isa<LoadExpr>(ICE->getSubExpr())) {
      TC.diagnose(loc, diagID,
                  "implicit conversion from '" +
                  CS.getType(ICE->getSubExpr())->getString() + "' to '" +
                  CS.getType(ICE)->getString() + "' requires a temporary")
        .highlight(ICE->getSourceRange());
      return;
    }

  if (auto IE = dyn_cast<IfExpr>(immInfo.first)) {
    if (isLoadedLValue(IE)) {
      TC.diagnose(loc, diagID,
                  "result of conditional operator '? :' is never mutable")
        .highlight(IE->getQuestionLoc())
        .highlight(IE->getColonLoc());
      return;
    }
  }

  // No specific cause found; fall back to a generic diagnostic that mentions
  // the type of the destination expression.
  TC.diagnose(loc, unknownDiagID, CS.getType(destExpr))
    .highlight(immInfo.first->getSourceRange());
}
/// Flags that can be used to control name lookup.
enum TCCFlags {
  /// Allow the result of the subexpression to be an lvalue. If this is not
  /// specified, any lvalue will be forced to be loaded into an rvalue.
  TCC_AllowLValue = 0x01,

  /// Re-type-check the given subexpression even if the expression has already
  /// been checked already. The client is asserting that infinite recursion is
  /// not possible because it has relaxed a constraint on the system.
  TCC_ForceRecheck = 0x02,

  /// tell typeCheckExpression that it is ok to produce an ambiguous result,
  /// it can just fill in holes with UnresolvedType and we'll deal with it.
  TCC_AllowUnresolvedTypeVariables = 0x04
};

/// An option set of TCCFlags.
typedef OptionSet<TCCFlags> TCCOptions;

/// Allow two bare TCCFlags to be combined directly into a TCCOptions set.
inline TCCOptions operator|(TCCFlags flag1, TCCFlags flag2) {
  return TCCOptions(flag1) | flag2;
}
namespace {
/// If a constraint system fails to converge on a solution for a given
/// expression, this class can produce a reasonable diagnostic for the failure
/// by analyzing the remnants of the failed constraint system. (Specifically,
/// left-over inactive, active and failed constraints.)
/// General constraint-level diagnosis is handled by
/// diagnoseConstraintFailure(); expression-kind-specific tuning is provided
/// by the visit methods below.
class FailureDiagnosis :public ASTVisitor<FailureDiagnosis, /*exprresult*/bool>{
  friend class ASTVisitor<FailureDiagnosis, /*exprresult*/bool>;

  /// The expression whose type checking failed and is being diagnosed.
  Expr *expr = nullptr;

  /// The failed constraint system left over from type checking \c expr.
  ConstraintSystem &CS;

public:
  FailureDiagnosis(Expr *expr, ConstraintSystem &cs) : expr(expr), CS(cs) {
    assert(expr);
  }

  /// Forward diagnostic emission to the type checker.
  template<typename ...ArgTypes>
  InFlightDiagnostic diagnose(ArgTypes &&...Args) {
    return CS.TC.diagnose(std::forward<ArgTypes>(Args)...);
  }

  /// Attempt to diagnose a failure without taking into account the specific
  /// kind of expression that could not be type checked.
  bool diagnoseConstraintFailure();

  /// Unless we've already done this, retypecheck the specified child of the
  /// current expression on its own, without including any contextual
  /// constraints or the parent expr nodes. This is more likely to succeed than
  /// type checking the original expression.
  ///
  /// This method may only be used on immediate children of the current expr
  /// node, because ClosureExpr parameters need to be treated specially.
  ///
  /// This can return a new expression (for e.g. when a UnresolvedDeclRef gets
  /// resolved) and returns null when the subexpression fails to typecheck.
  ///
  Expr *typeCheckChildIndependently(
      Expr *subExpr, Type convertType = Type(),
      ContextualTypePurpose convertTypePurpose = CTP_Unused,
      TCCOptions options = TCCOptions(),
      ExprTypeCheckListener *listener = nullptr,
      bool allowFreeTypeVariables = true);

  /// Convenience overload: no contextual type, just options.
  Expr *typeCheckChildIndependently(Expr *subExpr, TCCOptions options,
                                    bool allowFreeTypeVariables = true) {
    return typeCheckChildIndependently(subExpr, Type(), CTP_Unused, options,
                                       nullptr, allowFreeTypeVariables);
  }

  /// Type check the child and return the resulting type, or a null Type if
  /// type checking failed.
  Type getTypeOfTypeCheckedChildIndependently(Expr *subExpr,
                                            TCCOptions options = TCCOptions()) {
    auto e = typeCheckChildIndependently(subExpr, options);
    return e ? CS.getType(e) : Type();
  }

  /// This is the same as typeCheckChildIndependently, but works on an arbitrary
  /// subexpression of the current node because it handles ClosureExpr parents
  /// of the specified node.
  Expr *typeCheckArbitrarySubExprIndependently(Expr *subExpr,
                                             TCCOptions options = TCCOptions());

  /// Special magic to handle inout exprs and tuples in argument lists.
  Expr *typeCheckArgumentChildIndependently(Expr *argExpr, Type argType,
                                        const CalleeCandidateInfo &candidates,
                                            TCCOptions options = TCCOptions());

  /// Diagnose common failures due to applications of an argument list to an
  /// ApplyExpr or SubscriptExpr.
  bool diagnoseParameterErrors(CalleeCandidateInfo &CCI,
                               Expr *fnExpr, Expr *argExpr,
                               ArrayRef<Identifier> argLabels);

  /// Attempt to diagnose a specific failure from the info we've collected from
  /// the failed constraint system.
  bool diagnoseExprFailure();

  /// Emit an ambiguity diagnostic about the specified expression.
  void diagnoseAmbiguity(Expr *E);

  /// Attempt to produce a diagnostic for a mismatch between an expression's
  /// type and its assumed contextual type.
  bool diagnoseContextualConversionError(Expr *expr, Type contextualType,
                                         ContextualTypePurpose CTP);

  /// For an expression being type checked with a CTP_CalleeResult contextual
  /// type, try to diagnose a problem.
  bool diagnoseCalleeResultContextualConversionError();

  /// Attempt to produce a diagnostic for a mismatch between a call's
  /// type and its assumed contextual type.
  bool diagnoseCallContextualConversionErrors(ApplyExpr *callExpr,
                                              Type contextualType,
                                              ContextualTypePurpose CTP);

private:
  /// Validate potential contextual type for type-checking one of the
  /// sub-expressions, usually correct/valid types are the ones which
  /// either don't have type variables or are not generic, because
  /// generic types with left-over type variables or unresolved types
  /// degrade quality of diagnostics if allowed to be used as contextual.
  ///
  /// \param contextualType The candidate contextual type.
  /// \param CTP The contextual purpose attached to the given candidate.
  ///
  /// \returns Pair of validated type and it's purpose, potentially nullified
  /// if it wasn't an appropriate type to be used.
  std::pair<Type, ContextualTypePurpose>
  validateContextualType(Type contextualType, ContextualTypePurpose CTP);

  /// Check the specified closure to see if it is a multi-statement closure with
  /// an uninferred type. If so, diagnose the problem with an error and return
  /// true.
  bool diagnoseAmbiguousMultiStatementClosure(ClosureExpr *closure);

  /// Check the associated constraint system to see if it has any archetypes
  /// not properly resolved or missing. If so, diagnose the problem with
  /// an error and return true.
  bool diagnoseArchetypeAmbiguity();

  /// Emit an error message about an unbound generic parameter existing, and
  /// emit notes referring to the target of a diagnostic, e.g., the function
  /// or parameter being used.
  void diagnoseUnboundArchetype(ArchetypeType *archetype, Expr *anchor);

  /// Produce a diagnostic for a general member-lookup failure (irrespective of
  /// the exact expression kind).
  bool diagnoseGeneralMemberFailure(Constraint *constraint);

  /// Diagnose the lookup of a static member or enum element as instance member.
  void diagnoseTypeMemberOnInstanceLookup(Type baseObjTy,
                                          Expr *baseExpr,
                                          DeclName memberName,
                                          DeclNameLoc nameLoc,
                                          ValueDecl *member,
                                          SourceLoc loc);

  /// Given a result of name lookup that had no viable results, diagnose the
  /// unviable ones.
  void diagnoseUnviableLookupResults(MemberLookupResult &lookupResults,
                                     Type baseObjTy, Expr *baseExpr,
                                     DeclName memberName, DeclNameLoc nameLoc,
                                     SourceLoc loc);

  /// Produce a diagnostic for a general overload resolution failure
  /// (irrespective of the exact expression kind).
  bool diagnoseGeneralOverloadFailure(Constraint *constraint);

  /// Produce a diagnostic for a general conversion failure (irrespective of the
  /// exact expression kind).
  bool diagnoseGeneralConversionFailure(Constraint *constraint);

  /// Produce a specialized diagnostic if this is an invalid conversion to Bool.
  bool diagnoseConversionToBool(Expr *expr, Type exprType);

  /// Produce a diagnostic for binary comparisons of the nil literal
  /// to other values.
  bool diagnoseNilLiteralComparison(Expr *lhsExpr, Expr *rhsExpr,
                                    CalleeCandidateInfo &calleeInfo,
                                    SourceLoc applyLoc);

  /// Produce diagnostic for failures related to attributes associated with
  /// candidate functions/methods e.g. mutability.
  bool diagnoseMethodAttributeFailures(ApplyExpr *expr,
                                       ArrayRef<Identifier> argLabels,
                                       bool hasTrailingClosure,
                                       CalleeCandidateInfo &candidates);

  /// Produce diagnostic for failures related to unfulfilled requirements
  /// of the generic parameters used as arguments.
  bool diagnoseArgumentGenericRequirements(TypeChecker &TC, Expr *callExpr,
                                           Expr *fnExpr, Expr *argExpr,
                                           CalleeCandidateInfo &candidates,
                                           ArrayRef<Identifier> argLabels);

  /// Shared member-lookup failure diagnosis used by several visit methods.
  bool diagnoseMemberFailures(
      Expr *E, Expr *baseExpr, ConstraintKind lookupKind, DeclName memberName,
      FunctionRefKind funcRefKind, ConstraintLocator *locator,
      Optional<std::function<bool(ArrayRef<OverloadChoice>)>> callback = None,
      bool includeInaccessibleMembers = true);

  bool diagnoseTrailingClosureErrors(ApplyExpr *expr);

  bool diagnoseClosureExpr(ClosureExpr *closureExpr, Type contextualType,
                           std::function<bool(Type, Type)> resultTypeProcessor);

  bool diagnoseSubscriptErrors(SubscriptExpr *SE, bool performingSet);

  // Expression-kind-specific diagnosis entry points (ASTVisitor hooks).
  bool visitExpr(Expr *E);
  bool visitIdentityExpr(IdentityExpr *E);
  bool visitTryExpr(TryExpr *E);
  bool visitTupleExpr(TupleExpr *E);

  bool visitUnresolvedMemberExpr(UnresolvedMemberExpr *E);
  bool visitUnresolvedDotExpr(UnresolvedDotExpr *UDE);
  bool visitArrayExpr(ArrayExpr *E);
  bool visitDictionaryExpr(DictionaryExpr *E);
  bool visitObjectLiteralExpr(ObjectLiteralExpr *E);

  bool visitForceValueExpr(ForceValueExpr *FVE);
  bool visitBindOptionalExpr(BindOptionalExpr *BOE);

  bool visitSubscriptExpr(SubscriptExpr *SE);
  bool visitApplyExpr(ApplyExpr *AE);
  bool visitAssignExpr(AssignExpr *AE);
  bool visitInOutExpr(InOutExpr *IOE);
  bool visitCoerceExpr(CoerceExpr *CE);
  bool visitIfExpr(IfExpr *IE);
  bool visitRebindSelfInConstructorExpr(RebindSelfInConstructorExpr *E);
  bool visitCaptureListExpr(CaptureListExpr *CLE);
  bool visitClosureExpr(ClosureExpr *CE);
  bool visitKeyPathExpr(KeyPathExpr *KPE);
};
} // end anonymous namespace
/// Return true if the given constraint is a member-lookup constraint.
static bool isMemberConstraint(Constraint *C) {
  auto classification = C->getClassification();
  return classification == ConstraintClassification::Member;
}
/// Return true if the given constraint binds an overload choice, either
/// directly or as a disjunction whose first element is an overload binding.
static bool isOverloadConstraint(Constraint *C) {
  switch (C->getKind()) {
  case ConstraintKind::BindOverload:
    return true;
  case ConstraintKind::Disjunction: {
    auto *first = C->getNestedConstraints().front();
    return first->getKind() == ConstraintKind::BindOverload;
  }
  default:
    return false;
  }
}
/// Return true if the given constraint relates two types, i.e. it is a
/// conversion or requirement constraint.
static bool isConversionConstraint(const Constraint *C) {
  auto classification = C->getClassification();
  return classification == ConstraintClassification::Relational;
}
/// Attempt to diagnose a failure without taking into account the specific
/// kind of expression that could not be type checked.
bool FailureDiagnosis::diagnoseConstraintFailure() {
  // This is the priority order in which we handle constraints. Things earlier
  // in the list are considered to have higher specificity (and thus, higher
  // priority) than things lower in the list.
  enum ConstraintRanking {
    CR_MemberConstraint,
    CR_ConversionConstraint,
    CR_OverloadConstraint,
    CR_OtherConstraint
  };

  // Start out by classifying all the constraints.
  typedef std::pair<Constraint*, ConstraintRanking> RCElt;
  std::vector<RCElt> rankedConstraints;

  // This is a predicate that classifies constraints according to our
  // priorities. Declared as std::function (rather than a plain lambda/auto)
  // so that it can call itself recursively for disjunctions below.
  std::function<void (Constraint*)> classifyConstraint = [&](Constraint *C) {
    if (isMemberConstraint(C))
      return rankedConstraints.push_back({C, CR_MemberConstraint});
    if (isOverloadConstraint(C))
      return rankedConstraints.push_back({C, CR_OverloadConstraint});
    if (isConversionConstraint(C))
      return rankedConstraints.push_back({C, CR_ConversionConstraint});

    // We occasionally end up with disjunction constraints containing an
    // original constraint along with one considered with a fix. If we find
    // this situation, add the original one to our list for diagnosis.
    if (C->getKind() == ConstraintKind::Disjunction) {
      Constraint *Orig = nullptr;
      bool AllOthersHaveFixes = true;
      for (auto DC : C->getNestedConstraints()) {
        // If this is a constraint inside of the disjunction with a fix, ignore
        // it.
        if (DC->getFix())
          continue;

        // If we already found a candidate without a fix, we can't do this.
        if (Orig) {
          AllOthersHaveFixes = false;
          break;
        }

        // Remember this as the exemplar to use.
        Orig = DC;
      }

      if (Orig && AllOthersHaveFixes)
        return classifyConstraint(Orig);

      // If we got all the way down to a truly ambiguous disjunction constraint
      // with a conversion in it, the problem could be that none of the options
      // in the disjunction worked.
      //
      // We don't have a lot of great options here, so (if all else fails),
      // we'll attempt to diagnose the issue as though the first option was the
      // problem.
      rankedConstraints.push_back({
        C->getNestedConstraints()[0],
        CR_OtherConstraint
      });
      return;
    }

    return rankedConstraints.push_back({C, CR_OtherConstraint});
  };

  // Look at the failed constraint and the general constraint list. Processing
  // the failed constraint first slightly biases it in the ranking ahead of
  // other failed constraints at the same level.
  if (CS.failedConstraint)
    classifyConstraint(CS.failedConstraint);
  for (auto &C : CS.getConstraints())
    classifyConstraint(&C);

  // Okay, now that we've classified all the constraints, sort them by their
  // priority and privilege the favored constraints.  A stable sort preserves
  // the classification order (failed constraint first) among equals.
  std::stable_sort(rankedConstraints.begin(), rankedConstraints.end(),
                   [&] (RCElt LHS, RCElt RHS) {
                     // Rank things by their kind as the highest priority.
                     if (LHS.second < RHS.second)
                       return true;
                     if (LHS.second > RHS.second)
                       return false;
                     // Next priority is favored constraints.
                     if (LHS.first->isFavored() != RHS.first->isFavored())
                       return LHS.first->isFavored();
                     return false;
                   });

  // Now that we have a sorted precedence of constraints to diagnose, charge
  // through them.
  for (auto elt : rankedConstraints) {
    auto C = elt.first;
    if (isMemberConstraint(C) && diagnoseGeneralMemberFailure(C))
      return true;

    if (isConversionConstraint(C) && diagnoseGeneralConversionFailure(C))
      return true;

    if (isOverloadConstraint(C) && diagnoseGeneralOverloadFailure(C))
      return true;

    // TODO: There can be constraints that aren't handled here! When this
    // happens, we end up diagnosing them as ambiguities that don't make sense.
    // This isn't as bad as it seems though, because most of these will be
    // diagnosed by expr diagnostics.
  }

  // Otherwise, all the constraints look ok, diagnose this as an ambiguous
  // expression.
  return false;
}
/// Produce a diagnostic for a general member-lookup failure, irrespective of
/// the exact expression kind.
bool FailureDiagnosis::diagnoseGeneralMemberFailure(Constraint *constraint) {
  assert(isMemberConstraint(constraint));

  // Resolve the constraint's locator down to the base expression of the
  // member reference and the source range of the member name. For "x.y" this
  // yields the expr for "x" and the range of "y".
  Expr *memberBase = expr;
  SourceRange memberRange = memberBase->getSourceRange();
  auto *locator = constraint->getLocator();
  if (locator) {
    locator = simplifyLocator(CS, locator, memberRange);
    if (auto *resolvedAnchor = locator->getAnchor())
      memberBase = resolvedAnchor;
  }

  // If the simplified locator still carries path elements, we can't point the
  // diagnostic at a concrete expression, so bail out.
  // TODO: This should only ignore *unresolved* archetypes; resolved ones
  // could be diagnosed.
  if (locator && !locator->getPath().empty())
    return false;

  return diagnoseMemberFailures(expr, memberBase, constraint->getKind(),
                                constraint->getMember(),
                                constraint->getFunctionRefKind(), locator);
}
/// Diagnose the lookup of a static member or enum element as an instance
/// member, emitting a tailored error and, where possible, a fix-it that
/// inserts '.' or a full type qualifier.
void FailureDiagnosis::
diagnoseTypeMemberOnInstanceLookup(Type baseObjTy,
                                   Expr *baseExpr,
                                   DeclName memberName,
                                   DeclNameLoc nameLoc,
                                   ValueDecl *member,
                                   SourceLoc loc) {
  SourceRange baseRange = baseExpr ? baseExpr->getSourceRange() : SourceRange();

  Optional<InFlightDiagnostic> Diag;

  // If the base of the lookup is a protocol metatype, suggest
  // to replace the metatype with 'Self'; otherwise emit an
  // error saying the lookup cannot be on a protocol metatype.
  if (auto metatypeTy = baseObjTy->getAs<MetatypeType>()) {
    assert(metatypeTy->getInstanceType()->isExistentialType());

    // Give a customized message if we're accessing a member type
    // of a protocol -- otherwise a diagnostic talking about
    // static members doesn't make a whole lot of sense
    if (auto TAD = dyn_cast<TypeAliasDecl>(member)) {
      Diag.emplace(diagnose(loc,
                            diag::typealias_outside_of_protocol,
                            TAD->getName()));
    } else if (auto ATD = dyn_cast<AssociatedTypeDecl>(member)) {
      Diag.emplace(diagnose(loc,
                            diag::assoc_type_outside_of_protocol,
                            ATD->getName()));
    } else if (isa<ConstructorDecl>(member)) {
      Diag.emplace(diagnose(loc,
                            diag::construct_protocol_by_name,
                            metatypeTy->getInstanceType()));
    } else {
      Diag.emplace(diagnose(loc,
                          diag::could_not_use_type_member_on_protocol_metatype,
                            baseObjTy, memberName));
    }

    Diag->highlight(baseRange).highlight(nameLoc.getSourceRange());

    // See through function decl context
    if (auto parent = CS.DC->getInnermostTypeContext()) {
      // If we are in a protocol extension of 'Proto' and we see
      // 'Proto.static', suggest 'Self.static'
      if (auto extensionContext = parent->getAsProtocolExtensionContext()) {
        if (extensionContext->getDeclaredType()->isEqual(
              metatypeTy->getInstanceType())) {
          Diag->fixItReplace(baseRange, "Self");
        }
      }
    }

    return;
  }

  if (isa<EnumElementDecl>(member))
    Diag.emplace(diagnose(loc, diag::could_not_use_enum_element_on_instance,
                          memberName));
  else
    Diag.emplace(diagnose(loc, diag::could_not_use_type_member_on_instance,
                          baseObjTy, memberName));

  Diag->highlight(nameLoc.getSourceRange());

  // No fix-it if the lookup was qualified
  if (baseExpr && !baseExpr->isImplicit())
    return;

  // Determine the contextual type of the expression, walking up through any
  // enclosing (base) constraint systems.
  Type contextualType;
  for (auto iterateCS = &CS; contextualType.isNull() && iterateCS;
       iterateCS = iterateCS->baseCS) {
    contextualType = iterateCS->getContextualType();
  }

  // Try to provide a fix-it that only contains a '.'
  if (contextualType) {
    if (baseObjTy->isEqual(contextualType)) {
      Diag->fixItInsert(loc, ".");
      return;
    }
  }

  // Check if the expression is the matching operator ~=, most often used in
  // case statements. If so, try to provide a single dot fix-it
  const Expr *contextualTypeNode = nullptr;
  ConstraintSystem *lastCS = nullptr;
  for (auto iterateCS = &CS; iterateCS; iterateCS = iterateCS->baseCS) {
    lastCS = iterateCS;
    contextualTypeNode = iterateCS->getContextualTypeNode();
  }

  // The '~=' operator is an overloaded decl ref inside a binaryExpr
  if (auto binaryExpr = dyn_cast<BinaryExpr>(contextualTypeNode)) {
    if (auto overloadedFn
          = dyn_cast<OverloadedDeclRefExpr>(binaryExpr->getFn())) {
      if (overloadedFn->getDecls().size() > 0) {
        // Fetch any declaration to check if the name is '~='
        ValueDecl *decl0 = overloadedFn->getDecls()[0];

        if (decl0->getBaseName() == decl0->getASTContext().Id_MatchOperator) {
          assert(binaryExpr->getArg()->getElements().size() == 2);

          // If the rhs of '~=' is the enum type, a single dot suffixes
          // since the type can be inferred
          Type secondArgType =
            lastCS->getType(binaryExpr->getArg()->getElement(1));
          if (secondArgType->isEqual(baseObjTy)) {
            Diag->fixItInsert(loc, ".");
            return;
          }
        }
      }
    }
  }

  // Fall back to a fix-it with a full type qualifier
  auto nominal =
      member->getDeclContext()
      ->getAsNominalTypeOrNominalTypeExtensionContext();
  SmallString<32> typeName;
  llvm::raw_svector_ostream typeNameStream(typeName);
  typeNameStream << nominal->getSelfInterfaceType() << ".";

  Diag->fixItInsert(loc, typeNameStream.str());
  return;
}
/// When the user refers to an enum case with the wrong member name, try to
/// find an enum element whose name differs from the given one only in
/// convention, i.e. their lowercase counterparts are identical.
/// Returns a valid DeclName when exactly one such candidate exists; an
/// invalid DeclName otherwise.
static DeclName
findCorrectEnumCaseName(Type Ty, LookupResult &Result,
                        DeclName memberName) {
  // Only simple names on (possibly generic) enum types are considered.
  if (!memberName.isSimpleName())
    return DeclName();
  if (!Ty->is<EnumType>() && !Ty->is<BoundGenericEnumType>())
    return DeclName();

  // Collect every enum element whose name matches the written spelling
  // case-insensitively.
  llvm::SmallVector<DeclName, 4> matches;
  for (auto &entry : Result) {
    auto *candidateDecl = entry.getValueDecl();
    if (!isa<EnumElementDecl>(candidateDecl))
      continue;
    DeclName candidateName = candidateDecl->getFullName();
    if (!candidateName.isSimpleName())
      continue;
    if (candidateName.getBaseIdentifier().str().equals_lower(
            memberName.getBaseIdentifier().str()))
      matches.push_back(candidateName);
  }

  // The suggestion is only trustworthy when it is unique.
  return matches.size() == 1 ? matches.front() : DeclName();
}
/// Given a result of name lookup that had no viable results, diagnose the
/// unviable ones.
///
/// Dispatches on the lookup outcome: if there were no candidates at all,
/// emit a "no such member" diagnostic (with typo correction where possible);
/// otherwise, if all unviable candidates share the same rejection reason,
/// emit a reason-specific tailored diagnostic.
void FailureDiagnosis::
diagnoseUnviableLookupResults(MemberLookupResult &result, Type baseObjTy,
                              Expr *baseExpr,
                              DeclName memberName, DeclNameLoc nameLoc,
                              SourceLoc loc) {
  SourceRange baseRange = baseExpr ? baseExpr->getSourceRange() : SourceRange();

  // If we found no results at all, mention that fact.
  if (result.UnviableCandidates.empty()) {
    LookupResult correctionResults;
    // Deferred typo correction: only some branches below want it, and it is
    // not free, so it runs on demand.
    auto tryTypoCorrection = [&] {
      CS.TC.performTypoCorrection(CS.DC, DeclRefKind::Ordinary, baseObjTy,
                                  memberName, nameLoc.getBaseNameLoc(),
                                  defaultMemberLookupOptions,
                                  correctionResults);
    };

    // TODO: This should handle tuple member lookups, like x.1231 as well.
    if (memberName.getBaseName().getKind() == DeclBaseName::Kind::Subscript) {
      diagnose(loc, diag::type_not_subscriptable, baseObjTy)
        .highlight(baseRange);
    } else if (memberName.getBaseName() == "deinit") {
      // Specialised diagnostic if trying to access deinitialisers.
      diagnose(loc, diag::destructor_not_accessible).highlight(baseRange);
    } else if (auto metatypeTy = baseObjTy->getAs<MetatypeType>()) {
      // Member lookup on a metatype: prefer an enum-case-specific correction
      // (e.g. a misspelled case name) over the generic message.
      auto instanceTy = metatypeTy->getInstanceType();
      tryTypoCorrection();
      if (DeclName rightName = findCorrectEnumCaseName(instanceTy,
                                                       correctionResults,
                                                       memberName)) {
        diagnose(loc, diag::could_not_find_enum_case, instanceTy,
                 memberName, rightName)
          .fixItReplace(nameLoc.getBaseNameLoc(),
                        rightName.getBaseIdentifier().str());
        return;
      }
      diagnose(loc, diag::could_not_find_type_member, instanceTy, memberName)
        .highlight(baseRange).highlight(nameLoc.getSourceRange());
    } else if (auto moduleTy = baseObjTy->getAs<ModuleType>()) {
      diagnose(baseExpr->getLoc(), diag::no_member_of_module,
               moduleTy->getModule()->getName(), memberName)
        .highlight(baseRange)
        .highlight(nameLoc.getSourceRange());
      return;
    } else {
      diagnose(loc, diag::could_not_find_value_member,
               baseObjTy, memberName)
        .highlight(baseRange).highlight(nameLoc.getSourceRange());
      tryTypoCorrection();

      // Check for a few common cases that can cause missing members.
      if (baseObjTy->is<EnumType>() && memberName.isSimpleName("rawValue")) {
        // 'rawValue' on an enum with no raw type: point at the enum decl
        // itself. Note: this 'loc' deliberately shadows the parameter.
        auto loc = baseObjTy->castTo<EnumType>()->getDecl()->getNameLoc();
        if (loc.isValid()) {
          diagnose(loc, diag::did_you_mean_raw_type);
          return; // Always prefer this over typo corrections.
        }
      } else if (baseObjTy->isAny()) {
        // Members can't be looked up on 'Any'; suggest casting to AnyObject.
        diagnose(loc, diag::any_as_anyobject_fixit)
          .fixItInsert(baseExpr->getStartLoc(), "(")
          .fixItInsertAfter(baseExpr->getEndLoc(), " as AnyObject)");
        return;
      }
    }

    // Note all the correction candidates.
    for (auto &correction : correctionResults) {
      CS.TC.noteTypoCorrection(memberName, nameLoc,
                               correction.getValueDecl());
    }

    // TODO: recover?
    return;
  }

  // Otherwise, we have at least one (and potentially many) viable candidates
  // sort them out. If all of the candidates have the same problem (commonly
  // because there is exactly one candidate!) diagnose this.
  bool sameProblem = true;
  auto firstProblem = result.UnviableCandidates[0].second;
  ValueDecl *member = nullptr;
  for (auto cand : result.UnviableCandidates) {
    if (member == nullptr)
      member = cand.first.getDecl();
    sameProblem &= cand.second == firstProblem;
  }

  // Strip off metatype-ness so diagnostics talk about the instance type.
  auto instanceTy = baseObjTy;
  if (auto *MTT = instanceTy->getAs<AnyMetatypeType>())
    instanceTy = MTT->getInstanceType();

  if (sameProblem) {
    switch (firstProblem) {
    case MemberLookupResult::UR_LabelMismatch:
      // No tailored diagnostic; fall through to the generic message below.
      break;
    case MemberLookupResult::UR_UnavailableInExistential:
      diagnose(loc, diag::could_not_use_member_on_existential,
               instanceTy, memberName)
        .highlight(baseRange).highlight(nameLoc.getSourceRange());
      return;
    case MemberLookupResult::UR_InstanceMemberOnType: {
      // If the base is an implicit self type reference, and we're in a
      // an initializer, then the user wrote something like:
      //
      //   class Foo { let x = 1, y = x }
      //
      // which runs in type context, not instance context, or
      //
      //   class Bar {
      //     let otherwise = 1              // instance member
      //     var x: Int
      //     func init(x: Int =otherwise) { // default parameter
      //       self.x = x
      //     }
      //   }
      //
      // in which an instance member is used as a default value for a
      // parameter.
      //
      // Produce a tailored diagnostic for these cases since this
      // comes up and is otherwise non-obvious what is going on.
      if (baseExpr && baseExpr->isImplicit() && isa<Initializer>(CS.DC)) {
        auto *TypeDC = CS.DC->getParent();
        bool propertyInitializer = true;
        // If the parent context is not a type context, we expect it
        // to be a defaulted parameter in a function declaration.
        if (!TypeDC->isTypeContext()) {
          assert(TypeDC->getContextKind() ==
                     DeclContextKind::AbstractFunctionDecl &&
                 "Expected function decl context for initializer!");
          TypeDC = TypeDC->getParent();
          propertyInitializer = false;
        }
        assert(TypeDC->isTypeContext() && "Expected type decl context!");
        if (TypeDC->getAsNominalTypeOrNominalTypeExtensionContext() ==
            instanceTy->getAnyNominal()) {
          if (propertyInitializer)
            CS.TC.diagnose(nameLoc, diag::instance_member_in_initializer,
                           memberName);
          else
            CS.TC.diagnose(nameLoc, diag::instance_member_in_default_parameter,
                           memberName);
          return;
        }
      }

      // Check whether the instance member is declared on parent context and if so
      // provide more specialized message.
      auto memberTypeContext = member->getDeclContext()->getInnermostTypeContext();
      auto currentTypeContext = CS.DC->getInnermostTypeContext();
      if (memberTypeContext && currentTypeContext &&
          memberTypeContext->getSemanticDepth() <
          currentTypeContext->getSemanticDepth()) {
        diagnose(loc, diag::could_not_use_instance_member_on_type,
                 currentTypeContext->getDeclaredInterfaceType(), memberName,
                 memberTypeContext->getDeclaredInterfaceType(),
                 true)
          .highlight(baseRange).highlight(nameLoc.getSourceRange());
      } else {
        diagnose(loc, diag::could_not_use_instance_member_on_type,
                 instanceTy, memberName,
                 instanceTy,
                 false)
          .highlight(baseRange).highlight(nameLoc.getSourceRange());
      }
      return;
    }
    case MemberLookupResult::UR_TypeMemberOnInstance:
      diagnoseTypeMemberOnInstanceLookup(baseObjTy, baseExpr,
                                         memberName, nameLoc,
                                         member, loc);
      return;
    case MemberLookupResult::UR_MutatingMemberOnRValue:
    case MemberLookupResult::UR_MutatingGetterOnRValue: {
      auto diagIDsubelt = diag::cannot_pass_rvalue_mutating_subelement;
      auto diagIDmember = diag::cannot_pass_rvalue_mutating;
      if (firstProblem == MemberLookupResult::UR_MutatingGetterOnRValue) {
        diagIDsubelt = diag::cannot_pass_rvalue_mutating_getter_subelement;
        diagIDmember = diag::cannot_pass_rvalue_mutating_getter;
      }
      assert(baseExpr && "Cannot have a mutation failure without a base");
      diagnoseSubElementFailure(baseExpr, loc, CS, diagIDsubelt, diagIDmember);
      return;
    }
    case MemberLookupResult::UR_Inaccessible: {
      auto decl = result.UnviableCandidates[0].first.getDecl();
      // FIXME: What if the unviable candidates have different levels of access?
      diagnose(nameLoc, diag::candidate_inaccessible, decl->getBaseName(),
               decl->getFormalAccess());
      for (auto cand : result.UnviableCandidates)
        diagnose(cand.first.getDecl(), diag::decl_declared_here, memberName);
      return;
    }
    }
  }

  // FIXME: Emit candidate set....

  // Otherwise, we don't have a specific issue to diagnose. Just say the vague
  // 'cannot use' diagnostic.
  if (!baseObjTy->isEqual(instanceTy))
    diagnose(loc, diag::could_not_use_type_member,
             instanceTy, memberName)
      .highlight(baseRange).highlight(nameLoc.getSourceRange());
  else
    diagnose(loc, diag::could_not_use_value_member,
             baseObjTy, memberName)
      .highlight(baseRange).highlight(nameLoc.getSourceRange());
  return;
}
// In the absence of a better conversion constraint failure, point out the
// inability to find an appropriate overload.
//
// Returns true if a diagnostic was emitted; false if this constraint is not
// the one to diagnose (e.g. the ambiguity belongs to an enclosing apply).
bool FailureDiagnosis::diagnoseGeneralOverloadFailure(Constraint *constraint) {
  Constraint *bindOverload = constraint;
  if (constraint->getKind() == ConstraintKind::Disjunction)
    bindOverload = constraint->getNestedConstraints().front();

  auto overloadChoice = bindOverload->getOverloadChoice();
  auto overloadName = overloadChoice.getName();

  // Get the referenced expression from the failed constraint.
  auto anchor = expr;
  if (auto locator = bindOverload->getLocator()) {
    anchor = simplifyLocatorToAnchor(CS, locator);
    if (!anchor)
      return false;
  }

  // The anchor for the constraint is almost always an OverloadedDeclRefExpr or
  // UnresolvedDotExpr. Look at the parent node in the AST to find the Apply to
  // give a better diagnostic.
  //
  // NOTE(review): getParentMap() appears to rebuild the parent map by walking
  // the whole tree on every call, so it is computed once here instead of being
  // re-queried on each step of the loop below — confirm it has no required
  // per-call side effects.
  auto parentMap = expr->getParentMap();
  Expr *call = parentMap[anchor];
  // We look through some simple things that get in between the overload set
  // and the apply.
  while (call &&
         (isa<IdentityExpr>(call) ||
          isa<TryExpr>(call) || isa<ForceTryExpr>(call))) {
    call = parentMap[call];
  }

  // FIXME: This is only needed because binops don't respect contextual types.
  if (call && isa<ApplyExpr>(call))
    return false;

  // This happens, for example, with ambiguous OverloadedDeclRefExprs. We should
  // just implement visitOverloadedDeclRefExprs and nuke this.

  // If we couldn't resolve an argument, then produce a generic "ambiguity"
  // diagnostic.
  diagnose(anchor->getLoc(), diag::ambiguous_member_overload_set,
           overloadName)
    .highlight(anchor->getSourceRange());

  // List every decl candidate of the disjunction as a note.
  if (constraint->getKind() == ConstraintKind::Disjunction) {
    for (auto elt : constraint->getNestedConstraints()) {
      if (elt->getKind() != ConstraintKind::BindOverload) continue;
      if (!elt->getOverloadChoice().isDecl()) continue;
      auto candidate = elt->getOverloadChoice().getDecl();
      diagnose(candidate, diag::found_candidate);
    }
  }

  return true;
}
/// Produce a specialized diagnostic if this is an invalid conversion to Bool.
///
/// Handles two common mistakes: writing "=" where "==" was intended, and
/// using an optional value directly where a Bool (i.e. a nil comparison)
/// was expected. Returns true if a diagnostic was emitted.
bool FailureDiagnosis::diagnoseConversionToBool(Expr *expr, Type exprType) {
  // Case 1: an assignment used as a condition — the user probably meant "==".
  if (auto *assign = dyn_cast<AssignExpr>(expr->getValueProvidingExpr())) {
    diagnose(assign->getEqualLoc(), diag::use_of_equal_instead_of_equality)
        .fixItReplace(assign->getEqualLoc(), "==")
        .highlight(assign->getDest()->getLoc())
        .highlight(assign->getSrc()->getLoc());
    return true;
  }

  // Case 2: an optional where a Bool is expected — suggest "!= nil".
  // TODO: It would be nice to handle "!x" --> x == false, but we have no way
  // to get to the parent expr at present.
  if (!exprType->getAnyOptionalObjectType())
    return false;

  StringRef prefix = "((";
  StringRef suffix = ") != nil)";

  // The inner parentheses are strictly needed only when 'expr' contains
  // something of lower precedence than '!=', but keeping them on anything
  // non-trivial reads better; drop them only for postfix-appendable exprs.
  if (expr->canAppendPostfixExpression()) {
    prefix = prefix.drop_back();
    suffix = suffix.drop_front();
  }

  // FIXME: The outer parentheses may be superfluous too.
  diagnose(expr->getLoc(), diag::optional_used_as_boolean, exprType)
      .fixItInsert(expr->getStartLoc(), prefix)
      .fixItInsertAfter(expr->getEndLoc(), suffix);
  return true;
}
/// Diagnose a failed conversion constraint with no more specific cause.
///
/// Simplifies the constraint's two types, tries a series of tailored
/// diagnostics (function-type mismatches, callee result mismatches, tuple
/// arity/label mismatches, Bool conversions, protocol conformance, the
/// legacy 'boolValue' migration case) and falls back to a generic
/// "types not convertible" message. Returns true if anything was emitted.
bool FailureDiagnosis::diagnoseGeneralConversionFailure(Constraint *constraint){
  // Resolve the expression the constraint is anchored on, noting whether the
  // locator pointed at a real sub-expression (vs. just the root anchor).
  auto anchor = expr;
  bool resolvedAnchorToExpr = false;

  if (auto locator = constraint->getLocator()) {
    anchor = simplifyLocatorToAnchor(CS, locator);
    if (anchor)
      resolvedAnchorToExpr = true;
    else
      anchor = locator->getAnchor();
  }

  Type fromType =
    CS.simplifyType(constraint->getFirstType())->getWithoutImmediateLabel();

  // If the source type still has type variables, re-typecheck the anchor
  // independently to pin it down to something concrete.
  if (fromType->hasTypeVariable() && resolvedAnchorToExpr) {
    TCCOptions options;

    // If we know we're removing a contextual constraint, then we can force a
    // type check of the subexpr because we know we're eliminating that
    // constraint.
    if (CS.getContextualTypePurpose() != CTP_Unused)
      options |= TCC_ForceRecheck;

    auto sub = typeCheckArbitrarySubExprIndependently(anchor, options);
    if (!sub) return true;
    fromType = CS.getType(sub);
  }

  // Bail on constraints that don't relate two types.
  if (constraint->getKind() == ConstraintKind::Disjunction
      || constraint->getKind() == ConstraintKind::BindOverload)
    return false;

  fromType = fromType->getRValueType();
  auto toType =
    CS.simplifyType(constraint->getSecondType())->getWithoutImmediateLabel();

  // Try to simplify irrelevant details of function types. For example, if
  // someone passes a "() -> Float" function to a "() throws -> Int"
  // parameter, then uttering the "throws" may confuse them into thinking that
  // that is the problem, even though there is a clear subtype relation.
  if (auto srcFT = fromType->getAs<FunctionType>())
    if (auto destFT = toType->getAs<FunctionType>()) {
      auto destExtInfo = destFT->getExtInfo();

      if (!srcFT->isNoEscape()) destExtInfo = destExtInfo.withNoEscape(false);
      if (!srcFT->throws()) destExtInfo = destExtInfo.withThrows(false);
      if (destExtInfo != destFT->getExtInfo())
        toType = FunctionType::get(destFT->getInput(),
                                   destFT->getResult(), destExtInfo);

      // If this is a function conversion that discards throwability or
      // noescape, emit a specific diagnostic about that.
      if (srcFT->throws() && !destFT->throws()) {
        diagnose(expr->getLoc(), diag::throws_functiontype_mismatch,
                 fromType, toType)
          .highlight(expr->getSourceRange());
        return true;
      }

      if (srcFT->isNoEscape() && !destFT->isNoEscape()) {
        diagnose(expr->getLoc(), diag::noescape_functiontype_mismatch,
                 fromType, toType)
          .highlight(expr->getSourceRange());
        return true;
      }
    }

  // If this is a callee that mismatches an expected return type, we can emit a
  // very nice and specific error. In this case, what we'll generally see is
  // a failed conversion constraint of "A -> B" to "_ -> C", where the error is
  // that B isn't convertible to C.
  if (CS.getContextualTypePurpose() == CTP_CalleeResult) {
    auto destFT = toType->getAs<FunctionType>();
    auto srcFT = fromType->getAs<FunctionType>();
    if (destFT && srcFT && !isUnresolvedOrTypeVarType(srcFT->getResult())) {
      // Otherwise, the error is that the result types mismatch.
      diagnose(expr->getLoc(), diag::invalid_callee_result_type,
               srcFT->getResult(), destFT->getResult())
        .highlight(expr->getSourceRange());
      return true;
    }
  }

  // If simplification has turned this into the same types, then this isn't the
  // broken constraint that we're looking for.
  if (fromType->isEqual(toType) &&
      constraint->getKind() != ConstraintKind::ConformsTo &&
      constraint->getKind() != ConstraintKind::LiteralConformsTo)
    return false;

  // If we have two tuples with mismatching types, produce a tailored
  // diagnostic.
  if (auto fromTT = fromType->getAs<TupleType>())
    if (auto toTT = toType->getAs<TupleType>()) {
      if (fromTT->getNumElements() != toTT->getNumElements()) {
        diagnose(anchor->getLoc(), diag::tuple_types_not_convertible_nelts,
                 fromTT, toTT)
          .highlight(anchor->getSourceRange());
        return true;
      }

      // Same arity: build a label-only copy of the source tuple (element
      // types replaced with TheUnresolvedType) and try to shuffle it onto
      // the destination tuple's labels.
      SmallVector<TupleTypeElt, 4> FromElts;
      auto voidTy = CS.getASTContext().TheUnresolvedType;

      for (unsigned i = 0, e = fromTT->getNumElements(); i != e; ++i)
        FromElts.push_back({ voidTy, fromTT->getElement(i).getName() });
      auto TEType = TupleType::get(FromElts, CS.getASTContext());

      SmallVector<int, 4> sources;
      SmallVector<unsigned, 4> variadicArgs;

      // If the shuffle conversion is invalid (e.g. incorrect element labels),
      // then we have a type error.
      if (computeTupleShuffle(TEType->castTo<TupleType>()->getElements(),
                              toTT->getElements(), sources, variadicArgs)) {
        diagnose(anchor->getLoc(), diag::tuple_types_not_convertible,
                 fromTT, toTT)
          .highlight(anchor->getSourceRange());
        return true;
      }
    }

  // If the second type is a type variable, the expression itself is
  // ambiguous. Bail out so the general ambiguity diagnosing logic can handle
  // it.
  if (fromType->hasUnresolvedType() || fromType->hasTypeVariable() ||
      toType->hasUnresolvedType() || toType->hasTypeVariable() ||
      // FIXME: Why reject unbound generic types here?
      fromType->is<UnboundGenericType>())
    return false;

  // Check for various issues converting to Bool.
  if (toType->isBool() && diagnoseConversionToBool(anchor, fromType))
    return true;

  if (auto PT = toType->getAs<ProtocolType>()) {
    if (isa<NilLiteralExpr>(expr->getValueProvidingExpr())) {
      diagnose(expr->getLoc(), diag::cannot_use_nil_with_this_type, toType)
        .highlight(expr->getSourceRange());
      return true;
    }

    // Emit a conformance error through conformsToProtocol.
    if (auto conformance = CS.TC.conformsToProtocol(
            fromType, PT->getDecl(), CS.DC, ConformanceCheckFlags::InExpression,
            expr->getLoc())) {
      if (conformance->isAbstract() ||
          !conformance->getConcrete()->isInvalid())
        return false;
    }

    return true;
  }

  // Due to migration reasons, types used to conform to BooleanType, which
  // contain a member var 'boolValue', now does not convert to Bool. This block
  // tries to add a specific diagnosis/fixit to explicitly invoke 'boolValue'.
  if (toType->isBool() &&
      fromType->mayHaveMembers()) {
    auto LookupResult = CS.TC.lookupMember(
        CS.DC, fromType, DeclName(CS.TC.Context.getIdentifier("boolValue")));
    if (!LookupResult.empty()) {
      if (isa<VarDecl>(LookupResult.begin()->getValueDecl())) {
        // Wrap in parens when the expression can't take a postfix ".member".
        if (anchor->canAppendPostfixExpression())
          diagnose(anchor->getLoc(), diag::types_not_convertible_use_bool_value,
                   fromType, toType).fixItInsertAfter(anchor->getEndLoc(),
                                                     ".boolValue");
        else
          diagnose(anchor->getLoc(), diag::types_not_convertible_use_bool_value,
                   fromType, toType).fixItInsert(anchor->getStartLoc(), "(").
            fixItInsertAfter(anchor->getEndLoc(), ").boolValue");
        return true;
      }
    }
  }

  // Generic fallback diagnostic.
  diagnose(anchor->getLoc(), diag::types_not_convertible,
           constraint->getKind() == ConstraintKind::Subtype,
           fromType, toType)
    .highlight(anchor->getSourceRange());

  // Check to see if this constraint came from a cast instruction. If so,
  // and if this conversion constraint is different than the types being cast,
  // produce a note that talks about the overall expression.
  //
  // TODO: Using parentMap would be more general, rather than requiring the
  // issue to be related to the root of the expr under study.
  if (auto ECE = dyn_cast<ExplicitCastExpr>(expr))
    if (constraint->getLocator() &&
        constraint->getLocator()->getAnchor() == ECE->getSubExpr()) {
      if (!toType->isEqual(ECE->getCastTypeLoc().getType()))
        diagnose(expr->getLoc(), diag::in_cast_expr_types,
                 CS.getType(ECE->getSubExpr())->getRValueType(),
                 ECE->getCastTypeLoc().getType()->getRValueType())
          .highlight(ECE->getSubExpr()->getSourceRange())
          .highlight(ECE->getCastTypeLoc().getSourceRange());
    }

  return true;
}
namespace {
  /// RAII helper that records all type information in an expression tree and
  /// erases it, so the tree can be re-type-checked from scratch.
  ///
  /// restore() puts the saved types back explicitly; if restore() is never
  /// called, the destructor re-installs a saved type only where the node is
  /// still type-less, so a successful re-typecheck keeps its new types while
  /// untouched nodes regain their originals.
  class ExprTypeSaverAndEraser {
    llvm::DenseMap<Expr*, Type> ExprTypes;
    llvm::DenseMap<TypeLoc*, std::pair<Type, bool>> TypeLocTypes;
    llvm::DenseMap<Pattern*, Type> PatternTypes;
    llvm::DenseMap<ParamDecl*, Type> ParamDeclTypes;
    llvm::DenseMap<ParamDecl*, Type> ParamDeclInterfaceTypes;
    llvm::DenseMap<CollectionExpr*, Expr*> CollectionSemanticExprs;
    // Decls whose 'invalid' bit was cleared; re-derived from the interface
    // type on restore/destruction.
    llvm::DenseSet<ValueDecl*> PossiblyInvalidDecls;
    ExprTypeSaverAndEraser(const ExprTypeSaverAndEraser&) = delete;
    void operator=(const ExprTypeSaverAndEraser&) = delete;
  public:

    /// Walk \p E, saving and erasing type information as we go.
    ExprTypeSaverAndEraser(Expr *E) {
      struct TypeSaver : public ASTWalker {
        ExprTypeSaverAndEraser *TS;
        TypeSaver(ExprTypeSaverAndEraser *TS) : TS(TS) {}

        std::pair<bool, Expr *> walkToExprPre(Expr *expr) override {
          // Record the type before any early-outs below, so restore() can
          // always put it back.
          TS->ExprTypes[expr] = expr->getType();

          SWIFT_DEFER {
            assert((!expr->getType() || !expr->getType()->hasTypeVariable()
                    // FIXME: We shouldn't allow these, either.
                    || isa<LiteralExpr>(expr)) &&
                   "Type variable didn't get erased!");
          };

          // Preserve module expr type data to prevent further lookups.
          if (auto *declRef = dyn_cast<DeclRefExpr>(expr))
            if (isa<ModuleDecl>(declRef->getDecl()))
              return { false, expr };

          // Don't strip type info off OtherConstructorDeclRefExpr, because
          // CSGen doesn't know how to reconstruct it.
          if (isa<OtherConstructorDeclRefExpr>(expr))
            return { false, expr };

          // If a literal has a Builtin.Int or Builtin.FP type on it already,
          // then sema has already expanded out a call to
          //   Init.init(<builtinliteral>)
          // and we don't want it to make
          //   Init.init(Init.init(<builtinliteral>))
          // preserve the type info to prevent this from happening.
          if (isa<LiteralExpr>(expr) && !isa<InterpolatedStringLiteralExpr>(expr) &&
              !(expr->getType() && expr->getType()->hasError()))
            return { false, expr };

          // If a ClosureExpr's parameter list has types on the decls, then
          // remove them so that they'll get regenerated from the
          // associated TypeLocs or resynthesized as fresh typevars.
          if (auto *CE = dyn_cast<ClosureExpr>(expr))
            for (auto P : *CE->getParameters()) {
              if (P->hasType()) {
                TS->ParamDeclTypes[P] = P->getType();
                P->setType(Type());
              }
              if (P->hasInterfaceType()) {
                TS->ParamDeclInterfaceTypes[P] = P->getInterfaceType();
                P->setInterfaceType(Type());
              }
              TS->PossiblyInvalidDecls.insert(P);

              if (P->isInvalid())
                P->setInvalid(false);
            }

          // If we have a CollectionExpr with a type checked SemanticExpr,
          // remove it so we can recalculate a new semantic form.
          if (auto *CE = dyn_cast<CollectionExpr>(expr)) {
            if (auto SE = CE->getSemanticExpr()) {
              TS->CollectionSemanticExprs[CE] = SE;
              CE->setSemanticExpr(nullptr);
            }
          }

          expr->setType(nullptr);
          expr->clearLValueAccessKind();

          return { true, expr };
        }

        // If we find a TypeLoc (e.g. in an as? expr), save and erase it.
        bool walkToTypeLocPre(TypeLoc &TL) override {
          if (TL.getTypeRepr() && TL.getType()) {
            TS->TypeLocTypes[&TL] = { TL.getType(), TL.wasValidated() };
            TL.setType(Type(), /*was validated*/false);
          }
          return true;
        }

        std::pair<bool, Pattern*> walkToPatternPre(Pattern *P) override {
          if (P->hasType()) {
            TS->PatternTypes[P] = P->getType();
            P->setType(Type());
          }
          return { true, P };
        }

        // Don't walk into statements. This handles the BraceStmt in
        // non-single-expr closures, so we don't walk into their body.
        std::pair<bool, Stmt *> walkToStmtPre(Stmt *S) override {
          return { false, S };
        }
      };

      E->walk(TypeSaver(this));
    }

    /// Unconditionally re-install every saved type, then clear the maps so
    /// the destructor does no further (now redundant) work.
    void restore() {
      for (auto exprElt : ExprTypes)
        exprElt.first->setType(exprElt.second);

      for (auto typelocElt : TypeLocTypes)
        typelocElt.first->setType(typelocElt.second.first,
                                  typelocElt.second.second);

      for (auto patternElt : PatternTypes)
        patternElt.first->setType(patternElt.second);

      for (auto paramDeclElt : ParamDeclTypes) {
        assert(!paramDeclElt.first->isLet() || !paramDeclElt.second->is<InOutType>());
        paramDeclElt.first->setType(paramDeclElt.second->getInOutObjectType());
      }

      for (auto paramDeclIfaceElt : ParamDeclInterfaceTypes) {
        assert(!paramDeclIfaceElt.first->isLet() || !paramDeclIfaceElt.second->is<InOutType>());
        paramDeclIfaceElt.first->setInterfaceType(paramDeclIfaceElt.second->getInOutObjectType());
      }

      for (auto CSE : CollectionSemanticExprs)
        CSE.first->setSemanticExpr(CSE.second);

      if (!PossiblyInvalidDecls.empty())
        for (auto D : PossiblyInvalidDecls)
          if (D->hasInterfaceType())
            D->setInvalid(D->getInterfaceType()->hasError());

      // Done, don't do redundant work on destruction.
      ExprTypes.clear();
      TypeLocTypes.clear();
      PatternTypes.clear();
      PossiblyInvalidDecls.clear();
    }

    // On destruction, if a type got wiped out, reset it from null to its
    // original type. This is helpful because type checking a subexpression
    // can lead to replacing the nodes in that subexpression. However, the
    // failed ConstraintSystem still has locators pointing to the old nodes,
    // and if expr-specific diagnostics fail to turn up anything useful to say,
    // we go digging through failed constraints, and expect their locators to
    // still be meaningful.
    ~ExprTypeSaverAndEraser() {
      for (auto CSE : CollectionSemanticExprs)
        if (!CSE.first->getType())
          CSE.first->setSemanticExpr(CSE.second);

      for (auto exprElt : ExprTypes)
        if (!exprElt.first->getType())
          exprElt.first->setType(exprElt.second);

      for (auto typelocElt : TypeLocTypes)
        if (!typelocElt.first->getType())
          typelocElt.first->setType(typelocElt.second.first,
                                    typelocElt.second.second);

      for (auto patternElt : PatternTypes)
        if (!patternElt.first->hasType())
          patternElt.first->setType(patternElt.second);

      for (auto paramDeclElt : ParamDeclTypes)
        if (!paramDeclElt.first->hasType()) {
          paramDeclElt.first->setType(getParamBaseType(paramDeclElt));
        }

      for (auto paramDeclIfaceElt : ParamDeclInterfaceTypes)
        if (!paramDeclIfaceElt.first->hasInterfaceType()) {
          paramDeclIfaceElt.first->setInterfaceType(
              getParamBaseType(paramDeclIfaceElt));
        }

      if (!PossiblyInvalidDecls.empty())
        for (auto D : PossiblyInvalidDecls)
          if (D->hasInterfaceType())
            D->setInvalid(D->getInterfaceType()->hasError());
    }

  private:
    /// Strip a saved InOutType wrapper so the base type can be re-installed
    /// via VarDecl::set{Interface}Type.
    static Type getParamBaseType(std::pair<ParamDecl *, Type> &storedParam) {
      ParamDecl *param;
      Type storedType;

      std::tie(param, storedType) = storedParam;

      // FIXME: We are currently in process of removing `InOutType`
      //        so `VarDecl::get{Interface}Type` is going to wrap base
      //        type into `InOutType` if its flag indicates that it's
      //        an `inout` parameter declaration. But such type can't
      //        be restored directly using `VarDecl::set{Interface}Type`
      //        caller needs additional logic to extract base type.
      if (auto *IOT = storedType->getAs<InOutType>()) {
        assert(param->isInOut());
        return IOT->getObjectType();
      }

      return storedType;
    }
  };
} // end anonymous namespace
/// Erase an expression tree's open existentials after a re-typecheck operation.
///
/// This is done in the case of a typecheck failure, after we re-typecheck
/// partially-typechecked subexpressions in a context-free manner.
///
/// Removes OpenExistentialExpr nodes (substituting their opaque values with
/// the original existential base expression) and rewrites any remaining
/// opened-archetype types cached in \p CS back to their existential types.
static void eraseOpenedExistentials(Expr *&expr, ConstraintSystem &CS) {

  class ExistentialEraser : public ASTWalker {
    ConstraintSystem &CS;
    // Maps each OpaqueValueExpr to the existential base expression that
    // should replace it.
    llvm::SmallDenseMap<OpaqueValueExpr *, Expr *, 4> OpenExistentials;

  public:
    ExistentialEraser(ConstraintSystem &CS) : CS(CS) {}

    std::pair<bool, Expr *> walkToExprPre(Expr *expr) override {
      if (auto OOE = dyn_cast<OpenExistentialExpr>(expr)) {
        auto archetypeVal = OOE->getOpaqueValue();
        auto base = OOE->getExistentialValue();

        // Walk the base expression to ensure we erase any existentials within
        // it.
        base = base->walk(*this);

        bool inserted = OpenExistentials.insert({archetypeVal, base}).second;
        assert(inserted && "OpaqueValue appears multiple times?");
        (void)inserted;
        // Drop the OpenExistentialExpr node itself; continue into its body.
        return { true, OOE->getSubExpr() };
      }

      if (auto OVE = dyn_cast<OpaqueValueExpr>(expr)) {
        // Replace the opaque value with the recorded base expression.
        auto value = OpenExistentials.find(OVE);
        assert(value != OpenExistentials.end() &&
               "didn't see this OVE in a containing OpenExistentialExpr?");
        return { true, value->second };
      }

      // Handle collection upcasts specially so that we don't blow up on
      // their embedded OVEs.
      if (auto CDE = dyn_cast<CollectionUpcastConversionExpr>(expr)) {
        if (auto result = CDE->getSubExpr()->walk(*this)) {
          CDE->setSubExpr(result);
          return { false, CDE };
        } else {
          return { true, CDE };
        }
      }

      return { true, expr };
    }

    Expr *walkToExprPost(Expr *expr) override {
      if (!CS.hasType(expr))
        return expr;

      Type type = CS.getType(expr);
      if (!type->hasOpenedExistential())
        return expr;

      // Rewrite opened archetypes back to their existential types in the
      // cached type.
      type = type.transform([&](Type type) -> Type {
        if (auto archetype = type->getAs<ArchetypeType>())
          if (auto existentialType = archetype->getOpenedExistentialType())
            return existentialType;

        return type;
      });
      CS.setType(expr, type);

      return expr;
    }

    // Don't walk into statements. This handles the BraceStmt in
    // non-single-expr closures, so we don't walk into their body.
    std::pair<bool, Stmt *> walkToStmtPre(Stmt *S) override {
      return { false, S };
    }
  };

  expr = expr->walk(ExistentialEraser(CS));
}
/// Unless we've already done this, retypecheck the specified subexpression on
/// its own, without including any contextual constraints or parent expr
/// nodes. This is more likely to succeed than type checking the original
/// expression.
///
/// This can return a new expression (for e.g. when a UnresolvedDeclRef gets
/// resolved) and returns null when the subexpression fails to typecheck.
///
/// \param subExpr the subexpression to re-typecheck.
/// \param convertType optional contextual type to apply (validated first).
/// \param convertTypePurpose how \p convertType is being used.
/// \param options see TCCOptions (e.g. TCC_ForceRecheck, TCC_AllowLValue).
/// \param listener optional listener forwarded to typeCheckExpression.
/// \param allowFreeTypeVariables whether unresolved type variables may remain.
Expr *FailureDiagnosis::typeCheckChildIndependently(
    Expr *subExpr, Type convertType, ContextualTypePurpose convertTypePurpose,
    TCCOptions options, ExprTypeCheckListener *listener,
    bool allowFreeTypeVariables) {

  // If this sub-expression is currently being diagnosed, refuse to recheck the
  // expression (which may lead to infinite recursion). If the client is
  // telling us that it knows what it is doing, then believe it.
  if (!options.contains(TCC_ForceRecheck)) {
    if (CS.TC.isExprBeingDiagnosed(subExpr)) {
      auto exprAndCS = CS.TC.getExprBeingDiagnosed(subExpr);
      auto *savedExpr = exprAndCS.first;
      if (subExpr == savedExpr)
        return subExpr;

      auto *oldCS = exprAndCS.second;

      // The types on the result might have already been cached into
      // another CS, but likely not this one.
      if (oldCS != &CS)
        CS.transferExprTypes(oldCS, savedExpr);

      return savedExpr;
    }

    CS.TC.addExprForDiagnosis(subExpr, std::make_pair(subExpr, &CS));
  }

  // Validate contextual type before trying to use it.
  std::tie(convertType, convertTypePurpose) =
      validateContextualType(convertType, convertTypePurpose);

  // If we have no contextual type information and the subexpr is obviously a
  // overload set, don't recursively simplify this. The recursive solver will
  // sometimes pick one based on arbitrary ranking behavior (e.g. like
  // which is the most specialized) even then all the constraints are being
  // fulfilled by UnresolvedType, which doesn't tell us anything.
  if (convertTypePurpose == CTP_Unused &&
      (isa<OverloadedDeclRefExpr>(subExpr->getValueProvidingExpr()))) {
    return subExpr;
  }

  // Save any existing type data of the subexpr tree, and reset it to null in
  // prep for re-type-checking the tree. If things fail, we can revert the
  // types back to their original state.
  ExprTypeSaverAndEraser SavedTypeData(subExpr);

  // Store off the sub-expression, in case a new one is provided via the
  // type check operation.
  Expr *preCheckedExpr = subExpr;

  // Disable structural checks, because we know that the overall expression
  // has type constraint problems, and we don't want to know about any
  // syntactic issues in a well-typed subexpression (which might be because
  // the context is missing).
  TypeCheckExprOptions TCEOptions = TypeCheckExprFlags::DisableStructuralChecks;

  // Don't walk into non-single expression closure bodies, because
  // ExprTypeSaver and TypeNullifier skip them too.
  TCEOptions |= TypeCheckExprFlags::SkipMultiStmtClosures;

  // Claim that the result is discarded to preserve the lvalue type of
  // the expression.
  if (options.contains(TCC_AllowLValue))
    TCEOptions |= TypeCheckExprFlags::IsDiscarded;

  // If there is no contextual type available, tell typeCheckExpression that it
  // is ok to produce an ambiguous result, it can just fill in holes with
  // UnresolvedType and we'll deal with it.
  if ((!convertType || options.contains(TCC_AllowUnresolvedTypeVariables)) &&
      allowFreeTypeVariables)
    TCEOptions |= TypeCheckExprFlags::AllowUnresolvedTypeVariables;

  // If we're not passing down contextual type information this time, but the
  // original failure had type info that wasn't an optional type,
  // then set the flag to prefer fixits with force unwrapping.
  if (!convertType) {
    auto previousType = CS.getContextualType();
    if (previousType && previousType->getOptionalObjectType().isNull())
      TCEOptions |= TypeCheckExprFlags::PreferForceUnwrapToOptional;
  }

  // Ensure that the expression we're about to type-check doesn't have
  // anything that the type-checker doesn't expect to see. This can happen
  // because of repeated type-checking; the removal below, while independently
  // important, isn't itself sufficient because of AST mutation.
  eraseOpenedExistentials(subExpr, CS);

  auto resultTy = CS.TC.typeCheckExpression(
      subExpr, CS.DC, TypeLoc::withoutLoc(convertType), convertTypePurpose,
      TCEOptions, listener, &CS);

  CS.cacheExprTypes(subExpr);

  // This is a terrible hack to get around the fact that typeCheckExpression()
  // might change subExpr to point to a new OpenExistentialExpr. In that case,
  // since the caller passed subExpr by value here, they would be left
  // holding on to an expression containing open existential types but
  // no OpenExistentialExpr, which breaks invariants enforced by the
  // ASTChecker.
  eraseOpenedExistentials(subExpr, CS);

  // If recursive type checking failed, then an error was emitted. Return
  // null to indicate this to the caller.
  if (!resultTy)
    return nullptr;

  // If we type checked the result but failed to get a usable output from it,
  // just pretend as though nothing happened.
  if (resultTy->is<ErrorType>()) {
    subExpr = preCheckedExpr;
    if (subExpr->getType())
      CS.cacheType(subExpr);
    SavedTypeData.restore();
  }

  CS.TC.addExprForDiagnosis(preCheckedExpr, std::make_pair(subExpr, &CS));
  return subExpr;
}
/// This is the same as typeCheckChildIndependently, but works on an arbitrary
/// subexpression of the current node because it handles ClosureExpr parents
/// of the specified node.
Expr *FailureDiagnosis::
typeCheckArbitrarySubExprIndependently(Expr *subExpr, TCCOptions options) {
  // Type checking the root itself needs no parent handling.
  if (subExpr == expr)
    return typeCheckChildIndependently(subExpr, options);

  // Build a parent map over the whole tree under investigation.
  auto parentMap = expr->getParentMap();
  ClosureExpr *innermostClosure = nullptr;

  // Walk upward from the subexpression, handling each enclosing ClosureExpr.
  for (auto *node = parentMap[subExpr]; node; node = parentMap[node]) {
    auto *closure = dyn_cast<ClosureExpr>(node);
    if (!closure)
      continue;

    // Remember the innermost enclosing closure we'll be jumping into.
    if (!innermostClosure)
      innermostClosure = closure;

    // Closure parameters still typed as type variables (or errors) would be
    // visible to name lookup inside the subexpression and could leak in;
    // neutralize them to UnresolvedType as a safety measure.
    for (auto *param : *closure->getParameters()) {
      if (param->getType()->hasTypeVariable() ||
          param->getType()->hasError()) {
        param->setType(CS.getASTContext().TheUnresolvedType);
        param->setInterfaceType(param->getType());
      }
    }
  }

  // When type checking inside a single-expression closure, the DeclContext
  // must be reset to that closure for the recursive type check; otherwise a
  // closure within the subexpression can violate invariants.
  auto newDC = innermostClosure ? innermostClosure : CS.DC;
  llvm::SaveAndRestore<DeclContext *> savedDC(CS.DC, newDC);

  // Now it's safe to type check the subexpression.
  return typeCheckChildIndependently(subExpr, options);
}
/// For an expression being type checked with a CTP_CalleeResult contextual
/// type, try to diagnose a problem.
///
/// Scans the constraint system for a conversion constraint anchored on the
/// current expression to recover the concrete contextual result type, then
/// re-type-checks the callee without that context and compares the overload
/// set's result types against it.  Returns true if a diagnostic was emitted.
bool FailureDiagnosis::diagnoseCalleeResultContextualConversionError() {
  // Try to dig out the conversion constraint in question to find the contextual
  // result type being specified.
  Type contextualResultType;
  for (auto &c : CS.getConstraints()) {
    // Only consider conversion constraints anchored on this very expression.
    if (!isConversionConstraint(&c) || !c.getLocator() ||
        c.getLocator()->getAnchor() != expr)
      continue;
    // If we found our contextual type, then we know we have a conversion to
    // some function type, and that the result type is concrete. If not,
    // ignore it.
    auto toType = CS.simplifyType(c.getSecondType());
    if (auto *FT = toType->getAs<AnyFunctionType>())
      if (!isUnresolvedOrTypeVarType(FT->getResult())) {
        contextualResultType = FT->getResult();
        break;
      }
  }
  // No recoverable contextual result type: this diagnosis doesn't apply.
  if (!contextualResultType)
    return false;
  // Retypecheck the callee expression without a contextual type to resolve
  // whatever we can in it.
  auto callee = typeCheckChildIndependently(expr, TCC_ForceRecheck);
  // A null result means the recheck already diagnosed something.
  if (!callee)
    return true;
  // Based on that, compute an overload set.
  CalleeCandidateInfo calleeInfo(callee, /*hasTrailingClosure*/false, CS);
  switch (calleeInfo.size()) {
  case 0:
    // If we found no overloads, then there is something else going on here.
    return false;
  case 1:
    // If the callee isn't of function type, then something else has gone wrong.
    if (!calleeInfo[0].getResultType())
      return false;
    diagnose(expr->getLoc(), diag::candidates_no_match_result_type,
             calleeInfo.declName, calleeInfo[0].getResultType(),
             contextualResultType);
    return true;
  default:
    // Check to see if all of the viable candidates produce the same result,
    // this happens for things like "==" and "&&" operators.
    if (auto resultTy = calleeInfo[0].getResultType()) {
      for (unsigned i = 1, e = calleeInfo.size(); i != e; ++i)
        if (auto ty = calleeInfo[i].getResultType())
          if (!resultTy->isEqual(ty)) {
            // Result types diverge; fall through to the candidate-set note.
            resultTy = Type();
            break;
          }
      if (resultTy) {
        // All candidates agree on the result type, so complain about it
        // directly rather than listing every overload.
        diagnose(expr->getLoc(), diag::candidates_no_match_result_type,
                 calleeInfo.declName, calleeInfo[0].getResultType(),
                 contextualResultType);
        return true;
      }
    }
    // Otherwise, produce a candidate set.
    diagnose(expr->getLoc(), diag::no_candidates_match_result_type,
             calleeInfo.declName, contextualResultType);
    calleeInfo.suggestPotentialOverloads(expr->getLoc(), /*isResult*/true);
    return true;
  }
}
/// Return true if the given type conforms to the specified known protocol.
/// Returns false when the protocol itself cannot be found (e.g. no stdlib).
static bool conformsToKnownProtocol(Type fromType, KnownProtocolKind kind,
                                    const ConstraintSystem &CS) {
  auto *proto = CS.TC.getProtocol(SourceLoc(), kind);
  if (!proto)
    return false;
  return static_cast<bool>(CS.TC.conformsToProtocol(
      fromType, proto, CS.DC, ConformanceCheckFlags::InExpression));
}
/// Return true if the given type can be expressed by an integer literal.
static bool isIntegerType(Type fromType, const ConstraintSystem &CS) {
  return conformsToKnownProtocol(
      fromType, KnownProtocolKind::ExpressibleByIntegerLiteral, CS);
}
/// If the given type conforms to RawRepresentable, return its RawValue
/// associated type; otherwise return a null Type.
static Type isRawRepresentable(Type fromType, const ConstraintSystem &CS) {
  auto rawReprType =
      CS.TC.getProtocol(SourceLoc(), KnownProtocolKind::RawRepresentable);
  if (!rawReprType)
    return Type();

  auto conformance = CS.TC.conformsToProtocol(
      fromType, rawReprType, CS.DC, ConformanceCheckFlags::InExpression);
  if (!conformance)
    return Type();

  // Dig the RawValue type witness out of the conformance.
  return ProtocolConformanceRef::getTypeWitnessByName(
      fromType, *conformance, CS.getASTContext().Id_RawValue, &CS.TC);
}
/// If the given type conforms to RawRepresentable with an underlying type
/// conforming to the given known protocol, return that underlying type;
/// otherwise return a null Type.
static Type isRawRepresentable(Type fromType, KnownProtocolKind kind,
                               const ConstraintSystem &CS) {
  auto rawTy = isRawRepresentable(fromType, CS);
  if (rawTy && conformsToKnownProtocol(rawTy, kind, CS))
    return rawTy;
  return Type();
}
/// Return true if the conversion from fromType to toType is an invalid string
/// index operation: indexing into a String with an integer-literal type.
static bool isIntegerToStringIndexConversion(Type fromType, Type toType,
                                             ConstraintSystem &CS) {
  if (!conformsToKnownProtocol(
          fromType, KnownProtocolKind::ExpressibleByIntegerLiteral, CS))
    return false;
  // Compare against the canonical printed name of the String index type.
  return toType->getCanonicalType().getString() ==
         "String.CharacterView.Index";
}
/// Attempts to add fix-its for these two mistakes:
///
/// - Passing an integer where a type conforming to RawRepresentable is
///   expected, by wrapping the expression in a call to the contextual
///   type's initializer
///
/// - Passing a type conforming to RawRepresentable where an integer is
///   expected, by wrapping the expression in a call to the rawValue
///   accessor
///
/// - Return true on the fixit is added, false otherwise.
///
/// This helps migration with SDK changes.
static bool tryRawRepresentableFixIts(InFlightDiagnostic &diag,
                                      const ConstraintSystem &CS, Type fromType,
                                      Type toType, KnownProtocolKind kind,
                                      const Expr *expr) {
  // The following fixes apply for optional destination types as well.
  bool toTypeIsOptional = !toType->getAnyOptionalObjectType().isNull();
  toType = toType->lookThroughAllAnyOptionalTypes();
  Type fromTypeUnwrapped = fromType->getAnyOptionalObjectType();
  bool fromTypeIsOptional = !fromTypeUnwrapped.isNull();
  if (fromTypeIsOptional)
    fromType = fromTypeUnwrapped;
  // Helper that applies the computed wrapping strings to the expression,
  // choosing between three shapes depending on source/dest optionality.
  auto fixIt = [&](StringRef convWrapBefore, StringRef convWrapAfter) {
    SourceRange exprRange = expr->getSourceRange();
    if (fromTypeIsOptional && toTypeIsOptional) {
      // Use optional's map function to convert conditionally, like so:
      //   expr.map{ T(rawValue: $0) }
      bool needsParens = !expr->canAppendPostfixExpression();
      std::string mapCodeFix;
      if (needsParens) {
        diag.fixItInsert(exprRange.Start, "(");
        mapCodeFix += ")";
      }
      mapCodeFix += ".map { ";
      mapCodeFix += convWrapBefore;
      mapCodeFix += "$0";
      mapCodeFix += convWrapAfter;
      mapCodeFix += " }";
      diag.fixItInsertAfter(exprRange.End, mapCodeFix);
    } else if (!fromTypeIsOptional) {
      // Non-optional source: simply wrap the expression.
      diag.fixItInsert(exprRange.Start, convWrapBefore);
      diag.fixItInsertAfter(exprRange.End, convWrapAfter);
    } else {
      // Optional source to non-optional destination: force-unwrap, emitting a
      // separate diagnostic (the InFlightDiagnostic must be flushed first so
      // the two diagnostics are ordered correctly).
      SmallString<16> fixItBefore(convWrapBefore);
      SmallString<16> fixItAfter;
      if (!expr->canAppendPostfixExpression(true)) {
        fixItBefore += "(";
        fixItAfter = ")";
      }
      fixItAfter += "!" + convWrapAfter.str();
      diag.flush();
      CS.TC
          .diagnose(expr->getLoc(),
                    diag::construct_raw_representable_from_unwrapped_value,
                    toType, fromType)
          .highlight(exprRange)
          .fixItInsert(exprRange.Start, fixItBefore)
          .fixItInsertAfter(exprRange.End, fixItAfter);
    }
  };
  // Case 1: integer/string literal value passed where a RawRepresentable
  // destination type is expected.
  if (conformsToKnownProtocol(fromType, kind, CS)) {
    if (auto rawTy = isRawRepresentable(toType, kind, CS)) {
      // Produce before/after strings like 'Result(rawValue: RawType(<expr>))'
      // or just 'Result(rawValue: <expr>)'.
      std::string convWrapBefore = toType.getString();
      convWrapBefore += "(rawValue: ";
      std::string convWrapAfter = ")";
      if (!isa<LiteralExpr>(expr) &&
          !CS.TC.isConvertibleTo(fromType, rawTy, CS.DC)) {
        // Only try to insert a converting construction if the protocol is a
        // literal protocol and not some other known protocol.
        switch (kind) {
#define EXPRESSIBLE_BY_LITERAL_PROTOCOL_WITH_NAME(name, _) \
        case KnownProtocolKind::name: break;
#define PROTOCOL_WITH_NAME(name, _) \
        case KnownProtocolKind::name: return false;
#include "swift/AST/KnownProtocols.def"
        }
        convWrapBefore += rawTy->getString();
        convWrapBefore += "(";
        convWrapAfter += ")";
      }
      fixIt(convWrapBefore, convWrapAfter);
      return true;
    }
  }
  // Case 2: RawRepresentable value passed where its raw type is expected;
  // suggest appending '.rawValue' (possibly with a converting construction).
  if (auto rawTy = isRawRepresentable(fromType, kind, CS)) {
    if (conformsToKnownProtocol(toType, kind, CS)) {
      std::string convWrapBefore;
      std::string convWrapAfter = ".rawValue";
      if (!CS.TC.isConvertibleTo(rawTy, toType, CS.DC)) {
        // Only try to insert a converting construction if the protocol is a
        // literal protocol and not some other known protocol.
        switch (kind) {
#define EXPRESSIBLE_BY_LITERAL_PROTOCOL_WITH_NAME(name, _) \
        case KnownProtocolKind::name: break;
#define PROTOCOL_WITH_NAME(name, _) \
        case KnownProtocolKind::name: return false;
#include "swift/AST/KnownProtocols.def"
        }
        convWrapBefore += toType->getString();
        convWrapBefore += "(";
        convWrapAfter += ")";
      }
      fixIt(convWrapBefore, convWrapAfter);
      return true;
    }
  }
  return false;
}
/// Try to add a fix-it when converting between a collection and its slice type,
/// such as String <-> Substring or (eventually) Array <-> ArraySlice.
/// Returns true if a fix-it was attached to the diagnostic.
static bool trySequenceSubsequenceConversionFixIts(InFlightDiagnostic &diag,
                                                   ConstraintSystem &CS,
                                                   Type fromType, Type toType,
                                                   Expr *expr) {
  // Without a stdlib there are no String/Substring types to talk about.
  if (!CS.TC.Context.getStdlibModule())
    return false;

  auto stringTy = CS.TC.getStringType(CS.DC);
  auto substringTy = CS.TC.getSubstringType(CS.DC);
  if (!stringTy || !substringTy)
    return false;

  /// FIXME: Remove this flag when void subscripts are implemented.
  /// Make this unconditional and remove the if statement.
  if (CS.TC.getLangOpts().FixStringToSubstringConversions &&
      fromType->isEqual(stringTy) && toType->isEqual(substringTy)) {
    // String -> Substring conversion:
    // add a '[]' void subscript call to turn the whole String into a
    // Substring.
    diag.fixItInsertAfter(expr->getEndLoc(), "[]");
    return true;
  }

  if (fromType->isEqual(substringTy) && toType->isEqual(stringTy)) {
    // Substring -> String conversion: wrap the expression in 'String(...)'.
    auto range = expr->getSourceRange();
    diag.fixItInsert(range.Start, "String(");
    diag.fixItInsertAfter(range.End, ")");
    return true;
  }

  return false;
}
/// Attempts to add fix-its for these two mistakes:
///
/// - Passing an integer with the right type but which is getting wrapped with
///   a different integer type unnecessarily. The fixit removes the cast.
///
/// - Passing an integer but expecting a different integer type. The fixit
///   adds a wrapping cast.
///
/// Returns true if a fixit was added, false otherwise.
///
/// This helps migration with SDK changes.
static bool tryIntegerCastFixIts(InFlightDiagnostic &diag, ConstraintSystem &CS,
                                 Type fromType, Type toType, Expr *expr) {
  // Both sides must be integer-literal-expressible types.
  if (!isIntegerType(fromType, CS) || !isIntegerType(toType, CS))
    return false;

  // If the expression is a construction like 'IntN(<subexpr>)', dig out the
  // wrapped subexpression; otherwise produce null.
  auto getInnerCastedExpr = [&]() -> Expr * {
    if (auto *call = dyn_cast<CallExpr>(expr))
      if (isa<ConstructorRefCallExpr>(call->getFn()))
        if (auto *paren = dyn_cast<ParenExpr>(call->getArg()))
          return paren->getSubExpr();
    return nullptr;
  };

  if (Expr *innerE = getInnerCastedExpr()) {
    // If the uncasted value already converts to the destination type, the
    // wrapping cast is unnecessary -- remove it.
    if (CS.TC.isConvertibleTo(CS.getType(innerE), toType, CS.DC)) {
      diag.fixItRemoveChars(expr->getLoc(), innerE->getStartLoc())
          .fixItRemove(expr->getEndLoc());
      return true;
    }
  }

  // Otherwise suggest wrapping the expression in a cast to the expected
  // integer type: 'ToType(<expr>)'.
  std::string convWrapBefore = toType.getString();
  convWrapBefore += "(";
  SourceRange exprRange = expr->getSourceRange();
  diag.fixItInsert(exprRange.Start, convWrapBefore);
  diag.fixItInsertAfter(exprRange.End, ")");
  return true;
}
/// If a checked cast from fromType to toType is possible, attach a fix-it
/// appending ' as ToType' (or ' as! ToType' for forced casts) and return
/// true; otherwise return false.
static bool addTypeCoerceFixit(InFlightDiagnostic &diag, ConstraintSystem &CS,
                               Type fromType, Type toType, Expr *expr) {
  // Look through optional types; casts can add them, but can't remove extra
  // ones.
  toType = toType->lookThroughAllAnyOptionalTypes();

  CheckedCastKind castKind = CS.getTypeChecker().typeCheckCheckedCast(
      fromType, toType, CheckedCastContextKind::None, CS.DC, SourceLoc(),
      nullptr, SourceRange());
  if (castKind == CheckedCastKind::Unresolved)
    return false;

  // Coercions can use plain 'as'; everything else needs a forced 'as!'.
  bool canUseAs = castKind == CheckedCastKind::Coercion ||
                  castKind == CheckedCastKind::BridgingCoercion;

  SmallString<32> buffer;
  llvm::raw_svector_ostream OS(buffer);
  toType->print(OS);
  diag.fixItInsert(
      Lexer::getLocForEndOfToken(CS.DC->getASTContext().SourceMgr,
                                 expr->getEndLoc()),
      (llvm::Twine(canUseAs ? " as " : " as! ") + OS.str()).str());
  return true;
}
/// Try to diagnose common errors involving implicitly non-escaping parameters
/// of function type, giving more specific and simpler diagnostics, attaching
/// notes on the parameter, and offering fixits to insert @escaping. Returns
/// true if it detects and issues an error, false if it does nothing.
static bool tryDiagnoseNonEscapingParameterToEscaping(
    Expr *expr, Type srcType, Type dstType, ContextualTypePurpose dstPurpose,
    ConstraintSystem &CS) {
  assert(expr);
  // Need to be referencing a parameter of function type
  auto declRef = dyn_cast<DeclRefExpr>(expr);
  if (!declRef || !isa<ParamDecl>(declRef->getDecl()) ||
      !CS.getType(declRef)->is<AnyFunctionType>())
    return false;
  // Must be from non-escaping function to escaping function. For the
  // destination type, we read through optionality to give better diagnostics in
  // the event of an implicit promotion.
  auto srcFT = srcType->getAs<AnyFunctionType>();
  auto dstFT =
      dstType->lookThroughAllAnyOptionalTypes()->getAs<AnyFunctionType>();
  if (!srcFT || !dstFT || !srcFT->isNoEscape() || dstFT->isNoEscape())
    return false;
  // Pick a specific diagnostic for the specific use
  auto paramDecl = cast<ParamDecl>(declRef->getDecl());
  switch (dstPurpose) {
  case CTP_CallArgument:
    CS.TC.diagnose(declRef->getLoc(), diag::passing_noescape_to_escaping,
                   paramDecl->getName());
    break;
  case CTP_AssignSource:
    CS.TC.diagnose(declRef->getLoc(), diag::assigning_noescape_to_escaping,
                   paramDecl->getName());
    break;
  default:
    // Any other use gets the generic noescape-to-escaping diagnostic.
    CS.TC.diagnose(declRef->getLoc(), diag::general_noescape_to_escaping,
                   paramDecl->getName());
    break;
  }
  // Give a note and fixit
  InFlightDiagnostic note = CS.TC.diagnose(
      paramDecl->getLoc(), diag::noescape_parameter, paramDecl->getName());
  if (!srcFT->isAutoClosure()) {
    // Suggest annotating the parameter's type with @escaping.
    note.fixItInsert(paramDecl->getTypeLoc().getSourceRange().Start,
                     "@escaping ");
  } // TODO: add in a fixit for autoclosure
  return true;
}
/// Diagnose a failure to convert 'expr' to the given contextual type for the
/// given contextual purpose. Returns true if a diagnostic was emitted (or the
/// recheck emitted one), false if no contextual problem was found.
bool FailureDiagnosis::diagnoseContextualConversionError(
    Expr *expr, Type contextualType, ContextualTypePurpose CTP) {
  // If the constraint system has a contextual type, then we can test to see if
  // this is the problem that prevents us from solving the system.
  if (!contextualType) {
    // This contextual conversion constraint doesn't install an actual type.
    if (CTP == CTP_CalleeResult)
      return diagnoseCalleeResultContextualConversionError();
    return false;
  }
  // Try re-type-checking the expression without the contextual type to see if
  // it can work without it. If so, the contextual type is the problem. We
  // force a recheck, because "expr" is likely in our table with the extra
  // contextual constraint that we know we are relaxing.
  TCCOptions options = TCC_ForceRecheck;
  if (contextualType->is<InOutType>())
    options |= TCC_AllowLValue;
  auto recheckedExpr = typeCheckChildIndependently(expr, options);
  auto exprType = recheckedExpr ? CS.getType(recheckedExpr) : Type();
  // If it failed and diagnosed something, then we're done.
  if (!exprType) return true;
  // If we contextually had an inout type, and got a non-lvalue result, then
  // we fail with a mutability error.
  if (contextualType->is<InOutType>() && !exprType->is<LValueType>()) {
    diagnoseSubElementFailure(recheckedExpr, recheckedExpr->getLoc(), CS,
                              diag::cannot_pass_rvalue_inout_subelement,
                              diag::cannot_pass_rvalue_inout);
    return true;
  }
  // Try to find the contextual type in a variety of ways. If the constraint
  // system had a contextual type specified, we use it - it will have a purpose
  // indicator which allows us to give a very "to the point" diagnostic.
  Diag<Type, Type> diagID;
  Diag<Type, Type> diagIDProtocol;
  Diag<Type> nilDiag;
  std::function<void(void)> nilFollowup;
  // If this is conversion failure due to a return statement with an argument
  // that cannot be coerced to the result type of the function, emit a
  // specific error.
  switch (CTP) {
  case CTP_Unused:
  case CTP_CannotFail:
    llvm_unreachable("These contextual type purposes cannot fail with a "
                     "conversion type specified!");
  case CTP_CalleeResult:
    llvm_unreachable("CTP_CalleeResult does not actually install a "
                     "contextual type");
  case CTP_Initialization:
    diagID = diag::cannot_convert_initializer_value;
    diagIDProtocol = diag::cannot_convert_initializer_value_protocol;
    nilDiag = diag::cannot_convert_initializer_value_nil;
    // After the nil diagnostic, suggest making the declared pattern type
    // optional (appending '?' or wrapping in '(...)?').
    nilFollowup = [this] {
      TypeRepr *patternTR = CS.getContextualTypeLoc().getTypeRepr();
      if (!patternTR)
        return;
      auto diag = diagnose(patternTR->getLoc(), diag::note_make_optional,
                           OptionalType::get(CS.getContextualType()));
      if (patternTR->isSimple()) {
        diag.fixItInsertAfter(patternTR->getEndLoc(), "?");
      } else {
        diag.fixItInsert(patternTR->getStartLoc(), "(");
        diag.fixItInsertAfter(patternTR->getEndLoc(), ")?");
      }
    };
    break;
  case CTP_ReturnStmt:
    // Special case the "conversion to void" case.
    if (contextualType->isVoid()) {
      diagnose(expr->getLoc(), diag::cannot_return_value_from_void_func)
        .highlight(expr->getSourceRange());
      return true;
    }
    diagID = diag::cannot_convert_to_return_type;
    diagIDProtocol = diag::cannot_convert_to_return_type_protocol;
    nilDiag = diag::cannot_convert_to_return_type_nil;
    break;
  case CTP_ThrowStmt: {
    if (isa<NilLiteralExpr>(expr->getValueProvidingExpr())) {
      diagnose(expr->getLoc(), diag::cannot_throw_nil);
      return true;
    }
    if (isUnresolvedOrTypeVarType(exprType) ||
        exprType->isEqual(contextualType))
      return false;
    // If we tried to throw the error code of an error type, suggest object
    // construction.
    auto &TC = CS.getTypeChecker();
    if (auto errorCodeProtocol =
            TC.Context.getProtocol(KnownProtocolKind::ErrorCodeProtocol)) {
      if (auto conformance =
              TC.conformsToProtocol(CS.getType(expr), errorCodeProtocol, CS.DC,
                                    ConformanceCheckFlags::InExpression)) {
        Type errorCodeType = CS.getType(expr);
        // Dig out the associated ErrorType witness so we can name the error
        // object the user probably meant to construct.
        Type errorType =
            ProtocolConformanceRef::getTypeWitnessByName(errorCodeType, *conformance,
                                                         TC.Context.Id_ErrorType,
                                                         &TC)->getCanonicalType();
        if (errorType) {
          auto diag = diagnose(expr->getLoc(), diag::cannot_throw_error_code,
                               errorCodeType, errorType);
          if (auto unresolvedDot = dyn_cast<UnresolvedDotExpr>(expr)) {
            diag.fixItInsert(unresolvedDot->getDotLoc(), "(");
            diag.fixItInsertAfter(unresolvedDot->getEndLoc(), ")");
          }
          return true;
        }
      }
    }
    // The conversion destination of throw is always ErrorType (at the moment)
    // if this ever expands, this should be a specific form like () is for
    // return.
    diagnose(expr->getLoc(), diag::cannot_convert_thrown_type, exprType)
      .highlight(expr->getSourceRange());
    return true;
  }
  case CTP_EnumCaseRawValue:
    diagID = diag::cannot_convert_raw_initializer_value;
    diagIDProtocol = diag::cannot_convert_raw_initializer_value;
    nilDiag = diag::cannot_convert_raw_initializer_value_nil;
    break;
  case CTP_DefaultParameter:
    diagID = diag::cannot_convert_default_arg_value;
    diagIDProtocol = diag::cannot_convert_default_arg_value_protocol;
    nilDiag = diag::cannot_convert_default_arg_value_nil;
    break;
  case CTP_CallArgument:
    diagID = diag::cannot_convert_argument_value;
    diagIDProtocol = diag::cannot_convert_argument_value_protocol;
    nilDiag = diag::cannot_convert_argument_value_nil;
    break;
  case CTP_ClosureResult:
    diagID = diag::cannot_convert_closure_result;
    diagIDProtocol = diag::cannot_convert_closure_result_protocol;
    nilDiag = diag::cannot_convert_closure_result_nil;
    break;
  case CTP_ArrayElement:
    diagID = diag::cannot_convert_array_element;
    diagIDProtocol = diag::cannot_convert_array_element_protocol;
    nilDiag = diag::cannot_convert_array_element_nil;
    break;
  case CTP_DictionaryKey:
    diagID = diag::cannot_convert_dict_key;
    diagIDProtocol = diag::cannot_convert_dict_key_protocol;
    nilDiag = diag::cannot_convert_dict_key_nil;
    break;
  case CTP_DictionaryValue:
    diagID = diag::cannot_convert_dict_value;
    diagIDProtocol = diag::cannot_convert_dict_value_protocol;
    nilDiag = diag::cannot_convert_dict_value_nil;
    break;
  case CTP_CoerceOperand:
    diagID = diag::cannot_convert_coerce;
    diagIDProtocol = diag::cannot_convert_coerce_protocol;
    nilDiag = diag::cannot_convert_coerce_nil;
    break;
  case CTP_AssignSource:
    diagID = diag::cannot_convert_assign;
    diagIDProtocol = diag::cannot_convert_assign_protocol;
    nilDiag = diag::cannot_convert_assign_nil;
    break;
  }
  // If we're diagnosing an issue with 'nil', produce a specific diagnostic,
  // instead of uttering ExpressibleByNilLiteral.
  if (isa<NilLiteralExpr>(expr->getValueProvidingExpr())) {
    diagnose(expr->getLoc(), nilDiag, contextualType);
    if (nilFollowup)
      nilFollowup();
    return true;
  }
  // If we don't have a type for the expression, then we cannot use it in
  // conversion constraint diagnostic generation. If the types match, then it
  // must not be the contextual type that is the problem.
  if (isUnresolvedOrTypeVarType(exprType) ||
      exprType->isEqual(contextualType)) {
    return false;
  }
  // If we're trying to convert something of type "() -> T" to T, then we
  // probably meant to call the value.
  if (auto srcFT = exprType->getAs<AnyFunctionType>()) {
    if (srcFT->getInput()->isVoid() &&
        !isUnresolvedOrTypeVarType(srcFT->getResult()) &&
        CS.TC.isConvertibleTo(srcFT->getResult(), contextualType, CS.DC)) {
      diagnose(expr->getLoc(), diag::missing_nullary_call, srcFT->getResult())
        .highlight(expr->getSourceRange())
        .fixItInsertAfter(expr->getEndLoc(), "()");
      return true;
    }
  }
  // If this is a conversion from T to () in a call argument context, it is
  // almost certainly an extra argument being passed in.
  if (CTP == CTP_CallArgument && contextualType->isVoid()) {
    diagnose(expr->getLoc(), diag::extra_argument_to_nullary_call)
      .highlight(expr->getSourceRange());
    return true;
  }
  // If we're trying to convert something to Bool, check to see if it is for
  // a known reason.
  if (contextualType->isBool() && diagnoseConversionToBool(expr, exprType))
    return true;
  exprType = exprType->getRValueType();
  // Special case of some common conversions involving Swift.String
  // indexes, catching cases where people attempt to index them with an integer.
  if (isIntegerToStringIndexConversion(exprType, contextualType, CS)) {
    diagnose(expr->getLoc(), diag::string_index_not_integer,
             exprType->getRValueType())
      .highlight(expr->getSourceRange());
    diagnose(expr->getLoc(), diag::string_index_not_integer_note);
    return true;
  }
  // When converting from T to [T] or UnsafePointer<T>, we can offer fixit to wrap
  // the expr with brackets.
  auto *genericType = contextualType->getAs<BoundGenericType>();
  if (genericType) {
    auto *contextDecl = genericType->getDecl();
    if (contextDecl == CS.TC.Context.getArrayDecl()) {
      // T -> [T]: suggest wrapping the expression in brackets.
      for (Type arg : genericType->getGenericArgs()) {
        if (arg->isEqual(exprType)) {
          diagnose(expr->getLoc(), diagID, exprType, contextualType)
            .fixItInsert(expr->getStartLoc(), "[")
            .fixItInsert(Lexer::getLocForEndOfToken(CS.TC.Context.SourceMgr,
                                                    expr->getEndLoc()),
                         "]");
          return true;
        }
      }
    } else if (contextDecl == CS.TC.Context.getUnsafePointerDecl() ||
               contextDecl == CS.TC.Context.getUnsafeMutablePointerDecl() ||
               contextDecl == CS.TC.Context.getUnsafeRawPointerDecl() ||
               contextDecl == CS.TC.Context.getUnsafeMutableRawPointerDecl()) {
      // T (lvalue) -> UnsafePointer<T> and friends: suggest prefixing '&'.
      for (Type arg : genericType->getGenericArgs()) {
        if (arg->isEqual(exprType) && CS.getType(expr)->hasLValueType()) {
          diagnose(expr->getLoc(), diagID, exprType, contextualType).
            fixItInsert(expr->getStartLoc(), "&");
          return true;
        }
      }
    }
  }
  // Try for better/more specific diagnostics for non-escaping to @escaping
  if (tryDiagnoseNonEscapingParameterToEscaping(expr, exprType, contextualType,
                                                CTP, CS))
    return true;
  // Don't attempt fixits if we have an unsolved type variable, since
  // the recovery path's recursion into the type checker via typeCheckCast()
  // will confuse matters.
  if (exprType->hasTypeVariable())
    return false;
  // When complaining about conversion to a protocol type, complain about
  // conformance instead of "conversion".
  if (contextualType->is<ProtocolType>() ||
      contextualType->is<ProtocolCompositionType>())
    diagID = diagIDProtocol;
  // Try to simplify irrelevant details of function types. For example, if
  // someone passes a "() -> Float" function to a "() throws -> Int"
  // parameter, then uttering the "throws" may confuse them into thinking that
  // that is the problem, even though there is a clear subtype relation.
  if (auto srcFT = exprType->getAs<FunctionType>())
    if (auto destFT = contextualType->getAs<FunctionType>()) {
      auto destExtInfo = destFT->getExtInfo();
      if (!srcFT->isNoEscape()) destExtInfo = destExtInfo.withNoEscape(false);
      if (!srcFT->throws()) destExtInfo = destExtInfo.withThrows(false);
      if (destExtInfo != destFT->getExtInfo())
        contextualType = FunctionType::get(destFT->getInput(),
                                           destFT->getResult(), destExtInfo);
      // If this is a function conversion that discards throwability or
      // noescape, emit a specific diagnostic about that.
      if (srcFT->throws() && !destFT->throws())
        diagID = diag::throws_functiontype_mismatch;
      else if (srcFT->isNoEscape() && !destFT->isNoEscape())
        diagID = diag::noescape_functiontype_mismatch;
    }
  InFlightDiagnostic diag = diagnose(expr->getLoc(), diagID,
                                     exprType, contextualType);
  diag.highlight(expr->getSourceRange());
  // Try to convert between a sequence and its subsequence, notably
  // String <-> Substring.
  if (trySequenceSubsequenceConversionFixIts(diag, CS, exprType, contextualType,
                                             expr)) {
    return true;
  }
  // Attempt to add a fixit for the error.
  switch (CTP) {
  case CTP_CallArgument:
  case CTP_ArrayElement:
  case CTP_DictionaryKey:
  case CTP_DictionaryValue:
  case CTP_AssignSource:
  case CTP_Initialization:
  case CTP_ReturnStmt:
    // Try each fix-it strategy in turn, stopping at the first that applies.
    tryRawRepresentableFixIts(diag, CS, exprType, contextualType,
                              KnownProtocolKind::ExpressibleByIntegerLiteral,
                              expr) ||
    tryRawRepresentableFixIts(diag, CS, exprType, contextualType,
                              KnownProtocolKind::ExpressibleByStringLiteral,
                              expr) ||
    tryIntegerCastFixIts(diag, CS, exprType, contextualType, expr) ||
    addTypeCoerceFixit(diag, CS, exprType, contextualType, expr);
    break;
  default:
    // FIXME: Other contextual conversions too?
    break;
  }
  return true;
}
/// When an assignment to an expression is detected and the destination is
/// invalid, emit a detailed error about the condition.
void ConstraintSystem::diagnoseAssignmentFailure(Expr *dest, Type destTy,
                                                 SourceLoc equalLoc) {
  auto &TC = getTypeChecker();
  // Diagnose obvious assignments to literals.
  if (isa<LiteralExpr>(dest->getValueProvidingExpr())) {
    TC.diagnose(equalLoc, diag::cannot_assign_to_literal);
    return;
  }
  // Diagnose assignments to let-properties in delegating initializers.
  if (auto *member = dyn_cast<UnresolvedDotExpr>(dest)) {
    if (auto *ctor = dyn_cast<ConstructorDecl>(DC)) {
      if (auto *baseRef = dyn_cast<DeclRefExpr>(member->getBase())) {
        // Only fires for 'self.prop = ...' inside a delegating init.
        if (baseRef->getDecl() == ctor->getImplicitSelfDecl() &&
            ctor->getDelegatingOrChainedInitKind(nullptr) ==
            ConstructorDecl::BodyInitKind::Delegating) {
          auto resolved = resolveImmutableBase(member, *this);
          assert(resolved.first == member);
          TC.diagnose(equalLoc, diag::assignment_let_property_delegating_init,
                      member->getName());
          // Point at the declaration of the immutable property, if found.
          if (resolved.second) {
            TC.diagnose(resolved.second, diag::decl_declared_here,
                        member->getName());
          }
          return;
        }
      }
    }
  }
  // Pick a diagnostic ID based on the syntactic form of the destination.
  Diag<StringRef> diagID;
  if (isa<DeclRefExpr>(dest))
    diagID = diag::assignment_lhs_is_immutable_variable;
  else if (isa<ForceValueExpr>(dest))
    diagID = diag::assignment_bang_has_immutable_subcomponent;
  else if (isa<UnresolvedDotExpr>(dest) || isa<MemberRefExpr>(dest))
    diagID = diag::assignment_lhs_is_immutable_property;
  else if (isa<SubscriptExpr>(dest))
    diagID = diag::assignment_subscript_has_immutable_base;
  else {
    diagID = diag::assignment_lhs_is_immutable_variable;
  }
  diagnoseSubElementFailure(dest, equalLoc, *this, diagID,
                            diag::assignment_lhs_not_lvalue);
}
//===----------------------------------------------------------------------===//
// Diagnose assigning variable to itself.
//===----------------------------------------------------------------------===//
/// If the expression is a (possibly loaded) direct reference to a declaration,
/// return that declaration; otherwise return null.
static Decl *findSimpleReferencedDecl(const Expr *E) {
  // Look through a load of an lvalue.
  if (auto *load = dyn_cast<LoadExpr>(E))
    E = load->getSubExpr();

  auto *declRef = dyn_cast<DeclRefExpr>(E);
  return declRef ? declRef->getDecl() : nullptr;
}
/// Decompose an expression into a (base decl, member decl) pair, where the
/// base is null for a simple variable reference. Returns (null, null) when no
/// referenced declaration can be identified.
static std::pair<Decl *, Decl *> findReferencedDecl(const Expr *E) {
  E = E->getValueProvidingExpr();

  // Recurse through loads and through the destination of an assignment.
  if (auto *load = dyn_cast<LoadExpr>(E))
    return findReferencedDecl(load->getSubExpr());
  if (auto *assign = dyn_cast<AssignExpr>(E))
    return findReferencedDecl(assign->getDest());

  // A plain decl reference has no base.
  if (auto *simple = findSimpleReferencedDecl(E))
    return std::make_pair(nullptr, simple);

  // A member reference off a simple base yields (base, member).
  if (auto *memberRef = dyn_cast<MemberRefExpr>(E))
    if (auto *baseDecl = findSimpleReferencedDecl(memberRef->getBase()))
      return std::make_pair(baseDecl, memberRef->getMember().getDecl());

  return std::make_pair(nullptr, nullptr);
}
/// Diagnose an assignment of a variable or property to itself, e.g. 'x = x'.
/// Returns true if a diagnostic was emitted.
bool TypeChecker::diagnoseSelfAssignment(const Expr *E) {
  auto *assignment = dyn_cast<AssignExpr>(E);
  if (!assignment)
    return false;

  auto lhsDecl = findReferencedDecl(assignment->getDest());
  auto rhsDecl = findReferencedDecl(assignment->getSrc());

  // Only fire when both sides name the same declaration (and, for member
  // references, the same base).
  if (!lhsDecl.second || lhsDecl != rhsDecl)
    return false;

  diagnose(assignment->getLoc(), lhsDecl.first ? diag::self_assignment_prop
                                               : diag::self_assignment_var)
      .highlight(assignment->getDest()->getSourceRange())
      .highlight(assignment->getSrc()->getSourceRange());
  return true;
}
/// Return true if every known candidate is a non-assignment infix operator
/// whose two parameters have identical types.
static bool isSymmetricBinaryOperator(const CalleeCandidateInfo &CCI) {
  // If we don't have at least one known candidate, don't trigger.
  if (CCI.candidates.empty())
    return false;

  for (auto &candidate : CCI.candidates) {
    // Each candidate must be a function declared as an infix operator that is
    // not in an assignment precedence group.
    auto *fn = dyn_cast_or_null<FuncDecl>(candidate.getDecl());
    if (!fn)
      return false;
    auto *op = dyn_cast_or_null<InfixOperatorDecl>(fn->getOperatorDecl());
    if (!op || !op->getPrecedenceGroup() ||
        op->getPrecedenceGroup()->isAssignment())
      return false;

    // It must take exactly two parameters...
    auto *params = fn->getParameterLists().back();
    if (params->size() != 2)
      return false;

    // ...of the same interface type.
    if (!params->get(0)->getInterfaceType()->isEqual(
            params->get(1)->getInterfaceType()))
      return false;
  }
  return true;
}
/// Determine whether any of the given callee candidates have a default value.
static bool candidatesHaveAnyDefaultValues(
    const CalleeCandidateInfo &candidates) {
  for (const auto &cand : candidates.candidates) {
    auto *fn = dyn_cast_or_null<AbstractFunctionDecl>(cand.getDecl());
    if (!fn)
      continue;

    // Skip candidates whose curry level has no corresponding parameter list.
    auto paramLists = fn->getParameterLists();
    if (cand.level >= paramLists.size())
      continue;

    for (auto *param : *paramLists[cand.level])
      if (param->getDefaultArgumentKind() != DefaultArgumentKind::None)
        return true;
  }
  return false;
}
/// Find the tuple element that can be initialized by a scalar.
///
/// Prefers candidate-aware selection: when the first candidate's parameter
/// list lines up with the tuple, picks the one element that has no default
/// argument (tolerating variadic elements, which may be empty). Falls back to
/// the tuple's own notion of scalar init when no candidate applies.
static Optional<unsigned> getElementForScalarInitOfArg(
    const TupleType *tupleTy,
    const CalleeCandidateInfo &candidates) {
  // Empty tuples cannot be initialized with a scalar.
  if (tupleTy->getNumElements() == 0) return None;
  // Candidate-independent fallback: ask the tuple type itself.
  auto getElementForScalarInitSimple =
      [](const TupleType *tupleTy) -> Optional<unsigned> {
    int index = tupleTy->getElementForScalarInit();
    if (index < 0) return None;
    return index;
  };
  // If there aren't any candidates, we're done.
  if (candidates.empty()) return getElementForScalarInitSimple(tupleTy);
  // Dig out the candidate.
  const auto &cand = candidates[0];
  auto function = dyn_cast_or_null<AbstractFunctionDecl>(cand.getDecl());
  if (!function) return getElementForScalarInitSimple(tupleTy);
  auto paramLists = function->getParameterLists();
  if (cand.level >= paramLists.size())
    return getElementForScalarInitSimple(tupleTy);
  auto paramList = paramLists[cand.level];
  // The tuple must line up one-to-one with the candidate's parameters.
  if (tupleTy->getNumElements() != paramList->size())
    return getElementForScalarInitSimple(tupleTy);
  // Find a tuple element without a default.
  Optional<unsigned> elementWithoutDefault;
  for (unsigned i : range(tupleTy->getNumElements())) {
    auto param = paramList->get(i);
    // Skip parameters with default arguments.
    if (param->getDefaultArgumentKind() != DefaultArgumentKind::None)
      continue;
    // If we already have an element without a default, check whether there are
    // two fields that need initialization.
    if (elementWithoutDefault) {
      // Variadic fields are okay; they'll just end up being empty.
      if (param->isVariadic()) continue;
      // If the element we saw before was variadic, it can be empty as well.
      auto priorParam = paramList->get(*elementWithoutDefault);
      // Two non-variadic elements without defaults: no single scalar fits.
      if (!priorParam->isVariadic()) return None;
    }
    elementWithoutDefault = i;
  }
  if (elementWithoutDefault) return elementWithoutDefault;
  // All of the fields have default values; initialize the first one.
  return 0;
}
/// Return true if the argument of a CallExpr (or related node) has a trailing
/// closure.
static bool callArgHasTrailingClosure(Expr *E) {
  if (!E)
    return false;
  // Only paren and tuple argument lists can carry a trailing closure.
  if (auto *paren = dyn_cast<ParenExpr>(E))
    return paren->hasTrailingClosure();
  if (auto *tuple = dyn_cast<TupleExpr>(E))
    return tuple->hasTrailingClosure();
  return false;
}
/// Special magic to handle inout exprs and tuples in argument lists.
///
/// Re-type-checks the argument expression \p argExpr of a call independently
/// of the enclosing apply, using \p candidates to recover per-parameter type
/// information (inout-ness, defaulted parameters, varargs). Returns the
/// rebuilt (and cached) argument expression, or null if a diagnostic was
/// already emitted while checking a child.
///
/// NOTE(review): fixed a mis-encoded '&param' ("¶m") in the parameter-binding
/// loop below; no other code change.
Expr *FailureDiagnosis::
typeCheckArgumentChildIndependently(Expr *argExpr, Type argType,
                                    const CalleeCandidateInfo &candidates,
                                    TCCOptions options) {
  // Grab one of the candidates (if present) and get its input list to help
  // identify operators that have implicit inout arguments.
  Type exampleInputType;
  if (!candidates.empty()) {
    exampleInputType = candidates[0].getArgumentType();

    // If we found a single candidate, and have no contextually known argument
    // type information, use that one candidate as the type information for
    // subexpr checking.
    //
    // TODO: If all candidates have the same type for some argument, we could
    // pass down partial information.
    if (candidates.size() == 1 && !argType)
      argType = candidates[0].getArgumentType();
  }

  // If our candidates are instance members at curry level #0, then the argument
  // being provided is the receiver type for the instance.  We produce better
  // diagnostics when we don't force the self type down.
  if (argType && !candidates.empty())
    if (auto decl = candidates[0].getDecl())
      if (decl->isInstanceMember() && candidates[0].level == 0 &&
          !isa<SubscriptDecl>(decl))
        argType = Type();

  // Similarly, we get better results when we don't push argument types down
  // to symmetric operators.
  if (argType && isSymmetricBinaryOperator(candidates))
    argType = Type();

  // FIXME: This should all just be a matter of getting the type of the
  // sub-expression, but this doesn't work well when typeCheckChildIndependently
  // is over-conservative w.r.t. TupleExprs.
  auto *TE = dyn_cast<TupleExpr>(argExpr);
  if (!TE) {
    // If the argument isn't a tuple, it is some scalar value for a
    // single-argument call.
    if (exampleInputType && exampleInputType->is<InOutType>())
      options |= TCC_AllowLValue;

    // If the argtype is a tuple type with default arguments, or a labeled
    // tuple with a single element, pull the scalar element type for the
    // subexpression out.  If we can't do that and the tuple has default
    // arguments, we have to punt on passing down the type information, since
    // type checking the subexpression won't be able to find the default
    // argument provider.
    if (argType) {
      if (auto argTT = argType->getAs<TupleType>()) {
        if (auto scalarElt = getElementForScalarInitOfArg(argTT, candidates)) {
          // If we found the single argument being initialized, use it.
          auto &arg = argTT->getElement(*scalarElt);

          // If the argument being specified is actually varargs, then we're
          // just specifying one element of a variadic list.  Use the type of
          // the individual varargs argument, not the overall array type.
          if (arg.isVararg())
            argType = arg.getVarargBaseTy();
          else
            argType = arg.getType();
        } else if (candidatesHaveAnyDefaultValues(candidates)) {
          argType = Type();
        }
      } else if (candidatesHaveAnyDefaultValues(candidates)) {
        argType = Type();
      }
    }

    auto CTPurpose = argType ? CTP_CallArgument : CTP_Unused;
    return typeCheckChildIndependently(argExpr, argType, CTPurpose, options);
  }

  // If we know the requested argType to use, use computeTupleShuffle to
  // produce the shuffle of input arguments to destination values.  It requires
  // a TupleType to compute the mapping from argExpr.  Conveniently, it doesn't
  // care about the actual types though, so we can just use 'void' for them.
  // FIXME: This doesn't need to be limited to tuple types.
  if (argType && argType->is<TupleType>()) {
    // Decompose the parameter type.
    SmallVector<AnyFunctionType::Param, 4> params;
    AnyFunctionType::decomposeInput(argType, params);

    // If we have a candidate function around, compute the position of its
    // default arguments.
    SmallVector<bool, 4> defaultMap;
    if (candidates.empty()) {
      defaultMap.assign(params.size(), false);
    } else {
      computeDefaultMap(argType, candidates[0].getDecl(), candidates[0].level,
                        defaultMap);
    }

    // Form a set of call arguments, using a dummy type (Void), because the
    // argument/parameter matching code doesn't need it.
    auto voidTy = CS.getASTContext().TheEmptyTupleType;
    SmallVector<AnyFunctionType::Param, 4> args;
    for (unsigned i = 0, e = TE->getNumElements(); i != e; ++i) {
      args.push_back(AnyFunctionType::Param(voidTy, TE->getElementName(i), {}));
    }

    /// Use a match call argument listener that allows relabeling.
    struct RelabelMatchCallArgumentListener : MatchCallArgumentListener {
      bool relabelArguments(ArrayRef<Identifier> newNames) override {
        return false;
      }
    } listener;

    SmallVector<ParamBinding, 4> paramBindings;
    if (!matchCallArguments(args, params, defaultMap,
                            callArgHasTrailingClosure(argExpr),
                            /*allowFixes=*/true,
                            listener, paramBindings)) {
      SmallVector<Expr*, 4> resultElts(TE->getNumElements(), nullptr);
      SmallVector<TupleTypeElt, 4> resultEltTys(TE->getNumElements(), voidTy);

      // Perform analysis of the input elements.
      for (unsigned paramIdx : range(paramBindings.size())) {
        // Extract the parameter.
        const auto &param = params[paramIdx];

        // Determine the parameter type.
        if (param.isInOut())
          options |= TCC_AllowLValue;

        // Look at each of the arguments assigned to this parameter.
        auto currentParamType = param.getType();
        for (auto inArgNo : paramBindings[paramIdx]) {
          // Determine the argument type.
          auto currentArgType = TE->getElement(inArgNo);

          auto exprResult =
              typeCheckChildIndependently(currentArgType, currentParamType,
                                          CTP_CallArgument, options);

          // If there was an error type checking this argument, then we're done.
          if (!exprResult)
            return nullptr;

          // If the caller expected something inout, but we didn't have
          // something of inout type, diagnose it.
          if (auto IOE =
                  dyn_cast<InOutExpr>(exprResult->getSemanticsProvidingExpr())) {
            if (!param.isInOut()) {
              diagnose(exprResult->getLoc(), diag::extra_address_of,
                       CS.getType(exprResult)->getInOutObjectType())
                  .highlight(exprResult->getSourceRange())
                  .fixItRemove(IOE->getStartLoc());
              return nullptr;
            }
          }

          auto resultTy = CS.getType(exprResult);
          resultElts[inArgNo] = exprResult;
          resultEltTys[inArgNo] = {resultTy->getInOutObjectType(),
                                   TE->getElementName(inArgNo),
                                   ParameterTypeFlags().withInOut(
                                       resultTy->is<InOutType>())};
        }
      }

      auto TT = TupleType::get(resultEltTys, CS.getASTContext());
      return CS.cacheType(TupleExpr::create(
          CS.getASTContext(), TE->getLParenLoc(), resultElts,
          TE->getElementNames(), TE->getElementNameLocs(), TE->getRParenLoc(),
          TE->hasTrailingClosure(), TE->isImplicit(), TT));
    }
  }

  // Get the simplified type of each element and rebuild the aggregate.
  SmallVector<TupleTypeElt, 4> resultEltTys;
  SmallVector<Expr*, 4> resultElts;

  TupleType *exampleInputTuple = nullptr;
  if (exampleInputType)
    exampleInputTuple = exampleInputType->getAs<TupleType>();

  for (unsigned i = 0, e = TE->getNumElements(); i != e; i++) {
    if (exampleInputTuple && i < exampleInputTuple->getNumElements() &&
        exampleInputTuple->getElement(i).isInOut())
      options |= TCC_AllowLValue;

    auto elExpr = typeCheckChildIndependently(TE->getElement(i), options);
    if (!elExpr) return nullptr; // already diagnosed.

    resultElts.push_back(elExpr);
    auto resFlags =
        ParameterTypeFlags().withInOut(elExpr->isSemanticallyInOutExpr());
    resultEltTys.push_back({CS.getType(elExpr)->getInOutObjectType(),
                            TE->getElementName(i), resFlags});
  }

  auto TT = TupleType::get(resultEltTys, CS.getASTContext());
  return CS.cacheType(TupleExpr::create(
      CS.getASTContext(), TE->getLParenLoc(), resultElts, TE->getElementNames(),
      TE->getElementNameLocs(), TE->getRParenLoc(), TE->hasTrailingClosure(),
      TE->isImplicit(), TT));
}
/// Diagnose calls where an instance member shadows a global function of the
/// same name (e.g. calling stdlib 'max' inside a Sequence extension resolves
/// to the implicit 'self.max').  Returns true if a diagnostic was emitted.
static bool diagnoseImplicitSelfErrors(Expr *fnExpr, Expr *argExpr,
                                       CalleeCandidateInfo &CCI,
                                       ArrayRef<Identifier> argLabels,
                                       ConstraintSystem &CS) {
  // If candidate list is empty it means that problem is somewhere else,
  // since we need to have candidates which might be shadowing other funcs.
  if (CCI.empty() || !CCI[0].getDecl())
    return false;

  auto &TC = CS.TC;
  // Call expression is formed as 'foo.bar' where 'foo' might be an
  // implicit "Self" reference, such use wouldn't provide good diagnostics
  // for situations where instance members have equal names to functions in
  // Swift Standard Library e.g. min/max.
  auto UDE = dyn_cast<UnresolvedDotExpr>(fnExpr);
  if (!UDE)
    return false;

  auto baseExpr = dyn_cast<DeclRefExpr>(UDE->getBase());
  if (!baseExpr)
    return false;

  auto baseDecl = baseExpr->getDecl();
  if (!baseExpr->isImplicit() || baseDecl->getFullName() != TC.Context.Id_self)
    return false;

  // Our base expression is an implicit 'self.' reference e.g.
  //
  // extension Sequence {
  //   func test() -> Int {
  //     return max(1, 2)
  //   }
  // }
  //
  // In this example the Sequence class already has two methods named 'max'
  // none of which accept two arguments, but there is a function in
  // Swift Standard Library called 'max' which does accept two arguments,
  // so user might have called that by mistake without realizing that
  // compiler would add implicit 'self.' prefix to the call of 'max'.
  ExprCleaner cleanup(argExpr);

  auto argType = CS.getType(argExpr);
  // If argument wasn't properly type-checked, let's retry without changing AST.
  if (!argType || argType->hasUnresolvedType() || argType->hasTypeVariable() ||
      argType->hasTypeParameter()) {
    auto *argTuple = dyn_cast<TupleExpr>(argExpr);
    if (!argTuple) {
      // Bail out if we don't have a well-formed argument list.
      return false;
    }

    // Let's type check individual argument expressions without any
    // contextual information to try to recover an argument type that
    // matches what the user actually wrote instead of what the typechecker
    // expects.
    SmallVector<TupleTypeElt, 4> elts;
    for (unsigned i = 0, e = argTuple->getNumElements(); i < e; ++i) {
      ConcreteDeclRef ref = nullptr;
      auto *el = argTuple->getElement(i);
      auto typeResult =
          TC.getTypeOfExpressionWithoutApplying(el, CS.DC, ref);
      if (!typeResult)
        return false;
      auto flags = ParameterTypeFlags().withInOut(typeResult->is<InOutType>());
      elts.push_back(TupleTypeElt(typeResult->getInOutObjectType(),
                                  argTuple->getElementName(i),
                                  flags));
    }

    argType = TupleType::get(elts, CS.getASTContext());
  }

  auto typeKind = argType->getKind();
  if (typeKind != TypeKind::Tuple && typeKind != TypeKind::Paren)
    return false;

  // If argument type couldn't be properly resolved or has errors,
  // we can't diagnose anything in here, it points to the different problem.
  if (isUnresolvedOrTypeVarType(argType) || argType->hasError())
    return false;

  auto context = CS.DC;
  using CandidateMap =
      llvm::SmallDenseMap<ValueDecl *, llvm::SmallVector<OverloadChoice, 2>>;

  // Describe the kind of the context a shadowing candidate lives in
  // (extension, nominal kind, or module for global scope).
  auto getBaseKind = [](ValueDecl *base) -> DescriptiveDeclKind {
    DescriptiveDeclKind kind = DescriptiveDeclKind::Module;
    if (!base)
      return kind;

    auto context = base->getDeclContext();
    do {
      if (isa<ExtensionDecl>(context))
        return DescriptiveDeclKind::Extension;

      if (auto nominal = dyn_cast<NominalTypeDecl>(context)) {
        kind = nominal->getDescriptiveKind();
        break;
      }

      context = context->getParent();
    } while (context);

    return kind;
  };

  // Name used to qualify the fix-it (nominal type name or module name).
  auto getBaseName = [](DeclContext *context) -> DeclName {
    if (auto generic =
            context->getAsNominalTypeOrNominalTypeExtensionContext()) {
      return generic->getName();
    } else if (context->isModuleScopeContext())
      return context->getParentModule()->getName();
    else
      llvm_unreachable("Unsupported base");
  };

  // Try to diagnose the set of shadowing candidates sharing 'base';
  // returns true only if a "shadows global function" diagnostic fits.
  auto diagnoseShadowing = [&](ValueDecl *base,
                               ArrayRef<OverloadChoice> candidates) -> bool {
    CalleeCandidateInfo calleeInfo(base ? base->getInterfaceType() : nullptr,
                                   candidates, CCI.hasTrailingClosure, CS,
                                   base);

    calleeInfo.filterList(argType, argLabels);
    auto diagnostic = diag::member_shadows_global_function_near_match;
    switch (calleeInfo.closeness) {
    case CC_Unavailable:
    case CC_Inaccessible:
    case CC_SelfMismatch:
    case CC_ArgumentLabelMismatch:
    case CC_ArgumentCountMismatch:
    case CC_GeneralMismatch:
      return false;

    case CC_NonLValueInOut:
    case CC_OneArgumentNearMismatch:
    case CC_OneArgumentMismatch:
    case CC_OneGenericArgumentNearMismatch:
    case CC_OneGenericArgumentMismatch:
    case CC_ArgumentNearMismatch:
    case CC_ArgumentMismatch:
    case CC_GenericNonsubstitutableMismatch:
      break; // Near match cases

    case CC_ExactMatch:
      diagnostic = diag::member_shadows_global_function;
      break;
    }

    auto choice = calleeInfo.candidates[0].getDecl();
    auto baseKind = getBaseKind(base);
    auto baseName = getBaseName(choice->getDeclContext());

    auto origCandidate = CCI[0].getDecl();
    TC.diagnose(UDE->getLoc(), diagnostic, UDE->getName(),
                origCandidate->getDescriptiveKind(),
                origCandidate->getFullName(), choice->getDescriptiveKind(),
                choice->getFullName(), baseKind, baseName);

    auto topLevelDiag = diag::fix_unqualified_access_top_level;
    if (baseKind == DescriptiveDeclKind::Module)
      topLevelDiag = diag::fix_unqualified_access_top_level_multi;

    // Offer a fix-it which qualifies the call, e.g. 'Swift.max(...)'.
    auto name = baseName.getBaseIdentifier();
    SmallString<32> namePlusDot = name.str();
    namePlusDot.push_back('.');

    TC.diagnose(UDE->getLoc(), topLevelDiag, namePlusDot,
                choice->getDescriptiveKind(), name)
        .fixItInsert(UDE->getStartLoc(), namePlusDot);

    for (auto &candidate : calleeInfo.candidates) {
      if (auto decl = candidate.getDecl())
        TC.diagnose(decl, diag::decl_declared_here, decl->getFullName());
    }

    return true;
  };

  // For each of the parent contexts, let's try to find any candidates
  // which have the same name and the same number of arguments as callee.
  while (context->getParent()) {
    auto result = TC.lookupUnqualified(context, UDE->getName(), UDE->getLoc());
    context = context->getParent();

    if (!result || result.empty())
      continue;

    CandidateMap candidates;
    for (const auto &candidate : result) {
      auto base = candidate.getBaseDecl();
      auto decl = candidate.getValueDecl();
      if ((base && base->isInvalid()) || decl->isInvalid())
        continue;

      // If base is present but it doesn't represent a valid nominal,
      // we can't use current candidate as one of the choices.
      if (base && !base->getInterfaceType()->getNominalOrBoundGenericNominal())
        continue;

      auto context = decl->getDeclContext();
      // We are only interested in static or global functions, because
      // there is no way to call anything else properly.
      if (!decl->isStatic() && !context->isModuleScopeContext())
        continue;

      OverloadChoice choice(base ? base->getInterfaceType() : nullptr,
                            decl, UDE->getFunctionRefKind());

      if (base) { // Let's group all of the candidates have a common base.
        candidates[base].push_back(choice);
        continue;
      }

      // If there is no base, it means this is one of the global functions,
      // let's try to diagnose its shadowing inline.
      if (diagnoseShadowing(base, choice))
        return true;
    }

    if (candidates.empty())
      continue;

    for (const auto &candidate : candidates) {
      if (diagnoseShadowing(candidate.getFirst(), candidate.getSecond()))
        return true;
    }
  }

  return false;
}
// It is a somewhat common error to try to access an instance method as a
// curried member on the type, instead of using an instance, e.g. the user
// wrote:
//
//   Foo.doThing(42, b: 19)
//
// instead of:
//
//   myFoo.doThing(42, b: 19)
//
// Check for this situation and handle it gracefully.
//
// Returns true if a diagnostic was emitted.
static bool
diagnoseInstanceMethodAsCurriedMemberOnType(CalleeCandidateInfo &CCI,
                                            Expr *fnExpr, Expr *argExpr) {
  // First make sure every candidate is plausibly an instance method being
  // referenced on the type; bail out (returning false) otherwise.
  for (auto &candidate : CCI.candidates) {
    auto argTy = candidate.getArgumentType();
    if (!argTy)
      return false;

    auto *decl = candidate.getDecl();
    if (!decl)
      return false;

    // If this is an exact match at the level 1 of the parameters, but
    // there is still something wrong with the expression nevertheless
    // it might be worth while to check if it's instance method as curried
    // member of type problem.
    if (CCI.closeness == CC_ExactMatch &&
        (decl->isInstanceMember() && candidate.level == 1))
      continue;

    auto params = candidate.getUncurriedFunctionType()->getParams();
    SmallVector<bool, 4> defaultMap;
    computeDefaultMap(argTy, decl, candidate.level, defaultMap);

    // If one of the candidates is an instance method with a single parameter
    // at the level 0, this might be viable situation for calling instance
    // method as curried member of type problem.
    if (params.size() != 1 || !decl->isInstanceMember() || candidate.level > 0)
      return false;
  }

  auto &TC = CCI.CS.TC;

  if (auto UDE = dyn_cast<UnresolvedDotExpr>(fnExpr)) {
    auto baseExpr = UDE->getBase();
    auto baseType = CCI.CS.getType(baseExpr);
    if (auto *MT = baseType->getAs<MetatypeType>()) {
      auto DC = CCI.CS.DC;
      auto instanceType = MT->getInstanceType();

      // If the base is an implicit self type reference, and we're in a
      // an initializer, then the user wrote something like:
      //
      //   class Foo { let val = initFn() }
      // or
      //   class Bar { func something(x: Int = initFn()) }
      //
      // which runs in type context, not instance context.  Produce a tailored
      // diagnostic since this comes up and is otherwise non-obvious what is
      // going on.
      if (baseExpr->isImplicit() && isa<Initializer>(DC)) {
        auto *TypeDC = DC->getParent();
        bool propertyInitializer = true;
        // If the parent context is not a type context, we expect it
        // to be a defaulted parameter in a function declaration.
        if (!TypeDC->isTypeContext()) {
          assert(TypeDC->getContextKind() ==
                     DeclContextKind::AbstractFunctionDecl &&
                 "Expected function decl context for initializer!");
          TypeDC = TypeDC->getParent();
          propertyInitializer = false;
        }
        assert(TypeDC->isTypeContext() && "Expected type decl context!");

        // Only diagnose if the initializer belongs to the same nominal type
        // whose instance member is being referenced.
        if (TypeDC->getAsNominalTypeOrNominalTypeExtensionContext() ==
            instanceType->getAnyNominal()) {
          if (propertyInitializer)
            TC.diagnose(UDE->getLoc(), diag::instance_member_in_initializer,
                        UDE->getName());
          else
            TC.diagnose(UDE->getLoc(),
                        diag::instance_member_in_default_parameter,
                        UDE->getName());
          return true;
        }
      }

      // If this is a situation like this `self.foo(A())()` and self != A
      // let's say that `self` is not convertible to A.
      if (auto nominalType = CCI.CS.getType(argExpr)->getAs<NominalType>()) {
        if (!instanceType->isEqual(nominalType)) {
          TC.diagnose(argExpr->getStartLoc(), diag::types_not_convertible,
                      false, nominalType, instanceType);
          return true;
        }
      }

      // Otherwise, complain about use of instance value on type.
      if (isa<TypeExpr>(baseExpr)) {
        TC.diagnose(UDE->getLoc(), diag::instance_member_use_on_type,
                    instanceType, UDE->getName())
            .highlight(baseExpr->getSourceRange());
      } else {
        TC.diagnose(UDE->getLoc(), diag::could_not_use_instance_member_on_type,
                    instanceType, UDE->getName(), instanceType, false)
            .highlight(baseExpr->getSourceRange());
      }
      return true;
    }
  }

  return false;
}
/// Diagnose an attempt to pass N separate arguments to a function whose
/// single parameter is an N-element tuple ("tuple splat").  Recurses one
/// level into function-typed parameters/arguments.  Returns true if a
/// diagnostic was emitted.
static bool diagnoseTupleParameterMismatch(CalleeCandidateInfo &CCI,
                                           Type paramType, Type argType,
                                           Expr *fnExpr, Expr *argExpr,
                                           bool isTopLevel = true) {
  // Try to diagnose function call tuple parameter splat only if
  // there is no trailing or argument closure, because
  // FailureDiagnosis::visitClosureExpr will produce better
  // diagnostic and fix-it for trailing closure case.
  if (isTopLevel) {
    if (CCI.hasTrailingClosure)
      return false;

    if (auto *parenExpr = dyn_cast<ParenExpr>(argExpr)) {
      if (isa<ClosureExpr>(parenExpr->getSubExpr()))
        return false;
    }
  }

  if (auto *paramFnType = paramType->getAs<AnyFunctionType>()) {
    // Only if both of the parameter and argument types are functions
    // let's recur into diagnosing their arguments.
    if (auto *argFnType = argType->getAs<AnyFunctionType>())
      return diagnoseTupleParameterMismatch(CCI, paramFnType->getInput(),
                                            argFnType->getInput(), fnExpr,
                                            argExpr, /* isTopLevel */ false);
    return false;
  }

  unsigned parameterCount = 1, argumentCount = 1;

  // Don't try to desugar ParenType which is going to result in incorrect
  // inferred argument/parameter count.
  if (auto *paramTypeTy = dyn_cast<TupleType>(paramType.getPointer()))
    parameterCount = paramTypeTy->getNumElements();

  if (auto *argTupleTy = dyn_cast<TupleType>(argType.getPointer()))
    argumentCount = argTupleTy->getNumElements();

  if (parameterCount == 1 && argumentCount > 1) {
    // Let's see if inferred argument is actually a tuple inside of Paren.
    auto *paramTupleTy = paramType->getAs<TupleType>();
    if (!paramTupleTy)
      return false;

    // Looks like the number of tuple elements matches number
    // of function arguments, which means we can emit an
    // error about an attempt to make use of tuple splat or tuple
    // destructuring, unfortunately we can't provide a fix-it for
    // this case.
    if (paramTupleTy->getNumElements() == argumentCount) {
      auto &TC = CCI.CS.TC;
      if (isTopLevel) {
        if (auto *decl = CCI[0].getDecl()) {
          Identifier name;
          auto kind = decl->getDescriptiveKind();
          // Constructors/destructors and subscripts don't really have names.
          if (!(isa<ConstructorDecl>(decl) || isa<DestructorDecl>(decl) ||
                isa<SubscriptDecl>(decl))) {
            name = decl->getBaseName().getIdentifier();
          }

          // Fix-it: wrap the arguments in an extra set of parens to form
          // a single tuple argument.
          TC.diagnose(argExpr->getLoc(), diag::single_tuple_parameter_mismatch,
                      kind, name, paramType, !name.empty())
              .highlight(argExpr->getSourceRange())
              .fixItInsertAfter(argExpr->getStartLoc(), "(")
              .fixItInsert(argExpr->getEndLoc(), ")");
        } else {
          TC.diagnose(argExpr->getLoc(),
                      diag::unknown_single_tuple_parameter_mismatch, paramType)
              .highlight(argExpr->getSourceRange())
              .fixItInsertAfter(argExpr->getStartLoc(), "(")
              .fixItInsert(argExpr->getEndLoc(), ")");
        }
      } else {
        // Nested (non-top-level) case: no fix-it is possible.
        TC.diagnose(argExpr->getLoc(),
                    diag::nested_tuple_parameter_destructuring, paramType,
                    CCI.CS.getType(fnExpr));
      }
      return true;
    }
  }

  return false;
}
/// MatchCallArgumentListener which emits user-facing diagnostics (with
/// fix-its where possible) for structural argument-list problems against a
/// single candidate: extra arguments, missing arguments, missing labels and
/// out-of-order arguments.  Drive it via diagnose().
///
/// NOTE(review): fixed mis-encoded '&params' / '&param' ("¶ms"/"¶m") in the
/// constructor signature and in missingArgument(); no other code change.
class ArgumentMatcher : public MatchCallArgumentListener {
  TypeChecker &TC;
  Expr *FnExpr;
  Expr *ArgExpr;
  ArrayRef<AnyFunctionType::Param> &Parameters;
  SmallVectorImpl<bool> &DefaultMap;
  SmallVectorImpl<AnyFunctionType::Param> &Arguments;

  CalleeCandidateInfo CandidateInfo;

  // Indicates if problem has been found and diagnostic was emitted.
  bool Diagnosed = false;
  // Indicates if functions we are trying to call is a subscript.
  bool IsSubscript;

  // Stores parameter bindings determined by call to matchCallArguments.
  SmallVector<ParamBinding, 4> Bindings;

public:
  ArgumentMatcher(Expr *fnExpr, Expr *argExpr,
                  ArrayRef<AnyFunctionType::Param> &params,
                  SmallVectorImpl<bool> &defaultMap,
                  SmallVectorImpl<AnyFunctionType::Param> &args,
                  CalleeCandidateInfo &CCI, bool isSubscript)
      : TC(CCI.CS.TC), FnExpr(fnExpr), ArgExpr(argExpr), Parameters(params),
        DefaultMap(defaultMap), Arguments(args), CandidateInfo(CCI),
        IsSubscript(isSubscript) {}

  // Called for an argument that matched no parameter.
  void extraArgument(unsigned extraArgIdx) override {
    auto name = Arguments[extraArgIdx].getLabel();
    Expr *arg = ArgExpr;

    auto tuple = dyn_cast<TupleExpr>(ArgExpr);
    if (tuple)
      arg = tuple->getElement(extraArgIdx);

    auto loc = arg->getLoc();
    if (tuple && extraArgIdx == tuple->getNumElements() - 1 &&
        tuple->hasTrailingClosure())
      TC.diagnose(loc, diag::extra_trailing_closure_in_call)
          .highlight(arg->getSourceRange());
    else if (Parameters.empty()) {
      // Callee takes no arguments at all; offer to delete a sole Void
      // argument, otherwise just highlight the argument list.
      auto Paren = dyn_cast<ParenExpr>(ArgExpr);
      Expr *SubExpr = nullptr;
      if (Paren) {
        SubExpr = Paren->getSubExpr();
      }

      if (SubExpr && CandidateInfo.CS.getType(SubExpr) &&
          CandidateInfo.CS.getType(SubExpr)->isVoid()) {
        TC.diagnose(loc, diag::extra_argument_to_nullary_call)
            .fixItRemove(SubExpr->getSourceRange());
      } else {
        TC.diagnose(loc, diag::extra_argument_to_nullary_call)
            .highlight(ArgExpr->getSourceRange());
      }
    } else if (name.empty())
      TC.diagnose(loc, diag::extra_argument_positional)
          .highlight(arg->getSourceRange());
    else
      TC.diagnose(loc, diag::extra_argument_named, name)
          .highlight(arg->getSourceRange());

    Diagnosed = true;
  }

  // Called for a parameter with no default that received no argument;
  // emits the diagnostic and a "<#placeholder#>" insertion fix-it.
  void missingArgument(unsigned missingParamIdx) override {
    auto &param = Parameters[missingParamIdx];
    Identifier name = param.getLabel();

    // Search insertion index.
    unsigned argIdx = 0;
    for (int Idx = missingParamIdx - 1; Idx >= 0; --Idx) {
      if (Bindings[Idx].empty())
        continue;
      argIdx = Bindings[Idx].back() + 1;
      break;
    }

    unsigned insertableEndIdx = Arguments.size();
    if (CandidateInfo.hasTrailingClosure)
      insertableEndIdx -= 1;

    // Build argument string for fix-it.
    SmallString<32> insertBuf;
    llvm::raw_svector_ostream insertText(insertBuf);

    if (argIdx != 0)
      insertText << ", ";
    if (!name.empty())
      insertText << name.str() << ": ";
    Type Ty = param.getType();
    // Explode inout type.
    if (param.isInOut()) {
      insertText << "&";
      Ty = param.getType()->getInOutObjectType();
    }
    // @autoclosure; the type should be the result type.
    if (auto FT = param.getType()->getAs<AnyFunctionType>())
      if (FT->isAutoClosure())
        Ty = FT->getResult();
    insertText << "<#" << Ty << "#>";
    if (argIdx == 0 && insertableEndIdx != 0)
      insertText << ", ";

    SourceLoc insertLoc;
    if (argIdx > insertableEndIdx) {
      // Unreachable for now.
      // FIXME: matchCallArguments() doesn't detect "missing argument after
      // trailing closure". E.g.
      //   func fn(x: Int, y: () -> Int, z: Int) { ... }
      //   fn(x: 1) { return 1 }
      // is diagnosed as "missing argument for 'y'" (missingParamIdx 1).
      // It should be "missing argument for 'z'" (missingParamIdx 2).
    } else if (auto *TE = dyn_cast<TupleExpr>(ArgExpr)) {
      // fn():
      //   fn([argMissing])
      // fn(argX, argY):
      //   fn([argMissing, ]argX, argY)
      //   fn(argX[, argMissing], argY)
      //   fn(argX, argY[, argMissing])
      // fn(argX) { closure }:
      //   fn([argMissing, ]argX) { closure }
      //   fn(argX[, argMissing]) { closure }
      //   fn(argX[, closureLabel: ]{closure}[, argMissing)] // Not impl.
      if (insertableEndIdx == 0)
        insertLoc = TE->getRParenLoc();
      else if (argIdx != 0)
        insertLoc = Lexer::getLocForEndOfToken(
            TC.Context.SourceMgr, TE->getElement(argIdx - 1)->getEndLoc());
      else {
        insertLoc = TE->getElementNameLoc(0);
        if (insertLoc.isInvalid())
          insertLoc = TE->getElement(0)->getStartLoc();
      }
    } else if (auto *PE = dyn_cast<ParenExpr>(ArgExpr)) {
      assert(argIdx <= 1);
      if (PE->getRParenLoc().isValid()) {
        // fn(argX):
        //   fn([argMissing, ]argX)
        //   fn(argX[, argMissing])
        // fn() { closure }:
        //   fn([argMissing]) {closure}
        //   fn([closureLabel: ]{closure}[, argMissing]) // Not impl.
        if (insertableEndIdx == 0)
          insertLoc = PE->getRParenLoc();
        else if (argIdx == 0)
          insertLoc = PE->getSubExpr()->getStartLoc();
        else
          insertLoc = Lexer::getLocForEndOfToken(TC.Context.SourceMgr,
                                                 PE->getSubExpr()->getEndLoc());
      } else {
        // fn { closure }:
        //   fn[(argMissing)] { closure }
        //   fn[(closureLabel:] { closure }[, missingArg)] // Not impl.
        assert(!IsSubscript && "bracket less subscript");
        assert(PE->hasTrailingClosure() &&
               "paren less ParenExpr without trailing closure");
        insertBuf.insert(insertBuf.begin(), '(');
        insertBuf.insert(insertBuf.end(), ')');
        insertLoc = Lexer::getLocForEndOfToken(TC.Context.SourceMgr,
                                               FnExpr->getEndLoc());
      }
    } else {
      auto &CS = CandidateInfo.CS;
      (void)CS;
      // FIXME: Due to a quirk of CSApply, we can end up without a
      // ParenExpr if the argument has an '@lvalue TupleType'.
      assert((isa<TupleType>(CS.getType(ArgExpr).getPointer()) ||
              isa<ParenType>(CS.getType(ArgExpr).getPointer())) &&
             "unexpected argument expression type");
      insertLoc = ArgExpr->getLoc();

      // Can't be TupleShuffleExpr because this argExpr is not yet resolved.
    }

    assert(insertLoc.isValid() && "missing argument after trailing closure?");

    if (name.empty())
      TC.diagnose(insertLoc, diag::missing_argument_positional,
                  missingParamIdx + 1)
          .fixItInsert(insertLoc, insertText.str());
    else
      TC.diagnose(insertLoc, diag::missing_argument_named, name)
          .fixItInsert(insertLoc, insertText.str());

    auto candidate = CandidateInfo[0];
    if (candidate.getDecl())
      TC.diagnose(candidate.getDecl(), diag::decl_declared_here,
                  candidate.getDecl()->getFullName());

    Diagnosed = true;
  }

  // Called for an argument that should have carried a label but didn't.
  void missingLabel(unsigned paramIdx) override {
    auto tuple = cast<TupleExpr>(ArgExpr);
    TC.diagnose(tuple->getElement(paramIdx)->getStartLoc(),
                diag::missing_argument_labels, false,
                Parameters[paramIdx].getLabel().str(), IsSubscript);

    Diagnosed = true;
  }

  // Called when a labeled argument appears after one it should precede;
  // emits a diagnostic plus a fix-it which moves the argument text.
  void outOfOrderArgument(unsigned argIdx, unsigned prevArgIdx) override {
    auto tuple = cast<TupleExpr>(ArgExpr);
    Identifier first = tuple->getElementName(argIdx);
    Identifier second = tuple->getElementName(prevArgIdx);

    // Build a mapping from arguments to parameters.
    SmallVector<unsigned, 4> argBindings(tuple->getNumElements());
    for (unsigned paramIdx = 0; paramIdx != Bindings.size(); ++paramIdx) {
      for (auto argIdx : Bindings[paramIdx])
        argBindings[argIdx] = paramIdx;
    }

    auto argRange = [&](unsigned argIdx, Identifier label) -> SourceRange {
      auto range = tuple->getElement(argIdx)->getSourceRange();
      if (!label.empty())
        range.Start = tuple->getElementNameLoc(argIdx);

      unsigned paramIdx = argBindings[argIdx];
      if (Bindings[paramIdx].size() > 1)
        range.End = tuple->getElement(Bindings[paramIdx].back())->getEndLoc();

      return range;
    };

    auto firstRange = argRange(argIdx, first);
    auto secondRange = argRange(prevArgIdx, second);

    SourceLoc diagLoc = firstRange.Start;

    auto addFixIts = [&](InFlightDiagnostic diag) {
      diag.highlight(firstRange).highlight(secondRange);

      // Move the misplaced argument by removing it from one location and
      // inserting it in another location. To maintain argument comma
      // separation, since the argument is always moving to an earlier index
      // the preceding comma and whitespace is removed and a new trailing
      // comma and space is inserted with the moved argument.
      auto &SM = TC.Context.SourceMgr;
      auto text = SM.extractText(
          Lexer::getCharSourceRangeFromSourceRange(SM, firstRange));

      auto removalRange =
          SourceRange(Lexer::getLocForEndOfToken(
                          SM, tuple->getElement(argIdx - 1)->getEndLoc()),
                      firstRange.End);
      diag.fixItRemove(removalRange);
      diag.fixItInsert(secondRange.Start, text.str() + ", ");
    };

    // There are 4 diagnostic messages variations depending on
    // labeled/unlabeled arguments.
    if (first.empty() && second.empty()) {
      addFixIts(TC.diagnose(diagLoc,
                            diag::argument_out_of_order_unnamed_unnamed,
                            argIdx + 1, prevArgIdx + 1));
    } else if (first.empty() && !second.empty()) {
      addFixIts(TC.diagnose(diagLoc, diag::argument_out_of_order_unnamed_named,
                            argIdx + 1, second));
    } else if (!first.empty() && second.empty()) {
      addFixIts(TC.diagnose(diagLoc, diag::argument_out_of_order_named_unnamed,
                            first, prevArgIdx + 1));
    } else {
      addFixIts(TC.diagnose(diagLoc, diag::argument_out_of_order_named_named,
                            first, second));
    }

    Diagnosed = true;
  }

  bool relabelArguments(ArrayRef<Identifier> newNames) override {
    assert(!newNames.empty() && "No arguments were re-labeled");

    // Let's diagnose labeling problem but only related to corrected ones.
    if (diagnoseArgumentLabelError(TC, ArgExpr, newNames, IsSubscript))
      Diagnosed = true;

    return true;
  }

  // Run the matcher; returns true if a diagnostic was emitted via one of
  // the listener callbacks above.
  bool diagnose() {
    // Use matchCallArguments to determine how close the argument list is (in
    // shape) to the specified candidates parameters.  This ignores the
    // concrete types of the arguments, looking only at the argument labels.
    matchCallArguments(Arguments, Parameters, DefaultMap,
                       CandidateInfo.hasTrailingClosure,
                       /*allowFixes:*/ true, *this, Bindings);

    return Diagnosed;
  }
};
/// Emit a class of diagnostics that we only know how to generate when
/// there is exactly one candidate we know about.  Return true if an error
/// is emitted.
static bool
diagnoseSingleCandidateFailures(CalleeCandidateInfo &CCI, Expr *fnExpr,
                                Expr *argExpr,
                                ArrayRef<Identifier> argLabels) {
  // We only handle the situation where there is exactly one candidate
  // here.
  if (CCI.size() != 1)
    return false;

  auto candidate = CCI[0];
  auto &TC = CCI.CS.TC;

  auto argTy = candidate.getArgumentType();
  if (!argTy)
    return false;

  auto params = candidate.getUncurriedFunctionType()->getParams();
  SmallVector<bool, 4> defaultMap;
  computeDefaultMap(argTy, candidate.getDecl(), candidate.level,
                    defaultMap);
  auto args = decomposeArgType(CCI.CS.getType(argExpr), argLabels);

  // Check the case where a raw-representable type is constructed from an
  // argument with the same type:
  //
  //    MyEnumType(MyEnumType.foo)
  //
  // This is missing 'rawValue:' label, but a better fix is to just remove
  // the unnecessary constructor call:
  //
  //    MyEnumType.foo
  //
  if (params.size() == 1 && args.size() == 1 && candidate.getDecl() &&
      isa<ConstructorDecl>(candidate.getDecl()) && candidate.level == 1) {
    AnyFunctionType::Param &arg = args[0];
    auto resTy =
        candidate.getResultType()->lookThroughAllAnyOptionalTypes();
    auto rawTy = isRawRepresentable(resTy, CCI.CS);
    if (rawTy && arg.getType() && resTy->isEqual(arg.getType())) {
      // Unwrap a single layer of parens to find the argument expression
      // the fix-it should preserve.
      auto getInnerExpr = [](Expr *E) -> Expr * {
        auto *parenE = dyn_cast<ParenExpr>(E);
        if (!parenE)
          return nullptr;
        return parenE->getSubExpr();
      };
      Expr *innerE = getInnerExpr(argExpr);

      InFlightDiagnostic diag = TC.diagnose(
          fnExpr->getLoc(),
          diag::invalid_initialization_parameter_same_type, resTy);
      diag.highlight((innerE ? innerE : argExpr)->getSourceRange());
      if (innerE) {
        // Remove the unnecessary constructor call.
        diag.fixItRemoveChars(fnExpr->getLoc(), innerE->getStartLoc())
            .fixItRemove(argExpr->getEndLoc());
      }
      return true;
    }
  }

  if (diagnoseTupleParameterMismatch(CCI, candidate.getArgumentType(),
                                     CCI.CS.getType(argExpr), fnExpr,
                                     argExpr))
    return true;

  // We only handle structural errors here.
  if (CCI.closeness != CC_ArgumentLabelMismatch &&
      CCI.closeness != CC_ArgumentCountMismatch)
    return false;

  // If we have a single candidate that failed to match the argument list,
  // attempt to use matchCallArguments to diagnose the problem.
  return ArgumentMatcher(fnExpr, argExpr, params, defaultMap, args, CCI,
                         isa<SubscriptExpr>(fnExpr))
      .diagnose();
}
/// Determine whether a conversion between the two given types looks like a
/// mismatch between a raw-representable type and its underlying raw value
/// type (in either direction) for the protocol described by \p kind.
static bool isRawRepresentableMismatch(Type fromType, Type toType,
                                       KnownProtocolKind kind,
                                       const ConstraintSystem &CS) {
  // Optionality is irrelevant to this classification.
  auto srcTy = fromType->lookThroughAllAnyOptionalTypes();
  auto dstTy = toType->lookThroughAllAnyOptionalTypes();

  // Attempt to convert from a raw value to a raw-representable type
  // (e.g. Int -> MyEnum).
  if (conformsToKnownProtocol(srcTy, kind, CS) &&
      isRawRepresentable(dstTy, kind, CS))
    return true;

  // Attempt to convert from a raw-representable type to its raw value
  // (e.g. MyEnum -> Int).
  if (isRawRepresentable(srcTy, kind, CS) &&
      conformsToKnownProtocol(dstTy, kind, CS))
    return true;

  return false;
}
/// Diagnose an argument mismatch where the argument is a raw-representable
/// type and the parameter expects its raw value type (or vice versa), and
/// attach the appropriate `rawValue:`/initializer fix-its.
///
/// Returns true if a diagnostic was emitted.
static bool diagnoseRawRepresentableMismatch(CalleeCandidateInfo &CCI,
                                             Expr *argExpr,
                                             ArrayRef<Identifier> argLabels) {
  // We are only interested in cases which are
  // unrelated to argument count or label mismatches.
  switch (CCI.closeness) {
  case CC_OneArgumentNearMismatch:
  case CC_OneArgumentMismatch:
  case CC_OneGenericArgumentNearMismatch:
  case CC_OneGenericArgumentMismatch:
  case CC_ArgumentNearMismatch:
  case CC_ArgumentMismatch:
    break;

  default:
    return false;
  }

  auto argType = CCI.CS.getType(argExpr);
  if (!argType || argType->hasTypeVariable() || argType->hasUnresolvedType())
    return false;

  // Use an array with static storage duration here. Binding an ArrayRef
  // directly to a braced initializer list would leave it referencing the
  // initializer_list's temporary backing array, which is destroyed at the
  // end of the declaration statement (a known ArrayRef lifetime pitfall).
  static const KnownProtocolKind rawRepresentableProtocols[] = {
      KnownProtocolKind::ExpressibleByStringLiteral,
      KnownProtocolKind::ExpressibleByIntegerLiteral};

  const auto &CS = CCI.CS;
  auto arguments = decomposeArgType(argType, argLabels);
  auto *tupleArgs = dyn_cast<TupleExpr>(argExpr);

  for (auto &candidate : CCI.candidates) {
    auto *decl = candidate.getDecl();
    if (!decl)
      continue;

    auto parameters = candidate.getUncurriedFunctionType()->getParams();
    SmallVector<bool, 4> defaultMap;
    computeDefaultMap(candidate.getArgumentType(), decl,
                      candidate.level, defaultMap);
    // Only consider candidates whose arity matches the call site.
    if (parameters.size() != arguments.size())
      continue;

    for (unsigned i = 0, n = parameters.size(); i != n; ++i) {
      auto paramType = parameters[i].getType();
      // Renamed from `argType` to avoid shadowing the whole-argument type
      // established above.
      auto elementType = arguments[i].getType();

      for (auto kind : rawRepresentableProtocols) {
        // If trying to convert from raw type to raw representable,
        // or vice versa from raw representable (e.g. enum) to raw type.
        if (!isRawRepresentableMismatch(elementType, paramType, kind, CS))
          continue;

        // Point the diagnostic at the specific tuple element if possible.
        auto *expr = argExpr;
        if (tupleArgs)
          expr = tupleArgs->getElement(i);

        auto diag =
            CS.TC.diagnose(expr->getLoc(), diag::cannot_convert_argument_value,
                           elementType, paramType);
        tryRawRepresentableFixIts(diag, CS, elementType, paramType, kind, expr);
        return true;
      }
    }
  }

  return false;
}
/// Extract the sub-expression corresponding to the failed argument number
/// recorded in \p CCI.
///
/// Pass CCI by const reference: the original took it by value, which copied
/// the whole candidate-info structure (including its candidate vector) for a
/// read-only member access.
static Expr *getFailedArgumentExpr(const CalleeCandidateInfo &CCI,
                                   Expr *argExpr) {
  if (auto *TE = dyn_cast<TupleExpr>(argExpr))
    return TE->getElement(CCI.failedArgument.argumentNumber);
  else if (auto *PE = dyn_cast<ParenExpr>(argExpr)) {
    // A parenthesized argument list has exactly one element.
    assert(CCI.failedArgument.argumentNumber == 0 &&
           "Unexpected argument #");
    return PE->getSubExpr();
  } else {
    // A bare (non-tuple, non-paren) argument is argument #0 by definition.
    assert(CCI.failedArgument.argumentNumber == 0 &&
           "Unexpected argument #");
    return argExpr;
  }
}
/// If the candidate set has been narrowed down to a specific structural
/// problem, e.g. that there are too few parameters specified or that argument
/// labels don't match up, diagnose that error and return true.
///
/// Returns false when no tailored diagnostic applies and the caller should
/// fall back to more general diagnosis.
bool FailureDiagnosis::diagnoseParameterErrors(CalleeCandidateInfo &CCI,
                                               Expr *fnExpr, Expr *argExpr,
                                               ArrayRef<Identifier> argLabels) {
  // A metatype callee means this is an initializer call: T(...).
  if (auto *MTT = CS.getType(fnExpr)->getAs<MetatypeType>()) {
    auto instTy = MTT->getInstanceType();
    if (instTy->getAnyNominal()) {
      // If we are invoking a constructor on a nominal type and there are
      // absolutely no candidates, then they must all be private.
      if (CCI.size() == 0 || (CCI.size() == 1 && CCI.candidates[0].getDecl() &&
                              isa<ProtocolDecl>(CCI.candidates[0].getDecl()))) {
        CS.TC.diagnose(fnExpr->getLoc(), diag::no_accessible_initializers,
                       instTy);
        return true;
      }
      // continue below
    } else if (!instTy->is<TupleType>()) {
      // If we are invoking a constructor on a non-nominal type, the expression
      // is malformed.
      SourceRange initExprRange(fnExpr->getSourceRange().Start,
                                argExpr->getSourceRange().End);
      CS.TC.diagnose(fnExpr->getLoc(), instTy->isExistentialType() ?
                     diag::construct_protocol_by_name :
                     diag::non_nominal_no_initializers, instTy)
          .highlight(initExprRange);
      return true;
    }
  }

  // Try to diagnose errors related to the use of implicit self reference.
  if (diagnoseImplicitSelfErrors(fnExpr, argExpr, CCI, argLabels, CS))
    return true;

  if (diagnoseInstanceMethodAsCurriedMemberOnType(CCI, fnExpr, argExpr))
    return true;

  // Do all the stuff that we only have implemented when there is a single
  // candidate.
  if (diagnoseSingleCandidateFailures(CCI, fnExpr, argExpr, argLabels))
    return true;

  // If we have a failure where the candidate set differs on exactly one
  // argument, and where we have a consistent mismatch across the candidate set
  // (often because there is only one candidate in the set), then diagnose this
  // as a specific problem of passing something of the wrong type into a
  // parameter.
  //
  // We don't generally want to use this path to diagnose calls to
  // symmetrically-typed binary operators because it's likely that both
  // operands contributed to the type.
  if ((CCI.closeness == CC_OneArgumentMismatch ||
       CCI.closeness == CC_OneArgumentNearMismatch ||
       CCI.closeness == CC_OneGenericArgumentMismatch ||
       CCI.closeness == CC_OneGenericArgumentNearMismatch ||
       CCI.closeness == CC_GenericNonsubstitutableMismatch) &&
      CCI.failedArgument.isValid() &&
      !isSymmetricBinaryOperator(CCI)) {
    // Map the argument number into an argument expression.
    TCCOptions options = TCC_ForceRecheck;
    // An inout parameter needs the argument checked as an lvalue.
    if (CCI.failedArgument.parameterType->is<InOutType>())
      options |= TCC_AllowLValue;

    // It could be that the argument doesn't conform to an archetype.
    Expr *badArgExpr = getFailedArgumentExpr(CCI, argExpr);
    if (CCI.diagnoseGenericParameterErrors(badArgExpr))
      return true;

    // Re-type-check the argument with the expected type of the candidate set.
    // This should produce a specific and tailored diagnostic saying that the
    // type mismatches with expectations.
    // typeCheckChildIndependently returns null when it already emitted a
    // diagnostic, hence the negation here.
    Type paramType = CCI.failedArgument.parameterType;
    if (!typeCheckChildIndependently(badArgExpr, paramType,
                                     CTP_CallArgument, options))
      return true;
  }

  return false;
}
/// Diagnose failures for a subscript expression. When \p
/// inAssignmentDestination is true the subscript is the target of an
/// assignment, so non-settable subscripts are not viable and a last-resort
/// diagnostic is deferred to the assignment diagnosis.
bool FailureDiagnosis::diagnoseSubscriptErrors(SubscriptExpr *SE, bool inAssignmentDestination) {
  // Independently type-check the base; a null result means a diagnostic
  // was already emitted.
  auto baseExpr = typeCheckChildIndependently(SE->getBase());
  if (!baseExpr) return true;
  auto baseType = CS.getType(baseExpr);

  // `nil[...]` is never meaningful.
  if (isa<NilLiteralExpr>(baseExpr)) {
    diagnose(baseExpr->getLoc(), diag::cannot_subscript_nil_literal)
        .highlight(baseExpr->getSourceRange());
    return true;
  }

  // This callback runs once member lookup has produced the subscript
  // overload candidates; it returns true if a diagnostic was emitted.
  std::function<bool(ArrayRef<OverloadChoice>)> callback =
      [&](ArrayRef<OverloadChoice> candidates) -> bool {
    CalleeCandidateInfo calleeInfo(Type(), candidates, SE->hasTrailingClosure(),
                                   CS, /*selfAlreadyApplied*/ false);

    // We're about to typecheck the index list, which needs to be processed with
    // self already applied.
    for (unsigned i = 0, e = calleeInfo.size(); i != e; ++i)
      ++calleeInfo.candidates[i].level;

    auto indexExpr =
        typeCheckArgumentChildIndependently(SE->getIndex(), Type(), calleeInfo);
    if (!indexExpr)
      return true;

    // Back to analyzing the candidate list with self applied.
    for (unsigned i = 0, e = calleeInfo.size(); i != e; ++i)
      --calleeInfo.candidates[i].level;

    ArrayRef<Identifier> argLabels = SE->getArgumentLabels();
    if (diagnoseParameterErrors(calleeInfo, SE, indexExpr, argLabels))
      return true;

    auto indexType = CS.getType(indexExpr);

    auto decomposedBaseType = decomposeArgType(baseType, {Identifier()});
    auto decomposedIndexType = decomposeArgType(indexType, argLabels);
    calleeInfo.filterList(
        [&](UncurriedCandidate cand) -> CalleeCandidateInfo::ClosenessResultTy {
          // Classify how close this match is. Non-subscript decls don't match.
          auto subscriptDecl = dyn_cast_or_null<SubscriptDecl>(cand.getDecl());
          if (!subscriptDecl ||
              (inAssignmentDestination && !subscriptDecl->isSettable()))
            return {CC_GeneralMismatch, {}};

          // Check whether the self type matches.
          auto selfConstraint = CC_ExactMatch;
          if (calleeInfo.evaluateCloseness(cand, decomposedBaseType).first !=
              CC_ExactMatch)
            selfConstraint = CC_SelfMismatch;

          // Increase the uncurry level to look past the self argument to the
          // indices.
          cand.level++;

          // Explode out multi-index subscripts to find the best match.
          auto indexResult =
              calleeInfo.evaluateCloseness(cand, decomposedIndexType);
          if (selfConstraint > indexResult.first)
            return {selfConstraint, {}};
          return indexResult;
        });

    // If the closest matches all mismatch on self, we either have something
    // that cannot be subscripted, or an ambiguity.
    if (calleeInfo.closeness == CC_SelfMismatch) {
      diagnose(SE->getLoc(), diag::cannot_subscript_base, baseType)
          .highlight(SE->getBase()->getSourceRange());
      // FIXME: Should suggest overload set, but we're not ready for that until
      // it points to candidates and identifies the self type in the diagnostic.
      // calleeInfo.suggestPotentialOverloads(SE->getLoc());
      return true;
    }

    // Any other failures relate to the index list.
    for (unsigned i = 0, e = calleeInfo.size(); i != e; ++i)
      ++calleeInfo.candidates[i].level;

    // TODO: Is there any reason to check for CC_NonLValueInOut here?

    if (calleeInfo.closeness == CC_ExactMatch) {
      auto message = diag::ambiguous_subscript;

      // If there is an exact match on the argument with
      // a single candidate, let's type-check subscript
      // as a whole to figure out if there is any structural
      // problem after all.
      if (calleeInfo.size() == 1) {
        Expr *expr = SE;
        ConcreteDeclRef decl = nullptr;
        message = diag::cannot_subscript_with_index;

        // If whole-expression checking succeeds, the problem is elsewhere.
        if (CS.TC.getTypeOfExpressionWithoutApplying(expr, CS.DC, decl))
          return false;

        // If we are down to a single candidate but with an unresolved
        // index type, we can substitute in the base type to get a simpler
        // and more concrete expected type for this subscript decl, in order
        // to diagnose a better error.
        if (baseType && indexType->hasUnresolvedType()) {
          UncurriedCandidate cand = calleeInfo.candidates[0];
          auto candType = baseType->getTypeOfMember(CS.DC->getParentModule(),
                                                    cand.getDecl(), nullptr);
          // NOTE(review): getAs<FunctionType>() can return null; this deref
          // assumes a subscript member type is always a function type —
          // confirm that invariant holds for all callers.
          auto paramsType = candType->getAs<FunctionType>()->getInput();
          if (!typeCheckChildIndependently(indexExpr, paramsType,
                                           CTP_CallArgument, TCC_ForceRecheck))
            return true;
        }
      }

      diagnose(SE->getLoc(), message, baseType, indexType)
          .highlight(indexExpr->getSourceRange())
          .highlight(baseExpr->getSourceRange());

      // FIXME: suggestPotentialOverloads should do this.
      // calleeInfo.suggestPotentialOverloads(SE->getLoc());
      for (auto candidate : calleeInfo.candidates)
        if (auto decl = candidate.getDecl())
          diagnose(decl, diag::found_candidate);
        else
          diagnose(candidate.getExpr()->getLoc(), diag::found_candidate);

      return true;
    }

    if (diagnoseParameterErrors(calleeInfo, SE, indexExpr, argLabels))
      return true;

    // Diagnose some simple and common errors.
    if (calleeInfo.diagnoseSimpleErrors(SE))
      return true;

    // If we haven't found a diagnostic yet, and we are in an assignment's
    // destination, continue with diagnosing the assignment rather than giving
    // a last resort diagnostic here.
    if (inAssignmentDestination)
      return false;

    diagnose(SE->getLoc(), diag::cannot_subscript_with_index, baseType,
             indexType);

    calleeInfo.suggestPotentialOverloads(SE->getLoc());
    return true;
  };

  auto locator =
      CS.getConstraintLocator(SE, ConstraintLocator::SubscriptMember);

  return diagnoseMemberFailures(SE, baseExpr, ConstraintKind::ValueMember,
                                DeclBaseName::createSubscript(),
                                FunctionRefKind::DoubleApply, locator,
                                callback);
}
/// Visitor entry point for a subscript used as an rvalue; assignment
/// destinations call diagnoseSubscriptErrors directly with the flag set.
bool FailureDiagnosis::visitSubscriptExpr(SubscriptExpr *SE) {
  return diagnoseSubscriptErrors(SE, /* inAssignmentDestination = */ false);
}
namespace {
/// Type checking listener for pattern binding initializers.
class CalleeListener : public ExprTypeCheckListener {
  Type contextualType;

public:
  explicit CalleeListener(Type contextualType)
      : contextualType(contextualType) {}

  bool builtConstraints(ConstraintSystem &cs, Expr *expr) override {
    // Without a contextual type there is nothing to constrain.
    if (!contextualType)
      return false;

    // Obvious metatype producers (type expressions) should not be
    // constrained either.
    if (isa<TypeExpr>(expr->getValueProvidingExpr()))
      return false;

    // Model the expression as a function whose result is the contextual
    // type. We pick the most general function type — "$T0 throws ->
    // contextualType" — and relate it with a conversion constraint, which
    // admits throwing and non-throwing functions alike, as well as both
    // escaping and noescape ones.
    auto inputLocator =
        cs.getConstraintLocator(expr, ConstraintLocator::FunctionResult);
    auto inputTy = cs.createTypeVariable(inputLocator,
                                         TVO_CanBindToLValue |
                                         TVO_CanBindToInOut |
                                         TVO_PrefersSubtypeBinding);

    auto funcTy = FunctionType::get(inputTy, contextualType,
                                    FunctionType::ExtInfo().withThrows());

    // Add a conversion constraint between the types.
    cs.addConstraint(ConstraintKind::Conversion, cs.getType(expr), funcTy,
                     cs.getConstraintLocator(expr), /*isFavored*/ true);
    return false;
  }
};
} // end anonymous namespace
/// Return true if this function name is a comparison operator. This is a
/// simple heuristic used to guide comparison related diagnostics.
static bool isNameOfStandardComparisonOperator(StringRef opName) {
  // The standard equality, identity, and ordering operators.
  for (StringRef op : {"==", "!=", "===", "!==", "<", ">", "<=", ">="})
    if (opName == op)
      return true;
  return false;
}
/// Diagnose a comparison of a value against a `nil` literal, which is
/// illegal regardless of whether the value has reference or value semantics.
/// Returns true if a diagnostic was emitted.
bool FailureDiagnosis::diagnoseNilLiteralComparison(
    Expr *lhsExpr, Expr *rhsExpr, CalleeCandidateInfo &calleeInfo,
    SourceLoc applyLoc) {

  auto overloadName = calleeInfo.declName;

  // Only diagnose for comparison operators.
  if (!isNameOfStandardComparisonOperator(overloadName))
    return false;

  // Figure out which operand (if any) is the nil literal; start with the
  // assumption that it is on the right and swap if needed.
  Expr *nonNilOperand = lhsExpr;
  Expr *nilOperand = rhsExpr;
  if (!isa<NilLiteralExpr>(nilOperand->getValueProvidingExpr()))
    std::swap(nonNilOperand, nilOperand);

  // Bail if neither side is a nil literal.
  if (!isa<NilLiteralExpr>(nilOperand->getValueProvidingExpr()))
    return false;

  // Bail if both sides are a nil literal.
  if (isa<NilLiteralExpr>(nonNilOperand->getValueProvidingExpr()))
    return false;

  auto otherType = CS.getType(nonNilOperand)->getRValueType();

  // Bail if we were unable to determine the other type.
  if (isUnresolvedOrTypeVarType(otherType))
    return false;

  // Regardless of whether the type has reference or value semantics,
  // comparison with nil is illegal, albeit for different reasons spelled
  // out by the diagnosis.
  bool isIdentityComparison =
      overloadName == "!==" || overloadName == "===";
  if (otherType->getAnyOptionalObjectType() && isIdentityComparison) {
    // The user wrote a comparison with reference semantics rather than
    // value semantics; the fixit lops the extra '=' off the operator.
    auto revisedName = overloadName;
    revisedName.pop_back();
    diagnose(applyLoc,
             diag::value_type_comparison_with_nil_illegal_did_you_mean,
             otherType)
        .fixItReplace(applyLoc, revisedName);
  } else {
    diagnose(applyLoc, diag::value_type_comparison_with_nil_illegal, otherType)
        .highlight(nonNilOperand->getSourceRange());
  }

  return true;
}
/// Diagnose calls that fail because of a function attribute (e.g. `mutating`
/// applied to an rvalue base) rather than an argument mismatch, by checking
/// whether an *unviable* member candidate would have matched the arguments
/// exactly. Returns true if a diagnostic was emitted.
bool FailureDiagnosis::diagnoseMethodAttributeFailures(
    swift::ApplyExpr *callExpr, ArrayRef<Identifier> argLabels,
    bool hasTrailingClosure, CalleeCandidateInfo &candidates) {
  // Only member calls of the form `base.member(...)` are handled here.
  auto UDE = dyn_cast<UnresolvedDotExpr>(callExpr->getFn());
  if (!UDE)
    return false;

  auto argExpr = callExpr->getArg();
  auto argType = CS.getType(argExpr);

  // If type of the argument hasn't been established yet, we can't diagnose.
  if (!argType || isUnresolvedOrTypeVarType(argType))
    return false;

  // Let's filter our candidate list based on that type.
  candidates.filterList(argType, argLabels);

  // An exact match means the problem is not attribute-related.
  if (candidates.closeness == CC_ExactMatch)
    return false;

  // And if filtering didn't give an exact match, such means that problem
  // might be related to function attributes which is best diagnosed by
  // unviable member candidates, if any.
  auto base = UDE->getBase();
  auto baseType = CS.getType(base);

  // This handles following situation:
  // struct S {
  //   mutating func f(_ i: Int) {}
  //   func f(_ f: Float) {}
  // }
  //
  // Given struct has an overloaded method "f" with a single argument of
  // multiple different types, one of the overloads is marked as
  // "mutating", which means it can only be applied on LValue base type.
  // So when struct is used like this:
  //
  // let answer: Int = 42
  // S().f(answer)
  //
  // Constraint system generator is going to pick `f(_ f: Float)` as
  // only possible overload candidate because "base" of the call is immutable
  // and contextual information about argument type is not available yet.
  // Such leads to incorrect contextual conversion failure diagnostic because
  // type of the argument is going to resolved as (Int) no matter what.
  // To workaround that fact and improve diagnostic of such cases we are going
  // to try and collect all unviable candidates for a given call and check if
  // at least one of them matches established argument type before even trying
  // to re-check argument expression.
  auto results = CS.performMemberLookup(
      ConstraintKind::ValueMember, UDE->getName(), baseType,
      UDE->getFunctionRefKind(), CS.getConstraintLocator(UDE),
      /*includeInaccessibleMembers=*/false);

  if (results.UnviableCandidates.empty())
    return false;

  // Wrap the unviable lookup results in overload choices so they can be
  // scored against the argument list.
  SmallVector<OverloadChoice, 2> choices;
  for (auto &unviable : results.UnviableCandidates)
    choices.push_back(OverloadChoice(baseType, unviable.first.getDecl(),
                                     UDE->getFunctionRefKind()));

  CalleeCandidateInfo unviableCandidates(baseType, choices, hasTrailingClosure,
                                         CS);

  // Filter list of the unviable candidates based on the
  // already established type of the argument expression.
  unviableCandidates.filterList(argType, argLabels);

  // If one of the unviable candidates matches arguments exactly,
  // that means that actual problem is related to function attributes.
  if (unviableCandidates.closeness == CC_ExactMatch) {
    diagnoseUnviableLookupResults(results, baseType, base, UDE->getName(),
                                  UDE->getNameLoc(), UDE->getLoc());
    return true;
  }

  return false;
}
/// For a single exactly-matching generic candidate, substitute the concrete
/// argument types into the callee's generic signature and diagnose any
/// same-type or superclass requirement the substitution violates.
/// Returns true if a diagnostic was emitted.
bool FailureDiagnosis::diagnoseArgumentGenericRequirements(
    TypeChecker &TC, Expr *callExpr, Expr *fnExpr, Expr *argExpr,
    CalleeCandidateInfo &candidates, ArrayRef<Identifier> argLabels) {
  // This path only applies when the overload has been narrowed to a single
  // exact match.
  if (candidates.closeness != CC_ExactMatch || candidates.size() != 1)
    return false;

  // Resolve the callee declaration, either directly or via the candidate.
  AbstractFunctionDecl *AFD = nullptr;
  if (auto *DRE = dyn_cast<DeclRefExpr>(fnExpr)) {
    AFD = dyn_cast<AbstractFunctionDecl>(DRE->getDecl());
  } else if (auto *candidate = candidates[0].getDecl()) {
    AFD = dyn_cast<AbstractFunctionDecl>(candidate);
  }

  // Only generic functions with an established interface type can have
  // requirement failures to diagnose.
  if (!AFD || !AFD->getGenericSignature() || !AFD->hasInterfaceType())
    return false;

  auto env = AFD->getGenericEnvironment();
  if (!env)
    return false;

  auto const &candidate = candidates.candidates[0];
  auto params = candidate.getUncurriedFunctionType()->getParams();
  SmallVector<bool, 4> defaultMap;
  computeDefaultMap(candidate.getArgumentType(), candidate.getDecl(),
                    candidate.level, defaultMap);
  auto args = decomposeArgType(CS.getType(argExpr), argLabels);

  // Match arguments to parameters; if matching itself fails there is a
  // structural problem this routine does not handle.
  SmallVector<ParamBinding, 4> bindings;
  MatchCallArgumentListener listener;
  if (matchCallArguments(args, params, defaultMap,
                         candidates.hasTrailingClosure,
                         /*allowFixes=*/false, listener, bindings))
    return false;

  TypeSubstitutionMap substitutions;
  // First, let's collect all of the archetypes and their substitutions,
  // that's going to help later on if there are cross-archetype
  // requirements e.g. <A, B where A.Element == B.Element>.
  for (unsigned i = 0, e = bindings.size(); i != e; ++i) {
    auto param = params[i];
    auto paramType = param.getType()->getInOutObjectType();

    auto archetype = paramType->getAs<ArchetypeType>();
    if (!archetype)
      continue;

    // Bindings specify the arguments that source the parameter. The only case
    // this returns a non-singular value is when there are varargs in play.
    for (auto argNo : bindings[i]) {
      auto argType = args[argNo]
                         .getType()
                         ->getWithoutSpecifierType()
                         ->getRValueObjectType();

      if (argType->is<ArchetypeType>()) {
        diagnoseUnboundArchetype(archetype, fnExpr);
        return true;
      }

      // Unresolved or erroneous argument types cannot support a meaningful
      // requirement check.
      if (isUnresolvedOrTypeVarType(argType) || argType->hasError())
        return false;

      // Record substitution from generic parameter to the argument type.
      substitutions[archetype->getInterfaceType()->getCanonicalType()
                        ->castTo<SubstitutableType>()] = argType;
    }
  }

  if (substitutions.empty())
    return false;

  // Listener that turns an unsatisfied same-type/superclass requirement into
  // a tailored call-site diagnostic plus a note on the candidate.
  class RequirementsListener : public GenericRequirementsCheckListener {
    ConstraintSystem &CS;
    AbstractFunctionDecl *Candidate;
    TypeSubstitutionFn Substitutions;

    Expr *CallExpr;
    Expr *FnExpr;
    Expr *ArgExpr;

  public:
    RequirementsListener(ConstraintSystem &cs, AbstractFunctionDecl *AFD,
                         TypeSubstitutionFn subs,
                         Expr *callExpr, Expr *fnExpr, Expr *argExpr)
        : CS(cs), Candidate(AFD), Substitutions(subs), CallExpr(callExpr),
          FnExpr(fnExpr), ArgExpr(argExpr) {}

    bool shouldCheck(RequirementKind kind, Type first, Type second) override {
      // This means that we have encountered requirement which references
      // generic parameter not used in the arguments, we can't diagnose it here.
      return !(first->hasTypeParameter() || first->isTypeVariableOrMember());
    }

    bool diagnoseUnsatisfiedRequirement(
        const Requirement &req, Type first, Type second,
        ArrayRef<ParentConditionalConformance> parents) override {
      Diag<Type, Type, Type, Type, StringRef> note;
      switch (req.getKind()) {
      case RequirementKind::Conformance:
      case RequirementKind::Layout:
        // Conformance/layout failures are diagnosed elsewhere.
        return false;

      case RequirementKind::Superclass:
        note = diag::candidate_types_inheritance_requirement;
        break;

      case RequirementKind::SameType:
        note = diag::candidate_types_equal_requirement;
        break;
      }

      TypeChecker &TC = CS.TC;
      SmallVector<char, 8> scratch;
      auto overloadName = Candidate->getFullName().getString(scratch);

      // Pick the diagnostic shape that matches the call syntax: binary
      // operator, unary operator, or ordinary call/initializer.
      if (isa<BinaryExpr>(CallExpr) && isa<TupleExpr>(ArgExpr)) {
        auto argTuple = cast<TupleExpr>(ArgExpr);
        auto lhsExpr = argTuple->getElement(0),
             rhsExpr = argTuple->getElement(1);
        auto lhsType = CS.getType(lhsExpr)->getRValueType();
        auto rhsType = CS.getType(rhsExpr)->getRValueType();

        TC.diagnose(FnExpr->getLoc(), diag::cannot_apply_binop_to_args,
                    overloadName, lhsType, rhsType)
            .highlight(lhsExpr->getSourceRange())
            .highlight(rhsExpr->getSourceRange());
      } else if (isa<PrefixUnaryExpr>(CallExpr) ||
                 isa<PostfixUnaryExpr>(CallExpr)) {
        TC.diagnose(ArgExpr->getLoc(), diag::cannot_apply_unop_to_arg,
                    overloadName, CS.getType(ArgExpr));
      } else {
        bool isInitializer = isa<ConstructorDecl>(Candidate);
        TC.diagnose(ArgExpr->getLoc(), diag::cannot_call_with_params,
                    overloadName, getTypeListString(CS.getType(ArgExpr)),
                    isInitializer);
      }

      auto rawFirstType = req.getFirstType();
      auto rawSecondType = req.getSecondType();
      auto *genericSig = Candidate->getGenericSignature();

      TC.diagnose(Candidate, note, first, second,
                  rawFirstType, rawSecondType,
                  TypeChecker::gatherGenericParamBindingsText(
                      {rawFirstType, rawSecondType},
                      genericSig->getGenericParams(),
                      Substitutions));

      ParentConditionalConformance::diagnoseConformanceStack(
          TC.Diags, Candidate->getLoc(), parents);

      return true;
    }
  };

  auto *dc = env->getOwningDeclContext();
  auto substitutionFn = QueryTypeSubstitutionMap{substitutions};
  RequirementsListener genericReqListener(CS, AFD, substitutionFn,
                                          callExpr, fnExpr, argExpr);

  auto result = TC.checkGenericArguments(
      dc, callExpr->getLoc(), fnExpr->getLoc(), AFD->getInterfaceType(),
      env->getGenericSignature()->getGenericParams(),
      env->getGenericSignature()->getRequirements(),
      substitutionFn,
      LookUpConformanceInModule{dc->getParentModule()}, nullptr,
      ConformanceCheckFlags::SuppressDependencyTracking, &genericReqListener);

  assert(result != RequirementCheckResult::UnsatisfiedDependency);
  // Note: If result is RequirementCheckResult::SubstitutionFailure, we did
  // not emit a diagnostic, so we must return false in that case.
  return result == RequirementCheckResult::Failure;
}
/// When initializing Unsafe[Mutable]Pointer<T> from Unsafe[Mutable]RawPointer,
/// issue a diagnostic that refers to the API for binding memory to a type.
static bool isCastToTypedPointer(ConstraintSystem &CS, const Expr *Fn,
                                 const Expr *Arg) {
  auto &Ctx = CS.DC->getASTContext();

  // Only `SomeType(expr)` shaped calls are of interest.
  auto *typeExpr = dyn_cast<TypeExpr>(Fn);
  auto *parenExpr = dyn_cast<ParenExpr>(Arg);
  if (!typeExpr || !parenExpr)
    return false;

  auto initTy = CS.getInstanceType(typeExpr);
  auto argTy = CS.getType(parenExpr->getSubExpr());
  if (initTy.isNull() || argTy.isNull())
    return false;

  // unwrap one level of Optional
  if (auto unwrappedTy = argTy->getOptionalObjectType())
    argTy = unwrappedTy;

  // The constructed type must be one of the typed unsafe pointers...
  auto *initNominal = initTy->getAnyNominal();
  if (!initNominal)
    return false;
  if (initNominal != Ctx.getUnsafeMutablePointerDecl() &&
      initNominal != Ctx.getUnsafePointerDecl())
    return false;

  // ...and the argument must be one of the raw pointers.
  auto *argNominal = argTy->getAnyNominal();
  if (!argNominal)
    return false;
  return argNominal == Ctx.getUnsafeMutableRawPointerDecl() ||
         argNominal == Ctx.getUnsafeRawPointerDecl();
}
/// Diagnose a closure whose explicitly-annotated parameter types are not
/// convertible to the expected parameter types, recursing into function-typed
/// parameters. Returns true if a diagnostic was emitted.
static bool diagnoseClosureExplicitParameterMismatch(
    ConstraintSystem &CS, SourceLoc loc,
    ArrayRef<AnyFunctionType::Param> params,
    ArrayRef<AnyFunctionType::Param> args) {
  // We are not trying to diagnose structural problems with top-level
  // arguments here.
  if (params.size() != args.size())
    return false;

  for (unsigned i = 0, n = params.size(); i != n; ++i) {
    auto paramType = params[i].getType();
    auto argType = args[i].getType();

    if (auto paramFnType = paramType->getAs<AnyFunctionType>()) {
      if (auto argFnType = argType->getAs<AnyFunctionType>())
        // NOTE(review): this returns the recursive result unconditionally,
        // so remaining parameters are never examined once a function-typed
        // pair is found — confirm that early exit is intended.
        return diagnoseClosureExplicitParameterMismatch(
            CS, loc, paramFnType->getParams(), argFnType->getParams());
    }

    // Skip pairs whose types are not concrete enough to compare.
    if (!paramType || !argType || isUnresolvedOrTypeVarType(paramType) ||
        isUnresolvedOrTypeVarType(argType))
      continue;

    if (!CS.TC.isConvertibleTo(argType, paramType, CS.DC)) {
      CS.TC.diagnose(loc, diag::types_not_convertible, false, paramType,
                     argType);
      return true;
    }
  }

  return false;
}
/// Diagnose failures in calls that take a trailing closure, by relating the
/// callee's (possibly re-typechecked) function type to the closure's
/// explicit parameters and result. Returns true if a diagnostic was emitted.
bool FailureDiagnosis::diagnoseTrailingClosureErrors(ApplyExpr *callExpr) {
  if (!callExpr->hasTrailingClosure())
    return false;

  auto *DC = CS.DC;
  auto *fnExpr = callExpr->getFn();
  auto *argExpr = callExpr->getArg();

  // A trailing closure as the sole argument appears as a ParenExpr wrapping
  // a ClosureExpr; anything else is not handled here.
  ClosureExpr *closureExpr = nullptr;
  if (auto *PE = dyn_cast<ParenExpr>(argExpr)) {
    closureExpr = dyn_cast<ClosureExpr>(PE->getSubExpr());
  } else {
    return false;
  }

  if (!closureExpr)
    return false;

  // Listener that constrains the callee's result type against the expected
  // result of the call while re-typechecking the function expression.
  class CallResultListener : public ExprTypeCheckListener {
    Type expectedResultType;

  public:
    explicit CallResultListener(Type resultType)
        : expectedResultType(resultType) {}

    bool builtConstraints(ConstraintSystem &cs, Expr *expr) override {
      if (!expectedResultType)
        return false;

      auto resultType = cs.getType(expr);
      auto *locator = cs.getConstraintLocator(expr);

      // Since we know that this is trailing closure, format of the
      // type could be like this - ((Input) -> Result) -> ClosureResult
      // which we can leverage to create specific conversion for
      // result type of the call itself, this might help us gain
      // some valuable contextual information.
      if (auto *fnType = resultType->getAs<AnyFunctionType>()) {
        cs.addConstraint(ConstraintKind::Conversion, fnType->getResult(),
                         expectedResultType, locator);
      } else if (auto *typeVar = resultType->getAs<TypeVariableType>()) {
        auto tv =
            cs.createTypeVariable(cs.getConstraintLocator(expr),
                                  TVO_CanBindToLValue | TVO_CanBindToInOut |
                                      TVO_PrefersSubtypeBinding);

        auto extInfo = FunctionType::ExtInfo().withThrows();
        auto fTy = FunctionType::get(ParenType::get(cs.getASTContext(), tv),
                                     expectedResultType, extInfo);

        // Add a conversion constraint between the types.
        cs.addConstraint(ConstraintKind::Conversion, typeVar, fTy, locator,
                         /*isFavored*/ true);
      }

      return false;
    }
  };

  SmallVector<Type, 4> possibleTypes;
  auto currentType = CS.getType(fnExpr);

  // If current type has type variables or unresolved types
  // let's try to re-typecheck it to see if we can get some
  // more information about what is going on.
  if (currentType->hasTypeVariable() || currentType->hasUnresolvedType()) {
    auto contextualType = CS.getContextualType();
    CallResultListener listener(contextualType);
    CS.TC.getPossibleTypesOfExpressionWithoutApplying(
        fnExpr, CS.DC, possibleTypes, FreeTypeVariableBinding::UnresolvedType,
        &listener);

    // Looks like there is there a contextual mismatch
    // related to function type, let's try to diagnose it.
    if (possibleTypes.empty() && contextualType &&
        !contextualType->hasUnresolvedType())
      return diagnoseContextualConversionError(callExpr, contextualType,
                                               CS.getContextualTypePurpose());
  } else {
    possibleTypes.push_back(currentType);
  }

  for (auto type : possibleTypes) {
    auto *fnType = type->getAs<AnyFunctionType>();
    if (!fnType)
      continue;

    // Peel the callee's single parameter out of a tuple or parens; calls
    // with any other parameter shape are not handled by this path.
    auto paramType = fnType->getInput();
    switch (paramType->getKind()) {
    case TypeKind::Tuple: {
      auto tuple = paramType->getAs<TupleType>();
      if (tuple->getNumElements() != 1)
        continue;

      paramType = tuple->getElement(0).getType();
      break;
    }

    case TypeKind::Paren:
      paramType = paramType->getWithoutParens();
      break;

    default:
      return false;
    }

    // Compare the closure's explicit parameter types against the expected
    // function-typed parameter, if both are function types.
    if (auto paramFnType = paramType->getAs<AnyFunctionType>()) {
      auto closureType = CS.getType(closureExpr);
      if (auto *argFnType = closureType->getAs<AnyFunctionType>()) {
        auto *params = closureExpr->getParameters();
        auto loc = params ? params->getStartLoc() : closureExpr->getStartLoc();
        if (diagnoseClosureExplicitParameterMismatch(
                CS, loc, argFnType->getParams(), paramFnType->getParams()))
          return true;
      }
    }

    // Invoked by diagnoseClosureExpr with the closure's actual and expected
    // result types; returns true if a diagnostic was emitted.
    auto processor = [&](Type resultType, Type expectedResultType) -> bool {
      if (resultType && expectedResultType) {
        if (!resultType->isEqual(expectedResultType)) {
          CS.TC.diagnose(closureExpr->getEndLoc(),
                         diag::cannot_convert_closure_result, resultType,
                         expectedResultType);
          return true;
        }

        // Looks like both actual and expected result types match,
        // there is nothing we can diagnose in this case.
        return false;
      }

      // If we got a result type, let's re-typecheck the function using it,
      // maybe we can find a problem where contextually we expect one type
      // but trailing closure produces completely different one.
      auto fnType = paramType->getAs<AnyFunctionType>();
      if (!fnType)
        return false;

      // Listener that constrains the callee against the expected
      // "(closure type) -> contextual result" function type.
      class ClosureCalleeListener : public ExprTypeCheckListener {
        Type InputType;
        Type ResultType;

      public:
        explicit ClosureCalleeListener(Type inputType, Type resultType)
            : InputType(inputType), ResultType(resultType) {}

        bool builtConstraints(ConstraintSystem &cs, Expr *expr) override {
          if (!InputType || !ResultType)
            return false;

          auto expectedType = FunctionType::get(InputType, ResultType);
          cs.addConstraint(ConstraintKind::Conversion, cs.getType(expr),
                           expectedType, cs.getConstraintLocator(expr),
                           /*isFavored*/ true);
          return false;
        }
      };

      auto expectedArgType = FunctionType::get(fnType->getInput(), resultType,
                                               fnType->getExtInfo());

      llvm::SaveAndRestore<DeclContext *> SavedDC(CS.DC, DC);
      ClosureCalleeListener listener(expectedArgType, CS.getContextualType());
      // typeCheckChildIndependently returns null when it emitted a
      // diagnostic, hence the negation.
      return !typeCheckChildIndependently(callExpr->getFn(), Type(),
                                          CTP_CalleeResult, TCC_ForceRecheck,
                                          &listener);
    };

    // Let's see if there are any structural problems with closure itself.
    if (diagnoseClosureExpr(closureExpr, paramType, processor))
      return true;
  }

  return false;
}
/// Check whether the failure associated with the given call expression is
/// related to the supplied contextual type, and if so, diagnose it.
/// Returns true if a diagnostic was emitted.
bool FailureDiagnosis::diagnoseCallContextualConversionErrors(
    ApplyExpr *callExpr, Type contextualType, ContextualTypePurpose CTP) {
  if (!contextualType || contextualType->hasUnresolvedType())
    return false;

  auto &TC = CS.TC;
  auto *DC = CS.DC;

  // First let's type-check expression without contextual type, and
  // see if that's going to produce a type. If so, let's type-check
  // again, this time using given contextual type.
  SmallVector<Type, 4> withoutContextual;
  {
    CalleeListener listener(Type());
    TC.getPossibleTypesOfExpressionWithoutApplying(
        callExpr, DC, withoutContextual, FreeTypeVariableBinding::Disallow,
        &listener);
  }

  // If there are no types returned, it means that problem was
  // nothing to do with contextual information, probably parameter/argument
  // mismatch.
  if (withoutContextual.empty())
    return false;

  SmallVector<Type, 4> withContextual;
  {
    CalleeListener listener(contextualType);
    TC.getPossibleTypesOfExpressionWithoutApplying(
        callExpr, DC, withContextual, FreeTypeVariableBinding::Disallow,
        &listener);
  }

  // If type-checking with contextual type didn't produce any results
  // it means that we have a contextual mismatch.
  if (withContextual.empty())
    return diagnoseContextualConversionError(callExpr, contextualType, CTP);

  // If call produces a single type when type-checked with contextual
  // expression, it means that the problem is elsewhere; any other
  // outcome is ambiguous.
  return false;
}
// Check if there is a structural problem in the function expression
// by performing type checking with the option to allow unresolved
// type variables. If that is going to produce a function type with
// unresolved result let's not re-typecheck the function expression,
// because it might produce unrelated diagnostics due to lack of
// contextual information.
static bool shouldTypeCheckFunctionExpr(TypeChecker &TC, DeclContext *DC,
                                        Expr *fnExpr) {
  // Only unresolved member references need this pre-flight check.
  if (!isa<UnresolvedDotExpr>(fnExpr))
    return true;

  SmallVector<Type, 4> possibleTypes;
  TC.getPossibleTypesOfExpressionWithoutApplying(
      fnExpr, DC, possibleTypes, FreeTypeVariableBinding::UnresolvedType);

  // An ambiguous member (zero or multiple candidate types) still warrants a
  // re-check; we only bail out in the single-candidate case below.
  if (possibleTypes.size() != 1)
    return true;

  // Some member types depend on the call arguments to produce their result
  // type; type-checking such an expression without its arguments would only
  // produce unrelated diagnostics, so skip the re-check.
  if (auto *fnType = possibleTypes.front()->getAs<AnyFunctionType>()) {
    auto resultType = fnType->getResult();
    if (resultType->hasUnresolvedType() || resultType->hasTypeVariable())
      return false;
  }

  // Might be a structural problem related to the member itself.
  return true;
}
/// Diagnose failures in a function application.
///
/// Resolves the callee and argument sub-expressions independently, builds a
/// candidate overload set from the callee, progressively filters it using the
/// known function/argument types, and emits the most specific diagnostic it
/// can find. Returns true if a diagnostic was emitted, false if the failure
/// appears to be a contextual problem better diagnosed by a caller.
bool FailureDiagnosis::visitApplyExpr(ApplyExpr *callExpr) {
  // If this call involves a trailing closure as an argument, treat it
  // specially, because re-typechecking either the function or the arguments
  // might result in diagnosing unrelated problems due to lack of context.
  if (diagnoseTrailingClosureErrors(callExpr))
    return true;
  if (diagnoseCallContextualConversionErrors(callExpr, CS.getContextualType(),
                                             CS.getContextualTypePurpose()))
    return true;
  auto *fnExpr = callExpr->getFn();
  auto originalFnType = CS.getType(callExpr->getFn());
  if (shouldTypeCheckFunctionExpr(CS.TC, CS.DC, fnExpr)) {
    // Type check the function subexpression to resolve a type for it if
    // possible.
    fnExpr = typeCheckChildIndependently(callExpr->getFn());
    if (!fnExpr)
      return true;
  }
  SWIFT_DEFER {
    if (!fnExpr) return;
    // If it's a member operator reference, put the operator back.
    if (auto operatorRef = fnExpr->getMemberOperatorRef())
      callExpr->setFn(operatorRef);
  };
  // Strip l-valueness and implicit optional unwrapping to get at the
  // underlying (possibly function) type.
  auto getFuncType = [](Type type) -> Type {
    auto fnType = type->getRValueType();
    if (auto objectType = fnType->getImplicitlyUnwrappedOptionalObjectType())
      return objectType;
    return fnType;
  };
  auto fnType = getFuncType(CS.getType(fnExpr));
  // Let's see if this has to do with a member vs. property error,
  // because sometimes there is a member and a property declared
  // on the nominal type with the same name. Type-checking the function
  // expression separately from the arguments might produce a solution for
  // the property instead of the member.
  if (!fnType->is<AnyFunctionType>() &&
      isa<UnresolvedDotExpr>(callExpr->getFn())) {
    fnExpr = callExpr->getFn();
    SmallVector<Type, 4> types;
    CS.TC.getPossibleTypesOfExpressionWithoutApplying(fnExpr, CS.DC, types);
    auto isFunctionType = [getFuncType](Type type) -> bool {
      return type && getFuncType(type)->is<AnyFunctionType>();
    };
    auto fnTypes = std::find_if(types.begin(), types.end(), isFunctionType);
    if (fnTypes != types.end()) {
      auto funcType = getFuncType(*fnTypes);
      // If there is only one function type, let's use it.
      if (std::none_of(std::next(fnTypes), types.end(), isFunctionType))
        fnType = funcType;
    } else {
      fnType = getFuncType(originalFnType);
    }
  }
  // If we have a contextual type, and if we have an ambiguously typed function
  // result from our previous check, we re-type-check it using this contextual
  // type to inform the result type of the callee.
  //
  // We only do this as a second pass because the first pass we just did may
  // return something of obviously non-function-type. If this happens, we
  // produce better diagnostics below by diagnosing this here rather than trying
  // to peel apart the failed conversion to function type.
  if (CS.getContextualType() &&
      (isUnresolvedOrTypeVarType(fnType) ||
       (fnType->is<AnyFunctionType>() && fnType->hasUnresolvedType()))) {
    // FIXME: Prevent typeCheckChildIndependently from transforming expressions,
    // because if we try to typecheck OSR expression with contextual type,
    // it'll end up converting it into DeclRefExpr based on contextual info,
    // instead let's try to get a type without applying and filter callee
    // candidates later on.
    CalleeListener listener(CS.getContextualType());
    if (isa<OverloadSetRefExpr>(fnExpr)) {
      assert(!cast<OverloadSetRefExpr>(fnExpr)->getReferencedDecl() &&
             "unexpected declaration reference");
      ConcreteDeclRef decl = nullptr;
      Type type = CS.TC.getTypeOfExpressionWithoutApplying(
          fnExpr, CS.DC, decl, FreeTypeVariableBinding::UnresolvedType,
          &listener);
      if (type)
        fnType = getFuncType(type);
    } else {
      fnExpr = typeCheckChildIndependently(callExpr->getFn(), Type(),
                                           CTP_CalleeResult, TCC_ForceRecheck,
                                           &listener);
      if (!fnExpr)
        return true;
      fnType = getFuncType(CS.getType(fnExpr));
    }
  }
  // If we resolved a concrete expression for the callee, and it has
  // non-function/non-metatype type, then we cannot call it!
  if (!isUnresolvedOrTypeVarType(fnType) &&
      !fnType->is<AnyFunctionType>() && !fnType->is<MetatypeType>()) {
    auto arg = callExpr->getArg();
    if (fnType->is<ExistentialMetatypeType>()) {
      auto diag = diagnose(arg->getStartLoc(),
                           diag::missing_init_on_metatype_initialization);
      diag.highlight(fnExpr->getSourceRange());
    } else {
      auto diag = diagnose(arg->getStartLoc(),
                           diag::cannot_call_non_function_value, fnType);
      diag.highlight(fnExpr->getSourceRange());
      // If the argument is an empty tuple, then offer a
      // fix-it to remove the empty tuple and use the value
      // directly.
      if (auto tuple = dyn_cast<TupleExpr>(arg)) {
        if (tuple->getNumElements() == 0) {
          diag.fixItRemove(arg->getSourceRange());
        }
      }
    }
    // If the argument is a trailing ClosureExpr (i.e. {....}) and it is on
    // the line after the callee, then it's likely the user forgot to
    // write "do" before their brace stmt.
    // Note that line differences of more than 1 are diagnosed during parsing.
    if (auto *PE = dyn_cast<ParenExpr>(arg))
      if (PE->hasTrailingClosure() && isa<ClosureExpr>(PE->getSubExpr())) {
        auto *closure = cast<ClosureExpr>(PE->getSubExpr());
        auto &SM = CS.getASTContext().SourceMgr;
        if (closure->hasAnonymousClosureVars() &&
            closure->getParameters()->size() == 0 &&
            1 + SM.getLineNumber(callExpr->getFn()->getEndLoc()) ==
                SM.getLineNumber(closure->getStartLoc())) {
          diagnose(closure->getStartLoc(), diag::brace_stmt_suggest_do)
              .fixItInsert(closure->getStartLoc(), "do ");
        }
      }
    return true;
  }
  bool hasTrailingClosure = callArgHasTrailingClosure(callExpr->getArg());
  // Collect a full candidate list of callees based on the partially type
  // checked function.
  CalleeCandidateInfo calleeInfo(fnExpr, hasTrailingClosure, CS);
  // Filter list of the candidates based on the known function type.
  if (auto fn = fnType->getAs<AnyFunctionType>()) {
    using Closeness = CalleeCandidateInfo::ClosenessResultTy;
    calleeInfo.filterList([&](UncurriedCandidate candidate) -> Closeness {
      auto resultType = candidate.getResultType();
      if (!resultType)
        return {CC_GeneralMismatch, {}};
      // FIXME: Handle matching of the generic types properly.
      // Currently we don't filter result types containing generic parameters
      // because there is no easy way to do that, and candidate set is going
      // to be pruned by matching of the argument types later on anyway, so
      // it's better to over report than to be too conservative.
      if (resultType->isEqual(fn->getResult()))
        return {CC_ExactMatch, {}};
      return {CC_GeneralMismatch, {}};
    });
  }
  // Filter the candidate list based on the argument we may or may not have.
  calleeInfo.filterContextualMemberList(callExpr->getArg());
  SmallVector<Identifier, 2> argLabelsScratch;
  ArrayRef<Identifier> argLabels =
    callExpr->getArgumentLabels(argLabelsScratch);
  if (diagnoseParameterErrors(calleeInfo, callExpr->getFn(),
                              callExpr->getArg(), argLabels))
    return true;
  // There might be a candidate with correct argument types but it's not
  // used by constraint solver because it doesn't have correct attributes,
  // let's try to diagnose such situation there right before type checking
  // argument expression, because that would overwrite original argument types.
  if (diagnoseMethodAttributeFailures(callExpr, argLabels, hasTrailingClosure,
                                      calleeInfo))
    return true;
  Type argType; // Type of the argument list, if knowable.
  if (auto FTy = fnType->getAs<AnyFunctionType>())
    argType = FTy->getInput();
  else if (auto MTT = fnType->getAs<AnyMetatypeType>()) {
    // If we are constructing a tuple with initializer syntax, the expected
    // argument list is the tuple type itself - and there is no initdecl.
    auto instanceTy = MTT->getInstanceType();
    if (auto tupleTy = instanceTy->getAs<TupleType>()) {
      argType = tupleTy;
    }
  }
  // If there is a failing constraint associated with current constraint
  // system which points to the argument/parameter mismatch, let's use
  // that information while re-typechecking argument expression, this
  // makes it a lot easier to determine contextual mismatch.
  if (CS.failedConstraint && !hasTrailingClosure) {
    auto *constraint = CS.failedConstraint;
    if (constraint->getKind() == ConstraintKind::ArgumentTupleConversion) {
      if (auto *locator = constraint->getLocator()) {
        if (locator->getAnchor() == callExpr) {
          argType = constraint->getSecondType();
          if (auto *typeVar = argType->getAs<TypeVariableType>())
            argType = CS.getFixedType(typeVar);
        }
      }
    }
  }
  // Get the expression result of type checking the arguments to the call
  // independently, so we have some idea of what we're working with.
  //
  auto argExpr = typeCheckArgumentChildIndependently(callExpr->getArg(),
                                                     argType, calleeInfo,
                                             TCC_AllowUnresolvedTypeVariables);
  if (!argExpr)
    return true; // already diagnosed.
  calleeInfo.filterList(CS.getType(argExpr), argLabels);
  if (diagnoseParameterErrors(calleeInfo, callExpr->getFn(), argExpr,
                              argLabels))
    return true;
  // Diagnose some simple and common errors.
  if (calleeInfo.diagnoseSimpleErrors(callExpr))
    return true;
  // Force recheck of the arg expression because we allowed unresolved types
  // before, and that turned out not to help, and now we want any diagnoses
  // from disallowing them.
  argExpr = typeCheckArgumentChildIndependently(callExpr->getArg(), argType,
                                                calleeInfo, TCC_ForceRecheck);
  if (!argExpr)
    return true; // already diagnosed.
  // A common error is to apply an operator that only has inout forms (e.g. +=)
  // to non-lvalues (e.g. a local let). Produce a nice diagnostic for this
  // case.
  if (calleeInfo.closeness == CC_NonLValueInOut) {
    Diag<StringRef> subElementDiagID;
    Diag<Type> rvalueDiagID;
    Expr *diagExpr = nullptr;
    if (isa<PrefixUnaryExpr>(callExpr) || isa<PostfixUnaryExpr>(callExpr)) {
      subElementDiagID = diag::cannot_apply_lvalue_unop_to_subelement;
      rvalueDiagID = diag::cannot_apply_lvalue_unop_to_rvalue;
      diagExpr = argExpr;
    } else if (isa<BinaryExpr>(callExpr)) {
      subElementDiagID = diag::cannot_apply_lvalue_binop_to_subelement;
      rvalueDiagID = diag::cannot_apply_lvalue_binop_to_rvalue;
      // For a binary operator the mutable operand is the left-hand side.
      if (auto argTuple = dyn_cast<TupleExpr>(argExpr))
        diagExpr = argTuple->getElement(0);
    }
    if (diagExpr) {
      diagnoseSubElementFailure(diagExpr, callExpr->getFn()->getLoc(), CS,
                                subElementDiagID, rvalueDiagID);
      return true;
    }
  }
  // Handle argument label mismatches when we have multiple candidates.
  if (calleeInfo.closeness == CC_ArgumentLabelMismatch) {
    auto args = decomposeArgType(CS.getType(argExpr), argLabels);
    // If we have multiple candidates that we fail to match, just say we have
    // the wrong labels and list the candidates out.
    // TODO: It would be nice to use an analog of getTypeListString that
    // doesn't include the argument types.
    diagnose(callExpr->getLoc(), diag::wrong_argument_labels_overload,
             getParamListAsString(args))
      .highlight(argExpr->getSourceRange());
    // Did the user intend on invoking a different overload?
    calleeInfo.suggestPotentialOverloads(fnExpr->getLoc());
    return true;
  }
  auto overloadName = calleeInfo.declName;
  // Local function to check if the error with argument type is
  // related to contextual type information of the enclosing expression
  // rather than resolution of argument expression itself.
  auto isContextualConversionFailure = [&](Expr *argExpr) -> bool {
    // If we found an exact match, this must be a problem with a conversion from
    // the result of the call to the expected type. Diagnose this as a
    // conversion failure.
    if (calleeInfo.closeness == CC_ExactMatch)
      return true;
    if (!CS.getContextualType() ||
        (calleeInfo.closeness != CC_ArgumentMismatch &&
         calleeInfo.closeness != CC_OneGenericArgumentMismatch))
      return false;
    CalleeCandidateInfo candidates(fnExpr, hasTrailingClosure, CS);
    // Filter original list of choices based on the deduced type of
    // argument expression after force re-check.
    candidates.filterContextualMemberList(argExpr);
    // One of the candidates matches exactly, which means that
    // this is a contextual type conversion failure, we can't diagnose here.
    return candidates.closeness == CC_ExactMatch;
  };
  // Otherwise, we have a generic failure. Diagnose it with a generic error
  // message now.
  if (isa<BinaryExpr>(callExpr) && isa<TupleExpr>(argExpr)) {
    auto argTuple = cast<TupleExpr>(argExpr);
    auto lhsExpr = argTuple->getElement(0), rhsExpr = argTuple->getElement(1);
    auto lhsType = CS.getType(lhsExpr)->getRValueType();
    auto rhsType = CS.getType(rhsExpr)->getRValueType();
    // Diagnose any comparisons with the nil literal.
    if (diagnoseNilLiteralComparison(lhsExpr, rhsExpr, calleeInfo,
                                     callExpr->getLoc()))
      return true;
    if (callExpr->isImplicit() && overloadName == "~=") {
      // This binop was synthesized when typechecking an expression pattern.
      auto diag = lhsType->is<UnresolvedType>()
        ? diagnose(lhsExpr->getLoc(),
                   diag::cannot_match_unresolved_expr_pattern_with_value,
                   rhsType)
        : diagnose(lhsExpr->getLoc(),
                   diag::cannot_match_expr_pattern_with_value,
                   lhsType, rhsType);
      diag.highlight(lhsExpr->getSourceRange());
      diag.highlight(rhsExpr->getSourceRange());
      // If the pattern differs from the matched value only by optionality,
      // suggest appending '?' to the pattern.
      if (auto optUnwrappedType = rhsType->getOptionalObjectType()) {
        if (lhsType->isEqual(optUnwrappedType)) {
          diag.fixItInsertAfter(lhsExpr->getEndLoc(), "?");
        }
      }
      return true;
    }
    // Diagnose attempts to compare reference equality of certain types.
    if (overloadName == "===" || overloadName == "!==") {
      // Functions.
      if (lhsType->is<AnyFunctionType>() || rhsType->is<AnyFunctionType>()) {
        diagnose(callExpr->getLoc(), diag::cannot_reference_compare_types,
                 overloadName, lhsType, rhsType)
          .highlight(lhsExpr->getSourceRange())
          .highlight(rhsExpr->getSourceRange());
        return true;
      }
    }
    if (diagnoseArgumentGenericRequirements(CS.TC, callExpr, fnExpr, argExpr,
                                            calleeInfo, argLabels))
      return true;
    if (isContextualConversionFailure(argTuple))
      return false;
    if (diagnoseRawRepresentableMismatch(calleeInfo, argExpr, argLabels))
      return true;
    if (!lhsType->isEqual(rhsType)) {
      diagnose(callExpr->getLoc(), diag::cannot_apply_binop_to_args,
               overloadName, lhsType, rhsType)
        .highlight(lhsExpr->getSourceRange())
        .highlight(rhsExpr->getSourceRange());
    } else {
      diagnose(callExpr->getLoc(), diag::cannot_apply_binop_to_same_args,
               overloadName, lhsType)
        .highlight(lhsExpr->getSourceRange())
        .highlight(rhsExpr->getSourceRange());
    }
    // Comparing payload-carrying enum cases has no synthesized operator;
    // point that out rather than listing overloads.
    if (lhsType->isEqual(rhsType) &&
        isNameOfStandardComparisonOperator(overloadName) &&
        lhsType->is<EnumType>() &&
        !lhsType->getAs<EnumType>()->getDecl()
            ->hasOnlyCasesWithoutAssociatedValues()) {
      diagnose(callExpr->getLoc(),
               diag::no_binary_op_overload_for_enum_with_payload,
               overloadName);
    } else {
      calleeInfo.suggestPotentialOverloads(callExpr->getLoc());
    }
    return true;
  }
  // If all of the arguments are a perfect match, let's check if there
  // are problems with requirements placed on generic parameters, because
  // CalleeCandidateInfo validates only conformance of the parameters
  // to their protocol types (if any) but it doesn't check additional
  // requirements placed on e.g. nested types or between parameters.
  if (diagnoseArgumentGenericRequirements(CS.TC, callExpr, fnExpr, argExpr,
                                          calleeInfo, argLabels))
    return true;
  // If we have a failure where closeness is an exact match, but there is
  // still a failed argument, it is because one (or more) of the arguments
  // types are unresolved.
  if (calleeInfo.closeness == CC_ExactMatch && calleeInfo.failedArgument.isValid()) {
    diagnoseAmbiguity(getFailedArgumentExpr(calleeInfo, argExpr));
    return true;
  }
  if (isContextualConversionFailure(argExpr))
    return false;
  // Generate specific error messages for unary operators.
  if (isa<PrefixUnaryExpr>(callExpr) || isa<PostfixUnaryExpr>(callExpr)) {
    assert(!overloadName.empty());
    diagnose(argExpr->getLoc(), diag::cannot_apply_unop_to_arg, overloadName,
             CS.getType(argExpr));
    calleeInfo.suggestPotentialOverloads(argExpr->getLoc());
    return true;
  }
  if (CS.getType(argExpr)->hasUnresolvedType())
    return false;
  if (diagnoseRawRepresentableMismatch(calleeInfo, argExpr, argLabels))
    return true;
  std::string argString = getTypeListString(CS.getType(argExpr));
  // If we couldn't get the name of the callee, then it must be something of a
  // more complex "value of function type".
  if (overloadName.empty()) {
    // If we couldn't infer the result type of the closure expr, then we have
    // some sort of ambiguity, let the ambiguity diagnostic stuff handle this.
    if (auto ffty = fnType->getAs<AnyFunctionType>())
      if (ffty->getResult()->hasTypeVariable()) {
        diagnoseAmbiguity(fnExpr);
        return true;
      }
    // The most common unnamed value of closure type is a ClosureExpr, so
    // special case it.
    if (isa<ClosureExpr>(fnExpr->getValueProvidingExpr())) {
      if (fnType->hasTypeVariable())
        diagnose(argExpr->getStartLoc(), diag::cannot_invoke_closure, argString)
          .highlight(fnExpr->getSourceRange());
      else
        diagnose(argExpr->getStartLoc(), diag::cannot_invoke_closure_type,
                 fnType, argString)
          .highlight(fnExpr->getSourceRange());
    } else if (fnType->hasTypeVariable()) {
      diagnose(argExpr->getStartLoc(), diag::cannot_call_function_value,
               argString)
        .highlight(fnExpr->getSourceRange());
    } else {
      diagnose(argExpr->getStartLoc(), diag::cannot_call_value_of_function_type,
               fnType, argString)
        .highlight(fnExpr->getSourceRange());
    }
    return true;
  }
  // Protocol metatypes cannot be instantiated directly.
  if (auto MTT = fnType->getAs<MetatypeType>()) {
    if (MTT->getInstanceType()->isExistentialType()) {
      diagnose(fnExpr->getLoc(), diag::construct_protocol_value, fnType);
      return true;
    }
  }
  // If we have an argument list (i.e., a scalar, or a non-zero-element tuple)
  // then diagnose with some specificity about the arguments.
  bool isInitializer = isa<TypeExpr>(fnExpr);
  if (isa<TupleExpr>(argExpr) &&
      cast<TupleExpr>(argExpr)->getNumElements() == 0) {
    // Emit diagnostics that say "no arguments".
    diagnose(fnExpr->getLoc(), diag::cannot_call_with_no_params,
             overloadName, isInitializer);
  } else {
    diagnose(fnExpr->getLoc(), diag::cannot_call_with_params,
             overloadName, argString, isInitializer);
  }
  if (isCastToTypedPointer(CS, fnExpr, argExpr)) {
    diagnose(fnExpr->getLoc(), diag::pointer_init_to_type)
      .highlight(argExpr->getSourceRange());
  }
  // Did the user intend on invoking a different overload?
  calleeInfo.suggestPotentialOverloads(fnExpr->getLoc());
  return true;
}
/// Diagnose failures in an assignment expression.
///
/// Checks for obviously invalid destinations (literals, self-assignment,
/// immutable values) first, then probes for source/destination type
/// mismatches. Returns true if a diagnostic was emitted.
bool FailureDiagnosis::visitAssignExpr(AssignExpr *assignExpr) {
  // Diagnose obvious assignments to literals.
  if (isa<LiteralExpr>(assignExpr->getDest()->getValueProvidingExpr())) {
    diagnose(assignExpr->getLoc(), diag::cannot_assign_to_literal);
    return true;
  }
  if (CS.TC.diagnoseSelfAssignment(assignExpr))
    return true;
  // Type check the destination first, so we can coerce the source to it.
  auto destExpr = typeCheckChildIndependently(assignExpr->getDest(),
                                              TCC_AllowLValue);
  if (!destExpr) return true;
  auto destType = CS.getType(destExpr);
  if (destType->is<UnresolvedType>() || destType->hasTypeVariable()) {
    // Look closer into why the destination has unresolved types, since that
    // means the destination has diagnosable structural problems; it's
    // better to diagnose the destination (if possible) before moving on to
    // the source of the assignment.
    destExpr = typeCheckChildIndependently(
        destExpr, TCC_AllowLValue | TCC_ForceRecheck, false);
    if (!destExpr)
      return true;
    // If re-checking destination didn't produce diagnostic, let's just type
    // check the source without contextual information. If it succeeds, then we
    // win, but if it fails, we'll have to diagnose this another way.
    return !typeCheckChildIndependently(assignExpr->getSrc());
  }
  // If the result type is a non-lvalue, then we are failing because it is
  // immutable and that's not a great thing to assign to.
  if (!destType->hasLValueType()) {
    // If the destination is a subscript, the problem may actually be that we
    // incorrectly decided on a get-only subscript overload, and we may be able
    // to come up with a better diagnosis by looking only at subscript candidates
    // that are set-able.
    if (auto subscriptExpr = dyn_cast<SubscriptExpr>(destExpr)) {
      if (diagnoseSubscriptErrors(subscriptExpr, /* inAssignmentDestination = */ true))
        return true;
    }
    CS.diagnoseAssignmentFailure(destExpr, destType, assignExpr->getLoc());
    return true;
  }
  auto *srcExpr = assignExpr->getSrc();
  auto contextualType = destType->getRValueType();
  // Let's try to type-check assignment source expression without using
  // destination as a contextual type, that allows us to diagnose
  // contextual problems related to source much easier.
  //
  // If source expression requires contextual type to be present,
  // let's avoid this step because it's always going to fail.
  // (Note: the outer `srcExpr` is used directly here; an earlier version
  // shadowed it with a second declaration of the same value.)
  {
    ExprTypeSaverAndEraser eraser(srcExpr);
    ConcreteDeclRef ref = nullptr;
    auto type = CS.TC.getTypeOfExpressionWithoutApplying(srcExpr, CS.DC, ref);
    if (type && !type->isEqual(contextualType))
      return diagnoseContextualConversionError(
          assignExpr->getSrc(), contextualType, CTP_AssignSource);
  }
  srcExpr = typeCheckChildIndependently(assignExpr->getSrc(), contextualType,
                                        CTP_AssignSource);
  if (!srcExpr)
    return true;
  // If we are assigning to _ and have unresolved types on the RHS, then we have
  // an ambiguity problem.
  if (isa<DiscardAssignmentExpr>(destExpr->getSemanticsProvidingExpr()) &&
      CS.getType(srcExpr)->hasUnresolvedType()) {
    diagnoseAmbiguity(srcExpr);
    return true;
  }
  return false;
}
/// Determine whether the given type is statically known to be the standard
/// library's Array type (a BoundGenericType whose declaration is the
/// ASTContext's array decl).
static bool isKnownToBeArrayType(Type ty) {
  if (!ty)
    return false;
  if (auto *boundGeneric = ty->getAs<BoundGenericType>())
    return boundGeneric->getDecl() ==
           boundGeneric->getASTContext().getArrayDecl();
  return false;
}
/// Diagnose failures involving an explicit '&' (inout) expression, including
/// unnecessary or invalid address-of in pointer-conversion positions.
bool FailureDiagnosis::visitInOutExpr(InOutExpr *IOE) {
  // If we have a contextual type, it must be an inout type.
  auto contextualType = CS.getContextualType();
  if (contextualType) {
    // If the contextual type is one of the UnsafePointer<T> types, then the
    // contextual type of the subexpression must be T.
    Type unwrappedType = contextualType;
    if (auto unwrapped = contextualType->getAnyOptionalObjectType())
      unwrappedType = unwrapped;
    PointerTypeKind pointerKind;
    if (auto pointerEltType =
          unwrappedType->getAnyPointerElementType(pointerKind)) {
      // If the element type is Void, then we allow any input type, since
      // everything is convertible to UnsafeRawPointer
      if (pointerEltType->isVoid())
        contextualType = Type();
      else
        contextualType = pointerEltType;
      // Furthermore, if the subexpr type is already known to be an array type,
      // then we must have an attempt at an array to pointer conversion.
      if (isKnownToBeArrayType(CS.getType(IOE->getSubExpr()))) {
        // If we're converting to an UnsafeMutablePointer, then the pointer to
        // the first element is being passed in. The array is ok, so long as
        // it is mutable.
        if (pointerKind == PTK_UnsafeMutablePointer) {
          contextualType = ArraySliceType::get(contextualType);
        } else if (pointerKind == PTK_UnsafePointer || pointerKind == PTK_UnsafeRawPointer) {
          // If we're converting to an UnsafePointer, then the programmer
          // specified an & unnecessarily. Produce a fixit hint to remove it.
          diagnose(IOE->getLoc(), diag::extra_address_of_unsafepointer,
                   unwrappedType)
            .highlight(IOE->getSourceRange())
            .fixItRemove(IOE->getStartLoc());
          return true;
        }
      }
    } else if (contextualType->is<InOutType>()) {
      // Strip the inout wrapper so the operand is checked against the
      // underlying object type.
      contextualType = contextualType->getInOutObjectType();
    } else {
      // If the caller expected something inout, but we didn't have
      // something of inout type, diagnose it.
      diagnose(IOE->getLoc(), diag::extra_address_of, contextualType)
        .highlight(IOE->getSourceRange())
        .fixItRemove(IOE->getStartLoc());
      return true;
    }
  }
  // Re-check the operand using whatever contextual type was derived above,
  // allowing it to resolve to an l-value.
  auto subExpr = typeCheckChildIndependently(IOE->getSubExpr(), contextualType,
                                             CS.getContextualTypePurpose(),
                                             TCC_AllowLValue);
  if (!subExpr) return true;
  auto subExprType = CS.getType(subExpr);
  // The common cause is that the operand is not an lvalue.
  if (!subExprType->hasLValueType()) {
    diagnoseSubElementFailure(subExpr, IOE->getLoc(), CS,
                              diag::cannot_pass_rvalue_inout_subelement,
                              diag::cannot_pass_rvalue_inout);
    return true;
  }
  return false;
}
bool FailureDiagnosis::visitCoerceExpr(CoerceExpr *CE) {
  // Re-check the operand using the coercion's target type as context.
  auto *subExpr = typeCheckChildIndependently(CE->getSubExpr(),
                                              CE->getCastTypeLoc().getType(),
                                              CTP_CoerceOperand);
  if (!subExpr)
    return true;

  // Without the explicit coercion the sub-expression might have been
  // type-checked as a reference to an unavailable declaration; try to
  // surface that availability problem here.
  if (auto *decl = subExpr->getReferencedDecl().getDecl()) {
    if (AvailableAttr::isUnavailable(decl))
      return CS.TC.diagnoseExplicitUnavailability(
          decl, subExpr->getSourceRange(), CS.DC, dyn_cast<ApplyExpr>(subExpr));
  }
  return false;
}
bool FailureDiagnosis::visitForceValueExpr(ForceValueExpr *FVE) {
  auto *subExpr = typeCheckChildIndependently(FVE->getSubExpr());
  if (!subExpr)
    return true;

  auto subType = CS.getType(subExpr);
  // Force-unwrapping only makes sense on optionals. If the operand resolved
  // to a concrete non-optional type, that's the error - diagnose it
  // specifically and suggest dropping the '!'.
  if (isUnresolvedOrTypeVarType(subType))
    return false;
  if (!subType->getAnyOptionalObjectType().isNull())
    return false;

  diagnose(FVE->getLoc(), diag::invalid_force_unwrap, subType)
      .fixItRemove(FVE->getExclaimLoc())
      .highlight(FVE->getSourceRange());
  return true;
}
bool FailureDiagnosis::visitBindOptionalExpr(BindOptionalExpr *BOE) {
  auto *subExpr = typeCheckChildIndependently(BOE->getSubExpr());
  if (!subExpr)
    return true;

  auto subType = CS.getType(subExpr);
  // Optional chaining requires an optional base. If the operand resolved to
  // a concrete non-optional type, that's the error - diagnose it
  // specifically and suggest dropping the '?'.
  if (isUnresolvedOrTypeVarType(subType))
    return false;
  if (!subType->getAnyOptionalObjectType().isNull())
    return false;

  diagnose(BOE->getQuestionLoc(), diag::invalid_optional_chain, subType)
      .highlight(BOE->getSourceRange())
      .fixItRemove(BOE->getQuestionLoc());
  return true;
}
bool FailureDiagnosis::visitIfExpr(IfExpr *IE) {
  // Type-check one clause of the ternary, applying a contextual conversion
  // when a purpose is specified.
  auto checkClause = [&](Expr *clause, Type contextType = Type(),
                         ContextualTypePurpose convertPurpose =
                             CTP_Unused) -> Expr * {
    return typeCheckChildIndependently(clause, contextType, convertPurpose,
                                       TCCOptions(), nullptr, false);
  };

  // Check all of the subexpressions independently.
  auto *condExpr = checkClause(IE->getCondExpr());
  if (!condExpr)
    return true;
  auto *thenExpr = checkClause(IE->getThenExpr(), CS.getContextualType(),
                               CS.getContextualTypePurpose());
  if (!thenExpr)
    return true;
  auto *elseExpr = checkClause(IE->getElseExpr(), CS.getContextualType(),
                               CS.getContextualTypePurpose());
  if (!elseExpr)
    return true;

  auto thenType = CS.getType(thenExpr);
  auto elseType = CS.getType(elseExpr);
  // If both branches already agree on a type, the failure must be a
  // contextual problem elsewhere.
  if (thenType->isEqual(elseType))
    return false;

  // Otherwise the branch result types genuinely disagree.
  diagnose(IE->getColonLoc(), diag::if_expr_cases_mismatch, thenType, elseType)
      .highlight(thenExpr->getSourceRange())
      .highlight(elseExpr->getSourceRange());
  return true;
}
bool FailureDiagnosis::
visitRebindSelfInConstructorExpr(RebindSelfInConstructorExpr *E) {
  // Deliberately do not descend into this node's children: Sema injects it
  // into the type checker in a way that would otherwise produce multiple
  // diagnostics for a single failure.
  return false;
}
bool FailureDiagnosis::visitCaptureListExpr(CaptureListExpr *CLE) {
  // A capture list is diagnosed through the closure it wraps; always walk
  // into the closure body.
  return visitClosureExpr(CLE->getClosureBody());
}
/// Returns true if the given closure result type is missing or still
/// contains unresolved types, type variables, or archetypes — i.e. it is
/// not concrete enough to diagnose against.
static bool isInvalidClosureResultType(Type resultType) {
  if (!resultType)
    return true;
  return resultType->hasUnresolvedType() || resultType->hasTypeVariable() ||
         resultType->hasArchetype();
}
bool FailureDiagnosis::visitClosureExpr(ClosureExpr *CE) {
  // Compare the closure's inferred result type against the expected one and
  // diagnose a conversion failure when they concretely disagree.
  auto checkResult = [&](Type resultType, Type expectedResultType) -> bool {
    // Nothing to diagnose against an unusable expected result type.
    if (isInvalidClosureResultType(expectedResultType))
      return false;
    // Following situations are possible:
    // * No result type - possibly a structural problem in the body;
    // * Function result type - possible use of a function without calling
    //   it, which is properly diagnosed by the actual type-check call.
    if (!resultType)
      return false;
    if (resultType->getRValueType()->is<AnyFunctionType>())
      return false;
    if (resultType->isEqual(expectedResultType))
      return false;
    diagnose(CE->getEndLoc(), diag::cannot_convert_closure_result, resultType,
             expectedResultType);
    return true;
  };
  return diagnoseClosureExpr(CE, CS.getContextualType(), checkResult);
}
bool FailureDiagnosis::diagnoseClosureExpr(
ClosureExpr *CE, Type contextualType,
std::function<bool(Type, Type)> resultTypeProcessor) {
// Look through IUO because it doesn't influence
// neither parameter nor return type diagnostics itself,
// but if we have function type inside, that might
// signficantly improve diagnostic quality.
if (contextualType) {
if (auto IUO =
CS.lookThroughImplicitlyUnwrappedOptionalType(contextualType))
contextualType = IUO;
}
Type expectedResultType;
// If we have a contextual type available for this closure, apply it to the
// ParamDecls in our parameter list. This ensures that any uses of them get
// appropriate types.
if (contextualType && contextualType->is<AnyFunctionType>()) {
auto fnType = contextualType->getAs<AnyFunctionType>();
auto *params = CE->getParameters();
Type inferredArgType = fnType->getInput();
// It is very common for a contextual type to disagree with the argument
// list built into the closure expr. This can be because the closure expr
// had an explicitly specified pattern, a la:
// { a,b in ... }
// or could be because the closure has an implicitly generated one:
// { $0 + $1 }
// in either case, we want to produce nice and clear diagnostics.
unsigned actualArgCount = params->size();
unsigned inferredArgCount = 1;
// Don't try to desugar ParenType which is going to result in incorrect
// inferred argument count.
if (auto *argTupleTy = dyn_cast<TupleType>(inferredArgType.getPointer()))
inferredArgCount = argTupleTy->getNumElements();
if (actualArgCount != inferredArgCount) {
// If the closure didn't specify any arguments and it is in a context that
// needs some, produce a fixit to turn "{...}" into "{ _,_ in ...}".
if (actualArgCount == 0 && CE->getInLoc().isInvalid()) {
auto diag =
diagnose(CE->getStartLoc(), diag::closure_argument_list_missing,
inferredArgCount);
StringRef fixText; // We only handle the most common cases.
if (inferredArgCount == 1)
fixText = " _ in ";
else if (inferredArgCount == 2)
fixText = " _,_ in ";
else if (inferredArgCount == 3)
fixText = " _,_,_ in ";
if (!fixText.empty()) {
// Determine if there is already a space after the { in the closure to
// make sure we introduce the right whitespace.
auto afterBrace = CE->getStartLoc().getAdvancedLoc(1);
auto text = CS.TC.Context.SourceMgr.extractText({afterBrace, 1});
if (text.size() == 1 && text == " ")
fixText = fixText.drop_back();
else
fixText = fixText.drop_front();
diag.fixItInsertAfter(CE->getStartLoc(), fixText);
}
return true;
}
if (inferredArgCount == 1 && actualArgCount > 1) {
// Let's see if inferred argument is actually a tuple inside of Paren.
if (auto *argTupleTy = inferredArgType->getAs<TupleType>()) {
// Looks like the number of closure parameters matches number
// of inferred arguments, which means we can we can emit an
// error about an attempt to make use of tuple splat or tuple
// destructuring and provide a proper fix-it.
if (argTupleTy->getNumElements() == actualArgCount) {
// In case of implicit parameters e.g. $0, $1 we
// can't really provide good fix-it because
// structure of parameter type itself is unclear.
for (auto *param : params->getArray()) {
if (param->isImplicit()) {
diagnose(params->getStartLoc(),
diag::closure_tuple_parameter_destructuring_implicit,
argTupleTy);
return true;
}
}
auto diag = diagnose(params->getStartLoc(),
diag::closure_tuple_parameter_destructuring,
argTupleTy);
Type actualArgType;
if (auto *actualFnType = CS.getType(CE)->getAs<AnyFunctionType>())
actualArgType = actualFnType->getInput();
auto *closureBody = CE->getBody();
if (!closureBody)
return true;
auto &sourceMgr = CS.getASTContext().SourceMgr;
auto bodyStmts = closureBody->getElements();
SourceLoc bodyLoc;
// If the body is empty let's put the cursor
// right after "in", otherwise make it start
// location of the first statement in the body.
if (bodyStmts.empty())
bodyLoc = Lexer::getLocForEndOfToken(sourceMgr, CE->getInLoc());
else
bodyLoc = bodyStmts.front().getStartLoc();
SmallString<64> fixIt;
llvm::raw_svector_ostream OS(fixIt);
// If this is multi-line closure we'd have to insert new lines
// in the suggested 'let' to keep the structure of the code intact,
// otherwise just use ';' to keep everything on the same line.
auto inLine = sourceMgr.getLineNumber(CE->getInLoc());
auto bodyLine = sourceMgr.getLineNumber(bodyLoc);
auto isMultiLineClosure = bodyLine > inLine;
auto indent = bodyStmts.empty() ? "" : Lexer::getIndentationForLine(
sourceMgr, bodyLoc);
SmallString<16> parameter;
llvm::raw_svector_ostream parameterOS(parameter);
parameterOS << "(";
interleave(params->getArray(),
[&](const ParamDecl *param) {
parameterOS << param->getNameStr();
},
[&] { parameterOS << ", "; });
parameterOS << ")";
// Check if there are any explicit types associated
// with parameters, if there are, we'll have to add
// type information to the replacement argument.
bool explicitTypes = false;
for (auto *param : params->getArray()) {
if (param->getTypeLoc().getTypeRepr()) {
explicitTypes = true;
break;
}
}
if (isMultiLineClosure)
OS << '\n' << indent;
// Let's form 'let <name> : [<type>]? = arg' expression.
OS << "let " << parameterOS.str() << " = arg"
<< (isMultiLineClosure ? "\n" + indent : "; ");
SmallString<64> argName;
llvm::raw_svector_ostream nameOS(argName);
if (explicitTypes) {
nameOS << "(arg: " << argTupleTy->getString() << ")";
} else {
nameOS << "(arg)";
}
if (CE->hasSingleExpressionBody()) {
// Let's see if we need to add result type to the argument/fix-it:
// - if the there is a result type associated with the closure;
// - and it's not a void type;
// - and it hasn't been explicitly written.
auto resultType = fnType->getResult();
auto hasResult = [](Type resultType) -> bool {
return resultType && !resultType->isVoid();
};
auto isValidType = [](Type resultType) -> bool {
return resultType && !resultType->hasUnresolvedType() &&
!resultType->hasTypeVariable();
};
// If there an expected result type but it hasn't been explicitly
// provided, let's add it to the argument.
if (hasResult(resultType) && !CE->hasExplicitResultType()) {
nameOS << " -> ";
if (isValidType(resultType))
nameOS << resultType->getString();
else
nameOS << "<#Result#>";
}
if (auto stmt = bodyStmts.front().get<Stmt *>()) {
// If the body is a single expression with implicit return.
if (isa<ReturnStmt>(stmt) && stmt->isImplicit()) {
// And there is non-void expected result type,
// because we add 'let' expression to the body
// we need to make such 'return' explicit.
if (hasResult(resultType))
OS << "return ";
}
}
}
diag.fixItReplace(params->getSourceRange(), nameOS.str())
.fixItInsert(bodyLoc, OS.str());
return true;
}
}
}
bool onlyAnonymousParams =
std::all_of(params->begin(), params->end(), [](ParamDecl *param) {
return !param->hasName();
});
// Okay, the wrong number of arguments was used, complain about that.
// Before doing so, strip attributes off the function type so that they
// don't confuse the issue.
fnType = FunctionType::get(fnType->getInput(), fnType->getResult());
auto diag = diagnose(
params->getStartLoc(), diag::closure_argument_list_tuple, fnType,
inferredArgCount, actualArgCount, (actualArgCount == 1));
// If closure expects no parameters but N was given,
// and all of them are anonymous let's suggest removing them.
if (inferredArgCount == 0 && onlyAnonymousParams) {
auto inLoc = CE->getInLoc();
auto &sourceMgr = CS.getASTContext().SourceMgr;
if (inLoc.isValid())
diag.fixItRemoveChars(params->getStartLoc(),
Lexer::getLocForEndOfToken(sourceMgr, inLoc));
return true;
}
// If the number of parameters is less than number of inferred
// and all of the parameters are anonymous, let's suggest a fix-it
// with the rest of the missing parameters.
if (actualArgCount < inferredArgCount) {
SmallString<32> fixIt;
llvm::raw_svector_ostream OS(fixIt);
OS << ",";
auto numMissing = inferredArgCount - actualArgCount;
for (unsigned i = 0; i != numMissing; ++i) {
OS << ((onlyAnonymousParams) ? "_" : "<#arg#>");
OS << ((i == numMissing - 1) ? " " : ",");
}
diag.fixItInsertAfter(params->getEndLoc(), OS.str());
}
return true;
}
// Coerce parameter types here only if there are no unresolved
if (CS.TC.coerceParameterListToType(params, CE, fnType))
return true;
for (auto param : *params) {
auto paramType = param->getType();
// If this is unresolved 'inout' parameter, it's better to drop
// 'inout' from type because that might help to diagnose actual problem
// e.g. type inference doesn't give us much information anyway.
if (param->isInOut() && paramType->hasUnresolvedType()) {
assert(!param->isLet() || !paramType->is<InOutType>());
param->setType(CS.getASTContext().TheUnresolvedType);
param->setInterfaceType(paramType->getInOutObjectType());
param->setSpecifier(swift::VarDecl::Specifier::Owned);
}
}
expectedResultType = fnType->getResult();
} else {
// Defend against type variables from our constraint system leaking into
// recursive constraints systems formed when checking the body of the
// closure. These typevars come into them when the body does name
// lookups against the parameter decls.
//
// Handle this by rewriting the arguments to UnresolvedType().
for (auto VD : *CE->getParameters()) {
if (VD->getType()->hasTypeVariable() || VD->getType()->hasError()) {
VD->setType(CS.getASTContext().TheUnresolvedType);
VD->setInterfaceType(VD->getType()->getInOutObjectType());
}
}
}
// If this is a complex leaf closure, there is nothing more we can do.
if (!CE->hasSingleExpressionBody())
return false;
if (isInvalidClosureResultType(expectedResultType))
expectedResultType = Type();
// When we're type checking a single-expression closure, we need to reset the
// DeclContext to this closure for the recursive type checking. Otherwise,
// if there is a closure in the subexpression, we can violate invariants.
{
llvm::SaveAndRestore<DeclContext *> SavedDC(CS.DC, CE);
// Explicitly disallow to produce solutions with unresolved type variables,
// because there is no auxiliary logic which would handle that and it's
// better to allow failure diagnosis to run directly on the closure body.
// Note that presence of contextual type implicitly forbids such solutions,
// but it's not always reset.
if (expectedResultType && !CE->hasExplicitResultType()) {
ExprCleaner cleaner(CE);
auto closure = CE->getSingleExpressionBody();
ConcreteDeclRef decl = nullptr;
// Let's try to compute result type without mutating AST and
// using expected (contextual) result type, that's going to help
// diagnose situations where contextual type expected one result
// type but actual closure produces a different one without explicitly
// declaring it (e.g. by using anonymous parameters).
auto type = CS.TC.getTypeOfExpressionWithoutApplying(
closure, CS.DC, decl, FreeTypeVariableBinding::Disallow);
if (type && resultTypeProcessor(type, expectedResultType))
return true;
}
// If the closure had an expected result type, use it.
if (CE->hasExplicitResultType())
expectedResultType = CE->getExplicitResultTypeLoc().getType();
// If we couldn't diagnose anything related to the contextual result type
// let's run proper type-check with expected type and try to verify it.
auto CTP = expectedResultType ? CTP_ClosureResult : CTP_Unused;
auto *bodyExpr = typeCheckChildIndependently(CE->getSingleExpressionBody(),
expectedResultType, CTP,
TCCOptions(), nullptr, false);
if (!bodyExpr)
return true;
if (resultTypeProcessor(CS.getType(bodyExpr), expectedResultType))
return true;
}
// If the body of the closure looked ok, then look for a contextual type
// error. This is necessary because FailureDiagnosis::diagnoseExprFailure
// doesn't do this for closures.
if (contextualType) {
auto fnType = contextualType->getAs<AnyFunctionType>();
if (!fnType || fnType->isEqual(CS.getType(CE)))
return false;
auto contextualResultType = fnType->getResult();
// If the result type was unknown, it doesn't really make
// sense to diagnose from expected to unknown here.
if (isInvalidClosureResultType(contextualResultType))
return false;
// If the closure had an explicitly written return type incompatible with
// the contextual type, diagnose that.
if (CE->hasExplicitResultType() &&
CE->getExplicitResultTypeLoc().getTypeRepr()) {
auto explicitResultTy = CE->getExplicitResultTypeLoc().getType();
if (fnType && !explicitResultTy->isEqual(contextualResultType)) {
auto repr = CE->getExplicitResultTypeLoc().getTypeRepr();
diagnose(repr->getStartLoc(), diag::incorrect_explicit_closure_result,
explicitResultTy, fnType->getResult())
.fixItReplace(repr->getSourceRange(),fnType->getResult().getString());
return true;
}
}
}
// Otherwise, we can't produce a specific diagnostic.
return false;
}
/// Diagnose key-path operations that are not (yet) supported by the
/// compiler, such as referencing a tuple element through a Swift key path.
///
/// \returns true if an "unsupported" diagnostic was emitted.
static bool diagnoseKeyPathUnsupportedOperations(TypeChecker &TC,
                                                 KeyPathExpr *KPE) {
  // Only Swift (non-ObjC) key paths are checked here.
  if (KPE->isObjC())
    return false;

  using ComponentKind = KeyPathExpr::Component::Kind;
  const auto components = KPE->getComponents();

  // A key path rooted in a tuple type cannot reference tuple elements.
  auto *rootType = KPE->getRootType();
  if (!rootType || !isa<TupleTypeRepr>(rootType))
    return false;

  const auto &first = components.front();
  if (first.getKind() != ComponentKind::UnresolvedProperty)
    return false;

  TC.diagnose(first.getLoc(),
              diag::unsupported_keypath_tuple_element_reference);
  return true;
}
/// Ported version of TypeChecker::checkObjCKeyPathExpr which works
/// with the new Smart KeyPath feature.
///
/// Walks the components of \p KPE starting from \p rootType, performing
/// member lookup for each component and emitting diagnostics for the
/// problems it finds (unknown member, ambiguous reference, referencing a
/// type through a property, generic type references).
///
/// \returns true if any component was found to be invalid.
static bool diagnoseKeyPathComponents(ConstraintSystem &CS, KeyPathExpr *KPE,
                                      Type rootType) {
  auto &TC = CS.TC;

  // The key path string we're forming.
  SmallString<32> keyPathScratch;
  llvm::raw_svector_ostream keyPathOS(keyPathScratch);

  // Captures the state of semantic resolution: what kind of entity the
  // previously-resolved component referred to. This drives what the next
  // component is allowed to be.
  enum State {
    Beginning,
    ResolvingType,
    ResolvingProperty,
    ResolvingArray,
    ResolvingSet,
    ResolvingDictionary,
  } state = Beginning;

  /// Determine whether we are currently resolving a property
  /// (collections count as properties for this purpose).
  auto isResolvingProperty = [&] {
    switch (state) {
    case Beginning:
    case ResolvingType:
      return false;

    case ResolvingProperty:
    case ResolvingArray:
    case ResolvingSet:
    case ResolvingDictionary:
      return true;
    }

    llvm_unreachable("Unhandled State in switch.");
  };

  // The type of AnyObject, which is used whenever we don't have
  // sufficient type information (e.g. untyped Foundation collections).
  Type anyObjectType = TC.Context.getAnyObjectType();

  // Local function to update the state after we've resolved a
  // component. Mutates both `state` and `currentType`.
  Type currentType = rootType;
  auto updateState = [&](bool isProperty, Type newType) {
    // Strip off optionals.
    newType = newType->lookThroughAllAnyOptionalTypes();

    // If updating to a type, just set the new type; there's nothing
    // more to do.
    if (!isProperty) {
      assert(state == Beginning || state == ResolvingType);
      state = ResolvingType;
      currentType = newType;
      return;
    }

    // We're updating to a property. Determine whether we're looking
    // into a bridged Swift collection of some sort.
    if (auto boundGeneric = newType->getAs<BoundGenericType>()) {
      auto nominal = boundGeneric->getDecl();

      // Array<T>
      if (nominal == TC.Context.getArrayDecl()) {
        // Further lookups into the element type.
        state = ResolvingArray;
        currentType = boundGeneric->getGenericArgs()[0];
        return;
      }

      // Set<T>
      if (nominal == TC.Context.getSetDecl()) {
        // Further lookups into the element type.
        state = ResolvingSet;
        currentType = boundGeneric->getGenericArgs()[0];
        return;
      }

      // Dictionary<K, V>
      if (nominal == TC.Context.getDictionaryDecl()) {
        // Key paths look into the keys of a dictionary; further
        // lookups into the value type.
        state = ResolvingDictionary;
        currentType = boundGeneric->getGenericArgs()[1];
        return;
      }
    }

    // Determine whether we're looking into a Foundation collection.
    if (auto classDecl = newType->getClassOrBoundGenericClass()) {
      if (classDecl->isObjC() && classDecl->hasClangNode()) {
        SmallString<32> scratch;
        StringRef objcClassName = classDecl->getObjCRuntimeName(scratch);

        // NSArray
        if (objcClassName == "NSArray") {
          // The element type is unknown, so use AnyObject.
          state = ResolvingArray;
          currentType = anyObjectType;
          return;
        }

        // NSSet
        if (objcClassName == "NSSet") {
          // The element type is unknown, so use AnyObject.
          state = ResolvingSet;
          currentType = anyObjectType;
          return;
        }

        // NSDictionary
        if (objcClassName == "NSDictionary") {
          // Key paths look into the keys of a dictionary; there's no
          // type to help us here.
          state = ResolvingDictionary;
          currentType = anyObjectType;
          return;
        }
      }
    }

    // It's just a property.
    state = ResolvingProperty;
    currentType = newType;
  };

  // Local function to perform name lookup for the current index.
  // Sets `lookupType` to the type the lookup actually occurred in.
  auto performLookup = [&](DeclBaseName componentName,
                           SourceLoc componentNameLoc,
                           Type &lookupType) -> LookupResult {
    assert(currentType && "Non-beginning state must have a type");
    if (!currentType->mayHaveMembers())
      return LookupResult();

    // Determine the type in which the lookup should occur. If we have
    // a bridged value type, this will be the Objective-C class to
    // which it is bridged.
    if (auto bridgedClass = TC.Context.getBridgedToObjC(CS.DC, currentType))
      lookupType = bridgedClass;
    else
      lookupType = currentType;

    // Look for a member with the given name within this type.
    return TC.lookupMember(CS.DC, lookupType, componentName);
  };

  // Local function to print a component to the string, separating
  // successive components with dots.
  bool needDot = false;
  auto printComponent = [&](DeclBaseName component) {
    if (needDot)
      keyPathOS << ".";
    else
      needDot = true;

    keyPathOS << component;
  };

  bool isInvalid = false;
  SmallVector<KeyPathExpr::Component, 4> resolvedComponents;

  for (auto &component : KPE->getComponents()) {
    auto componentNameLoc = component.getLoc();
    DeclBaseName componentName;

    switch (auto kind = component.getKind()) {
    case KeyPathExpr::Component::Kind::UnresolvedProperty: {
      auto componentFullName = component.getUnresolvedDeclName();
      componentName = componentFullName.getBaseIdentifier();
      break;
    }

    case KeyPathExpr::Component::Kind::UnresolvedSubscript:
      componentName = DeclBaseName::createSubscript();
      break;

    case KeyPathExpr::Component::Kind::Invalid:
    case KeyPathExpr::Component::Kind::OptionalChain:
    case KeyPathExpr::Component::Kind::OptionalForce:
      // FIXME: Diagnose optional chaining and forcing properly.
      return false;

    case KeyPathExpr::Component::Kind::OptionalWrap:
    case KeyPathExpr::Component::Kind::Property:
    case KeyPathExpr::Component::Kind::Subscript:
      llvm_unreachable("already resolved!");
    }

    // If we are resolving into a dictionary, any component is
    // well-formed because the keys are unknown dynamically.
    if (state == ResolvingDictionary) {
      // Just print the component unchanged; there's no checking we
      // can do here.
      printComponent(componentName);

      // From here, we're resolving a property. Use the current type.
      updateState(/*isProperty=*/true, currentType);

      continue;
    }

    // Look for this component.
    Type lookupType;
    LookupResult lookup =
        performLookup(componentName, componentNameLoc, lookupType);

    // If we didn't find anything, try to apply typo-correction.
    bool resultsAreFromTypoCorrection = false;
    if (!lookup) {
      TC.performTypoCorrection(CS.DC, DeclRefKind::Ordinary, lookupType,
                               componentName, componentNameLoc,
                               (lookupType ? defaultMemberTypeLookupOptions
                                           : defaultUnqualifiedLookupOptions),
                               lookup);

      if (currentType)
        TC.diagnose(componentNameLoc, diag::could_not_find_type_member,
                    currentType, componentName);
      else
        TC.diagnose(componentNameLoc, diag::use_unresolved_identifier,
                    componentName, false);

      // Note all the correction candidates.
      for (auto &result : lookup) {
        TC.noteTypoCorrection(componentName, DeclNameLoc(componentNameLoc),
                              result.getValueDecl());
      }

      isInvalid = true;
      // No candidates even after typo-correction: give up on the
      // remaining components.
      if (!lookup)
        break;

      // Remember that these are from typo correction.
      resultsAreFromTypoCorrection = true;
    }

    // If we have more than one result, filter out unavailable or
    // obviously unusable candidates.
    if (lookup.size() > 1) {
      lookup.filter([&](LookupResultEntry result) -> bool {
        // Drop unavailable candidates.
        if (result.getValueDecl()->getAttrs().isUnavailable(TC.Context))
          return false;

        // Drop non-property, non-type candidates.
        if (!isa<VarDecl>(result.getValueDecl()) &&
            !isa<TypeDecl>(result.getValueDecl()))
          return false;

        return true;
      });
    }

    // If we *still* have more than one result, fail.
    if (lookup.size() > 1) {
      // Don't diagnose ambiguities if the results are from typo correction.
      if (resultsAreFromTypoCorrection)
        break;

      if (lookupType)
        TC.diagnose(componentNameLoc, diag::ambiguous_member_overload_set,
                    componentName);
      else
        TC.diagnose(componentNameLoc, diag::ambiguous_decl_ref, componentName);

      for (auto result : lookup) {
        TC.diagnose(result.getValueDecl(), diag::decl_declared_here,
                    result.getValueDecl()->getFullName());
      }
      isInvalid = true;
      break;
    }

    auto found = lookup.front().getValueDecl();

    // Handle property references.
    if (auto var = dyn_cast<VarDecl>(found)) {
      TC.validateDecl(var);

      // Resolve this component to the variable we found.
      auto varRef = ConcreteDeclRef(var);
      auto resolved =
          KeyPathExpr::Component::forProperty(varRef, Type(), componentNameLoc);
      resolvedComponents.push_back(resolved);
      updateState(/*isProperty=*/true,
                  var->getInterfaceType()->getRValueObjectType());

      continue;
    }

    // Handle type references.
    if (auto type = dyn_cast<TypeDecl>(found)) {
      // We cannot refer to a type via a property.
      if (isResolvingProperty()) {
        TC.diagnose(componentNameLoc, diag::expr_keypath_type_of_property,
                    componentName, currentType);
        isInvalid = true;
        break;
      }

      // We cannot refer to a generic type.
      if (type->getDeclaredInterfaceType()->hasTypeParameter()) {
        TC.diagnose(componentNameLoc, diag::expr_keypath_generic_type,
                    componentName);
        isInvalid = true;
        break;
      }

      Type newType;
      if (lookupType && !lookupType->isAnyObject()) {
        newType = lookupType->getTypeOfMember(CS.DC->getParentModule(), type,
                                              type->getDeclaredInterfaceType());
      } else {
        newType = type->getDeclaredInterfaceType();
      }
      if (!newType) {
        isInvalid = true;
        break;
      }

      updateState(/*isProperty=*/false, newType);
      continue;
    }

    // NOTE(review): other declaration kinds (e.g. functions) fall through
    // without diagnosis here — presumably handled elsewhere; confirm.
    continue;
  }

  return isInvalid;
}
/// Diagnose failures in a smart key path expression: first check whether the
/// value type produced by the path can convert to the contextually-expected
/// value type; failing that, diagnose the individual path components.
bool FailureDiagnosis::visitKeyPathExpr(KeyPathExpr *KPE) {
  // Unsupported operations (e.g. tuple element references) take priority.
  if (diagnoseKeyPathUnsupportedOperations(CS.TC, KPE))
    return true;

  auto contextualType = CS.getContextualType();

  auto components = KPE->getComponents();
  assert(!components.empty() && "smart key path components cannot be empty.");

  using ComponentKind = KeyPathExpr::Component::Kind;
  auto &firstComponent = components.front();

  // Only key paths that still start with an unresolved component are
  // interesting here; everything else is not ours to diagnose.
  const auto firstKind = firstComponent.getKind();
  if (firstKind != ComponentKind::UnresolvedProperty &&
      firstKind != ComponentKind::UnresolvedSubscript)
    return false;

  // Without contextual information we can't really do anything, just like
  // for an unresolved member expression.
  if (!contextualType)
    return false;

  ClassDecl *keyPathDecl = nullptr;
  Type parentType, rootType, valueType;
  if (auto *BGT = contextualType->getAs<BoundGenericClassType>()) {
    auto genericArgs = BGT->getGenericArgs();
    keyPathDecl = BGT->getDecl();
    parentType = BGT->getParent();

    // A smart key path type carries either one generic argument (the root
    // type) or two (root and value types).
    assert(genericArgs.size() == 1 || genericArgs.size() == 2);
    rootType = genericArgs.front();
    if (genericArgs.size() == 2)
      valueType = genericArgs.back();
  }

  // With no root type associated with the expression we can't really
  // diagnose anything here; it's most likely an ambiguity.
  if (!rootType)
    return false;

  // When the value type is known, look for a mismatch between the value the
  // path actually produces and the one the context expects.
  if (valueType && !valueType->hasUnresolvedType()) {
    struct KeyPathListener : public ExprTypeCheckListener {
      ClassDecl *Decl;
      Type ParentType;
      Type RootType;

      KeyPathListener(ClassDecl *decl, Type parent, Type root)
          : Decl(decl), ParentType(parent), RootType(root) {}

      bool builtConstraints(ConstraintSystem &cs, Expr *expr) override {
        auto *locator = cs.getConstraintLocator(expr);
        // Leave the value slot open so the solver derives it from the path.
        auto valueType = cs.createTypeVariable(locator, TVO_CanBindToInOut);

        auto keyPathType =
            BoundGenericClassType::get(Decl, ParentType, {RootType, valueType});

        cs.addConstraint(ConstraintKind::Conversion, cs.getType(expr),
                         keyPathType, locator, /*isFavored*/ true);
        return false;
      }
    };

    // Type-check without mutating the AST, using a listener that pins the
    // root type but lets the value type be inferred.
    Expr *expr = KPE;
    KeyPathListener listener(keyPathDecl, parentType, rootType);
    ConcreteDeclRef concreteDecl;

    auto derivedType = CS.TC.getTypeOfExpressionWithoutApplying(
        expr, CS.DC, concreteDecl, FreeTypeVariableBinding::Disallow,
        &listener);

    if (derivedType) {
      if (auto *BGT = derivedType->getAs<BoundGenericClassType>()) {
        auto derivedValueType = BGT->getGenericArgs().back();
        if (!CS.TC.isConvertibleTo(valueType, derivedValueType, CS.DC)) {
          diagnose(KPE->getLoc(),
                   diag::expr_smart_keypath_value_covert_to_contextual_type,
                   derivedValueType, valueType);
          return true;
        }
      }
    }
  }

  // Not a contextual value type problem; see whether the path itself has a
  // mistyped or nonexistent component.
  return diagnoseKeyPathComponents(CS, KPE, rootType);
}
/// Diagnose failures in an array literal expression.
///
/// When a contextual type is present it must conform to
/// ExpressibleByArrayLiteral; each element is then re-checked against the
/// contextual element type. If it instead conforms to
/// ExpressibleByDictionaryLiteral, the user probably meant a dictionary
/// literal ("[1,2]" instead of "[1:2]") and a fix-it is offered.
///
/// \returns true if a diagnostic was produced.
bool FailureDiagnosis::visitArrayExpr(ArrayExpr *E) {
  // If we had a contextual type, then it either conforms to
  // ExpressibleByArrayLiteral or it is an invalid contextual type.
  auto contextualType = CS.getContextualType();
  if (!contextualType) {
    return false;
  }

  // If our contextual type is an optional, look through them, because we're
  // surely initializing whatever is inside.
  contextualType = contextualType->lookThroughAllAnyOptionalTypes();

  // Validate that the contextual type conforms to ExpressibleByArrayLiteral and
  // figure out what the contextual element type is in place.
  auto ALC = CS.TC.getProtocol(E->getLoc(),
                               KnownProtocolKind::ExpressibleByArrayLiteral);
  if (!ALC)
    return visitExpr(E);

  // Check to see if the contextual type conforms.
  if (auto Conformance
        = CS.TC.conformsToProtocol(contextualType, ALC, CS.DC,
                                   ConformanceCheckFlags::InExpression)) {
    Type contextualElementType =
        ProtocolConformanceRef::getTypeWitnessByName(
            contextualType, *Conformance,
            CS.getASTContext().Id_ArrayLiteralElement, &CS.TC)
            ->getDesugaredType();

    // Type check each of the subexpressions in place, passing down the
    // contextual type information if we have it.
    for (auto elt : E->getElements()) {
      if (typeCheckChildIndependently(elt, contextualElementType,
                                      CTP_ArrayElement) == nullptr) {
        return true;
      }
    }

    return false;
  }

  auto DLC
    = CS.TC.getProtocol(E->getLoc(),
                        KnownProtocolKind::ExpressibleByDictionaryLiteral);
  if (!DLC)
    return visitExpr(E);

  if (auto Conformance
        = CS.TC.conformsToProtocol(contextualType, DLC, CS.DC,
                                   ConformanceCheckFlags::InExpression)) {
    // If the contextual type conforms to ExpressibleByDictionaryLiteral and
    // this is an empty array, then they meant "[:]".
    auto numElements = E->getNumElements();
    if (numElements == 0) {
      diagnose(E->getStartLoc(), diag::should_use_empty_dictionary_literal)
          .fixItInsert(E->getEndLoc(), ":");
      return true;
    }

    // If the contextual type conforms to ExpressibleByDictionaryLiteral, then
    // they wrote "x = [1,2]" but probably meant "x = [1:2]".
    // numElements is known non-zero here (early return above), so checking
    // for evenness alone is sufficient.
    if ((numElements & 1) == 0) {
      bool isIniting = CS.getContextualTypePurpose() == CTP_Initialization;
      diagnose(E->getStartLoc(), diag::should_use_dictionary_literal,
               contextualType, isIniting);

      auto diag = diagnose(E->getStartLoc(), diag::meant_dictionary_lit);

      // Change every other comma into a colon, only if the number
      // of commas present matches the number of elements, because
      // otherwise it might be a structural problem with the expression
      // e.g. ["a""b": 1].
      const auto commaLocs = E->getCommaLocs();
      if (commaLocs.size() == numElements - 1) {
        for (unsigned i = 0, e = numElements / 2; i != e; ++i)
          diag.fixItReplace(commaLocs[i*2], ":");
      }
      return true;
    }

    return false;
  }

  // If that didn't turn up an issue, then we don't know what to do.
  // TODO: When a contextual type is missing, we could try to diagnose cases
  // where the element types mismatch... but theoretically they should type
  // unify to Any, so that could never happen?
  return false;
}
/// Diagnose failures in a dictionary literal expression by re-checking each
/// key/value pair, feeding in the contextual Key/Value types when known.
bool FailureDiagnosis::visitDictionaryExpr(DictionaryExpr *E) {
  Type keyContextTy, valueContextTy;
  auto keyPurpose = CTP_Unused;
  auto valuePurpose = CTP_Unused;

  // A contextual type, if present, either conforms to
  // ExpressibleByDictionaryLiteral or is an invalid contextual type.
  if (auto contextTy = CS.getContextualType()) {
    // Strip optionality: we're surely initializing whatever is inside.
    contextTy = contextTy->lookThroughAllAnyOptionalTypes();

    auto DLC = CS.TC.getProtocol(
        E->getLoc(), KnownProtocolKind::ExpressibleByDictionaryLiteral);
    if (!DLC)
      return visitExpr(E);

    // Validate the conformance and dig out the contextual Key/Value types.
    auto Conformance = CS.TC.conformsToProtocol(
        contextTy, DLC, CS.DC, ConformanceCheckFlags::InExpression);
    if (!Conformance) {
      diagnose(E->getStartLoc(), diag::type_is_not_dictionary, contextTy)
          .highlight(E->getSourceRange());
      return true;
    }

    keyContextTy = ProtocolConformanceRef::getTypeWitnessByName(
                       contextTy, *Conformance,
                       CS.getASTContext().Id_Key, &CS.TC)
                       ->getDesugaredType();
    valueContextTy = ProtocolConformanceRef::getTypeWitnessByName(
                         contextTy, *Conformance,
                         CS.getASTContext().Id_Value, &CS.TC)
                         ->getDesugaredType();
    assert(keyContextTy && valueContextTy &&
           "Could not find Key/Value DictionaryLiteral associated types from"
           " contextual type conformance");

    keyPurpose = CTP_DictionaryKey;
    valuePurpose = CTP_DictionaryValue;
  }

  // Type-check every key/value pair independently, passing down the
  // contextual type information if we have it.
  for (auto elt : E->getElements()) {
    auto *TE = dyn_cast<TupleExpr>(elt);
    if (!TE || TE->getNumElements() != 2)
      continue;

    if (!typeCheckChildIndependently(TE->getElement(0), keyContextTy,
                                     keyPurpose))
      return true;
    if (!typeCheckChildIndependently(TE->getElement(1), valueContextTy,
                                     valuePurpose))
      return true;
  }

  // If that didn't turn up an issue, then we don't know what to do.
  // TODO: When a contextual type is missing, we could try to diagnose cases
  // where the element types mismatch. There is no Any equivalent since the
  // keys need to be hashable.
  return false;
}
/// When an object literal fails to typecheck because its protocol's
/// corresponding default type has not been set in the global namespace (e.g.
/// _ColorLiteralType), suggest that the user import the appropriate module
/// for the target.
bool FailureDiagnosis::visitObjectLiteralExpr(ObjectLiteralExpr *E) {
  auto &TC = CS.getTypeChecker();

  auto protocol = TC.getLiteralProtocol(E);
  if (!protocol)
    return false;

  // Locate the single constructor declared by the literal protocol and
  // check the literal's argument against its parameter type first.
  DeclName constrName = TC.getObjectLiteralConstructorName(E);
  assert(constrName);
  ArrayRef<ValueDecl *> constrs = protocol->lookupDirect(constrName);
  if (constrs.size() != 1 || !isa<ConstructorDecl>(constrs.front()))
    return false;
  auto *constr = cast<ConstructorDecl>(constrs.front());
  auto paramType = TC.getObjectLiteralParameterType(E, constr);
  if (!typeCheckChildIndependently(E->getArg(), paramType, CTP_CallArgument))
    return true;

  // Conditions for showing this diagnostic:
  // * The object literal protocol's default type is unimplemented...
  if (TC.getDefaultType(protocol, CS.DC))
    return false;
  // * ...and the object literal has no contextual type.
  if (CS.getContextualType())
    return false;

  // Figure out what import to suggest.
  auto &Ctx = CS.getASTContext();
  const auto &target = Ctx.LangOpts.Target;
  StringRef importModule;
  StringRef importDefaultTypeName;

  auto suggest = [&](StringRef module, StringRef typeName) {
    importModule = module;
    importDefaultTypeName = typeName;
  };
  auto isProtocol = [&](KnownProtocolKind kind) {
    return protocol == Ctx.getProtocol(kind);
  };

  if (isProtocol(KnownProtocolKind::ExpressibleByColorLiteral)) {
    if (target.isMacOSX())
      suggest("AppKit", "NSColor");
    else if (target.isiOS() || target.isTvOS())
      suggest("UIKit", "UIColor");
  } else if (isProtocol(KnownProtocolKind::ExpressibleByImageLiteral)) {
    if (target.isMacOSX())
      suggest("AppKit", "NSImage");
    else if (target.isiOS() || target.isTvOS())
      suggest("UIKit", "UIImage");
  } else if (isProtocol(KnownProtocolKind::ExpressibleByFileReferenceLiteral)) {
    suggest("Foundation", "URL");
  }

  // Emit the diagnostic, plus the import suggestion when we have one.
  const auto plainName = E->getLiteralKindPlainName();
  TC.diagnose(E->getLoc(), diag::object_literal_default_type_missing,
              plainName);
  if (!importModule.empty()) {
    TC.diagnose(E->getLoc(), diag::object_literal_resolve_import,
                importModule, importDefaultTypeName, plainName);
  }
  return true;
}
bool FailureDiagnosis::visitUnresolvedMemberExpr(UnresolvedMemberExpr *E) {
// If we have no contextual type, there is no way to resolve this. Just
// diagnose this as an ambiguity.
if (!CS.getContextualType())
return false;
// OTOH, if we do have a contextual type, we can provide a more specific
// error. Dig out the UnresolvedValueMember constraint for this expr node.
Constraint *memberConstraint = nullptr;
auto checkConstraint = [&](Constraint *C) {
if (C->getKind() == ConstraintKind::UnresolvedValueMember &&
simplifyLocatorToAnchor(CS, C->getLocator()) == E)
memberConstraint = C;
};
if (CS.failedConstraint)
checkConstraint(CS.failedConstraint);
for (auto &C : CS.getConstraints()) {
if (memberConstraint) break;
checkConstraint(&C);
}
// If we can't find the member constraint in question, then we failed.
if (!memberConstraint)
return false;
std::function<bool(ArrayRef<OverloadChoice>)> callback = [&](
ArrayRef<OverloadChoice> candidates) {
bool hasTrailingClosure = callArgHasTrailingClosure(E->getArgument());
// Dump all of our viable candidates into a CalleeCandidateInfo & sort it
// out.
CalleeCandidateInfo candidateInfo(Type(), candidates, hasTrailingClosure,
CS);
// Filter the candidate list based on the argument we may or may not have.
candidateInfo.filterContextualMemberList(E->getArgument());
// If we have multiple candidates, then we have an ambiguity.
if (candidateInfo.size() != 1) {
SourceRange argRange;
if (auto arg = E->getArgument())
argRange = arg->getSourceRange();
diagnose(E->getNameLoc(), diag::ambiguous_member_overload_set,
E->getName())
.highlight(argRange);
candidateInfo.suggestPotentialOverloads(E->getNameLoc().getBaseNameLoc());
return true;
}
auto *argExpr = E->getArgument();
auto candidateArgTy = candidateInfo[0].getArgumentType();
// Depending on how we matched, produce tailored diagnostics.
switch (candidateInfo.closeness) {
case CC_NonLValueInOut: // First argument is inout but no lvalue present.
case CC_OneArgumentMismatch: // All arguments except one match.
case CC_OneArgumentNearMismatch:
case CC_OneGenericArgumentMismatch:
case CC_OneGenericArgumentNearMismatch:
case CC_GenericNonsubstitutableMismatch:
case CC_SelfMismatch: // Self argument mismatches.
case CC_ArgumentNearMismatch: // Argument list mismatch.
case CC_ArgumentMismatch: // Argument list mismatch.
llvm_unreachable("These aren't produced by filterContextualMemberList");
return false;
case CC_ExactMatch: { // This is a perfect match for the arguments.
// If we have an exact match, then we must have an argument list, check
// it.
if (candidateArgTy) {
assert(argExpr && "Exact match without argument?");
if (!typeCheckArgumentChildIndependently(argExpr, candidateArgTy,
candidateInfo))
return true;
}
// If the argument is a match, then check the result type. We might have
// looked up a contextual member whose result type disagrees with the
// expected result type.
auto resultTy = candidateInfo[0].getResultType();
if (!resultTy)
resultTy = candidateInfo[0].getUncurriedType();
if (resultTy && !CS.getContextualType()->is<UnboundGenericType>() &&
!CS.TC.isConvertibleTo(resultTy, CS.getContextualType(), CS.DC)) {
diagnose(E->getNameLoc(), diag::expected_result_in_contextual_member,
E->getName(), resultTy, CS.getContextualType());
return true;
}
// Otherwise, this is an exact match, return false to diagnose this as an
// ambiguity. It must be some other problem, such as failing to infer a
// generic argument on the enum type.
return false;
}
case CC_Unavailable:
case CC_Inaccessible:
// Diagnose some simple and common errors.
return candidateInfo.diagnoseSimpleErrors(E);
case CC_ArgumentLabelMismatch:
case CC_ArgumentCountMismatch: {
// If we have no argument, the candidates must have expected one.
if (!argExpr) {
if (!candidateArgTy)
return false; // Candidate must be incorrect for some other reason.
// Pick one of the arguments that are expected as an exemplar.
if (candidateArgTy->isVoid()) {
// If this member is () -> T, suggest adding parentheses.
diagnose(E->getNameLoc(), diag::expected_parens_in_contextual_member,
E->getName())
.fixItInsertAfter(E->getEndLoc(), "()");
} else {
diagnose(E->getNameLoc(),
diag::expected_argument_in_contextual_member, E->getName(),
candidateArgTy);
}
return true;
}
assert(argExpr && candidateArgTy && "Exact match without an argument?");
return diagnoseSingleCandidateFailures(candidateInfo, E, argExpr,
E->getArgumentLabels());
}
case CC_GeneralMismatch: { // Something else is wrong.
// If an argument value was specified, but this member expects no
// arguments,
// then we fail with a nice error message.
if (!candidateArgTy) {
if (CS.getType(argExpr)->isVoid()) {
diagnose(E->getNameLoc(),
diag::unexpected_parens_in_contextual_member, E->getName())
.fixItRemove(E->getArgument()->getSourceRange());
} else {
diagnose(E->getNameLoc(),
diag::unexpected_argument_in_contextual_member, E->getName())
.highlight(E->getArgument()->getSourceRange());
}
return true;
}
return false;
}
}
llvm_unreachable("all cases should be handled");
};
return diagnoseMemberFailures(E, nullptr, memberConstraint->getKind(),
memberConstraint->getMember(),
memberConstraint->getFunctionRefKind(),
memberConstraint->getLocator(), callback);
}
/// Diagnose a failed member lookup of \p memberName on the base \p baseExpr
/// (or, for UnresolvedMemberExpr, on the contextual type).
///
/// Re-runs the member lookup, emits tailored diagnostics for the common
/// failure shapes (uncalled function base, bad tuple index, missing optional
/// unwrap, 'init' misuse, unavailable/inaccessible members), and otherwise
/// hands the surviving viable candidates to \p callback, if provided.
/// Returns true if a diagnostic was emitted.
bool FailureDiagnosis::diagnoseMemberFailures(
    Expr *E, Expr *baseExpr, ConstraintKind lookupKind, DeclName memberName,
    FunctionRefKind funcRefKind, ConstraintLocator *locator,
    Optional<std::function<bool(ArrayRef<OverloadChoice>)>> callback,
    bool includeInaccessibleMembers) {
  auto isInitializer = memberName.isSimpleName(CS.TC.Context.Id_init);

  // Get the referenced base expression from the failed constraint, along with
  // the SourceRange for the member ref.  In "x.y", this returns the expr for x
  // and the source range for y.
  SourceRange memberRange;
  SourceLoc BaseLoc;
  DeclNameLoc NameLoc;

  Type baseTy, baseObjTy;
  // UnresolvedMemberExpr doesn't have a "base" expression; it's represented as
  // ".foo", which means that we need to get the base from the context.
  if (auto *UME = dyn_cast<UnresolvedMemberExpr>(E)) {
    memberRange = E->getSourceRange();
    BaseLoc = E->getLoc();
    NameLoc = UME->getNameLoc();
    baseTy = CS.getContextualType();
    if (!baseTy)
      return false;

    // If we succeeded, get ready to do the member lookup.
    baseObjTy = baseTy->getRValueType();

    // If the base object is already a metatype type, then something weird is
    // going on.  For now, just generate a generic error.
    if (baseObjTy->is<MetatypeType>())
      return false;

    // ".foo" looks up members on the metatype of the contextual type.
    baseTy = baseObjTy = MetatypeType::get(baseObjTy);
  } else {
    memberRange = baseExpr->getSourceRange();
    if (locator)
      locator = simplifyLocator(CS, locator, memberRange);

    BaseLoc = baseExpr->getLoc();
    NameLoc = DeclNameLoc(memberRange.Start);

    // Retypecheck the anchor type, which is the base of the member expression.
    baseExpr =
        typeCheckArbitrarySubExprIndependently(baseExpr, TCC_AllowLValue);
    if (!baseExpr)
      return true;  // The base itself failed; a diagnostic was emitted.

    baseTy = CS.getType(baseExpr)->getWithoutSpecifierType();
    baseObjTy = baseTy;
  }

  // If the base type is an IUO, look through it.  Odds are, the code is not
  // trying to find a member of it.
  if (auto objTy = CS.lookThroughImplicitlyUnwrappedOptionalType(baseObjTy))
    baseTy = baseObjTy = objTy;

  // If the base of this property access is a function that takes an empty
  // argument list, then the most likely problem is that the user wanted to
  // call the function, e.g. in "a.b.c" where they had to write "a.b().c".
  // Produce a specific diagnostic + fixit for this situation.
  if (auto baseFTy = baseObjTy->getAs<AnyFunctionType>()) {
    if (baseExpr && baseFTy->getInput()->isVoid()) {
      SourceLoc insertLoc = baseExpr->getEndLoc();

      // Prefer naming the function or method in the diagnostic when the base
      // is a direct or curried declaration reference.
      if (auto *DRE = dyn_cast<DeclRefExpr>(baseExpr)) {
        diagnose(baseExpr->getLoc(), diag::did_not_call_function,
                 DRE->getDecl()->getBaseName().getIdentifier())
            .fixItInsertAfter(insertLoc, "()");
        return true;
      }

      if (auto *DSCE = dyn_cast<DotSyntaxCallExpr>(baseExpr))
        if (auto *DRE = dyn_cast<DeclRefExpr>(DSCE->getFn())) {
          diagnose(baseExpr->getLoc(), diag::did_not_call_method,
                   DRE->getDecl()->getBaseName().getIdentifier())
              .fixItInsertAfter(insertLoc, "()");
          return true;
        }

      diagnose(baseExpr->getLoc(), diag::did_not_call_function_value)
          .fixItInsertAfter(insertLoc, "()");
      return true;
    }
  }

  // If this is a tuple, then the index needs to be valid.
  if (auto tuple = baseObjTy->getAs<TupleType>()) {
    StringRef nameStr = memberName.getBaseIdentifier().str();
    int fieldIdx = -1;
    // Resolve a number reference (e.g. ".0") into the tuple type.
    unsigned Value = 0;
    if (!nameStr.getAsInteger(10, Value) && Value < tuple->getNumElements()) {
      fieldIdx = Value;
    } else {
      // Otherwise try to resolve a named element (e.g. ".first").
      fieldIdx = tuple->getNamedElementId(memberName.getBaseIdentifier());
    }

    if (fieldIdx != -1)
      return false; // Lookup is valid.

    diagnose(BaseLoc, diag::could_not_find_tuple_member, baseObjTy, memberName)
        .highlight(memberRange);
    return true;
  }

  // If this is initializer/constructor lookup, make sure we look it up on a
  // metatype: only MetatypeType is viable to find a constructor on, as per
  // the rules in ConstraintSystem::performMemberLookup.
  if (isInitializer) {
    if (!baseTy->is<AnyMetatypeType>()) {
      baseTy = MetatypeType::get(baseTy, CS.getASTContext());
    }
  }

  // If the base type has unresolved generic parameters, it might be an
  // initializer with an erroneous argument; otherwise this would be a simple
  // ambiguous archetype case.  Neither can be diagnosed here.
  if (baseTy->hasTypeParameter() && baseTy->hasUnresolvedType())
    return false;

  MemberLookupResult result =
      CS.performMemberLookup(lookupKind, memberName, baseTy, funcRefKind,
                             locator, includeInaccessibleMembers);

  switch (result.OverallResult) {
  case MemberLookupResult::Unsolved:
    // If we couldn't resolve a specific type for the base expression, then we
    // cannot produce a specific diagnostic.
    return false;

  case MemberLookupResult::ErrorAlreadyDiagnosed:
    // If an error was already emitted, then we're done, don't emit anything
    // redundant.
    return true;

  case MemberLookupResult::HasResults:
    break;
  }

  // KeyPathApplication choices are not interesting for these diagnostics;
  // filter them out before reporting.
  SmallVector<OverloadChoice, 4> viableCandidatesToReport;
  for (auto candidate : result.ViableCandidates)
    if (candidate.getKind() != OverloadChoiceKind::KeyPathApplication)
      viableCandidatesToReport.push_back(candidate);

  // Since the lookup was allowing inaccessible members, let's check
  // if it found anything of that sort, which is easy to diagnose.
  bool allUnavailable = !CS.TC.getLangOpts().DisableAvailabilityChecking;
  bool allInaccessible = true;
  for (auto &member : viableCandidatesToReport) {
    if (!member.isDecl()) {
      // If there is no declaration, this choice is implicitly available.
      allUnavailable = false;
      continue;
    }

    auto decl = member.getDecl();
    // Check availability of the found choice.
    if (!decl->getAttrs().isUnavailable(CS.getASTContext()))
      allUnavailable = false;

    if (decl->isAccessibleFrom(CS.DC))
      allInaccessible = false;
  }

  // diagnoseSimpleErrors() should have diagnosed this scenario.
  assert(!allInaccessible || viableCandidatesToReport.empty());

  // 'init' referenced on an instance (not a metatype): diagnose 'super.init'
  // outside an initializer, or suggest 'type(of:)' for dynamic construction.
  if (result.UnviableCandidates.empty() && isInitializer &&
      !baseObjTy->is<AnyMetatypeType>()) {
    if (auto ctorRef = dyn_cast<UnresolvedDotExpr>(E)) {
      // Diagnose 'super.init', which can only appear inside another
      // initializer, specially.
      if (isa<SuperRefExpr>(ctorRef->getBase())) {
        diagnose(BaseLoc, diag::super_initializer_not_in_initializer);
        return true;
      }

      // Suggest inserting a call to 'type(of:)' to construct another object
      // of the same dynamic type.
      SourceRange fixItRng = ctorRef->getNameLoc().getSourceRange();

      // Surround the caller in `type(of:)`.
      diagnose(BaseLoc, diag::init_not_instance_member)
          .fixItInsert(fixItRng.Start, "type(of: ")
          .fixItInsertAfter(fixItRng.End, ")");
      return true;
    }
  }

  if (viableCandidatesToReport.empty()) {
    // If this was an optional type, check whether the base type has the
    // requested member.  If so, produce a nice error saying that the optional
    // was not unwrapped; otherwise say that the type has no such member.
    if (auto *OT = dyn_cast<OptionalType>(baseObjTy.getPointer())) {
      auto optionalResult = CS.performMemberLookup(
          lookupKind, memberName, OT->getBaseType(), funcRefKind, locator,
          /*includeInaccessibleMembers*/ false);

      switch (optionalResult.OverallResult) {
      case MemberLookupResult::ErrorAlreadyDiagnosed:
        // If an error was already emitted, then we're done, don't emit
        // anything redundant.
        return true;

      case MemberLookupResult::Unsolved:
      case MemberLookupResult::HasResults:
        break;
      }

      if (!optionalResult.ViableCandidates.empty()) {
        // By default we assume that the LHS type is not optional.
        StringRef fixIt = "!";
        auto contextualType = CS.getContextualType();
        // If the expected result is itself optional, chaining with '?' is the
        // better suggestion than force-unwrapping.
        if (contextualType && isa<OptionalType>(contextualType.getPointer()))
          fixIt = "?";

        diagnose(BaseLoc, diag::missing_unwrap_optional, baseObjTy)
            .fixItInsertAfter(baseExpr->getEndLoc(), fixIt);
        return true;
      }
    }

    // FIXME: Dig out the property DeclNameLoc.
    diagnoseUnviableLookupResults(result, baseObjTy, baseExpr, memberName,
                                  NameLoc, BaseLoc);
    return true;
  }

  if (allUnavailable) {
    auto firstDecl = viableCandidatesToReport[0].getDecl();
    // FIXME: We need the enclosing CallExpr to rewrite the argument labels.
    if (CS.TC.diagnoseExplicitUnavailability(firstDecl, BaseLoc, CS.DC,
                                             /*call*/ nullptr))
      return true;
  }

  // Otherwise defer to the caller-provided handler, if any, to diagnose the
  // remaining viable candidates.
  return callback.hasValue() ? (*callback)(viableCandidatesToReport) : false;
}
/// Diagnose a failure rooted at a member access of the form "base.member" by
/// delegating to the shared member-failure machinery.
bool FailureDiagnosis::visitUnresolvedDotExpr(UnresolvedDotExpr *UDE) {
  auto *memberLoc = CS.getConstraintLocator(UDE, ConstraintLocator::Member);
  // Without a member locator there is nothing specific to diagnose here.
  if (memberLoc == nullptr)
    return false;

  return diagnoseMemberFailures(UDE, UDE->getBase(),
                                ConstraintKind::ValueMember, UDE->getName(),
                                UDE->getFunctionRefKind(), memberLoc);
}
/// A TupleExpr propagates contextual type information down to its children
/// and can be erroneous when there is a label mismatch etc.
///
/// Returns true if a diagnostic was emitted for one of the elements.
bool FailureDiagnosis::visitTupleExpr(TupleExpr *TE) {
  // If we know the requested argType to use, use computeTupleShuffle to
  // produce the shuffle of input arguments to destination values.  It requires
  // a TupleType to compute the mapping from argExpr.  Conveniently, it doesn't
  // care about the actual types though, so we can just use 'void' for them.
  if (!CS.getContextualType() || !CS.getContextualType()->is<TupleType>())
    return visitExpr(TE);

  auto contextualTT = CS.getContextualType()->castTo<TupleType>();

  // Build a label-only mirror of this tuple, with 'void' element types, so it
  // can be matched against the contextual tuple.
  SmallVector<TupleTypeElt, 4> ArgElts;
  auto voidTy = CS.getASTContext().TheEmptyTupleType;

  for (unsigned i = 0, e = TE->getNumElements(); i != e; ++i)
    ArgElts.push_back({ voidTy, TE->getElementName(i) });
  auto TEType = TupleType::get(ArgElts, CS.getASTContext());

  if (!TEType->is<TupleType>())
    return visitExpr(TE);

  SmallVector<int, 4> sources;
  SmallVector<unsigned, 4> variadicArgs;

  // If the shuffle is invalid, then there is a type error.  We could diagnose
  // it specifically here, but the general logic does a fine job so we let it
  // do it.
  if (computeTupleShuffle(TEType->castTo<TupleType>()->getElements(),
                          contextualTT->getElements(), sources, variadicArgs))
    return visitExpr(TE);

  // If we got a correct shuffle, we can perform the analysis of all of
  // the input elements, with their expected types.
  for (unsigned i = 0, e = sources.size(); i != e; ++i) {
    // If the value is taken from a default argument, ignore it.
    if (sources[i] == TupleShuffleExpr::DefaultInitialize ||
        sources[i] == TupleShuffleExpr::Variadic ||
        sources[i] == TupleShuffleExpr::CallerDefaultInitialize)
      continue;

    assert(sources[i] >= 0 && "Unknown sources index");

    // Otherwise, it must match the corresponding expected argument type.
    unsigned inArgNo = sources[i];

    TCCOptions options;
    // Allow an lvalue child when the destination element is inout.
    if (contextualTT->getElement(i).isInOut())
      options |= TCC_AllowLValue;

    auto actualType = contextualTT->getElementType(i);
    auto exprResult =
        typeCheckChildIndependently(TE->getElement(inArgNo), actualType,
                                    CS.getContextualTypePurpose(), options);
    // If there was an error type checking this argument, then we're done.
    if (!exprResult) return true;

    // If the caller expected something inout, but we didn't have
    // something of inout type, diagnose it.
    if (auto IOE =
          dyn_cast<InOutExpr>(exprResult->getSemanticsProvidingExpr())) {
      if (!contextualTT->getElement(i).isInOut()) {
        diagnose(exprResult->getLoc(), diag::extra_address_of,
                 CS.getType(exprResult)->getInOutObjectType())
            .highlight(exprResult->getSourceRange())
            .fixItRemove(IOE->getStartLoc());
        return true;
      }
    }
  }

  // Check any elements mapped into a variadic parameter against its base
  // element type.
  if (!variadicArgs.empty()) {
    auto varargsTy = contextualTT->getVarArgsBaseType();
    for (unsigned i = 0, e = variadicArgs.size(); i != e; ++i) {
      unsigned inArgNo = variadicArgs[i];

      auto expr = typeCheckChildIndependently(
          TE->getElement(inArgNo), varargsTy, CS.getContextualTypePurpose());
      // If there was an error type checking this argument, then we're done.
      if (!expr) return true;
    }
  }

  return false;
}
/// An IdentityExpr doesn't change its argument, but it *can* propagate its
/// contextual type information down to the sub-expression.
bool FailureDiagnosis::visitIdentityExpr(IdentityExpr *E) {
  Type childContextTy = CS.getContextualType();

  // If we have a paren expr and our contextual type is a ParenType, remove
  // the paren sugar so the child is checked against the underlying type.
  if (childContextTy)
    childContextTy = childContextTy->getWithoutParens();

  auto *checkedChild = typeCheckChildIndependently(
      E->getSubExpr(), childContextTy, CS.getContextualTypePurpose());

  // A null result means the child failed to type check and a diagnostic was
  // already emitted.
  return checkedChild == nullptr;
}
/// A TryExpr doesn't change its argument, nor does it change the contextual
/// type, so simply diagnose the sub-expression in the same context.
bool FailureDiagnosis::visitTryExpr(TryExpr *E) {
  return visit(E->getSubExpr());
}
/// Generic fallback diagnostic: type check each of the expression's immediate
/// children independently and report whether any of them failed (emitting a
/// diagnostic in the process).
bool FailureDiagnosis::visitExpr(Expr *E) {
  bool anyChildFailed = false;

  E->forEachImmediateChildExpr([&](Expr *child) -> Expr * {
    // Once one child has failed, skip the rest so we don't emit multiple
    // diagnostics for a single expression.
    if (!anyChildFailed &&
        !typeCheckChildIndependently(child, TCC_AllowLValue))
      anyChildFailed = true;
    return child;
  });

  // If a child failed, a diagnostic was already produced; otherwise there is
  // nothing specific to say here, so let the caller produce a generic error.
  return anyChildFailed;
}
/// Entry point for expression-based diagnosis.  Returns true if a diagnostic
/// was emitted.
bool FailureDiagnosis::diagnoseExprFailure() {
  assert(expr);

  // Our general approach is to do a depth first traversal of the broken
  // expression tree, type checking as we go.  If we find a subtree that cannot
  // be type checked on its own (even to an incomplete type) then that is where
  // we focus our attention.  If we do find a type, we use it to check for
  // contextual type mismatches.
  return visit(expr);
}
/// Given a specific expression and the remnants of the failed constraint
/// system, produce a specific diagnostic.
///
/// This is guaranteed to always emit an error message.
///
void ConstraintSystem::diagnoseFailureForExpr(Expr *expr) {
  // Continue simplifying any active constraints left in the system.  We can
  // end up with them because the solver bails out as soon as it sees a
  // Failure.  We don't want to leave them around in the system because later
  // diagnostics will assume they are unsolvable and may otherwise leave the
  // system in an inconsistent state.
  simplify(/*ContinueAfterFailures*/true);

  // Look through RebindSelfInConstructorExpr to avoid weird Sema issues.
  if (auto *RB = dyn_cast<RebindSelfInConstructorExpr>(expr))
    expr = RB->getSubExpr();

  FailureDiagnosis diagnosis(expr, *this);

  // Now, attempt to diagnose the failure from the info we've collected.
  if (diagnosis.diagnoseExprFailure())
    return;

  // If this is a contextual conversion problem, dig out some information.
  if (diagnosis.diagnoseContextualConversionError(expr, getContextualType(),
                                                  getContextualTypePurpose()))
    return;

  // If we can diagnose a problem based on the constraints left laying around
  // in the system, do so now.
  if (diagnosis.diagnoseConstraintFailure())
    return;

  // If no one could find a problem with this expression or constraint system,
  // then it must be well-formed... but is ambiguous.  Handle this by
  // diagnosing the various cases that come up.
  diagnosis.diagnoseAmbiguity(expr);
}
// FIXME: Instead of doing this, we should store the decl in the type
// variable, or in the locator.
/// Whether \p archetype belongs to the generic environment of \p generic,
/// i.e. was opened from one of its generic parameters.
static bool hasArchetype(const GenericTypeDecl *generic,
                         ArchetypeType *archetype) {
  // Only top-level, non-opened archetypes are expected here.
  assert(!archetype->getOpenedExistentialType() && !archetype->getParent());

  if (auto *env = generic->getGenericEnvironment())
    return archetype->getGenericEnvironment() == env;

  // A declaration without a generic environment cannot own any archetypes.
  return false;
}
/// Emit a note pointing at the written type (or declaration) that introduced
/// \p archetype, and, when possible, a fix-it that spells out explicit
/// generic arguments for it.
static void noteArchetypeSource(const TypeLoc &loc, ArchetypeType *archetype,
                                ConstraintSystem &cs) {
  const GenericTypeDecl *FoundDecl = nullptr;
  const ComponentIdentTypeRepr *FoundGenericTypeBase = nullptr;

  // Walk the TypeRepr to find the type in question.
  if (auto typerepr = loc.getTypeRepr()) {
    struct FindGenericTypeDecl : public ASTWalker {
      const GenericTypeDecl *FoundDecl = nullptr;
      const ComponentIdentTypeRepr *FoundGenericTypeBase = nullptr;
      ArchetypeType *Archetype;

      FindGenericTypeDecl(ArchetypeType *Archetype)
          : Archetype(Archetype) {}

      bool walkToTypeReprPre(TypeRepr *T) override {
        // If we already emitted the note, we're done.
        if (FoundDecl) return false;

        if (auto ident = dyn_cast<ComponentIdentTypeRepr>(T)) {
          auto *generic =
              dyn_cast_or_null<GenericTypeDecl>(ident->getBoundDecl());
          if (generic && hasArchetype(generic, Archetype)) {
            FoundDecl = generic;
            FoundGenericTypeBase = ident;
            return false;
          }
        }
        // Keep walking.
        return true;
      }
    } findGenericTypeDecl(archetype);

    typerepr->walk(findGenericTypeDecl);
    FoundDecl = findGenericTypeDecl.FoundDecl;
    FoundGenericTypeBase = findGenericTypeDecl.FoundGenericTypeBase;
  }

  // If we didn't find the type in the TypeRepr, fall back to the type in the
  // type checked expression.
  if (!FoundDecl) {
    if (const GenericTypeDecl *generic = loc.getType()->getAnyGeneric())
      if (hasArchetype(generic, archetype))
        FoundDecl = generic;
  }

  auto &tc = cs.getTypeChecker();
  if (FoundDecl) {
    // Pick the most user-recognizable spelling of the declaration's type for
    // the note.
    Type type;
    if (auto *nominal = dyn_cast<NominalTypeDecl>(FoundDecl))
      type = nominal->getDeclaredType();
    else if (auto *typeAlias = dyn_cast<TypeAliasDecl>(FoundDecl))
      type = typeAlias->getUnboundGenericType();
    else
      type = FoundDecl->getDeclaredInterfaceType();
    tc.diagnose(FoundDecl, diag::archetype_declared_in_type, archetype, type);
  }

  // Only offer the fix-it when the written base has no explicit generic
  // argument list already (i.e. it is not a GenericIdentTypeRepr).
  if (FoundGenericTypeBase && !isa<GenericIdentTypeRepr>(FoundGenericTypeBase)){
    assert(FoundDecl);

    // If we can, prefer using any types already fixed by the constraint
    // system.  This lets us produce fixes like `Pair<Int, Any>` instead of
    // defaulting to `Pair<Any, Any>`.
    // Right now we only handle this when the type that's at fault is the
    // top-level type passed to this function.
    if (loc.getType().isNull()) {
      return;
    }

    ArrayRef<Type> genericArgs;
    if (auto *boundGenericTy = loc.getType()->getAs<BoundGenericType>()) {
      if (boundGenericTy->getDecl() == FoundDecl)
        genericArgs = boundGenericTy->getGenericArgs();
    }

    auto getPreferredType =
        [&](const GenericTypeParamDecl *genericParam) -> Type {
      // If we were able to get the generic arguments (i.e. the types used at
      // FoundDecl's use site), we can prefer those...
      if (genericArgs.empty())
        return Type();
      Type preferred = genericArgs[genericParam->getIndex()];
      if (!preferred || preferred->hasError())
        return Type();

      // ...but only if they were actually resolved by the constraint system
      // despite the failure.
      Type maybeFixedType = cs.getFixedTypeRecursive(preferred,
                                                     /*wantRValue*/true);
      if (maybeFixedType->hasTypeVariable() ||
          maybeFixedType->hasUnresolvedType()) {
        return Type();
      }
      return maybeFixedType;
    };

    SmallString<64> genericParamBuf;
    if (tc.getDefaultGenericArgumentsString(genericParamBuf, FoundDecl,
                                            getPreferredType)) {
      tc.diagnose(FoundGenericTypeBase->getLoc(),
                  diag::unbound_generic_parameter_explicit_fix)
          .fixItInsertAfter(FoundGenericTypeBase->getEndLoc(), genericParamBuf);
    }
  }
}
/// Sanitize a contextual type before handing it to the sub-expression solver:
/// look through autoclosures, drop the type entirely when it is dominated by
/// archetypes/unresolved types, and scrub leftover type variables and type
/// parameters.  Returns the adjusted type together with its (possibly
/// cleared) purpose.
std::pair<Type, ContextualTypePurpose>
FailureDiagnosis::validateContextualType(Type contextualType,
                                         ContextualTypePurpose CTP) {
  if (!contextualType)
    return {contextualType, CTP};

  // If we're asked to convert to an autoclosure, then we really want to
  // convert to the result of it.
  if (auto *FT = contextualType->getAs<AnyFunctionType>())
    if (FT->isAutoClosure())
      contextualType = FT->getResult();

  // Since some of the contextual types might be tuples e.g. subscript argument
  // is a tuple or paren wrapping a tuple, it's required to recursively check
  // its elements to determine nullability of the contextual type, because it
  // might contain archetypes.
  std::function<bool(Type)> shouldNullifyType = [&](Type type) -> bool {
    switch (type->getDesugaredType()->getKind()) {
    case TypeKind::Archetype:
    case TypeKind::Unresolved:
      return true;

    case TypeKind::BoundGenericEnum:
    case TypeKind::BoundGenericClass:
    case TypeKind::BoundGenericStruct:
    case TypeKind::UnboundGeneric:
    case TypeKind::GenericFunction:
    case TypeKind::Metatype:
      return type->hasUnresolvedType();

    case TypeKind::Tuple: {
      // A tuple should be nullified if any of its elements would be.
      auto tupleType = type->getAs<TupleType>();
      for (auto &element : tupleType->getElements()) {
        if (shouldNullifyType(element.getType()))
          return true;
      }
      break;
    }

    default:
      return false;
    }

    return false;
  };

  bool shouldNullify = false;
  if (auto objectType = contextualType->getWithoutSpecifierType()) {
    // Note that simply checking for `objectType->hasUnresolvedType()` is not
    // appropriate in this case standalone, because if it's in a function,
    // for example, or inout type, we still want to preserve its skeleton
    // because that helps to diagnose inout argument issues.  Complete
    // nullification is only appropriate for generic types with unresolved
    // types or standalone archetypes because that's going to give
    // sub-expression solver a chance to try and compute type as it sees fit
    // and higher level code would have a chance to check it, which avoids
    // diagnostic messages like `cannot convert (_) -> _ to (Int) -> Void`.
    shouldNullify = shouldNullifyType(objectType);
  }

  // If the conversion type contains no info, drop it.
  if (shouldNullify)
    return {Type(), CTP_Unused};

  // Remove all of the potentially leftover type variables or type parameters
  // from the contextual type to be used by new solver.
  contextualType = replaceTypeParametersWithUnresolved(contextualType);
  contextualType = replaceTypeVariablesWithUnresolved(contextualType);

  return {contextualType, CTP};
}
/// Check the specified closure to see if it is a multi-statement closure with
/// an uninferred type.  If so, diagnose the problem with an error and return
/// true.
bool FailureDiagnosis::
diagnoseAmbiguousMultiStatementClosure(ClosureExpr *closure) {
  // Single-expression closures and closures with an explicit result type do
  // not suffer from this problem.
  if (closure->hasSingleExpressionBody() ||
      closure->hasExplicitResultType())
    return false;

  // Only diagnose when the closure's result type is genuinely uninferred.
  auto closureType = CS.getType(closure)->getAs<AnyFunctionType>();
  if (!closureType ||
      !(closureType->getResult()->hasUnresolvedType() ||
        closureType->getResult()->hasTypeVariable()))
    return false;

  // Okay, we have a multi-statement closure expr that has no inferred result,
  // type, in the context of a larger expression.  The user probably expected
  // the compiler to infer the result type of the closure from the body of the
  // closure, which Swift doesn't do for multi-statement closures.  Try to be
  // helpful by digging into the body of the closure, looking for a return
  // statement, and inferring the result type from it.  If we can figure that
  // out, we can produce a fixit hint.
  class ReturnStmtFinder : public ASTWalker {
    SmallVectorImpl<ReturnStmt*> &returnStmts;
  public:
    ReturnStmtFinder(SmallVectorImpl<ReturnStmt*> &returnStmts)
      : returnStmts(returnStmts) {}

    // Walk through statements, so we find returns hiding in if/else blocks
    // etc.
    std::pair<bool, Stmt *> walkToStmtPre(Stmt *S) override {
      // Keep track of any return statements we find.
      if (auto RS = dyn_cast<ReturnStmt>(S))
        returnStmts.push_back(RS);
      return { true, S };
    }

    // Don't walk into anything else, since they cannot contain statements
    // that can return from the current closure.
    std::pair<bool, Expr *> walkToExprPre(Expr *E) override {
      return { false, E };
    }
    std::pair<bool, Pattern*> walkToPatternPre(Pattern *P) override {
      return { false, P };
    }
    bool walkToDeclPre(Decl *D) override { return false; }
    bool walkToTypeLocPre(TypeLoc &TL) override { return false; }
    bool walkToTypeReprPre(TypeRepr *T) override { return false; }
    bool walkToParameterListPre(ParameterList *PL) override { return false; }
  };

  SmallVector<ReturnStmt*, 4> Returns;
  closure->getBody()->walk(ReturnStmtFinder(Returns));

  // If we found a return statement inside of the closure expression, then go
  // ahead and type check the body to see if we can determine a type.
  for (auto RS : Returns) {
    llvm::SaveAndRestore<DeclContext *> SavedDC(CS.DC, closure);

    // Otherwise, we're ok to type check the subexpr.
    Type resultType;
    if (RS->hasResult()) {
      auto resultExpr = RS->getResult();
      ConcreteDeclRef decl = nullptr;

      // If the return expression uses closure parameters, which have/are
      // type variables, that means that we won't be able to
      // type-check the result correctly and, unfortunately,
      // we are going to leak type variables from the parent
      // constraint system through declaration types.
      bool hasUnresolvedParams = false;
      resultExpr->forEachChildExpr([&](Expr *childExpr) -> Expr *{
        if (auto DRE = dyn_cast<DeclRefExpr>(childExpr)) {
          if (auto param = dyn_cast<ParamDecl>(DRE->getDecl())) {
            auto paramType = param->hasType() ? param->getType() : Type();
            if (!paramType || paramType->hasTypeVariable()) {
              hasUnresolvedParams = true;
              return nullptr;  // Stop walking; we found what we need.
            }
          }
        }
        return childExpr;
      });

      if (hasUnresolvedParams)
        continue;

      CS.TC.preCheckExpression(resultExpr, CS.DC);

      // Obtain type of the result expression without applying solutions,
      // because otherwise this might result in leaking of type variables,
      // since we are not resetting result statement and if expression is
      // successfully type-checked its type cleanup is going to be disabled
      // (we are allowing unresolved types), and as a side-effect it might
      // also be transformed e.g. OverloadedDeclRefExpr -> DeclRefExpr.
      auto type = CS.TC.getTypeOfExpressionWithoutApplying(
          resultExpr, CS.DC, decl, FreeTypeVariableBinding::UnresolvedType);
      if (type)
        resultType = type;
    }

    // If we found a type, presuppose it was the intended result and insert a
    // fixit hint.
    if (resultType && !isUnresolvedOrTypeVarType(resultType)) {
      std::string resultTypeStr = resultType->getString();

      // If there is a location for an 'in' token, then the argument list was
      // specified somehow but no return type was.  Insert a "-> ReturnType "
      // before the in token.
      if (closure->getInLoc().isValid()) {
        diagnose(closure->getLoc(), diag::cannot_infer_closure_result_type)
            .fixItInsert(closure->getInLoc(), "-> " + resultTypeStr + " ");
        return true;
      }

      // Otherwise, the closure must take zero arguments.  We know this
      // because if one or more arguments is specified, a multi-statement
      // closure *must* name them, or explicitly ignore them with "_ in".
      //
      // As such, we insert " () -> ReturnType in " right after the '{' that
      // starts the closure body.
      auto insertString = " () -> " + resultTypeStr + " " + "in ";
      diagnose(closure->getLoc(), diag::cannot_infer_closure_result_type)
          .fixItInsertAfter(closure->getBody()->getLBraceLoc(), insertString);
      return true;
    }
  }

  // No return statement gave us a usable type; emit the error without a
  // fix-it.
  diagnose(closure->getLoc(), diag::cannot_infer_closure_result_type);
  return true;
}
/// Check the associated constraint system to see if it has any archetypes
/// not properly resolved or missing.  If so, diagnose the problem with
/// an error and return true.
bool FailureDiagnosis::diagnoseArchetypeAmbiguity() {
  // Each entry: the unbound archetype, its locator, and the number of
  // "interesting" constraints involving it (used for prioritization below).
  using Archetype = std::tuple<ArchetypeType *, ConstraintLocator *, unsigned>;

  llvm::SmallVector<Archetype, 2> unboundParams;
  // Check out all of the type variables lurking in the system.  If any are
  // unbound archetypes, then the problem is that it couldn't be resolved.
  for (auto tv : CS.getTypeVariables()) {
    auto &impl = tv->getImpl();

    // A type variable with a representative or a fixed type is resolved.
    if (impl.hasRepresentativeOrFixed())
      continue;

    // If this is a conversion to a type variable used to form an archetype,
    // then diagnose this as a generic parameter that could not be resolved.
    auto archetype = impl.getArchetype();

    // Only diagnose archetypes that don't have a parent, i.e., ones
    // that correspond to generic parameters.
    if (!archetype || archetype->getParent())
      continue;

    // The number of constraints related to a particular unbound parameter
    // is a significant indicator of the problem, because if there are
    // no constraints associated with it, that means it can't ever be resolved,
    // which helps to diagnose situations like: struct S<A, B> { init(_ a: A) {}}
    // because type B would have no constraints associated with it.
    unsigned numConstraints = 0;
    {
      llvm::SmallVector<Constraint *, 2> constraints;
      CS.getConstraintGraph().gatherConstraints(
          tv, constraints, ConstraintGraph::GatheringKind::EquivalenceClass);

      for (auto constraint : constraints) {
        // We are not interested in ConformsTo constraints because
        // such constraints specify restrictions on the archetypes themselves.
        if (constraint->getKind() == ConstraintKind::ConformsTo)
          continue;

        // Some of the bind constraints specify relations between
        // a parent type and its member fields/types; we are not
        // interested in that, since it's not related to archetype resolution.
        if (constraint->getKind() == ConstraintKind::Bind) {
          if (auto locator = constraint->getLocator()) {
            auto anchor = locator->getAnchor();
            if (anchor && isa<UnresolvedDotExpr>(anchor))
              continue;
          }
        }

        numConstraints++;
      }
    }

    auto locator = impl.getLocator();
    unboundParams.push_back(
        std::make_tuple(archetype, locator, numConstraints));
  }

  // We've found unbound generic parameters, let's diagnose
  // based on the number of constraints each one is related to.
  if (!unboundParams.empty()) {
    // Let's prioritize archetypes that don't have any constraints associated.
    std::stable_sort(unboundParams.begin(), unboundParams.end(),
                     [](Archetype a, Archetype b) {
                       return std::get<2>(a) < std::get<2>(b);
                     });
    auto param = unboundParams.front();
    diagnoseUnboundArchetype(std::get<0>(param),
                             std::get<1>(param)->getAnchor());
    return true;
  }

  return false;
}
/// Emit an error message about an unbound generic parameter existing, and
/// emit notes referring to the target of a diagnostic, e.g., the function
/// or parameter being used.
///
/// \param archetype The unresolved archetype (generic parameter) to report.
/// \param anchor The expression the failure locator points at; may be null,
///               in which case only the primary error is emitted.
void FailureDiagnosis::diagnoseUnboundArchetype(ArchetypeType *archetype,
                                                Expr *anchor) {
  auto &tc = CS.getTypeChecker();

  // The archetype may come from the explicit type in a cast expression.
  // Prefer a cast-specific diagnostic that highlights the written type.
  if (auto *ECE = dyn_cast_or_null<ExplicitCastExpr>(anchor)) {
    tc.diagnose(ECE->getLoc(), diag::unbound_generic_parameter_cast,
                archetype, ECE->getCastTypeLoc().getType())
      .highlight(ECE->getCastTypeLoc().getSourceRange());

    // Emit a note specifying where this came from, if we can find it.
    noteArchetypeSource(ECE->getCastTypeLoc(), archetype, CS);
    return;
  }

  // A very common cause of this diagnostic is a situation where a closure expr
  // has no inferred type, due to being a multiline closure.  Check to see if
  // this is the case and (if so), speculatively diagnose that as the problem.
  bool didDiagnose = false;
  expr->forEachChildExpr([&](Expr *subExpr) -> Expr*{
    auto closure = dyn_cast<ClosureExpr>(subExpr);
    // Only the first closure that yields a diagnostic wins; the walk itself
    // is not aborted, but later closures are ignored once didDiagnose is set.
    if (!didDiagnose && closure)
      didDiagnose = diagnoseAmbiguousMultiStatementClosure(closure);

    return subExpr;
  });
  if (didDiagnose) return;

  // Otherwise, emit an error message on the expr we have, and emit a note
  // about where the archetype came from.
  tc.diagnose(expr->getLoc(), diag::unbound_generic_parameter, archetype);

  // If we have an anchor, drill into it to emit a
  // "note: archetype declared here".
  if (!anchor) return;

  if (auto TE = dyn_cast<TypeExpr>(anchor)) {
    // Emit a note specifying where this came from, if we can find it.
    noteArchetypeSource(TE->getTypeLoc(), archetype, CS);
    return;
  }

  // Try to map the anchor expression to the declaration it references so we
  // can point the note at the generic function/initializer/parameter.
  ConcreteDeclRef resolved;

  // Simple case: direct reference to a declaration.
  if (auto dre = dyn_cast<DeclRefExpr>(anchor))
    resolved = dre->getDeclRef();

  // Simple case: direct reference to a declaration.
  if (auto MRE = dyn_cast<MemberRefExpr>(anchor))
    resolved = MRE->getMember();

  if (auto OCDRE = dyn_cast<OtherConstructorDeclRefExpr>(anchor))
    resolved = OCDRE->getDeclRef();

  // We couldn't resolve the locator to a declaration, so we're done.
  if (!resolved)
    return;

  auto decl = resolved.getDecl();
  if (auto FD = dyn_cast<FuncDecl>(decl)) {
    // Use an operator-specific note when the callee is an operator function.
    auto name = FD->getFullName();
    auto diagID = name.isOperator() ? diag::note_call_to_operator
                                    : diag::note_call_to_func;
    tc.diagnose(decl, diagID, name);
    return;
  }

  // FIXME: Specialize for implicitly-generated constructors.
  if (isa<ConstructorDecl>(decl)) {
    tc.diagnose(decl, diag::note_call_to_initializer);
    return;
  }

  if (auto PD = dyn_cast<ParamDecl>(decl)) {
    tc.diagnose(decl, diag::note_init_parameter, PD->getName());
    return;
  }

  // FIXME: Other decl types too.
}
/// Emit an ambiguity diagnostic about the specified expression.
///
/// Tries a sequence of increasingly generic diagnoses, stopping at the first
/// one that applies; the order below matters, since earlier checks produce
/// more specific (and more actionable) messages.
void FailureDiagnosis::diagnoseAmbiguity(Expr *E) {
  // First, let's try to diagnose any problems related to ambiguous
  // archetypes (generic parameters) present in the constraint system.
  if (diagnoseArchetypeAmbiguity())
    return;

  // Unresolved/Anonymous ClosureExprs are common enough that we should give
  // them tailored diagnostics.
  if (auto CE = dyn_cast<ClosureExpr>(E->getValueProvidingExpr())) {
    // If this is a multi-statement closure with no explicit result type, emit
    // a note to clue the developer in.
    if (diagnoseAmbiguousMultiStatementClosure(CE))
      return;

    diagnose(E->getLoc(), diag::cannot_infer_closure_type)
      .highlight(E->getSourceRange());
    return;
  }

  // A DiscardAssignmentExpr (spelled "_") needs contextual type information to
  // infer its type. If we see one at top level, diagnose that it must be part
  // of an assignment so we don't get a generic "expression is ambiguous" error.
  if (isa<DiscardAssignmentExpr>(E)) {
    diagnose(E->getLoc(), diag::discard_expr_outside_of_assignment)
      .highlight(E->getSourceRange());
    return;
  }

  // Diagnose ".foo" expressions that lack context specifically.
  if (auto UME =
        dyn_cast<UnresolvedMemberExpr>(E->getSemanticsProvidingExpr())) {
    if (!CS.getContextualType()) {
      diagnose(E->getLoc(), diag::unresolved_member_no_inference,UME->getName())
        .highlight(SourceRange(UME->getDotLoc(),
                               UME->getNameLoc().getSourceRange().End));
      return;
    }
  }

  // Diagnose empty collection literals that lack context specifically.
  if (auto CE = dyn_cast<CollectionExpr>(E->getSemanticsProvidingExpr())) {
    if (CE->getNumElements() == 0) {
      diagnose(E->getLoc(), diag::unresolved_collection_literal)
        .highlight(E->getSourceRange());
      return;
    }
  }

  // Diagnose 'nil' without a contextual type.
  if (isa<NilLiteralExpr>(E->getSemanticsProvidingExpr())) {
    diagnose(E->getLoc(), diag::unresolved_nil_literal)
      .highlight(E->getSourceRange());
    return;
  }

  // A very common cause of this diagnostic is a situation where a closure expr
  // has no inferred type, due to being a multiline closure.  Check to see if
  // this is the case and (if so), speculatively diagnose that as the problem.
  bool didDiagnose = false;
  E->forEachChildExpr([&](Expr *subExpr) -> Expr*{
    auto closure = dyn_cast<ClosureExpr>(subExpr);
    // First diagnosable closure wins; subsequent ones are ignored.
    if (!didDiagnose && closure)
      didDiagnose = diagnoseAmbiguousMultiStatementClosure(closure);

    return subExpr;
  });
  if (didDiagnose) return;

  // Attempt to re-type-check the entire expression, allowing ambiguity, but
  // ignoring a contextual type.  Only meaningful when E is the root being
  // diagnosed, since the helper type-checks the stored `expr`.
  if (expr == E) {
    auto exprType = getTypeOfTypeCheckedChildIndependently(expr);
    // If it failed and diagnosed something, then we're done.
    if (!exprType) return;

    // If we were able to find something more specific than "unknown" (perhaps
    // something like "[_:_]" for a dictionary literal), include it in the
    // diagnostic.
    if (!isUnresolvedOrTypeVarType(exprType)) {
      diagnose(E->getLoc(), diag::specific_type_of_expression_is_ambiguous,
               exprType)
        .highlight(E->getSourceRange());
      return;
    }
  }

  // If there are no posted constraints or failures, then there was
  // not enough contextual information available to infer a type for the
  // expression.
  diagnose(E->getLoc(), diag::type_of_expression_is_ambiguous)
    .highlight(E->getSourceRange());
}
/// If an UnresolvedDotExpr, SubscriptMember, etc has been resolved by the
/// constraint system, return the decl that it references.
///
/// Returns null when no resolution exists for \p locator, or when the
/// resolved overload choice is not a plain declaration binding.
ValueDecl *ConstraintSystem::findResolvedMemberRef(ConstraintLocator *locator) {
  // Walk the singly-linked list of resolved overloads, newest first.  This
  // linear scan only happens while producing an error diagnostic, so O(n)
  // is acceptable.
  for (auto *entry = this->getResolvedOverloadSets(); entry;
       entry = entry->Previous) {
    if (entry->Locator != locator)
      continue;

    // We only handle the simplest decl binding.
    if (entry->Choice.getKind() != OverloadChoiceKind::Decl)
      return nullptr;
    return entry->Choice.getDecl();
  }

  return nullptr;
}
/// Attempt to "salvage" a failed constraint system in order to produce a
/// good diagnostic: re-solve while recording fixes, then either accept the
/// best fixed solution (returning false so the caller can act on it —
/// NOTE(review): caller contract inferred from the return sites, confirm)
/// or emit a diagnostic and return true.
bool ConstraintSystem::salvage(SmallVectorImpl<Solution> &viable, Expr *expr) {
  if (TC.getLangOpts().DebugConstraintSolver) {
    auto &log = TC.Context.TypeCheckerDebug->getStream();
    log << "---Attempting to salvage and emit diagnostics---\n";
  }

  // Attempt to solve again, capturing all states that come from our attempts to
  // select overloads or bind type variables.
  //
  // FIXME: can this be removed? We need to arrange for recordFixes to be
  // eliminated.
  viable.clear();

  {
    // Set up solver state.  Scoped so the state is torn down before the
    // "too complex" fallback below runs.
    SolverState state(expr, *this);
    state.recordFixes = true;

    // Solve the system.
    solveRec(viable, FreeTypeVariableBinding::Disallow);

    // Check whether we have a best solution; this can happen if we found
    // a series of fixes that worked.
    if (auto best = findBestSolution(viable, state.ExprWeights,
                                     /*minimize=*/true)) {
      // Move the winner to slot 0 and drop every other candidate.
      if (*best != 0)
        viable[0] = std::move(viable[*best]);
      viable.erase(viable.begin() + 1, viable.end());
      return false;
    }

    // FIXME: If we were able to actually fix things along the way,
    // we may have to hunt for the best solution. For now, we don't care.

    // Remove solutions that require fixes; the fixes in those systems should
    // be diagnosed rather than any ambiguity.
    auto hasFixes = [](const Solution &sol) { return !sol.Fixes.empty(); };
    auto newEnd = std::remove_if(viable.begin(), viable.end(), hasFixes);
    viable.erase(newEnd, viable.end());

    // If there are multiple solutions, try to diagnose an ambiguity.
    if (viable.size() > 1) {
      if (getASTContext().LangOpts.DebugConstraintSolver) {
        auto &log = getASTContext().TypeCheckerDebug->getStream();
        log << "---Ambiguity error: "
            << viable.size() << " solutions found---\n";
        int i = 0;
        for (auto &solution : viable) {
          log << "---Ambiguous solution #" << i++ << "---\n";
          solution.dump(log);
          log << "\n";
        }
      }

      if (diagnoseAmbiguity(*this, viable, expr)) {
        return true;
      }
    }

    // Fall through to produce diagnostics.
  }

  if (getExpressionTooComplex(viable)) {
    TC.diagnose(expr->getLoc(), diag::expression_too_complex).
    highlight(expr->getSourceRange());
    return true;
  }

  // If all else fails, diagnose the failure by looking through the system's
  // constraints.
  diagnoseFailureForExpr(expr);
  return true;
}
| frootloops/swift | lib/Sema/CSDiag.cpp | C++ | apache-2.0 | 323,270 |
package dns
// Copyright (c) Microsoft and contributors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
import (
"encoding/json"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/to"
"net/http"
)
// RecordType enumerates the values for record type.
type RecordType string

const (
	// A is an IPv4 address record.
	A RecordType = "A"
	// AAAA is an IPv6 address record.
	AAAA RecordType = "AAAA"
	// CAA is a certification-authority-authorization record.
	CAA RecordType = "CAA"
	// CNAME is a canonical-name (alias) record.
	CNAME RecordType = "CNAME"
	// MX is a mail-exchange record.
	MX RecordType = "MX"
	// NS is a name-server record.
	NS RecordType = "NS"
	// PTR is a pointer record.
	PTR RecordType = "PTR"
	// SOA is a start-of-authority record.
	SOA RecordType = "SOA"
	// SRV is a service-locator record.
	SRV RecordType = "SRV"
	// TXT is a text record.
	TXT RecordType = "TXT"
)

// PossibleRecordTypeValues returns an array of possible values for the RecordType const type.
func PossibleRecordTypeValues() []RecordType {
	return []RecordType{A, AAAA, CAA, CNAME, MX, NS, PTR, SOA, SRV, TXT}
}
// NOTE: all model fields below are pointers so that an unset field can be
// distinguished from a zero value and omitted from the JSON payload
// (omitempty on a non-pointer would also drop legitimate zero values).

// AaaaRecord an AAAA record.
type AaaaRecord struct {
	// Ipv6Address - The IPv6 address of this AAAA record.
	Ipv6Address *string `json:"ipv6Address,omitempty"`
}

// ARecord an A record.
type ARecord struct {
	// Ipv4Address - The IPv4 address of this A record.
	Ipv4Address *string `json:"ipv4Address,omitempty"`
}

// CaaRecord a CAA record.
type CaaRecord struct {
	// Flags - The flags for this CAA record as an integer between 0 and 255.
	Flags *int32 `json:"flags,omitempty"`
	// Tag - The tag for this CAA record.
	Tag *string `json:"tag,omitempty"`
	// Value - The value for this CAA record.
	Value *string `json:"value,omitempty"`
}

// CloudError an error message
type CloudError struct {
	// Error - The error message body
	Error *CloudErrorBody `json:"error,omitempty"`
}

// CloudErrorBody the body of an error message
type CloudErrorBody struct {
	// Code - The error code
	Code *string `json:"code,omitempty"`
	// Message - A description of what caused the error
	Message *string `json:"message,omitempty"`
	// Target - The target resource of the error message
	Target *string `json:"target,omitempty"`
	// Details - Extra error information; may nest recursively.
	Details *[]CloudErrorBody `json:"details,omitempty"`
}

// CnameRecord a CNAME record.
type CnameRecord struct {
	// Cname - The canonical name for this CNAME record.
	Cname *string `json:"cname,omitempty"`
}

// MxRecord an MX record.
type MxRecord struct {
	// Preference - The preference value for this MX record.
	Preference *int32 `json:"preference,omitempty"`
	// Exchange - The domain name of the mail host for this MX record.
	Exchange *string `json:"exchange,omitempty"`
}

// NsRecord an NS record.
type NsRecord struct {
	// Nsdname - The name server name for this NS record.
	Nsdname *string `json:"nsdname,omitempty"`
}

// PtrRecord a PTR record.
type PtrRecord struct {
	// Ptrdname - The PTR target domain name for this PTR record.
	Ptrdname *string `json:"ptrdname,omitempty"`
}

// RecordSet describes a DNS record set (a collection of DNS records with the same name and type).
// The embedded *RecordSetProperties is flattened into the top-level JSON
// object under "properties" by the custom (un)marshalers below.
type RecordSet struct {
	autorest.Response `json:"-"`
	// ID - The ID of the record set.
	ID *string `json:"id,omitempty"`
	// Name - The name of the record set.
	Name *string `json:"name,omitempty"`
	// Type - The type of the record set.
	Type *string `json:"type,omitempty"`
	// Etag - The etag of the record set.
	Etag *string `json:"etag,omitempty"`
	// RecordSetProperties - The properties of the record set.
	*RecordSetProperties `json:"properties,omitempty"`
}
// MarshalJSON is the custom marshaler for RecordSet.
func (rs RecordSet) MarshalJSON() ([]byte, error) {
objectMap := make(map[string]interface{})
if rs.ID != nil {
objectMap["id"] = rs.ID
}
if rs.Name != nil {
objectMap["name"] = rs.Name
}
if rs.Type != nil {
objectMap["type"] = rs.Type
}
if rs.Etag != nil {
objectMap["etag"] = rs.Etag
}
if rs.RecordSetProperties != nil {
objectMap["properties"] = rs.RecordSetProperties
}
return json.Marshal(objectMap)
}
// UnmarshalJSON is the custom unmarshaler for RecordSet struct.
func (rs *RecordSet) UnmarshalJSON(body []byte) error {
var m map[string]*json.RawMessage
err := json.Unmarshal(body, &m)
if err != nil {
return err
}
for k, v := range m {
switch k {
case "id":
if v != nil {
var ID string
err = json.Unmarshal(*v, &ID)
if err != nil {
return err
}
rs.ID = &ID
}
case "name":
if v != nil {
var name string
err = json.Unmarshal(*v, &name)
if err != nil {
return err
}
rs.Name = &name
}
case "type":
if v != nil {
var typeVar string
err = json.Unmarshal(*v, &typeVar)
if err != nil {
return err
}
rs.Type = &typeVar
}
case "etag":
if v != nil {
var etag string
err = json.Unmarshal(*v, &etag)
if err != nil {
return err
}
rs.Etag = &etag
}
case "properties":
if v != nil {
var recordSetProperties RecordSetProperties
err = json.Unmarshal(*v, &recordSetProperties)
if err != nil {
return err
}
rs.RecordSetProperties = &recordSetProperties
}
}
}
return nil
}
// RecordSetListResult the response to a record set List operation.
type RecordSetListResult struct {
	autorest.Response `json:"-"`
	// Value - Information about the record sets in the response.
	Value *[]RecordSet `json:"value,omitempty"`
	// NextLink - The continuation token for the next page of results.
	NextLink *string `json:"nextLink,omitempty"`
}

// RecordSetListResultIterator provides access to a complete listing of RecordSet values.
// It flattens page-based results into a single element-by-element traversal.
type RecordSetListResultIterator struct {
	i    int                     // index into the current page's values
	page RecordSetListResultPage // current page; advanced when i runs off the end
}
// Next advances to the next value. If there was an error making
// the request the iterator does not advance and the error is returned.
// Next advances to the next value. If there was an error making
// the request the iterator does not advance and the error is returned.
func (iter *RecordSetListResultIterator) Next() error {
	iter.i++
	// Fetch the next page only once the current one is exhausted.
	if iter.i >= len(iter.page.Values()) {
		if err := iter.page.Next(); err != nil {
			iter.i-- // roll back so the iterator position is unchanged
			return err
		}
		iter.i = 0
	}
	return nil
}
// NotDone returns true if the enumeration should be started or is not yet complete.
func (iter RecordSetListResultIterator) NotDone() bool {
return iter.page.NotDone() && iter.i < len(iter.page.Values())
}
// Response returns the raw server response from the last page request.
func (iter RecordSetListResultIterator) Response() RecordSetListResult {
return iter.page.Response()
}
// Value returns the current value or a zero-initialized value if the
// iterator has advanced beyond the end of the collection.
func (iter RecordSetListResultIterator) Value() RecordSet {
if !iter.page.NotDone() {
return RecordSet{}
}
return iter.page.Values()[iter.i]
}
// IsEmpty returns true if the ListResult contains no values.
func (rslr RecordSetListResult) IsEmpty() bool {
return rslr.Value == nil || len(*rslr.Value) == 0
}
// recordSetListResultPreparer prepares a request to retrieve the next set of results.
// It returns nil if no more results exist.
func (rslr RecordSetListResult) recordSetListResultPreparer() (*http.Request, error) {
if rslr.NextLink == nil || len(to.String(rslr.NextLink)) < 1 {
return nil, nil
}
return autorest.Prepare(&http.Request{},
autorest.AsJSON(),
autorest.AsGet(),
autorest.WithBaseURL(to.String(rslr.NextLink)))
}
// RecordSetListResultPage contains a page of RecordSet values.
type RecordSetListResultPage struct {
	fn   func(RecordSetListResult) (RecordSetListResult, error)
	rslr RecordSetListResult
}

// Next advances to the next page of values. If there was an error making
// the request the page does not advance and the error is returned.
func (page *RecordSetListResultPage) Next() error {
	next, err := page.fn(page.rslr)
	if err == nil {
		page.rslr = next
	}
	return err
}

// NotDone returns true if the page enumeration should be started or is not yet complete.
func (page RecordSetListResultPage) NotDone() bool {
	return !page.rslr.IsEmpty()
}

// Response returns the raw server response from the last page request.
func (page RecordSetListResultPage) Response() RecordSetListResult {
	return page.rslr
}

// Values returns the slice of values for the current page or nil if there are no values.
func (page RecordSetListResultPage) Values() []RecordSet {
	if !page.rslr.IsEmpty() {
		return *page.rslr.Value
	}
	return nil
}
// RecordSetProperties represents the properties of the records in the record set.
// NOTE(review): presumably only the list matching the record set's type is
// populated on any given set — not enforced here; confirm against the service.
type RecordSetProperties struct {
	// Metadata - The metadata attached to the record set.
	Metadata map[string]*string `json:"metadata"`
	// TTL - The TTL (time-to-live) of the records in the record set.
	TTL *int64 `json:"TTL,omitempty"`
	// Fqdn - Fully qualified domain name of the record set.
	Fqdn *string `json:"fqdn,omitempty"`
	// ARecords - The list of A records in the record set.
	ARecords *[]ARecord `json:"ARecords,omitempty"`
	// AaaaRecords - The list of AAAA records in the record set.
	AaaaRecords *[]AaaaRecord `json:"AAAARecords,omitempty"`
	// MxRecords - The list of MX records in the record set.
	MxRecords *[]MxRecord `json:"MXRecords,omitempty"`
	// NsRecords - The list of NS records in the record set.
	NsRecords *[]NsRecord `json:"NSRecords,omitempty"`
	// PtrRecords - The list of PTR records in the record set.
	PtrRecords *[]PtrRecord `json:"PTRRecords,omitempty"`
	// SrvRecords - The list of SRV records in the record set.
	SrvRecords *[]SrvRecord `json:"SRVRecords,omitempty"`
	// TxtRecords - The list of TXT records in the record set.
	TxtRecords *[]TxtRecord `json:"TXTRecords,omitempty"`
	// CnameRecord - The CNAME record in the record set.
	CnameRecord *CnameRecord `json:"CNAMERecord,omitempty"`
	// SoaRecord - The SOA record in the record set.
	SoaRecord *SoaRecord `json:"SOARecord,omitempty"`
	// CaaRecords - The list of CAA records in the record set.
	CaaRecords *[]CaaRecord `json:"caaRecords,omitempty"`
}
// MarshalJSON is the custom marshaler for RecordSetProperties.
func (rsp RecordSetProperties) MarshalJSON() ([]byte, error) {
objectMap := make(map[string]interface{})
if rsp.Metadata != nil {
objectMap["metadata"] = rsp.Metadata
}
if rsp.TTL != nil {
objectMap["TTL"] = rsp.TTL
}
if rsp.Fqdn != nil {
objectMap["fqdn"] = rsp.Fqdn
}
if rsp.ARecords != nil {
objectMap["ARecords"] = rsp.ARecords
}
if rsp.AaaaRecords != nil {
objectMap["AAAARecords"] = rsp.AaaaRecords
}
if rsp.MxRecords != nil {
objectMap["MXRecords"] = rsp.MxRecords
}
if rsp.NsRecords != nil {
objectMap["NSRecords"] = rsp.NsRecords
}
if rsp.PtrRecords != nil {
objectMap["PTRRecords"] = rsp.PtrRecords
}
if rsp.SrvRecords != nil {
objectMap["SRVRecords"] = rsp.SrvRecords
}
if rsp.TxtRecords != nil {
objectMap["TXTRecords"] = rsp.TxtRecords
}
if rsp.CnameRecord != nil {
objectMap["CNAMERecord"] = rsp.CnameRecord
}
if rsp.SoaRecord != nil {
objectMap["SOARecord"] = rsp.SoaRecord
}
if rsp.CaaRecords != nil {
objectMap["caaRecords"] = rsp.CaaRecords
}
return json.Marshal(objectMap)
}
// RecordSetUpdateParameters parameters supplied to update a record set.
type RecordSetUpdateParameters struct {
	// RecordSet - Specifies information about the record set being updated.
	RecordSet *RecordSet `json:"RecordSet,omitempty"`
}

// Resource common properties of an Azure Resource Manager resource
type Resource struct {
	// ID - Resource ID.
	ID *string `json:"id,omitempty"`
	// Name - Resource name.
	Name *string `json:"name,omitempty"`
	// Type - Resource type.
	Type *string `json:"type,omitempty"`
	// Location - Resource location.
	Location *string `json:"location,omitempty"`
	// Tags - Resource tags.
	Tags map[string]*string `json:"tags"`
}
// MarshalJSON is the custom marshaler for Resource.
func (r Resource) MarshalJSON() ([]byte, error) {
objectMap := make(map[string]interface{})
if r.ID != nil {
objectMap["id"] = r.ID
}
if r.Name != nil {
objectMap["name"] = r.Name
}
if r.Type != nil {
objectMap["type"] = r.Type
}
if r.Location != nil {
objectMap["location"] = r.Location
}
if r.Tags != nil {
objectMap["tags"] = r.Tags
}
return json.Marshal(objectMap)
}
// SoaRecord an SOA record.
type SoaRecord struct {
	// Host - The domain name of the authoritative name server for this SOA record.
	Host *string `json:"host,omitempty"`
	// Email - The email contact for this SOA record.
	Email *string `json:"email,omitempty"`
	// SerialNumber - The serial number for this SOA record.
	SerialNumber *int64 `json:"serialNumber,omitempty"`
	// RefreshTime - The refresh value for this SOA record.
	RefreshTime *int64 `json:"refreshTime,omitempty"`
	// RetryTime - The retry time for this SOA record.
	RetryTime *int64 `json:"retryTime,omitempty"`
	// ExpireTime - The expire time for this SOA record.
	ExpireTime *int64 `json:"expireTime,omitempty"`
	// MinimumTTL - The minimum value for this SOA record. By convention this is used to determine the negative caching duration.
	MinimumTTL *int64 `json:"minimumTTL,omitempty"`
}

// SrvRecord an SRV record.
type SrvRecord struct {
	// Priority - The priority value for this SRV record.
	Priority *int32 `json:"priority,omitempty"`
	// Weight - The weight value for this SRV record.
	Weight *int32 `json:"weight,omitempty"`
	// Port - The port value for this SRV record.
	Port *int32 `json:"port,omitempty"`
	// Target - The target domain name for this SRV record.
	Target *string `json:"target,omitempty"`
}

// SubResource a reference to a another resource
type SubResource struct {
	// ID - Resource Id.
	ID *string `json:"id,omitempty"`
}

// TxtRecord a TXT record.
type TxtRecord struct {
	// Value - The text value of this TXT record, split into its string segments.
	Value *[]string `json:"value,omitempty"`
}
// Zone describes a DNS zone.
// The embedded *ZoneProperties is flattened under the "properties" JSON key
// by the custom (un)marshalers below.
type Zone struct {
	autorest.Response `json:"-"`
	// Etag - The etag of the zone.
	Etag *string `json:"etag,omitempty"`
	// ZoneProperties - The properties of the zone.
	*ZoneProperties `json:"properties,omitempty"`
	// ID - Resource ID.
	ID *string `json:"id,omitempty"`
	// Name - Resource name.
	Name *string `json:"name,omitempty"`
	// Type - Resource type.
	Type *string `json:"type,omitempty"`
	// Location - Resource location.
	Location *string `json:"location,omitempty"`
	// Tags - Resource tags.
	Tags map[string]*string `json:"tags"`
}
// MarshalJSON is the custom marshaler for Zone.
func (z Zone) MarshalJSON() ([]byte, error) {
objectMap := make(map[string]interface{})
if z.Etag != nil {
objectMap["etag"] = z.Etag
}
if z.ZoneProperties != nil {
objectMap["properties"] = z.ZoneProperties
}
if z.ID != nil {
objectMap["id"] = z.ID
}
if z.Name != nil {
objectMap["name"] = z.Name
}
if z.Type != nil {
objectMap["type"] = z.Type
}
if z.Location != nil {
objectMap["location"] = z.Location
}
if z.Tags != nil {
objectMap["tags"] = z.Tags
}
return json.Marshal(objectMap)
}
// UnmarshalJSON is the custom unmarshaler for Zone struct.
func (z *Zone) UnmarshalJSON(body []byte) error {
var m map[string]*json.RawMessage
err := json.Unmarshal(body, &m)
if err != nil {
return err
}
for k, v := range m {
switch k {
case "etag":
if v != nil {
var etag string
err = json.Unmarshal(*v, &etag)
if err != nil {
return err
}
z.Etag = &etag
}
case "properties":
if v != nil {
var zoneProperties ZoneProperties
err = json.Unmarshal(*v, &zoneProperties)
if err != nil {
return err
}
z.ZoneProperties = &zoneProperties
}
case "id":
if v != nil {
var ID string
err = json.Unmarshal(*v, &ID)
if err != nil {
return err
}
z.ID = &ID
}
case "name":
if v != nil {
var name string
err = json.Unmarshal(*v, &name)
if err != nil {
return err
}
z.Name = &name
}
case "type":
if v != nil {
var typeVar string
err = json.Unmarshal(*v, &typeVar)
if err != nil {
return err
}
z.Type = &typeVar
}
case "location":
if v != nil {
var location string
err = json.Unmarshal(*v, &location)
if err != nil {
return err
}
z.Location = &location
}
case "tags":
if v != nil {
var tags map[string]*string
err = json.Unmarshal(*v, &tags)
if err != nil {
return err
}
z.Tags = tags
}
}
}
return nil
}
// ZoneListResult the response to a Zone List or ListAll operation.
type ZoneListResult struct {
	autorest.Response `json:"-"`
	// Value - Information about the DNS zones.
	Value *[]Zone `json:"value,omitempty"`
	// NextLink - The continuation token for the next page of results.
	NextLink *string `json:"nextLink,omitempty"`
}

// ZoneListResultIterator provides access to a complete listing of Zone values.
// It flattens page-based results into a single element-by-element traversal.
type ZoneListResultIterator struct {
	i    int                // index into the current page's values
	page ZoneListResultPage // current page; advanced when i runs off the end
}
// Next advances to the next value. If there was an error making
// the request the iterator does not advance and the error is returned.
// Next advances to the next value. If there was an error making
// the request the iterator does not advance and the error is returned.
func (iter *ZoneListResultIterator) Next() error {
	iter.i++
	// Fetch the next page only once the current one is exhausted.
	if iter.i >= len(iter.page.Values()) {
		if err := iter.page.Next(); err != nil {
			iter.i-- // roll back so the iterator position is unchanged
			return err
		}
		iter.i = 0
	}
	return nil
}
// NotDone returns true if the enumeration should be started or is not yet complete.
func (iter ZoneListResultIterator) NotDone() bool {
return iter.page.NotDone() && iter.i < len(iter.page.Values())
}
// Response returns the raw server response from the last page request.
func (iter ZoneListResultIterator) Response() ZoneListResult {
return iter.page.Response()
}
// Value returns the current value or a zero-initialized value if the
// iterator has advanced beyond the end of the collection.
func (iter ZoneListResultIterator) Value() Zone {
if !iter.page.NotDone() {
return Zone{}
}
return iter.page.Values()[iter.i]
}
// IsEmpty returns true if the ListResult contains no values.
func (zlr ZoneListResult) IsEmpty() bool {
return zlr.Value == nil || len(*zlr.Value) == 0
}
// zoneListResultPreparer prepares a request to retrieve the next set of results.
// It returns nil if no more results exist.
func (zlr ZoneListResult) zoneListResultPreparer() (*http.Request, error) {
if zlr.NextLink == nil || len(to.String(zlr.NextLink)) < 1 {
return nil, nil
}
return autorest.Prepare(&http.Request{},
autorest.AsJSON(),
autorest.AsGet(),
autorest.WithBaseURL(to.String(zlr.NextLink)))
}
// ZoneListResultPage contains a page of Zone values.
type ZoneListResultPage struct {
	fn  func(ZoneListResult) (ZoneListResult, error)
	zlr ZoneListResult
}

// Next advances to the next page of values. If there was an error making
// the request the page does not advance and the error is returned.
func (page *ZoneListResultPage) Next() error {
	next, err := page.fn(page.zlr)
	if err == nil {
		page.zlr = next
	}
	return err
}

// NotDone returns true if the page enumeration should be started or is not yet complete.
func (page ZoneListResultPage) NotDone() bool {
	return !page.zlr.IsEmpty()
}

// Response returns the raw server response from the last page request.
func (page ZoneListResultPage) Response() ZoneListResult {
	return page.zlr
}

// Values returns the slice of values for the current page or nil if there are no values.
func (page ZoneListResultPage) Values() []Zone {
	if !page.zlr.IsEmpty() {
		return *page.zlr.Value
	}
	return nil
}
// ZoneProperties represents the properties of the zone.
type ZoneProperties struct {
	// MaxNumberOfRecordSets - The maximum number of record sets that can be created in this DNS zone.  This is a read-only property and any attempt to set this value will be ignored.
	MaxNumberOfRecordSets *int64 `json:"maxNumberOfRecordSets,omitempty"`
	// NumberOfRecordSets - The current number of record sets in this DNS zone.  This is a read-only property and any attempt to set this value will be ignored.
	NumberOfRecordSets *int64 `json:"numberOfRecordSets,omitempty"`
	// NameServers - The name servers for this DNS zone. This is a read-only property and any attempt to set this value will be ignored.
	NameServers *[]string `json:"nameServers,omitempty"`
}

// ZonesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running operation.
type ZonesDeleteFuture struct {
	azure.Future
}
// Result returns the result of the asynchronous operation.
// If the operation has not completed it will return an error.
// Polls via future.Done; on success only the raw HTTP response is surfaced
// since a zone delete has no body to decode.
func (future *ZonesDeleteFuture) Result(client ZonesClient) (ar autorest.Response, err error) {
	var done bool
	done, err = future.Done(client)
	if err != nil {
		// Wrap polling failures with context identifying this future.
		err = autorest.NewErrorWithError(err, "dns.ZonesDeleteFuture", "Result", future.Response(), "Polling failure")
		return
	}
	if !done {
		err = azure.NewAsyncOpIncompleteError("dns.ZonesDeleteFuture")
		return
	}
	ar.Response = future.Response()
	return
}
| linzhaoming/origin | vendor/github.com/Azure/azure-sdk-for-go/services/dns/mgmt/2017-09-01/dns/models.go | GO | apache-2.0 | 21,335 |
import io
import json
import zlib
from unittest import mock
import pytest
import aiohttp
from aiohttp import payload
from aiohttp.hdrs import (CONTENT_DISPOSITION, CONTENT_ENCODING,
CONTENT_TRANSFER_ENCODING, CONTENT_TYPE)
from aiohttp.helpers import parse_mimetype
from aiohttp.multipart import MultipartResponseWrapper
from aiohttp.streams import DEFAULT_LIMIT as stream_reader_default_limit
from aiohttp.streams import StreamReader
from aiohttp.test_utils import make_mocked_coro
BOUNDARY = b'--:'
@pytest.fixture
def buf():
    # Mutable byte buffer that the ``stream`` fixture's writer appends to,
    # letting tests inspect everything written to the mock stream.
    return bytearray()
@pytest.fixture
def stream(buf):
    # A mock writer whose async ``write`` coroutine appends every chunk to
    # the shared ``buf`` fixture so tests can assert on the serialized bytes.
    mocked = mock.Mock()

    async def append_chunk(chunk):
        buf.extend(chunk)

    mocked.write.side_effect = append_chunk
    return mocked
@pytest.fixture
def writer():
    # Fresh MultipartWriter with a fixed ':' boundary (matching BOUNDARY above).
    return aiohttp.MultipartWriter(boundary=':')
class Response:
    """Minimal response stand-in: just a headers mapping plus a content stream."""

    def __init__(self, headers, content):
        self.headers = headers
        self.content = content
class Stream:
    """In-memory async byte stream stub backed by a BytesIO buffer."""

    def __init__(self, content):
        self.content = io.BytesIO(content)

    async def read(self, size=None):
        return self.content.read(size)

    def at_eof(self):
        # EOF when the cursor has reached the end of the underlying buffer.
        return len(self.content.getbuffer()) == self.content.tell()

    async def readline(self):
        return self.content.readline()

    def unread_data(self, data):
        # Push bytes back by rebuilding the buffer: pushed data first,
        # then whatever was left unread.
        remainder = self.content.read()
        self.content = io.BytesIO(data + remainder)
class StreamWithShortenRead(Stream):
    """Stream variant whose first sized read returns only half the requested
    bytes, simulating a short read from the network."""

    def __init__(self, content):
        self._first = True
        super().__init__(content)

    async def read(self, size=None):
        if self._first and size is not None:
            # Halve only the very first explicit-size read.
            self._first = False
            size //= 2
        return await super().read(size)
class TestMultipartResponseWrapper:
    # Tests for MultipartResponseWrapper, which pairs a multipart stream with
    # its owning response object (both mocked here).

    def test_at_eof(self):
        # at_eof() must delegate to the underlying response content stream.
        wrapper = MultipartResponseWrapper(mock.Mock(),
                                           mock.Mock())
        wrapper.at_eof()
        assert wrapper.resp.content.at_eof.called

    async def test_next(self):
        # next() must delegate to the wrapped multipart stream.
        wrapper = MultipartResponseWrapper(mock.Mock(),
                                           mock.Mock())
        wrapper.stream.next = make_mocked_coro(b'')
        wrapper.stream.at_eof.return_value = False
        await wrapper.next()
        assert wrapper.stream.next.called

    async def test_release(self):
        # Explicit release() must release the owning response.
        wrapper = MultipartResponseWrapper(mock.Mock(),
                                           mock.Mock())
        wrapper.resp.release = make_mocked_coro(None)
        await wrapper.release()
        assert wrapper.resp.release.called

    async def test_release_when_stream_at_eof(self):
        # Advancing past the end of the stream must auto-release the response.
        wrapper = MultipartResponseWrapper(mock.Mock(),
                                           mock.Mock())
        wrapper.resp.release = make_mocked_coro(None)
        wrapper.stream.next = make_mocked_coro(b'')
        wrapper.stream.at_eof.return_value = True
        await wrapper.next()
        assert wrapper.stream.next.called
        assert wrapper.resp.release.called
class TestPartReader:
    # Tests for aiohttp.BodyPartReader reading a single multipart body part
    # delimited by BOUNDARY (b'--:').

    async def test_next(self):
        # next() yields the part's payload and stops at the boundary.
        obj = aiohttp.BodyPartReader(
            BOUNDARY, {}, Stream(b'Hello, world!\r\n--:'))
        result = await obj.next()
        assert b'Hello, world!' == result
        assert obj.at_eof()

    async def test_next_next(self):
        # A second next() after EOF must return None, not raise.
        obj = aiohttp.BodyPartReader(
            BOUNDARY, {}, Stream(b'Hello, world!\r\n--:'))
        result = await obj.next()
        assert b'Hello, world!' == result
        assert obj.at_eof()
        result = await obj.next()
        assert result is None

    async def test_read(self):
        # read() returns the whole payload in one call.
        obj = aiohttp.BodyPartReader(
            BOUNDARY, {}, Stream(b'Hello, world!\r\n--:'))
        result = await obj.read()
        assert b'Hello, world!' == result
        assert obj.at_eof()

    async def test_read_chunk_at_eof(self):
        # read_chunk() at EOF yields an empty bytes object.
        obj = aiohttp.BodyPartReader(
            BOUNDARY, {}, Stream(b'--:'))
        obj._at_eof = True
        result = await obj.read_chunk()
        assert b'' == result

    async def test_read_chunk_without_content_length(self):
        # Without Content-Length, chunked reads must still terminate cleanly
        # at the boundary, with a final empty chunk.
        obj = aiohttp.BodyPartReader(
            BOUNDARY, {}, Stream(b'Hello, world!\r\n--:'))
        c1 = await obj.read_chunk(8)
        c2 = await obj.read_chunk(8)
        c3 = await obj.read_chunk(8)
        assert c1 + c2 == b'Hello, world!'
        assert c3 == b''

    async def test_read_incomplete_chunk(self, loop):
        # Data arriving in several short reads must be reassembled correctly.
        stream = Stream(b'')

        def prepare(data):
            f = loop.create_future()
            f.set_result(data)
            return f

        with mock.patch.object(stream, 'read', side_effect=[
            prepare(b'Hello, '),
            prepare(b'World'),
            prepare(b'!\r\n--:'),
            prepare(b'')
        ]):
            obj = aiohttp.BodyPartReader(
                BOUNDARY, {}, stream)
            c1 = await obj.read_chunk(8)
            assert c1 == b'Hello, '
            c2 = await obj.read_chunk(8)
            assert c2 == b'World'
            c3 = await obj.read_chunk(8)
            assert c3 == b'!'

    async def test_read_all_at_once(self):
        # A single unsized read_chunk() returns the full payload; the next
        # call returns b'' and the reader is at EOF.
        stream = Stream(b'Hello, World!\r\n--:--\r\n')
        obj = aiohttp.BodyPartReader(BOUNDARY, {}, stream)
        result = await obj.read_chunk()
        assert b'Hello, World!' == result
        result = await obj.read_chunk()
        assert b'' == result
        assert obj.at_eof()

    async def test_read_incomplete_body_chunked(self):
        # A stream truncated before the closing boundary must raise rather
        # than loop forever; everything read so far is still delivered.
        stream = Stream(b'Hello, World!\r\n-')
        obj = aiohttp.BodyPartReader(BOUNDARY, {}, stream)
        result = b''
        with pytest.raises(AssertionError):
            for _ in range(4):
                result += await obj.read_chunk(7)
        assert b'Hello, World!\r\n-' == result

    async def test_read_boundary_with_incomplete_chunk(self, loop):
        # The boundary itself may be split across network reads; the reader
        # must not mistake a partial boundary for payload.
        stream = Stream(b'')

        def prepare(data):
            f = loop.create_future()
            f.set_result(data)
            return f

        with mock.patch.object(stream, 'read', side_effect=[
            prepare(b'Hello, World'),
            prepare(b'!\r\n'),
            prepare(b'--:'),
            prepare(b'')
        ]):
            obj = aiohttp.BodyPartReader(
                BOUNDARY, {}, stream)
            c1 = await obj.read_chunk(12)
            assert c1 == b'Hello, World'
            c2 = await obj.read_chunk(8)
            assert c2 == b'!'
            c3 = await obj.read_chunk(8)
            assert c3 == b''
async def test_multi_read_chunk(self):
stream = Stream(b'Hello,\r\n--:\r\n\r\nworld!\r\n--:--')
obj = aiohttp.BodyPartReader(BOUNDARY, {}, stream)
result = await obj.read_chunk(8)
assert b'Hello,' == result
result = await obj.read_chunk(8)
assert b'' == result
assert obj.at_eof()
async def test_read_chunk_properly_counts_read_bytes(self):
expected = b'.' * 10
size = len(expected)
obj = aiohttp.BodyPartReader(
BOUNDARY, {'CONTENT-LENGTH': size},
StreamWithShortenRead(expected + b'\r\n--:--'))
result = bytearray()
while True:
chunk = await obj.read_chunk()
if not chunk:
break
result.extend(chunk)
assert size == len(result)
assert b'.' * size == result
assert obj.at_eof()
async def test_read_does_not_read_boundary(self):
stream = Stream(b'Hello, world!\r\n--:')
obj = aiohttp.BodyPartReader(
BOUNDARY, {}, stream)
result = await obj.read()
assert b'Hello, world!' == result
assert b'--:' == (await stream.read())
async def test_multiread(self):
obj = aiohttp.BodyPartReader(
BOUNDARY, {}, Stream(b'Hello,\r\n--:\r\n\r\nworld!\r\n--:--'))
result = await obj.read()
assert b'Hello,' == result
result = await obj.read()
assert b'' == result
assert obj.at_eof()
async def test_read_multiline(self):
obj = aiohttp.BodyPartReader(
BOUNDARY, {}, Stream(b'Hello\n,\r\nworld!\r\n--:--'))
result = await obj.read()
assert b'Hello\n,\r\nworld!' == result
result = await obj.read()
assert b'' == result
assert obj.at_eof()
async def test_read_respects_content_length(self):
obj = aiohttp.BodyPartReader(
BOUNDARY, {'CONTENT-LENGTH': 100500},
Stream(b'.' * 100500 + b'\r\n--:--'))
result = await obj.read()
assert b'.' * 100500 == result
assert obj.at_eof()
async def test_read_with_content_encoding_gzip(self):
obj = aiohttp.BodyPartReader(
BOUNDARY, {CONTENT_ENCODING: 'gzip'},
Stream(b'\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\x03\x0b\xc9\xccMU'
b'(\xc9W\x08J\xcdI\xacP\x04\x00$\xfb\x9eV\x0e\x00\x00\x00'
b'\r\n--:--'))
result = await obj.read(decode=True)
assert b'Time to Relax!' == result
async def test_read_with_content_encoding_deflate(self):
obj = aiohttp.BodyPartReader(
BOUNDARY, {CONTENT_ENCODING: 'deflate'},
Stream(b'\x0b\xc9\xccMU(\xc9W\x08J\xcdI\xacP\x04\x00\r\n--:--'))
result = await obj.read(decode=True)
assert b'Time to Relax!' == result
async def test_read_with_content_encoding_identity(self):
thing = (b'\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\x03\x0b\xc9\xccMU'
b'(\xc9W\x08J\xcdI\xacP\x04\x00$\xfb\x9eV\x0e\x00\x00\x00'
b'\r\n')
obj = aiohttp.BodyPartReader(
BOUNDARY, {CONTENT_ENCODING: 'identity'},
Stream(thing + b'--:--'))
result = await obj.read(decode=True)
assert thing[:-2] == result
async def test_read_with_content_encoding_unknown(self):
obj = aiohttp.BodyPartReader(
BOUNDARY, {CONTENT_ENCODING: 'snappy'},
Stream(b'\x0e4Time to Relax!\r\n--:--'))
with pytest.raises(RuntimeError):
await obj.read(decode=True)
async def test_read_with_content_transfer_encoding_base64(self):
obj = aiohttp.BodyPartReader(
BOUNDARY, {CONTENT_TRANSFER_ENCODING: 'base64'},
Stream(b'VGltZSB0byBSZWxheCE=\r\n--:--'))
result = await obj.read(decode=True)
assert b'Time to Relax!' == result
async def test_read_with_content_transfer_encoding_quoted_printable(self):
obj = aiohttp.BodyPartReader(
BOUNDARY, {CONTENT_TRANSFER_ENCODING: 'quoted-printable'},
Stream(b'=D0=9F=D1=80=D0=B8=D0=B2=D0=B5=D1=82,'
b' =D0=BC=D0=B8=D1=80!\r\n--:--'))
result = await obj.read(decode=True)
expected = (b'\xd0\x9f\xd1\x80\xd0\xb8\xd0\xb2\xd0\xb5\xd1\x82,'
b' \xd0\xbc\xd0\xb8\xd1\x80!')
assert result == expected
@pytest.mark.parametrize('encoding', ('binary', '8bit', '7bit'))
async def test_read_with_content_transfer_encoding_binary(self, encoding):
data = b'\xd0\x9f\xd1\x80\xd0\xb8\xd0\xb2\xd0\xb5\xd1\x82,' \
b' \xd0\xbc\xd0\xb8\xd1\x80!'
obj = aiohttp.BodyPartReader(
BOUNDARY, {CONTENT_TRANSFER_ENCODING: encoding},
Stream(data + b'\r\n--:--'))
result = await obj.read(decode=True)
assert data == result
async def test_read_with_content_transfer_encoding_unknown(self):
obj = aiohttp.BodyPartReader(
BOUNDARY, {CONTENT_TRANSFER_ENCODING: 'unknown'},
Stream(b'\x0e4Time to Relax!\r\n--:--'))
with pytest.raises(RuntimeError):
await obj.read(decode=True)
async def test_read_text(self):
obj = aiohttp.BodyPartReader(
BOUNDARY, {}, Stream(b'Hello, world!\r\n--:--'))
result = await obj.text()
assert 'Hello, world!' == result
async def test_read_text_default_encoding(self):
obj = aiohttp.BodyPartReader(
BOUNDARY, {},
Stream('Привет, Мир!\r\n--:--'.encode('utf-8')))
result = await obj.text()
assert 'Привет, Мир!' == result
async def test_read_text_encoding(self):
obj = aiohttp.BodyPartReader(
BOUNDARY, {},
Stream('Привет, Мир!\r\n--:--'.encode('cp1251')))
result = await obj.text(encoding='cp1251')
assert 'Привет, Мир!' == result
async def test_read_text_guess_encoding(self):
obj = aiohttp.BodyPartReader(
BOUNDARY, {CONTENT_TYPE: 'text/plain;charset=cp1251'},
Stream('Привет, Мир!\r\n--:--'.encode('cp1251')))
result = await obj.text()
assert 'Привет, Мир!' == result
async def test_read_text_compressed(self):
obj = aiohttp.BodyPartReader(
BOUNDARY, {CONTENT_ENCODING: 'deflate',
CONTENT_TYPE: 'text/plain'},
Stream(b'\x0b\xc9\xccMU(\xc9W\x08J\xcdI\xacP\x04\x00\r\n--:--'))
result = await obj.text()
assert 'Time to Relax!' == result
async def test_read_text_while_closed(self):
obj = aiohttp.BodyPartReader(
BOUNDARY, {CONTENT_TYPE: 'text/plain'}, Stream(b''))
obj._at_eof = True
result = await obj.text()
assert '' == result
async def test_read_json(self):
obj = aiohttp.BodyPartReader(
BOUNDARY, {CONTENT_TYPE: 'application/json'},
Stream(b'{"test": "passed"}\r\n--:--'))
result = await obj.json()
assert {'test': 'passed'} == result
async def test_read_json_encoding(self):
obj = aiohttp.BodyPartReader(
BOUNDARY, {CONTENT_TYPE: 'application/json'},
Stream('{"тест": "пассед"}\r\n--:--'.encode('cp1251')))
result = await obj.json(encoding='cp1251')
assert {'тест': 'пассед'} == result
async def test_read_json_guess_encoding(self):
obj = aiohttp.BodyPartReader(
BOUNDARY, {CONTENT_TYPE: 'application/json; charset=cp1251'},
Stream('{"тест": "пассед"}\r\n--:--'.encode('cp1251')))
result = await obj.json()
assert {'тест': 'пассед'} == result
async def test_read_json_compressed(self):
obj = aiohttp.BodyPartReader(
BOUNDARY, {CONTENT_ENCODING: 'deflate',
CONTENT_TYPE: 'application/json'},
Stream(b'\xabV*I-.Q\xb2RP*H,.NMQ\xaa\x05\x00\r\n--:--'))
result = await obj.json()
assert {'test': 'passed'} == result
async def test_read_json_while_closed(self):
stream = Stream(b'')
obj = aiohttp.BodyPartReader(
BOUNDARY, {CONTENT_TYPE: 'application/json'}, stream)
obj._at_eof = True
result = await obj.json()
assert result is None
async def test_read_form(self):
obj = aiohttp.BodyPartReader(
BOUNDARY, {CONTENT_TYPE: 'application/x-www-form-urlencoded'},
Stream(b'foo=bar&foo=baz&boo=\r\n--:--'))
result = await obj.form()
assert [('foo', 'bar'), ('foo', 'baz'), ('boo', '')] == result
async def test_read_form_encoding(self):
obj = aiohttp.BodyPartReader(
BOUNDARY, {CONTENT_TYPE: 'application/x-www-form-urlencoded'},
Stream('foo=bar&foo=baz&boo=\r\n--:--'.encode('cp1251')))
result = await obj.form(encoding='cp1251')
assert [('foo', 'bar'), ('foo', 'baz'), ('boo', '')] == result
async def test_read_form_guess_encoding(self):
obj = aiohttp.BodyPartReader(
BOUNDARY,
{CONTENT_TYPE: 'application/x-www-form-urlencoded; charset=utf-8'},
Stream('foo=bar&foo=baz&boo=\r\n--:--'.encode('utf-8')))
result = await obj.form()
assert [('foo', 'bar'), ('foo', 'baz'), ('boo', '')] == result
async def test_read_form_while_closed(self):
stream = Stream(b'')
obj = aiohttp.BodyPartReader(
BOUNDARY,
{CONTENT_TYPE: 'application/x-www-form-urlencoded'}, stream)
obj._at_eof = True
result = await obj.form()
assert result is None
async def test_readline(self):
obj = aiohttp.BodyPartReader(
BOUNDARY, {}, Stream(b'Hello\n,\r\nworld!\r\n--:--'))
result = await obj.readline()
assert b'Hello\n' == result
result = await obj.readline()
assert b',\r\n' == result
result = await obj.readline()
assert b'world!' == result
result = await obj.readline()
assert b'' == result
assert obj.at_eof()
async def test_release(self):
stream = Stream(b'Hello,\r\n--:\r\n\r\nworld!\r\n--:--')
obj = aiohttp.BodyPartReader(
BOUNDARY, {}, stream)
await obj.release()
assert obj.at_eof()
assert b'--:\r\n\r\nworld!\r\n--:--' == stream.content.read()
async def test_release_respects_content_length(self):
obj = aiohttp.BodyPartReader(
BOUNDARY, {'CONTENT-LENGTH': 100500},
Stream(b'.' * 100500 + b'\r\n--:--'))
result = await obj.release()
assert result is None
assert obj.at_eof()
async def test_release_release(self):
stream = Stream(b'Hello,\r\n--:\r\n\r\nworld!\r\n--:--')
obj = aiohttp.BodyPartReader(
BOUNDARY, {}, stream)
await obj.release()
await obj.release()
assert b'--:\r\n\r\nworld!\r\n--:--' == stream.content.read()
async def test_filename(self):
part = aiohttp.BodyPartReader(
BOUNDARY,
{CONTENT_DISPOSITION: 'attachment; filename=foo.html'},
None)
assert 'foo.html' == part.filename
async def test_reading_long_part(self):
size = 2 * stream_reader_default_limit
protocol = mock.Mock(_reading_paused=False)
stream = StreamReader(protocol)
stream.feed_data(b'0' * size + b'\r\n--:--')
stream.feed_eof()
obj = aiohttp.BodyPartReader(
BOUNDARY, {}, stream)
data = await obj.read()
assert len(data) == size
class TestMultipartReader:
    """Tests for aiohttp.MultipartReader: part dispatch, iteration,
    prelude skipping and release().

    The outer boundary is ':' throughout; nested multiparts use '--:--'.
    """

    def test_from_response(self):
        resp = Response({CONTENT_TYPE: 'multipart/related;boundary=":"'},
                        Stream(b'--:\r\n\r\nhello\r\n--:--'))
        res = aiohttp.MultipartReader.from_response(resp)
        assert isinstance(res, MultipartResponseWrapper)
        assert isinstance(res.stream, aiohttp.MultipartReader)

    def test_bad_boundary(self):
        # RFC 2046 limits a boundary to 70 characters; 80 must be rejected.
        resp = Response(
            {CONTENT_TYPE: 'multipart/related;boundary=' + 'a' * 80},
            Stream(b''))
        with pytest.raises(ValueError):
            aiohttp.MultipartReader.from_response(resp)

    def test_dispatch(self):
        reader = aiohttp.MultipartReader(
            {CONTENT_TYPE: 'multipart/related;boundary=":"'},
            Stream(b'--:\r\n\r\necho\r\n--:--'))
        res = reader._get_part_reader({CONTENT_TYPE: 'text/plain'})
        assert isinstance(res, reader.part_reader_cls)

    def test_dispatch_bodypart(self):
        reader = aiohttp.MultipartReader(
            {CONTENT_TYPE: 'multipart/related;boundary=":"'},
            Stream(b'--:\r\n\r\necho\r\n--:--'))
        res = reader._get_part_reader({CONTENT_TYPE: 'text/plain'})
        assert isinstance(res, reader.part_reader_cls)

    def test_dispatch_multipart(self):
        # A part that is itself multipart must dispatch to a nested
        # MultipartReader, not a BodyPartReader.
        reader = aiohttp.MultipartReader(
            {CONTENT_TYPE: 'multipart/related;boundary=":"'},
            Stream(b'----:--\r\n'
                   b'\r\n'
                   b'test\r\n'
                   b'----:--\r\n'
                   b'\r\n'
                   b'passed\r\n'
                   b'----:----\r\n'
                   b'--:--'))
        res = reader._get_part_reader(
            {CONTENT_TYPE: 'multipart/related;boundary=--:--'})
        assert isinstance(res, reader.__class__)

    def test_dispatch_custom_multipart_reader(self):
        # multipart_reader_cls lets callers substitute their own reader
        # class for nested multiparts.
        class CustomReader(aiohttp.MultipartReader):
            pass

        reader = aiohttp.MultipartReader(
            {CONTENT_TYPE: 'multipart/related;boundary=":"'},
            Stream(b'----:--\r\n'
                   b'\r\n'
                   b'test\r\n'
                   b'----:--\r\n'
                   b'\r\n'
                   b'passed\r\n'
                   b'----:----\r\n'
                   b'--:--'))
        reader.multipart_reader_cls = CustomReader
        res = reader._get_part_reader(
            {CONTENT_TYPE: 'multipart/related;boundary=--:--'})
        assert isinstance(res, CustomReader)

    async def test_emit_next(self):
        reader = aiohttp.MultipartReader(
            {CONTENT_TYPE: 'multipart/related;boundary=":"'},
            Stream(b'--:\r\n\r\necho\r\n--:--'))
        res = await reader.next()
        assert isinstance(res, reader.part_reader_cls)

    async def test_invalid_boundary(self):
        # Stream boundary '---:' does not match the declared ':' boundary.
        reader = aiohttp.MultipartReader(
            {CONTENT_TYPE: 'multipart/related;boundary=":"'},
            Stream(b'---:\r\n\r\necho\r\n---:--'))
        with pytest.raises(ValueError):
            await reader.next()

    async def test_release(self):
        # release() must also drain nested multipart payloads.
        reader = aiohttp.MultipartReader(
            {CONTENT_TYPE: 'multipart/mixed;boundary=":"'},
            Stream(b'--:\r\n'
                   b'Content-Type: multipart/related;boundary=--:--\r\n'
                   b'\r\n'
                   b'----:--\r\n'
                   b'\r\n'
                   b'test\r\n'
                   b'----:--\r\n'
                   b'\r\n'
                   b'passed\r\n'
                   b'----:----\r\n'
                   b'\r\n'
                   b'--:--'))
        await reader.release()
        assert reader.at_eof()

    async def test_release_release(self):
        # A second release() is a harmless no-op.
        reader = aiohttp.MultipartReader(
            {CONTENT_TYPE: 'multipart/related;boundary=":"'},
            Stream(b'--:\r\n\r\necho\r\n--:--'))
        await reader.release()
        assert reader.at_eof()
        await reader.release()
        assert reader.at_eof()

    async def test_release_next(self):
        # next() after release() reports exhaustion with None.
        reader = aiohttp.MultipartReader(
            {CONTENT_TYPE: 'multipart/related;boundary=":"'},
            Stream(b'--:\r\n\r\necho\r\n--:--'))
        await reader.release()
        assert reader.at_eof()
        res = await reader.next()
        assert res is None

    async def test_second_next_releases_previous_object(self):
        reader = aiohttp.MultipartReader(
            {CONTENT_TYPE: 'multipart/related;boundary=":"'},
            Stream(b'--:\r\n'
                   b'\r\n'
                   b'test\r\n'
                   b'--:\r\n'
                   b'\r\n'
                   b'passed\r\n'
                   b'--:--'))
        first = await reader.next()
        assert isinstance(first, aiohttp.BodyPartReader)
        second = await reader.next()
        assert first.at_eof()
        assert not second.at_eof()

    async def test_release_without_read_the_last_object(self):
        reader = aiohttp.MultipartReader(
            {CONTENT_TYPE: 'multipart/related;boundary=":"'},
            Stream(b'--:\r\n'
                   b'\r\n'
                   b'test\r\n'
                   b'--:\r\n'
                   b'\r\n'
                   b'passed\r\n'
                   b'--:--'))
        first = await reader.next()
        second = await reader.next()
        third = await reader.next()
        assert first.at_eof()
        # Fix: the original asserted second.at_eof() twice (copy-paste
        # slip); once is sufficient, `third` is checked below.
        assert second.at_eof()
        assert third is None

    async def test_read_chunk_by_length_doesnt_breaks_reader(self):
        # Parts carry Content-Length; chunked reads must not desync the
        # reader from the part boundaries.
        reader = aiohttp.MultipartReader(
            {CONTENT_TYPE: 'multipart/related;boundary=":"'},
            Stream(b'--:\r\n'
                   b'Content-Length: 4\r\n\r\n'
                   b'test'
                   b'\r\n--:\r\n'
                   b'Content-Length: 6\r\n\r\n'
                   b'passed'
                   b'\r\n--:--'))
        body_parts = []
        while True:
            read_part = b''
            part = await reader.next()
            if part is None:
                break
            while not part.at_eof():
                read_part += await part.read_chunk(3)
            body_parts.append(read_part)
        assert body_parts == [b'test', b'passed']

    async def test_read_chunk_from_stream_doesnt_breaks_reader(self):
        # Same as above but without Content-Length headers; every chunk
        # handed out must be non-empty until EOF.
        reader = aiohttp.MultipartReader(
            {CONTENT_TYPE: 'multipart/related;boundary=":"'},
            Stream(b'--:\r\n'
                   b'\r\n'
                   b'chunk'
                   b'\r\n--:\r\n'
                   b'\r\n'
                   b'two_chunks'
                   b'\r\n--:--'))
        body_parts = []
        while True:
            read_part = b''
            part = await reader.next()
            if part is None:
                break
            while not part.at_eof():
                chunk = await part.read_chunk(5)
                assert chunk
                read_part += chunk
            body_parts.append(read_part)
        assert body_parts == [b'chunk', b'two_chunks']

    async def test_reading_skips_prelude(self):
        # Text before the first boundary (the prelude) must be ignored.
        reader = aiohttp.MultipartReader(
            {CONTENT_TYPE: 'multipart/related;boundary=":"'},
            Stream(b'Multi-part data is not supported.\r\n'
                   b'\r\n'
                   b'--:\r\n'
                   b'\r\n'
                   b'test\r\n'
                   b'--:\r\n'
                   b'\r\n'
                   b'passed\r\n'
                   b'--:--'))
        first = await reader.next()
        assert isinstance(first, aiohttp.BodyPartReader)
        second = await reader.next()
        assert first.at_eof()
        assert not second.at_eof()
async def test_writer(writer):
    """A freshly-created writer has the fixture boundary and no body."""
    assert writer.boundary == ':'
    assert writer.size == 0
async def test_writer_serialize_io_chunk(buf, stream, writer):
    """A BytesIO part is serialized as octet-stream with its exact length."""
    payload_io = io.BytesIO(b'foobarbaz')
    writer.append(payload_io)
    await writer.write(stream)
    expected = (b'--:\r\nContent-Type: application/octet-stream'
                b'\r\nContent-Length: 9\r\n\r\nfoobarbaz\r\n--:--\r\n')
    assert buf == expected
async def test_writer_serialize_json(buf, stream, writer):
    """append_json() serializes with ensure_ascii escapes for non-ASCII."""
    writer.append_json({'привет': 'мир'})
    await writer.write(stream)
    expected = (b'{"\\u043f\\u0440\\u0438\\u0432\\u0435\\u0442":'
                b' "\\u043c\\u0438\\u0440"}')
    assert expected in buf
async def test_writer_serialize_form(buf, stream, writer):
    """append_form() with a pair list keeps duplicate keys and order."""
    pairs = [('foo', 'bar'), ('foo', 'baz'), ('boo', 'zoo')]
    writer.append_form(pairs)
    await writer.write(stream)
    assert b'foo=bar&foo=baz&boo=zoo' in buf
async def test_writer_serialize_form_dict(buf, stream, writer):
    """append_form() with a dict percent-encodes non-ASCII values."""
    fields = {'hello': 'мир'}
    writer.append_form(fields)
    await writer.write(stream)
    assert b'hello=%D0%BC%D0%B8%D1%80' in buf
async def test_writer_write(buf, stream, writer):
    """Serialize text, JSON, two urlencoded forms and a nested multipart,
    then verify the exact wire format including the closing boundary."""
    writer.append('foo-bar-baz')
    writer.append_json({'test': 'passed'})
    writer.append_form({'test': 'passed'})
    writer.append_form([('one', 1), ('two', 2)])
    sub_multipart = aiohttp.MultipartWriter(boundary='::')
    sub_multipart.append('nested content')
    sub_multipart.headers['X-CUSTOM'] = 'test'
    writer.append(sub_multipart)
    await writer.write(stream)
    # Each part is '--<boundary>', headers, blank line, payload; the whole
    # message ends with the closing '--:--' delimiter.
    assert (
        (b'--:\r\n'
         b'Content-Type: text/plain; charset=utf-8\r\n'
         b'Content-Length: 11\r\n\r\n'
         b'foo-bar-baz'
         b'\r\n'
         b'--:\r\n'
         b'Content-Type: application/json\r\n'
         b'Content-Length: 18\r\n\r\n'
         b'{"test": "passed"}'
         b'\r\n'
         b'--:\r\n'
         b'Content-Type: application/x-www-form-urlencoded\r\n'
         b'Content-Length: 11\r\n\r\n'
         b'test=passed'
         b'\r\n'
         b'--:\r\n'
         b'Content-Type: application/x-www-form-urlencoded\r\n'
         b'Content-Length: 11\r\n\r\n'
         b'one=1&two=2'
         b'\r\n'
         b'--:\r\n'
         b'Content-Type: multipart/mixed; boundary="::"\r\n'
         b'X-CUSTOM: test\r\nContent-Length: 93\r\n\r\n'
         b'--::\r\n'
         b'Content-Type: text/plain; charset=utf-8\r\n'
         b'Content-Length: 14\r\n\r\n'
         b'nested content\r\n'
         b'--::--\r\n'
         b'\r\n'
         b'--:--\r\n') == bytes(buf))
async def test_writer_write_no_close_boundary(buf, stream):
    """With close_boundary=False the trailing '--:--' must be omitted,
    so more parts could be appended to the same stream later."""
    writer = aiohttp.MultipartWriter(boundary=':')
    writer.append('foo-bar-baz')
    writer.append_json({'test': 'passed'})
    writer.append_form({'test': 'passed'})
    writer.append_form([('one', 1), ('two', 2)])
    await writer.write(stream, close_boundary=False)
    assert (
        (b'--:\r\n'
         b'Content-Type: text/plain; charset=utf-8\r\n'
         b'Content-Length: 11\r\n\r\n'
         b'foo-bar-baz'
         b'\r\n'
         b'--:\r\n'
         b'Content-Type: application/json\r\n'
         b'Content-Length: 18\r\n\r\n'
         b'{"test": "passed"}'
         b'\r\n'
         b'--:\r\n'
         b'Content-Type: application/x-www-form-urlencoded\r\n'
         b'Content-Length: 11\r\n\r\n'
         b'test=passed'
         b'\r\n'
         b'--:\r\n'
         b'Content-Type: application/x-www-form-urlencoded\r\n'
         b'Content-Length: 11\r\n\r\n'
         b'one=1&two=2'
         b'\r\n') == bytes(buf))
async def test_writer_serialize_with_content_encoding_gzip(buf, stream,
                                                           writer):
    """Appending with Content-Encoding: gzip compresses the payload."""
    writer.append('Time to Relax!', {CONTENT_ENCODING: 'gzip'})
    await writer.write(stream)
    headers, message = bytes(buf).split(b'\r\n\r\n', 1)
    assert headers == (b'--:\r\nContent-Encoding: gzip\r\n'
                       b'Content-Type: text/plain; charset=utf-8')
    # Undo the gzip framing (wbits=16+MAX_WBITS selects gzip decoding)
    # and compare against the original plaintext.
    inflater = zlib.decompressobj(wbits=16 + zlib.MAX_WBITS)
    body = inflater.decompress(message.split(b'\r\n')[0])
    body += inflater.flush()
    assert body == b'Time to Relax!'
async def test_writer_serialize_with_content_encoding_deflate(buf, stream,
                                                              writer):
    """Appending with Content-Encoding: deflate emits raw-deflate bytes."""
    writer.append('Time to Relax!', {CONTENT_ENCODING: 'deflate'})
    await writer.write(stream)
    headers, message = bytes(buf).split(b'\r\n\r\n', 1)
    assert headers == (b'--:\r\nContent-Encoding: deflate\r\n'
                       b'Content-Type: text/plain; charset=utf-8')
    expected = b'\x0b\xc9\xccMU(\xc9W\x08J\xcdI\xacP\x04\x00\r\n--:--\r\n'
    assert message == expected
async def test_writer_serialize_with_content_encoding_identity(buf, stream,
                                                               writer):
    """'identity' encoding passes the bytes through untouched."""
    raw = b'\x0b\xc9\xccMU(\xc9W\x08J\xcdI\xacP\x04\x00'
    writer.append(raw, {CONTENT_ENCODING: 'identity'})
    await writer.write(stream)
    headers, message = bytes(buf).split(b'\r\n\r\n', 1)
    assert headers == (b'--:\r\nContent-Encoding: identity\r\n'
                       b'Content-Type: application/octet-stream\r\n'
                       b'Content-Length: 16')
    assert message.split(b'\r\n')[0] == raw
def test_writer_serialize_with_content_encoding_unknown(buf, stream,
                                                        writer):
    # An unsupported Content-Encoding must be rejected at append() time.
    with pytest.raises(RuntimeError):
        writer.append('Time to Relax!', {CONTENT_ENCODING: 'snappy'})
async def test_writer_with_content_transfer_encoding_base64(buf, stream,
                                                            writer):
    """base64 transfer encoding wraps the payload and sets the header."""
    writer.append('Time to Relax!', {CONTENT_TRANSFER_ENCODING: 'base64'})
    await writer.write(stream)
    headers, message = bytes(buf).split(b'\r\n\r\n', 1)
    assert headers == (b'--:\r\nContent-Transfer-Encoding: base64\r\n'
                       b'Content-Type: text/plain; charset=utf-8')
    assert message.split(b'\r\n')[0] == b'VGltZSB0byBSZWxheCE='
async def test_writer_content_transfer_encoding_quote_printable(buf, stream,
                                                                writer):
    """quoted-printable transfer encoding escapes non-ASCII characters."""
    writer.append('Привет, мир!',
                  {CONTENT_TRANSFER_ENCODING: 'quoted-printable'})
    await writer.write(stream)
    headers, message = bytes(buf).split(b'\r\n\r\n', 1)
    assert headers == (b'--:\r\nContent-Transfer-Encoding: quoted-printable\r\n'
                       b'Content-Type: text/plain; charset=utf-8')
    expected = (b'=D0=9F=D1=80=D0=B8=D0=B2=D0=B5=D1=82,'
                b' =D0=BC=D0=B8=D1=80!')
    assert message.split(b'\r\n')[0] == expected
def test_writer_content_transfer_encoding_unknown(buf, stream, writer):
    # An unsupported Content-Transfer-Encoding is rejected at append() time.
    with pytest.raises(RuntimeError):
        writer.append('Time to Relax!', {CONTENT_TRANSFER_ENCODING: 'unknown'})
class TestMultipartWriter:
    """Behavioral tests for MultipartWriter construction and appending."""

    def test_default_subtype(self, writer):
        mt = parse_mimetype(writer.headers.get(CONTENT_TYPE))
        assert mt.type == 'multipart'
        assert mt.subtype == 'mixed'

    def test_unquoted_boundary(self):
        w = aiohttp.MultipartWriter(boundary='abc123')
        assert w.headers == {CONTENT_TYPE: 'multipart/mixed; boundary=abc123'}

    def test_quoted_boundary(self):
        # Characters special in a boundary get quoted and escaped.
        w = aiohttp.MultipartWriter(boundary=R'\"')
        assert w.headers == {CONTENT_TYPE: R'multipart/mixed; boundary="\\\""'}

    def test_bad_boundary(self):
        # Non-ASCII and control characters are not valid boundary chars.
        with pytest.raises(ValueError):
            aiohttp.MultipartWriter(boundary='тест')
        with pytest.raises(ValueError):
            aiohttp.MultipartWriter(boundary='test\n')

    def test_default_headers(self, writer):
        assert writer.headers == {CONTENT_TYPE: 'multipart/mixed; boundary=":"'}

    def test_iter_parts(self, writer):
        for text in ('foo', 'bar', 'baz'):
            writer.append(text)
        assert len(list(writer)) == 3

    def test_append(self, writer):
        assert len(writer) == 0
        writer.append('hello, world!')
        assert len(writer) == 1
        assert isinstance(writer._parts[0][0], payload.Payload)

    def test_append_with_headers(self, writer):
        writer.append('hello, world!', {'x-foo': 'bar'})
        assert len(writer) == 1
        part_headers = writer._parts[0][0].headers
        assert 'x-foo' in part_headers
        assert part_headers['x-foo'] == 'bar'

    def test_append_json(self, writer):
        writer.append_json({'foo': 'bar'})
        assert len(writer) == 1
        assert writer._parts[0][0].headers[CONTENT_TYPE] == 'application/json'

    def test_append_part(self, writer):
        part = payload.get_payload(
            'test', headers={CONTENT_TYPE: 'text/plain'})
        writer.append(part, {CONTENT_TYPE: 'test/passed'})
        assert len(writer) == 1
        assert writer._parts[0][0].headers[CONTENT_TYPE] == 'test/passed'

    def test_append_json_overrides_content_type(self, writer):
        writer.append_json({'foo': 'bar'}, {CONTENT_TYPE: 'test/passed'})
        assert len(writer) == 1
        assert writer._parts[0][0].headers[CONTENT_TYPE] == 'test/passed'

    def test_append_form(self, writer):
        writer.append_form({'foo': 'bar'}, {CONTENT_TYPE: 'test/passed'})
        assert len(writer) == 1
        assert writer._parts[0][0].headers[CONTENT_TYPE] == 'test/passed'

    def test_append_multipart(self, writer):
        nested = aiohttp.MultipartWriter(boundary=':')
        nested.append_json({'foo': 'bar'})
        writer.append(nested, {CONTENT_TYPE: 'test/passed'})
        assert len(writer) == 1
        assert writer._parts[0][0].headers[CONTENT_TYPE] == 'test/passed'

    async def test_write(self, writer, stream):
        # Writing an empty multipart body must not raise.
        await writer.write(stream)

    def test_with(self):
        with aiohttp.MultipartWriter(boundary=':') as writer:
            writer.append('foo')
            writer.append(b'bar')
            writer.append_json({'baz': True})
        assert len(writer) == 3

    def test_append_int_not_allowed(self):
        with pytest.raises(TypeError):
            with aiohttp.MultipartWriter(boundary=':') as writer:
                writer.append(1)

    def test_append_float_not_allowed(self):
        with pytest.raises(TypeError):
            with aiohttp.MultipartWriter(boundary=':') as writer:
                writer.append(1.1)

    def test_append_none_not_allowed(self):
        with pytest.raises(TypeError):
            with aiohttp.MultipartWriter(boundary=':') as writer:
                writer.append(None)
async def test_async_for_reader(loop):
    """Drive a three-level nested multipart message via ``async for``,
    checking every leaf part (JSON, plain, gzip, no-epilogue) in order."""
    data = [
        {"test": "passed"},
        42,
        b'plain text',
        b'aiohttp\n',
        b'no epilogue']
    reader = aiohttp.MultipartReader(
        headers={CONTENT_TYPE: 'multipart/mixed; boundary=":"'},
        content=Stream(b'\r\n'.join([
            b'--:',
            b'Content-Type: application/json',
            b'',
            json.dumps(data[0]).encode(),
            b'--:',
            b'Content-Type: application/json',
            b'',
            json.dumps(data[1]).encode(),
            b'--:',
            b'Content-Type: multipart/related; boundary="::"',
            b'',
            b'--::',
            b'Content-Type: text/plain',
            b'',
            data[2],
            b'--::',
            b'Content-Disposition: attachment; filename="aiohttp"',
            b'Content-Type: text/plain',
            b'Content-Length: 28',
            b'Content-Encoding: gzip',
            b'',
            # gzip-compressed form of data[3] (b'aiohttp\n').
            b'\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\x03K\xcc\xcc\xcf())'
            b'\xe0\x02\x00\xd6\x90\xe2O\x08\x00\x00\x00',
            b'--::',
            b'Content-Type: multipart/related; boundary=":::"',
            b'',
            b'--:::',
            b'Content-Type: text/plain',
            b'',
            data[4],
            b'--:::--',
            b'--::--',
            b'',
            b'--:--',
            b''])))
    idata = iter(data)

    async def check(reader):
        # Recurse into nested readers; compare each leaf against `data`
        # in document order.
        async for part in reader:
            if isinstance(part, aiohttp.BodyPartReader):
                if part.headers[CONTENT_TYPE] == 'application/json':
                    assert next(idata) == (await part.json())
                else:
                    assert next(idata) == await part.read(decode=True)
            else:
                await check(part)

    await check(reader)
async def test_async_for_bodypart(loop):
    """A BodyPartReader supports ``async for`` chunk iteration."""
    part = aiohttp.BodyPartReader(
        boundary=b'--:',
        headers={},
        content=Stream(b'foobarbaz\r\n--:--'))
    async for chunk in part:
        assert chunk == b'foobarbaz'
| rutsky/aiohttp | tests/test_multipart.py | Python | apache-2.0 | 38,605 |
// Lexer.cpp
// Copyright (c) 2014 - The Foreseeable Future, zhiayang@gmail.com
// Licensed under the Apache License Version 2.0.
#include <string>
#include <ctype.h>
#include <cassert>
#include <iostream>
#include "../include/parser.h"
// #include "../Utf8String/String.h"
namespace Parser
{
static void skipWhitespace(std::string& line, PosInfo& pos)
{
size_t startpos = line.find_first_not_of(" \t");
if(startpos != std::string::npos)
{
pos.col += startpos;
line = line.substr(startpos);
}
}
// warning: messy function
Token getNextToken(std::string& stream, PosInfo& pos)
{
if(stream.length() == 0)
return Token();
int read = 0;
// first eat all whitespace
skipWhitespace(stream, pos);
Token tok;
tok.posinfo = pos;
// check compound symbols first.
if(stream.find("==") == 0)
{
tok.text = "==";
tok.type = TType::EqualsTo;
read = 2;
}
else if(stream.find(">=") == 0)
{
tok.text = ">=";
tok.type = TType::GreaterEquals;
read = 2;
}
else if(stream.find("<=") == 0)
{
tok.text = "<=";
tok.type = TType::LessThanEquals;
read = 2;
}
else if(stream.find("!=") == 0)
{
tok.text = "!=";
tok.type = TType::NotEquals;
read = 2;
}
else if(stream.find("||") == 0)
{
tok.text = "||";
tok.type = TType::LogicalOr;
read = 2;
}
else if(stream.find("&&") == 0)
{
tok.text = "&&";
tok.type = TType::LogicalAnd;
read = 2;
}
else if(stream.find("->") == 0)
{
tok.text = "->";
tok.type = TType::Arrow;
read = 2;
}
else if(stream.find("//") == 0)
{
tok.text = "//";
std::stringstream ss(stream);
std::getline(ss, tok.text, '\n');
read = tok.text.length();
// pos.line++;
tok.type = TType::Comment;
}
else if(stream.find("<<") == 0)
{
tok.text = "<<";
tok.type = TType::ShiftLeft;
read = 2;
}
else if(stream.find(">>") == 0)
{
tok.text = ">>";
tok.type = TType::ShiftRight;
}
else if(stream.find("++") == 0)
{
tok.text = "++";
tok.type = TType::DoublePlus;
read = 2;
}
else if(stream.find("--") == 0)
{
tok.text = "--";
tok.type = TType::DoubleMinus;
read = 2;
}
else if(stream.find("+=") == 0)
{
tok.text = "+=";
tok.type = TType::PlusEq;
read = 2;
}
else if(stream.find("-=") == 0)
{
tok.text = "+=";
tok.type = TType::MinusEq;
read = 2;
}
else if(stream.find("*=") == 0)
{
tok.text = "+=";
tok.type = TType::MultiplyEq;
read = 2;
}
else if(stream.find("/=") == 0)
{
tok.text = "+=";
tok.type = TType::DivideEq;
read = 2;
}
else if(stream.find("%=") == 0)
{
tok.text = "%=";
tok.type = TType::ModEq;
read = 2;
}
else if(stream.find("<<=") == 0)
{
tok.text = "<<=";
tok.type = TType::ShiftLeftEq;
read = 3;
}
else if(stream.find(">>=") == 0)
{
tok.text = ">>=";
tok.type = TType::ShiftRightEq;
read = 3;
}
else if(stream.find("...") == 0)
{
tok.text = "...";
tok.type = TType::Elipsis;
read = 3;
}
else if(stream.find("::") == 0)
{
tok.text = "::";
tok.type = TType::DoubleColon;
read = 2;
}
// unicode stuff
else if(stream.find("ƒ") == 0)
{
tok.text = "func";
tok.type = TType::Func;
read = std::string("ƒ").length();
}
else if(stream.find("fi") == 0)
{
tok.text = "ffi";
tok.type = TType::ForeignFunc;
read = std::string("fi").length();
}
else if(stream.find("÷") == 0)
{
tok.text = "÷";
tok.type = TType::Divide;
read = std::string("÷").length();
}
else if(stream.find("≠") == 0)
{
tok.text = "≠";
tok.type = TType::NotEquals;
read = std::string("≠").length();
}
else if(stream.find("≤") == 0)
{
tok.text = "≤";
tok.type = TType::LessThanEquals;
read = std::string("≤").length();
}
else if(stream.find("≥") == 0)
{
tok.text = "≥";
tok.type = TType::GreaterEquals;
read = std::string("≥").length();
}
else if(isdigit(stream[0]))
{
std::string num;
// read until whitespace
std::stringstream str;
str << stream;
int tmp = 0;
while(isdigit(tmp = str.get()))
num += (char) tmp;
int base = 10;
if(num == "0" && (tmp == 'x' || tmp == 'X' || tmp == 'b' || tmp == 'B'))
{
if(tmp == 'x' || tmp == 'X')
base = 16;
else if(tmp == 'b' || tmp == 'B')
base = 2;
num = "";
while(tmp = str.get(), (base == 16 ? isxdigit(tmp) : (tmp == '0' || tmp == '1')))
num += (char) tmp;
}
if(tmp != '.')
{
// we're an int, set shit and return
tok.type = TType::Integer;
try
{
// makes sure we get the right shit done
std::stoll(num, nullptr, base);
}
catch(std::exception)
{
Parser::parserError("Invalid number '%s'\n", num.c_str());
}
if(base == 16)
num = "0x" + num;
else if(base == 2)
num = "0b" + num;
}
else if(base == 10)
{
num += '.';
while(isdigit(tmp = str.get()))
num += (char) tmp;
tok.type = TType::Decimal;
try
{
// makes sure we get the right shit done
std::stod(num);
}
catch(std::exception)
{
Parser::parserError("Invalid number\n");
}
}
else
{
Parser::parserError("Decimals in hexadecimal representation are not supported");
}
// make sure the next char is not a letter, prevents things like
// 98091824097foobar from working when 'foobar' is a var name
// hack below to let us see the next letter without stringstream eating the space
stream = stream.substr(num.length());
if(stream.length() > 0 && isalpha(stream[0]))
Parser::parserError("Malformed integer literal");
read = 0; // done above
tok.text = num;
}
else if(isalpha(stream[0]) || stream[0] == '_' || !isascii(stream[0]))
{
std::string id;
// read until whitespace
std::stringstream str;
str << stream;
int tmp = 0;
while(tmp = str.get(), (isascii(tmp) && (isalnum(tmp) || tmp == '_')) || !isascii(tmp))
id += (char) tmp;
read = id.length();
tok.text = id;
// check for keywords
if(id == "class") tok.type = TType::Class;
else if(id == "func") tok.type = TType::Func;
else if(id == "import") tok.type = TType::Import;
else if(id == "var") tok.type = TType::Var;
else if(id == "val") tok.type = TType::Val;
else if(id == "let") tok.type = TType::Val;
else if(id == "for") tok.type = TType::For;
else if(id == "while") tok.type = TType::While;
else if(id == "if") tok.type = TType::If;
else if(id == "else") tok.type = TType::Else;
else if(id == "return") tok.type = TType::Return;
else if(id == "as") { tok.type = TType::As; if(tmp == '!') { read++; tok.text = "as!"; } }
else if(id == "is") tok.type = TType::Is;
else if(id == "switch") tok.type = TType::Switch;
else if(id == "case") tok.type = TType::Case;
else if(id == "enum") tok.type = TType::Enum;
else if(id == "ffi") tok.type = TType::ForeignFunc;
else if(id == "struct") tok.type = TType::Struct;
else if(id == "true") tok.type = TType::True;
else if(id == "false") tok.type = TType::False;
else if(id == "static") tok.type = TType::Static;
else if(id == "break") tok.type = TType::Break;
else if(id == "continue") tok.type = TType::Continue;
else if(id == "do") tok.type = TType::Do;
else if(id == "loop") tok.type = TType::Loop;
else if(id == "defer") tok.type = TType::Defer;
else if(id == "public") tok.type = TType::Public;
else if(id == "private") tok.type = TType::Private;
else if(id == "internal") tok.type = TType::Internal;
else if(id == "alloc") tok.type = TType::Alloc;
else if(id == "dealloc") tok.type = TType::Dealloc;
else if(id == "typeof") tok.type = TType::Typeof;
else if(id == "get") tok.type = TType::Get;
else if(id == "set") tok.type = TType::Set;
else if(id == "module") tok.type = TType::Module;
else if(id == "namespace") tok.type = TType::Namespace;
else if(id == "extension") tok.type = TType::Extension;
else if(id == "typealias") tok.type = TType::TypeAlias;
else if(id == "override") tok.type = TType::Override;
else tok.type = TType::Identifier;
}
else if(stream[0] == '"')
{
// parse a string literal
std::stringstream ss;
unsigned long i = 1;
for(; stream[i] != '"'; i++)
{
if(stream[i] == '\\')
{
i++;
switch(stream[i])
{
case 'n': ss << "\n"; break;
case 'b': ss << "\b"; break;
case 'r': ss << "\r"; break;
case '\\': ss << "\\"; break;
}
continue;
}
ss << stream[i];
if(i == stream.size() - 1 || stream[i] == '\n')
Parser::parserError("Expected closing '\"'");
}
tok.type = TType::StringLiteral;
tok.text = "_" + ss.str(); // HACK: Parser checks for string length > 0, so if we have an empty string we
// need something here.
read = i + 1;
}
else if(!isalnum(stream[0]))
{
// check the first char
switch(stream[0])
{
// for single-char things
case '\n': tok.type = TType::NewLine; pos.line++; break;
case '{': tok.type = TType::LBrace; break;
case '}': tok.type = TType::RBrace; break;
case '(': tok.type = TType::LParen; break;
case ')': tok.type = TType::RParen; break;
case '[': tok.type = TType::LSquare; break;
case ']': tok.type = TType::RSquare; break;
case '<': tok.type = TType::LAngle; break;
case '>': tok.type = TType::RAngle; break;
case '+': tok.type = TType::Plus; break;
case '-': tok.type = TType::Minus; break;
case '*': tok.type = TType::Asterisk; break;
case '/': tok.type = TType::Divide; break;
case '\'': tok.type = TType::SQuote; break;
case '.': tok.type = TType::Period; break;
case ',': tok.type = TType::Comma; break;
case ':': tok.type = TType::Colon; break;
case '=': tok.type = TType::Equal; break;
case '?': tok.type = TType::Question; break;
case '!': tok.type = TType::Exclamation; break;
case ';': tok.type = TType::Semicolon; break;
case '&': tok.type = TType::Ampersand; break;
case '%': tok.type = TType::Percent; break;
case '|': tok.type = TType::Pipe; break;
case '@': tok.type = TType::At; break;
case '#': tok.type = TType::Pound; break;
}
tok.text = stream[0];
read = 1;
}
else
{
// delete ret;
Parser::parserError("Unknown token '%c'", stream[0]);
}
stream = stream.substr(read);
if(tok.type != TType::NewLine)
{
tok.posinfo.col += read;
pos.col += read;
}
else
{
tok.posinfo.col = 1;
pos.col = 1;
}
return tok;
}
}
| Philpax/flax | source/Parser/Lexer.cpp | C++ | apache-2.0 | 10,743 |
define(['jquery'],
function($){
var internals = {};
internals.createTooltip = function(){
internals.tooltip = internals.settings.el.find('.tooltip');
internals.onLoad();
};
internals.updateTooltip = function(e){
if (e.countyDisplay){
internals.tooltip.html(e.statistics.location + '<br />' + e.statistics.state);
}
else{
internals.tooltip.html(e.statistics.location);
}
};
internals.showTooltip = function(){
internals.settings.el.css('opacity','1');
};
internals.hideTooltip = function(){
internals.settings.el.css('opacity','0');
};
internals.moveTooltip = function(e){
var width = internals.tooltip.outerWidth();
var height = internals.tooltip.outerHeight();
var topMargin = -(height + 20);
var leftMargin = -(width/2);
if (e.hoverPosition.x < 110){
leftMargin = -10;
}
else if (e.hoverPosition.x > $('.map').outerWidth() - 110){
leftMargin = -(width - 10);
}
if (e.hoverPosition.y < (height + 20)){
internals.settings.el.addClass('display-under');
topMargin = 0;
}
else{
internals.settings.el.removeClass('display-under');
}
internals.settings.el.css({
'top': e.hoverPosition.y,
'left': e.hoverPosition.x
});
internals.tooltip.css({
'margin-top': topMargin,
'margin-left': leftMargin
});
};
internals.onLoad = function(){
$(internals.self).trigger('load');
};
return function (options){
var defaults = {
el: $('.tooltip-wrapper'),
includeGeocoder: true
};
internals.settings = $.extend(true,defaults,options);
internals.self = this;
this.init = function(){
internals.createTooltip();
};
$(internals.settings.data).on('tooltip-show',internals.showTooltip);
$(internals.settings.data).on('tooltip-hide',internals.hideTooltip);
$(internals.settings.data).on('select',internals.updateTooltip);
$(internals.settings.data).on('hover-position-change',internals.moveTooltip);
};
}); | ssylvia/living-wage-map | src/javascript/ui/Tooltip.js | JavaScript | apache-2.0 | 2,186 |
// ----------------------------------------------------------------------------
// Copyright (c) Microsoft Corporation. All rights reserved.
// ----------------------------------------------------------------------------
/// <reference path="C:\Program Files (x86)\Microsoft SDKs\Windows\v8.0\ExtensionSDKs\Microsoft.WinJS.1.0\1.0\DesignTime\CommonConfiguration\Neutral\Microsoft.WinJS.1.0\js\base.js" />
/// <reference path="C:\Program Files (x86)\Microsoft SDKs\Windows\v8.0\ExtensionSDKs\Microsoft.WinJS.1.0\1.0\DesignTime\CommonConfiguration\Neutral\Microsoft.WinJS.1.0\js\ui.js" />
/// <reference path="..\..\js\MobileServices.Internals.js" />
/// <reference path="..\..\generated\Tests.js" />
// Functional tests for the push registration pipeline. Each test talks to the
// live service via $getClient() and cross-checks three views of registration
// state after every operation: the service's listRegistrations result, the
// client's local-storage registration map, and the cached channel URI.
$testGroup('Push')
    .functional()
    .tag('push')
    .tests(
        // Baseline: clear any leftover registrations for both channel URIs so
        // later tests start from a known-empty state.
        $test('InitialUnregisterAll')
            .description('Unregister all registrations with both the default and updated channel. Ensure no registrations still exist for either.')
            .checkAsync(function () {
                var client = $getClient();
                var channelUri = defaultChannel;
                return client.push.unregisterAll(channelUri)
                    .then(
                    function () {
                        return client.push.registrationManager.pushHttpClient.listRegistrations(channelUri);
                    })
                    .then(
                    function (registrations) {
                        $assert.isTrue(Array.isArray(registrations), 'Expect to get an array from listRegistrations');
                        $assert.areEqual(registrations.length, 0, 'Expect no registrations to be returned after unregisterAll');
                        $assert.areEqual(client.push.registrationManager.localStorageManager.registrations.size, 0, 'Expect local storage to contain no registrations after unregisterAll');
                        channelUri = updatedChannel;
                        return client.push.unregisterAll(channelUri);
                    })
                    .then(
                    function () {
                        return client.push.registrationManager.pushHttpClient.listRegistrations(channelUri);
                    })
                    .then(
                    function (registrations) {
                        $assert.isTrue(Array.isArray(registrations), 'Expect to get an array from listRegistrations');
                        $assert.areEqual(registrations.length, 0, 'Expect no registrations to be returned after unregisterAll');
                        $assert.areEqual(client.push.registrationManager.localStorageManager.registrations.size, 0, 'Expect local storage to contain no registrations after unregisterAll');
                        return WinJS.Promise.wrap();
                    });
            }),
        // Round-trip a native (non-template) registration and verify service
        // and local-storage state after both register and unregister.
        $test('RegisterNativeUnregisterNative')
            .description('Register a native channel followed by unregistering it.')
            .checkAsync(function () {
                var client = $getClient();
                var channelUri = defaultChannel;
                return client.push.registerNative(channelUri)
                    .then(
                    function () {
                        return client.push.registrationManager.pushHttpClient.listRegistrations(channelUri);
                    })
                    .then(
                    function (registrations) {
                        $assert.isTrue(Array.isArray(registrations), 'Expect to get an array from listRegistrations');
                        $assert.areEqual(registrations.length, 1, 'Expect 1 registration to be returned after register');
                        $assert.areEqual(client.push.registrationManager.localStorageManager.registrations.size, 1, 'Expect local storage to contain 1 registration after register');
                        var localRegistration = client.push.registrationManager.localStorageManager.getFirstRegistrationByRegistrationId(registrations[0].registrationId);
                        $assert.isTrue(localRegistration, 'Expect local storage to have the registrationId returned from service');
                        $assert.areEqual(client.push.registrationManager.localStorageManager.channelUri, registrations[0].deviceId, 'Local storage should have channelUri from returned registration');
                        return client.push.unregisterNative();
                    })
                    .then(
                    function () {
                        return client.push.registrationManager.pushHttpClient.listRegistrations(channelUri);
                    })
                    .then(
                    function (registrations) {
                        $assert.isTrue(Array.isArray(registrations), 'Expect to get an array from listRegistrations');
                        $assert.areEqual(registrations.length, 0, 'Expect no registrations to be returned after unregisterNative');
                        $assert.areEqual(client.push.registrationManager.localStorageManager.registrations.size, 0, 'Expect local storage to contain no registrations after unregisterNative');
                        return WinJS.Promise.wrap();
                    });
            }),
        // Round-trip a template registration, checking that every field the
        // service echoes back (channel, headers, tags, template name/body)
        // matches what was registered.
        $test('RegisterTemplateUnregisterTemplate')
            .description('Register a template followed by unregistering it.')
            .checkAsync(function () {
                var client = $getClient();
                var channelUri = defaultChannel;
                return client.push.registerTemplate(channelUri, templateBody, templateName, defaultHeaders, defaultTags)
                    .then(
                    function () {
                        return client.push.registrationManager.pushHttpClient.listRegistrations(channelUri);
                    })
                    .then(
                    function (registrations) {
                        $assert.isTrue(Array.isArray(registrations), 'Expect to get an array from listRegistrations');
                        $assert.areEqual(registrations.length, 1, 'Expect 1 registration to be returned after register');
                        $assert.areEqual(client.push.registrationManager.localStorageManager.registrations.size, 1, 'Expect local storage to contain 1 registration after register');
                        var localRegistration = client.push.registrationManager.localStorageManager.getFirstRegistrationByRegistrationId(registrations[0].registrationId);
                        $assert.isTrue(localRegistration, 'Expect local storage to have the registrationId returned from service');
                        $assert.areEqual(client.push.registrationManager.localStorageManager.channelUri, registrations[0].deviceId, 'Local storage should have channelUri from returned registration');
                        $assert.areEqual(registrations[0].deviceId, channelUri, 'Returned registration should use channelUri sent from registered template');
                        Object.getOwnPropertyNames(registrations[0].headers).forEach(function (header) {
                            $assert.areEqual(registrations[0].headers[header], defaultHeaders[header], 'Each header returned by registration should match what was registered.');
                        });
                        $assert.areEqual(Object.getOwnPropertyNames(registrations[0].headers).length, Object.getOwnPropertyNames(defaultHeaders).length, 'Returned registration should contain same number of headers sent from registered template');
                        $assert.areEqual(registrations[0].tags.length, defaultTags.length + 1, 'Returned registration should contain tags sent from registered template and 1 extra for installationId');
                        // TODO: Re-enable when .Net runtime supports installationID in service
                        //$assert.isTrue(registrations[0].tags.indexOf(WindowsAzure.MobileServiceClient._applicationInstallationId) > -1, 'Expected the installationID in the tags');
                        $assert.areEqual(registrations[0].templateName, templateName, 'Expected returned registration to use templateName it was fed');
                        $assert.areEqual(registrations[0].templateBody, templateBody, 'Expected returned registration to use templateBody it was fed');
                        $assert.areEqual(client.push.registrationManager.localStorageManager.getRegistration(templateName).registrationId, registrations[0].registrationId, 'Expected the stored registrationId to equal the one returned from service');
                        return client.push.unregisterTemplate(templateName);
                    })
                    .then(
                    function () {
                        return client.push.registrationManager.pushHttpClient.listRegistrations(channelUri);
                    })
                    .then(
                    function (registrations) {
                        $assert.isTrue(Array.isArray(registrations), 'Expect to get an array from listRegistrations');
                        $assert.areEqual(registrations.length, 0, 'Expect no registrations to be returned after unregisterTemplate');
                        $assert.areEqual(client.push.registrationManager.localStorageManager.registrations.size, 0, 'Expect local storage to contain no registrations after unregisterTemplate');
                    });
            }),
        // Register a template, force a local-storage refresh, then re-register
        // the same template name under a new channel URI; the service and local
        // storage must both reflect the updated channel afterwards.
        $test('RegisterRefreshRegisterWithUpdatedChannel')
            .description('Register a template followed by a refresh of the client local storage followed by updated register of same template name.')
            .checkAsync(function () {
                var client = $getClient();
                var channelUri = defaultChannel;
                return client.push.registerTemplate(channelUri, templateBody, templateName, defaultHeaders, defaultTags)
                    .then(
                    function () {
                        return client.push.registrationManager.pushHttpClient.listRegistrations(channelUri);
                    })
                    .then(
                    function (registrations) {
                        $assert.isTrue(Array.isArray(registrations), 'Expect to get an array from listRegistrations');
                        $assert.areEqual(registrations.length, 1, 'Expect 1 registration to be returned after register');
                        $assert.areEqual(client.push.registrationManager.localStorageManager.registrations.size, 1, 'Expect local storage to contain 1 registration after register');
                        var localRegistration = client.push.registrationManager.localStorageManager.getFirstRegistrationByRegistrationId(registrations[0].registrationId);
                        $assert.isTrue(localRegistration, 'Expect local storage to have the registrationId returned from service');
                        $assert.areEqual(client.push.registrationManager.localStorageManager.channelUri, registrations[0].deviceId, 'Local storage should have channelUri from returned registration');
                        $assert.areEqual(registrations[0].deviceId, channelUri, 'Returned registration should use channelUri sent from registered template');
                        Object.getOwnPropertyNames(registrations[0].headers).forEach(function (header) {
                            $assert.areEqual(registrations[0].headers[header], defaultHeaders[header], 'Each header returned by registration should match what was registered.');
                        });
                        $assert.areEqual(Object.getOwnPropertyNames(registrations[0].headers).length, Object.getOwnPropertyNames(defaultHeaders).length, 'Returned registration should contain same number of headers sent from registered template');
                        $assert.areEqual(registrations[0].tags.length, defaultTags.length + 1, 'Returned registration should contain tags sent from registered template and 1 extra for installationId');
                        // TODO: Re-enable when .Net runtime supports installationID in service
                        //$assert.isTrue(registrations[0].tags.indexOf(WindowsAzure.MobileServiceClient._applicationInstallationId) > -1, 'Expected the installationID in the tags');
                        $assert.areEqual(registrations[0].templateName, templateName, 'Expected returned registration to use templateName it was fed');
                        $assert.areEqual(registrations[0].templateBody, templateBody, 'Expected returned registration to use templateBody it was fed');
                        $assert.areEqual(client.push.registrationManager.localStorageManager.getRegistration(templateName).registrationId, registrations[0].registrationId, 'Expected the stored registrationId to equal the one returned from service');
                        // Simulate a stale local cache, then re-register with the new channel.
                        client.push.registrationManager.localStorageManager.isRefreshNeeded = true;
                        channelUri = updatedChannel;
                        return client.push.registerTemplate(channelUri, templateBody, templateName, defaultHeaders, defaultTags);
                    })
                    .then(
                    function () {
                        return client.push.registrationManager.pushHttpClient.listRegistrations(channelUri);
                    })
                    .then(
                    function (registrations) {
                        $assert.isTrue(Array.isArray(registrations), 'Expect to get an array from listRegistrations');
                        $assert.areEqual(registrations.length, 1, 'Expect 1 registration to be returned after register');
                        $assert.areEqual(client.push.registrationManager.localStorageManager.registrations.size, 1, 'Expect local storage to contain 1 registration after register');
                        var localRegistration = client.push.registrationManager.localStorageManager.getFirstRegistrationByRegistrationId(registrations[0].registrationId);
                        $assert.isTrue(localRegistration, 'Expect local storage to have the registrationId returned from service');
                        $assert.areEqual(client.push.registrationManager.localStorageManager.channelUri, registrations[0].deviceId, 'Local storage should have channelUri from returned registration');
                        $assert.areEqual(registrations[0].deviceId, channelUri, 'Returned registration should use channelUri sent from registered template');
                        Object.getOwnPropertyNames(registrations[0].headers).forEach(function (header) {
                            $assert.areEqual(registrations[0].headers[header], defaultHeaders[header], 'Each header returned by registration should match what was registered.');
                        });
                        $assert.areEqual(Object.getOwnPropertyNames(registrations[0].headers).length, Object.getOwnPropertyNames(defaultHeaders).length, 'Returned registration should contain same number of headers sent from registered template');
                        $assert.areEqual(registrations[0].tags.length, defaultTags.length + 1, 'Returned registration should contain tags sent from registered template and 1 extra for installationId');
                        // TODO: Re-enable when .Net runtime supports installationID in service
                        //$assert.isTrue(registrations[0].tags.indexOf(WindowsAzure.MobileServiceClient._applicationInstallationId) > -1, 'Expected the installationID in the tags');
                        $assert.areEqual(registrations[0].templateName, templateName, 'Expected returned registration to use templateName it was fed');
                        $assert.areEqual(registrations[0].templateBody, templateBody, 'Expected returned registration to use templateBody it was fed');
                        $assert.areEqual(client.push.registrationManager.localStorageManager.getRegistration(templateName).registrationId, registrations[0].registrationId, 'Expected the stored registrationId to equal the one returned from service');
                        $assert.areEqual(registrations[0].deviceId, updatedChannel, 'Expected the return channelUri to be the updated one');
                        $assert.areEqual(client.push.registrationManager.localStorageManager.channelUri, updatedChannel, 'Expected localstorage channelUri to be the updated one');
                        return client.push.unregisterTemplate(templateName);
                    })
                    .then(
                    function () {
                        return client.push.registrationManager.pushHttpClient.listRegistrations(channelUri);
                    })
                    .then(
                    function (registrations) {
                        $assert.isTrue(Array.isArray(registrations), 'Expect to get an array from listRegistrations');
                        $assert.areEqual(registrations.length, 0, 'Expect no registrations to be returned after unregisterTemplate');
                        $assert.areEqual(client.push.registrationManager.localStorageManager.registrations.size, 0, 'Expect local storage to contain no registrations after unregisterTemplate');
                    });
            })
    );

// WNS channel URIs used as device identifiers; the two differ only in the
// leading token character so channel-update handling can be exercised.
var defaultChannel = 'https://bn2.notify.windows.com/?token=AgYAAADs42685sa5PFCEy82eYpuG8WCPB098AWHnwR8kNRQLwUwf%2f9p%2fy0r82m4hxrLSQ%2bfl5aNlSk99E4jrhEatfsWgyutFzqQxHcLk0Xun3mufO2G%2fb2b%2ftjQjCjVcBESjWvY%3d';
var updatedChannel = 'https://bn2.notify.windows.com/?token=BgYAAADs42685sa5PFCEy82eYpuG8WCPB098AWHnwR8kNRQLwUwf%2f9p%2fy0r82m4hxrLSQ%2bfl5aNlSk99E4jrhEatfsWgyutFzqQxHcLk0Xun3mufO2G%2fb2b%2ftjQjCjVcBESjWvY%3d';
// WNS toast template payload and identifying name used by the template tests.
var templateBody = '<toast><visual><binding template=\"ToastText01\"><text id=\"1\">$(message)</text></binding></visual></toast>';
var templateName = 'templateForToastWinJS';
var defaultTags = ['fooWinJS', 'barWinJS'];
var defaultHeaders = { 'x-wns-type': 'wns/toast', 'x-wns-ttl': '100000' }; | manimaranm7/azure-mobile-services | sdk/Javascript/test/winJS/tests/winJsOnly/push.js | JavaScript | apache-2.0 | 17,764 |
// Package problems provides an RFC 7807 (https://tools.ietf.org/html/rfc7807)
// compliant implementation of HTTP problem details, which are defined as a
// means to carry machine-readable details of errors in an HTTP response to
// avoid the need to define new error response formats for HTTP APIs.
//
// The problem details specification was designed to allow for schema
// extensions. Because of this, the exposed Problem interface only enforces
// that the required Type and Title fields be set appropriately.
//
// Additionally, this library also ships with default http.HandlerFunc's
// capable of writing problems to http.ResponseWriter's in either of the two
// standard media formats, JSON and XML.
package problems
| moogar0880/problems | doc.go | GO | apache-2.0 | 719 |
/* ###
* IP: GHIDRA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package agent.gdb.manager.impl.cmd;
import agent.gdb.manager.GdbThread;
import agent.gdb.manager.evt.*;
import agent.gdb.manager.impl.*;
import agent.gdb.manager.impl.GdbManagerImpl.Interpreter;
/**
 * Base class for GDB commands that launch a target and complete with the
 * launched inferior's first thread.
 */
public abstract class AbstractLaunchGdbCommand extends AbstractGdbCommand<GdbThread>
		implements MixinResumeInCliGdbCommand<GdbThread> {

	/**
	 * @param manager the manager that will queue and execute this command
	 */
	protected AbstractLaunchGdbCommand(GdbManagerImpl manager) {
		super(manager);
	}

	@Override
	public Interpreter getInterpreter() {
		//return getInterpreter(manager);
		/**
		 * A lot of good event-handling logic is factored in the Mixin interface. However, errors
		 * from CLI commands are catastrophically mishandled or just missed entirely, so we will
		 * still use MI2 for these.
		 */
		return Interpreter.MI2;
	}

	/**
	 * Claims the first thread-created event for this command (after translating
	 * CLI-reported errors) and otherwise defers to the mixin's handling of the
	 * expected "running" state transition.
	 */
	@Override
	public boolean handle(GdbEvent<?> evt, GdbPendingCommand<?> pending) {
		evt = checkErrorViaCli(evt);
		if (evt instanceof GdbThreadCreatedEvent) {
			pending.claim(evt);
		}
		return handleExpectingRunning(evt, pending);
	}

	/**
	 * Completes once the target is running, resolving to the thread from the
	 * first thread-created event observed by this command.
	 */
	@Override
	public GdbThread complete(GdbPendingCommand<?> pending) {
		completeOnRunning(pending);
		// Just take the first thread. Others are considered clones.
		GdbThreadCreatedEvent created = pending.findFirstOf(GdbThreadCreatedEvent.class);
		int tid = created.getThreadId();
		return manager.getThread(tid);
	}
}
| NationalSecurityAgency/ghidra | Ghidra/Debug/Debugger-agent-gdb/src/main/java/agent/gdb/manager/impl/cmd/AbstractLaunchGdbCommand.java | Java | apache-2.0 | 1,907 |
/*
scjurgen@yahoo.com
*/
#include <avr/pgmspace.h>
#include <wire.h>
#include "DS1307.h"
#define DS1307_SEC 0
#define DS1307_MIN 1
#define DS1307_HR 2
#define DS1307_DOW 3
#define DS1307_DATE 4
#define DS1307_MTH 5
#define DS1307_YR 6
#define DS1307_CTRLREG 7
#define DS1307_CTRL_ID 0B1101000 // 0x68 //DS1307
// Define register bit masks
#define DS1307_CLOCKHALT 0B10000000 //0x80
#define DS1307_AMPM 0B01000000
#define DS1307_DATASTART 0x08
// Joins the I2C bus as master. The initial refresh() is commented out, so
// dateTimeBCD holds unspecified data until read()/refresh() is called.
DS1307::DS1307()
{
    Wire.begin();
    //refresh();
}
// Re-reads all seven timekeeping registers from the chip into dateTimeBCD.
void DS1307::refresh()
{
    read();
}
// Requests the 7 timekeeping registers (0x00-0x06): seconds, minutes, hour,
// day of week, day of month, month, year. Values are stored into dateTimeBCD
// exactly as the chip returns them (BCD-encoded, as the commented-out set()
// code and the DS1307_CLOCKHALT mask on the seconds byte indicate).
void DS1307::read(void)
{
    // Reset the chip's register pointer to 0x00 before reading.
    Wire.beginTransmission(DS1307_CTRL_ID);
    Wire.write(0x00);
    Wire.endTransmission();
    Wire.requestFrom(DS1307_CTRL_ID, 7);
    dateTimeBCD.second = Wire.read();
    dateTimeBCD.minute = Wire.read();
    dateTimeBCD.hour = Wire.read();
    dateTimeBCD.dayOfWeek = Wire.read();
    dateTimeBCD.day = Wire.read();
    dateTimeBCD.month = Wire.read();
    dateTimeBCD.year = Wire.read();
}
// Writes the 7 buffered timekeeping registers back to the chip in a single
// transmission starting at register 0x00. dateTimeBCD fields are written
// verbatim, so they must already be BCD-encoded.
void DS1307::write(void)
{
    Wire.beginTransmission(DS1307_CTRL_ID);
    // buffer pointer register
    Wire.write(0x00);
    Wire.write(dateTimeBCD.second);
    Wire.write(dateTimeBCD.minute);
    Wire.write(dateTimeBCD.hour);
    Wire.write(dateTimeBCD.dayOfWeek);
    Wire.write(dateTimeBCD.day);
    Wire.write(dateTimeBCD.month);
    Wire.write(dateTimeBCD.year);
    Wire.endTransmission();
}
/*
void DS1307::set(unsigned char c, unsigned char v) // Update buffer, then update the chip
{
switch(c)
{
case DS1307_SEC:
if(v<60 && v>-1)
{
//preserve existing clock state (running/stopped)
int state=rtc_bcd[DS1307_SEC] & DS1307_CLOCKHALT;
rtc_bcd[DS1307_SEC]=state | (((v / 10)<<4) + (v % 10));
}
break;
case DS1307_MIN:
if(v<60 && v>-1)
{
rtc_bcd[DS1307_MIN]=((v / 10)<<4) + (v % 10);
}
break;
case DS1307_HR:
// TODO : AM/PM 12HR/24HR currently 24 hour mode
if(v<24 && v>-1)
{
rtc_bcd[DS1307_HR]=((v / 10)<<4) + (v % 10);
}
break;
case DS1307_DOW:
if(v<8 && v>-1)
{
rtc_bcd[DS1307_DOW]=v;
}
break;
case DS1307_DATE:
if(v<32 && v>-1)
{
rtc_bcd[DS1307_DATE]=((v / 10)<<4) + (v % 10);
}
break;
case DS1307_MTH:
if(v<13 && v>-1)
{
rtc_bcd[DS1307_MTH]=((v / 10)<<4) + (v % 10);
}
break;
case DS1307_YR:
if(v<99 && v>-1)
{
rtc_bcd[DS1307_YR]=((v / 10)<<4) + (v % 10);
}
break;
} // end switch
write();
}
*/
// Reads the DS1307's 56 bytes of battery-backed SRAM (registers 0x08..0x3F)
// into sram_data, which must have room for at least 56 bytes.
//
// Fixes two bugs in the previous version: Wire.requestFrom() was issued
// inside the per-byte loop (requesting 56 bytes each iteration but consuming
// only one), and a single 56-byte request exceeds the Arduino Wire library's
// 32-byte buffer, so bytes past the buffer limit could never be received.
// The transfer is therefore chunked, re-positioning the chip's register
// pointer before each request.
void DS1307::readSRAM(unsigned char *sram_data) const
{
    const int total = 56;
    const int chunk = 28; // safely below the 32-byte Wire buffer
    for (int offset = 0; offset < total; offset += chunk)
    {
        int count = (total - offset < chunk) ? (total - offset) : chunk;
        // Point the chip's register pointer at the next SRAM offset.
        Wire.beginTransmission(DS1307_CTRL_ID);
        Wire.write(DS1307_DATASTART + offset);
        Wire.endTransmission();
        Wire.requestFrom(DS1307_CTRL_ID, count);
        for (int i = 0; i < count && Wire.available(); i++)
        {
            sram_data[offset + i] = Wire.read();
        }
    }
}
// Writes 56 bytes from sram_data into the DS1307's battery-backed SRAM
// (registers 0x08..0x3F).
//
// Fixes a bug in the previous version: it sent the register pointer plus all
// 56 data bytes in one transmission (57 bytes total), which overflows the
// Arduino Wire library's 32-byte transmit buffer and silently drops the
// excess. The data is now written in chunks, re-positioning the register
// pointer at the start of each transmission.
void DS1307::writeSRAM(const unsigned char *sram_data)
{
    const int total = 56;
    const int chunk = 28; // pointer byte + chunk stays below the 32-byte Wire buffer
    for (int offset = 0; offset < total; offset += chunk)
    {
        int count = (total - offset < chunk) ? (total - offset) : chunk;
        Wire.beginTransmission(DS1307_CTRL_ID);
        Wire.write(DS1307_DATASTART + offset);
        for (int i = 0; i < count; i++)
        {
            Wire.write(sram_data[offset + i]);
        }
        Wire.endTransmission();
    }
}
// Writes the control register (0x07), which configures the chip's clock-out
// pin (see the DS1307_1HZ-style macros — presumably defined in the header;
// not visible in this file).
void DS1307::setControlRegister(unsigned char b)
{
    Wire.beginTransmission(DS1307_CTRL_ID);
    Wire.write(DS1307_CTRLREG);
    Wire.write(b);
    Wire.endTransmission();
}
| scjurgen/jayduino | DS1307/DS1307.cpp | C++ | apache-2.0 | 3,525 |
package com.metaui.tools.socket.client;
import com.metaui.tools.socket.transport.CmdTransport;
import com.metaui.tools.socket.transport.ISocketTransport;
import com.metaui.tools.socket.transport.ITransportEvent;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.net.Socket;
/**
 * Server-side handler for a single client connection (original doc:
 * "服务器端创建与客户端的连接"). Runs its own reader thread that deserializes
 * {@link ISocketTransport} objects off the socket and dispatches them.
 *
 * @author wei_jc
 * @since 1.0.0
 */
public class ServerConnect extends Thread {
    private final Socket socket;
    private final ObjectInputStream in;
    private final ObjectOutputStream out;
    // Optional listener notified of every received transport; may remain null.
    private ITransportEvent event;

    /**
     * Wraps the given connected socket and starts the reader thread.
     * The ObjectOutputStream is created before the ObjectInputStream on
     * purpose: an ObjectInputStream constructor blocks until the peer's
     * stream header arrives, so each side must emit its output header first.
     *
     * @param socket connected client socket; owned (and eventually closed) by this object
     * @throws IOException if the object streams cannot be created
     */
    public ServerConnect(Socket socket) throws IOException {
        this.socket = socket;
        out = new ObjectOutputStream(socket.getOutputStream());
        in = new ObjectInputStream(socket.getInputStream());
        // Start the reader thread.
        start();
    }

    /**
     * Reader loop: deserializes objects until an I/O error, disconnect, or
     * protocol violation occurs, then closes the connection. (The previous
     * version leaked the socket and both streams when the loop exited.)
     */
    @Override
    public void run() {
        try {
            while (true) {
                Object object = in.readObject();
                if (object instanceof ISocketTransport) {
                    handle((ISocketTransport) object);
                } else {
                    throw new RuntimeException("不能识别的对象!");
                }
            }
        } catch (Exception e) {
            // Matches the old behavior: report the failure and stop reading.
            e.printStackTrace();
        } finally {
            closeConnection();
        }
    }

    /**
     * Serializes a transport to the client and flushes the stream.
     *
     * @param transport payload to send
     * @throws IOException if writing to the socket fails
     */
    public void send(ISocketTransport transport) throws IOException {
        out.writeObject(transport);
        out.flush();
    }

    /**
     * Handles one received transport: echoes command responses to stdout and
     * forwards the transport to the registered listener, if any.
     */
    public void handle(ISocketTransport transport) throws Exception {
        if (transport instanceof CmdTransport) {
            CmdTransport cmdTransport = (CmdTransport) transport;
            String response = cmdTransport.getReceiveInfo();
            System.out.println(response);
        }
        if (event != null) {
            event.onTransport(transport);
        }
    }

    /** Registers the listener invoked for every received transport. */
    public void setOnTransport(ITransportEvent event) {
        this.event = event;
    }

    /** Best-effort cleanup of both object streams and the underlying socket. */
    private void closeConnection() {
        try {
            in.close();
        } catch (IOException ignored) {
        }
        try {
            out.close();
        } catch (IOException ignored) {
        }
        try {
            socket.close();
        } catch (IOException ignored) {
        }
    }

    @Override
    public String toString() {
        return socket.getInetAddress().getHostAddress();
    }
}
| weijiancai/metaui | metatools/src/main/java/com/metaui/tools/socket/client/ServerConnect.java | Java | apache-2.0 | 2,316 |
/*
* Copyright 2016 Maroš Šeleng
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
angular.module('symphonia.services')
.factory('SaveAndSendService', function ($q, $log, $cordovaDevice, $cordovaFile, $cordovaEmailComposer, $cordovaFileOpener2) {
var outputData = '';
var format = '';
var tmpName = 'tmpData';
var cacheFolder = undefined;
var savedFileDetails = {
path: undefined,
name: undefined // WITHOUT EXTENSION!!
};
function _open() {
var mime = format === 'xml' ? 'text/xml' : 'application/pdf';
return $cordovaFileOpener2.open(savedFileDetails.path + savedFileDetails.name + '.' + format, mime);
}
function _compose() {
if (savedFileDetails.path !== undefined) {
//file already saved!
return alreadySaved();
} else {
cacheFolder = getCacheDir();
return $cordovaFile.writeFile(cacheFolder, tmpName + '.' + format, outputData, true)
.then(function () {
$log.info('File \'' + tmpName + '.' + format + '\' saved at: \'' + cacheFolder + '\'.');
savedFileDetails.path = cacheFolder;
savedFileDetails.name = tmpName;
return selectAttachment(cacheFolder, tmpName);
}, function (error) {
$log.error('Failed to save file to cache directory:' + error);
return $q.reject('An error occurred while saving the file.');
});
}
}
function _saveFile(filename) {
var saveDestination = $cordovaDevice.getPlatform() === 'iOS' ? cordova.file.dataDirectory : cordova.file.externalDataDirectory;
if (savedFileDetails.path !== undefined && savedFileDetails.path === getCacheDir()) {
//already saved in the cache folder
return alreadyInCache(saveDestination, filename);
} else {
return $cordovaFile.writeFile(saveDestination, filename + '.' + format, outputData, true)
.then(function (success) {
savedFileDetails.path = saveDestination;
savedFileDetails.name = filename;
var message = 'File \'' + filename + '.' + format + '\' saved to \'' + saveDestination + '\'.';
$log.info(message);
return $q.resolve(message);
}, function (error) {
var message = 'Failed to save the file';
$log.error(message + '\n' + error);
return $q.reject(message);
});
}
}
function alreadySaved() {
$log.info('Picking a file \'' + savedFileDetails.name + '.' + format + '\' from \'' + savedFileDetails.path + '\' instead of caching one.');
return selectAttachment(savedFileDetails.path, savedFileDetails.name);
}
function selectAttachment(directory, fileName) {
$log.info('Selecting a file \'' + fileName + '.' + format + '\' from \'' + directory + '\' as an attachment.');
return $cordovaFile.readAsDataURL(directory, fileName + '.' + format)
.then(function (success) {
var data64 = success.split(';base64,').pop();
return openComposer(data64);
}, function (error) {
$log.info('File (\'' + fileName + '.' + format + '\' in \'' + directory + '\') to send NOT read:\n' + error);
return $q.reject('Failed to read the attachment file.');
});
}
// Opens the device e-mail composer with the digitized score attached.
// `data` is the bare base64 payload of the file to attach.
function openComposer(data) {
    // Attachment descriptor format understood by the email-composer plugin:
    // "base64:<file name>//<payload>".
    var attachmentDescriptor = 'base64:' + savedFileDetails.name + '.' + format + '//' + data;
    var mail = {
        app: 'mailto',
        attachments: [
            attachmentDescriptor
        ],
        subject: 'Digitized music scores',
        body: 'This email contains file with music scores, that was produced by the <a href="https://www.symphonia.io">SYMPHONIA.IO</a> service.',
        isHtml: true
    };
    return $cordovaEmailComposer.isAvailable()
        .then(function () {
            // open() always lands in the error callback, success or not, so
            // normalize it back into a resolved promise.
            return $cordovaEmailComposer.open(mail)
                .catch(function () {
                    return $q.resolve();
                });
        }, function () {
            return $q.reject('Email composer not available.');
        });
}
// The export already lives in the cache folder under a temporary name;
// move (and rename) it to its final destination instead of re-writing it.
function alreadyInCache(newDestination, newName) {
    var sourceName = tmpName + '.' + format;
    var targetName = newName + '.' + format;
    return $cordovaFile.moveFile(getCacheDir(), sourceName, newDestination, targetName)
        .then(function () {
            $log.info('File \'' + tmpName + '.' + format + '\' moved from cache and saved to \'' + newDestination + '\' as \'' + newName + '.' + format + '\'.');
            // Remember where the file now lives so later calls can reuse it.
            savedFileDetails.path = newDestination;
            savedFileDetails.name = newName;
            return $q.resolve('File \'' + newName + '.' + format + '\' saved to \'' + newDestination + '\'.');
        }, function (moveError) {
            $log.error('Failed to move file from cache to storage: ' + moveError);
            return $q.reject('Failed to save the file.');
        });
}
// Returns the Cordova platform cache directory used for temporary exports.
function getCacheDir() {
    return cordova.file.cacheDirectory;
}
function fuckIt(buffer) {
return new Blob([buffer], {type: 'application/pdf'});
}
return {
setOutputDataAndFormat: function (data, dataFormat) {
// FIXME: Is this really a good way?
outputData = fuckIt(data);
cacheFolder = undefined;
savedFileDetails.path = undefined;
savedFileDetails.name = undefined;
format = dataFormat == 'musicxml' ? 'xml' : 'pdf';
},
open: _open,
composeEmail: _compose,
saveFile: _saveFile,
showSendButton: function () {
return $cordovaEmailComposer.isAvailable()
}
}
});
| SymphoniaIO/Symphonia.io-Mobile | www/js/service/SaveAndSendService.js | JavaScript | apache-2.0 | 6,208 |
package com.deange.githubstatus.dagger;
import android.app.Application;
import android.content.Context;
import android.content.SharedPreferences;
import android.content.res.Resources;
import com.deange.githubstatus.BuildConfig;
import com.deange.githubstatus.converter.LocalDateTimeConverterFactory;
import com.deange.githubstatus.converter.ModelGsonFactory;
import com.deange.githubstatus.net.GithubStatusApi;
import com.deange.githubstatus.net.ServiceCreator;
import com.deange.githubstatus.net.mock.MockGithubStatusApi;
import com.f2prateek.rx.preferences2.Preference;
import com.f2prateek.rx.preferences2.RxSharedPreferences;
import com.google.gson.Gson;
import com.google.gson.GsonBuilder;
import dagger.Module;
import dagger.Provides;
import okhttp3.OkHttpClient;
import okhttp3.logging.HttpLoggingInterceptor;
import retrofit2.Retrofit;
import retrofit2.adapter.rxjava2.RxJava2CallAdapterFactory;
import retrofit2.converter.gson.GsonConverterFactory;
/**
 * Dagger module providing application-scoped dependencies: Android context and
 * resources, Gson, the OkHttp/Retrofit stack, the GitHub status API, and
 * (Rx)SharedPreferences including the mock-mode flag.
 */
@Module
public class AppModule {

  private final Application application;

  /** Creates a module bound to the given {@link Application}. */
  public static AppModule create(Application application) {
    return new AppModule(application);
  }

  private AppModule(Application application) {
    this.application = application;
  }

  /** The application-level {@link Context}. */
  @Provides
  public Context providesApplicationContext() {
    return application.getApplicationContext();
  }

  @Provides
  public Resources res(Context context) {
    return context.getResources();
  }

  /** Gson configured with the project's model and LocalDateTime adapter factories. */
  @Provides
  public static Gson providesGson() {
    GsonBuilder builder = new GsonBuilder();
    builder.registerTypeAdapterFactory(ModelGsonFactory.create());
    builder.registerTypeAdapterFactory(new LocalDateTimeConverterFactory());
    return builder.create();
  }

  /** HTTP client that logs full request/response bodies in debug builds only. */
  @Provides
  public static OkHttpClient providesOkHttpClient() {
    HttpLoggingInterceptor.Level level = BuildConfig.DEBUG
        ? HttpLoggingInterceptor.Level.BODY
        : HttpLoggingInterceptor.Level.NONE;
    HttpLoggingInterceptor logging = new HttpLoggingInterceptor();
    logging.setLevel(level);
    return new OkHttpClient.Builder()
        .addInterceptor(logging)
        .build();
  }

  /** Retrofit instance pointed at the GitHub status endpoint, with Gson + RxJava2 adapters. */
  @Provides
  public static Retrofit providesRetrofit(Gson gson, OkHttpClient client) {
    return new Retrofit.Builder()
        .baseUrl("https://status.github.com/")
        .client(client)
        .addConverterFactory(GsonConverterFactory.create(gson))
        .addCallAdapterFactory(RxJava2CallAdapterFactory.create())
        .build();
  }

  /**
   * Hands the {@link ServiceCreator} both the Retrofit-backed implementation
   * and a mock, letting it decide which one to serve.
   */
  @Provides
  public static GithubStatusApi providesGithubStatusApi(
      Retrofit retrofit,
      ServiceCreator serviceCreator) {
    GithubStatusApi realApi = retrofit.create(GithubStatusApi.class);
    return serviceCreator.createService(realApi, new MockGithubStatusApi(), GithubStatusApi.class);
  }

  @Provides
  public static SharedPreferences providesSharedPreferences(Context context) {
    return context.getSharedPreferences("default", Context.MODE_PRIVATE);
  }

  @Provides
  public static RxSharedPreferences providesRxSharedPreferences(SharedPreferences sharedPrefs) {
    return RxSharedPreferences.create(sharedPrefs);
  }

  /** Boolean flag stored under the "mock_mode" preference key. */
  @Provides
  @MockMode
  public static Preference<Boolean> providesMockModePreference(RxSharedPreferences preferences) {
    return preferences.getBoolean("mock_mode");
  }
}
| cdeange/hubbub | app/src/main/java/com/deange/githubstatus/dagger/AppModule.java | Java | apache-2.0 | 3,277 |
/*
* Copyright 2007 The Kuali Foundation
*
* Licensed under the Educational Community License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.opensource.org/licenses/ecl2.php
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.kuali.kfs.module.ld.businessobject.lookup;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.kuali.kfs.module.ld.businessobject.LaborCalculatedSalaryFoundationTracker;
import org.kuali.kfs.module.ld.businessobject.inquiry.AbstractLaborInquirableImpl;
import org.kuali.kfs.module.ld.businessobject.inquiry.PositionDataDetailsInquirableImpl;
import org.kuali.kfs.module.ld.businessobject.inquiry.PositionFundingInquirableImpl;
import org.kuali.kfs.sys.KFSConstants;
import org.kuali.kfs.sys.KFSPropertyConstants;
import org.kuali.rice.kns.lookup.AbstractLookupableHelperServiceImpl;
import org.kuali.rice.kns.lookup.HtmlData;
import org.kuali.rice.kns.lookup.HtmlData.AnchorHtmlData;
import org.kuali.rice.kns.lookup.LookupUtils;
import org.kuali.rice.krad.bo.BusinessObject;
import org.kuali.rice.krad.util.BeanPropertyComparator;
/**
 * Lookupable helper for position funding results: renders inquiry links for
 * result fields and performs the lookup search with optional default sorting.
 */
public class PositionFundingLookupableHelperServiceImpl extends AbstractLookupableHelperServiceImpl {

    /**
     * Builds the inquiry URL for a result field. For the position number the
     * link targets the position data detail inquiry when a matching position
     * record exists (an empty anchor otherwise); every other field falls back
     * to the generic position funding inquiry.
     *
     * @see org.kuali.rice.kns.lookup.Lookupable#getInquiryUrl(org.kuali.rice.krad.bo.BusinessObject, java.lang.String)
     */
    @Override
    public HtmlData getInquiryUrl(BusinessObject businessObject, String propertyName) {
        if (KFSPropertyConstants.POSITION_NUMBER.equals(propertyName)) {
            // Renamed from "CSFTracker" to follow camelCase local-variable convention.
            LaborCalculatedSalaryFoundationTracker csfTracker = (LaborCalculatedSalaryFoundationTracker) businessObject;
            AbstractLaborInquirableImpl positionDataDetailsInquirable = new PositionDataDetailsInquirableImpl();

            Map<String, String> fieldValues = new HashMap<String, String>();
            fieldValues.put(propertyName, csfTracker.getPositionNumber());

            BusinessObject positionData = positionDataDetailsInquirable.getBusinessObject(fieldValues);
            if (positionData == null) {
                // No matching position record: render a non-clickable empty anchor.
                return new AnchorHtmlData(KFSConstants.EMPTY_STRING, KFSConstants.EMPTY_STRING);
            }
            return positionDataDetailsInquirable.getInquiryUrl(positionData, propertyName);
        }
        return (new PositionFundingInquirableImpl()).getInquiryUrl(businessObject, propertyName);
    }

    /**
     * Runs the lookup for the given criteria, stashing navigation parameters
     * from the field map, and sorts the results by the configured default sort
     * columns when any are present.
     *
     * @see org.kuali.rice.kns.lookup.AbstractLookupableHelperServiceImpl#getSearchResults(java.util.Map)
     */
    @Override
    public List<? extends BusinessObject> getSearchResults(Map<String, String> fieldValues) {
        // remove hidden fields
        LookupUtils.removeHiddenCriteriaFields(getBusinessObjectClass(), fieldValues);

        setBackLocation(fieldValues.get(KFSConstants.BACK_LOCATION));
        setDocFormKey(fieldValues.get(KFSConstants.DOC_FORM_KEY));
        setReferencesToRefresh(fieldValues.get(KFSConstants.REFERENCES_TO_REFRESH));

        List searchResults = (List) getLookupService().findCollectionBySearchHelper(getBusinessObjectClass(), fieldValues, false);

        // Sort in place if default sort columns are configured; reuse the
        // already-fetched column list instead of querying it a second time.
        List defaultSortColumns = getDefaultSortColumns();
        if (!defaultSortColumns.isEmpty()) {
            Collections.sort(searchResults, new BeanPropertyComparator(defaultSortColumns, true));
        }
        return searchResults;
    }
}
| Ariah-Group/Finance | af_webapp/src/main/java/org/kuali/kfs/module/ld/businessobject/lookup/PositionFundingLookupableHelperServiceImpl.java | Java | apache-2.0 | 3,747 |
/*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
/*
* This code was generated by https://github.com/googleapis/google-api-java-client-services/
* Modify at your own risk.
*/
package com.google.api.services.analyticsreporting.v4;
/**
* Service definition for AnalyticsReporting (v4).
*
* <p>
* Accesses Analytics report data.
* </p>
*
* <p>
* For more information about this service, see the
* <a href="https://developers.google.com/analytics/devguides/reporting/core/v4/" target="_blank">API Documentation</a>
* </p>
*
* <p>
* This service uses {@link AnalyticsReportingRequestInitializer} to initialize global parameters via its
* {@link Builder}.
* </p>
*
* @since 1.3
* @author Google, Inc.
*/
@SuppressWarnings("javadoc")
public class AnalyticsReporting extends com.google.api.client.googleapis.services.json.AbstractGoogleJsonClient {

  // Note: Leave this static initializer at the top of the file.
  static {
    com.google.api.client.util.Preconditions.checkState(
        com.google.api.client.googleapis.GoogleUtils.MAJOR_VERSION == 1 &&
        com.google.api.client.googleapis.GoogleUtils.MINOR_VERSION >= 15,
        "You are currently running with version %s of google-api-client. " +
        "You need at least version 1.15 of google-api-client to run version " +
        "1.26.0 of the Analytics Reporting API library.", com.google.api.client.googleapis.GoogleUtils.VERSION);
  }

  /**
   * The default encoded root URL of the service. This is determined when the library is generated
   * and normally should not be changed.
   *
   * @since 1.7
   */
  public static final String DEFAULT_ROOT_URL = "https://analyticsreporting.googleapis.com/";

  /**
   * The default encoded service path of the service. This is determined when the library is
   * generated and normally should not be changed.
   *
   * @since 1.7
   */
  public static final String DEFAULT_SERVICE_PATH = "";

  /**
   * The default encoded batch path of the service. This is determined when the library is
   * generated and normally should not be changed.
   *
   * @since 1.23
   */
  public static final String DEFAULT_BATCH_PATH = "batch";

  /**
   * The default encoded base URL of the service. This is determined when the library is generated
   * and normally should not be changed.
   */
  public static final String DEFAULT_BASE_URL = DEFAULT_ROOT_URL + DEFAULT_SERVICE_PATH;

  /**
   * Constructor.
   *
   * <p>
   * Use {@link Builder} if you need to specify any of the optional parameters.
   * </p>
   *
   * @param transport HTTP transport, which should normally be:
   *        <ul>
   *        <li>Google App Engine:
   *        {@code com.google.api.client.extensions.appengine.http.UrlFetchTransport}</li>
   *        <li>Android: {@code newCompatibleTransport} from
   *        {@code com.google.api.client.extensions.android.http.AndroidHttp}</li>
   *        <li>Java: {@link com.google.api.client.googleapis.javanet.GoogleNetHttpTransport#newTrustedTransport()}
   *        </li>
   *        </ul>
   * @param jsonFactory JSON factory, which may be:
   *        <ul>
   *        <li>Jackson: {@code com.google.api.client.json.jackson2.JacksonFactory}</li>
   *        <li>Google GSON: {@code com.google.api.client.json.gson.GsonFactory}</li>
   *        <li>Android Honeycomb or higher:
   *        {@code com.google.api.client.extensions.android.json.AndroidJsonFactory}</li>
   *        </ul>
   * @param httpRequestInitializer HTTP request initializer or {@code null} for none
   * @since 1.7
   */
  public AnalyticsReporting(com.google.api.client.http.HttpTransport transport, com.google.api.client.json.JsonFactory jsonFactory,
      com.google.api.client.http.HttpRequestInitializer httpRequestInitializer) {
    this(new Builder(transport, jsonFactory, httpRequestInitializer));
  }

  /**
   * @param builder builder
   */
  AnalyticsReporting(Builder builder) {
    super(builder);
  }

  @Override
  protected void initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest<?> httpClientRequest) throws java.io.IOException {
    super.initialize(httpClientRequest);
  }

  /**
   * An accessor for creating requests from the Reports collection.
   *
   * <p>The typical use is:</p>
   * <pre>
   *   {@code AnalyticsReporting analyticsreporting = new AnalyticsReporting(...);}
   *   {@code AnalyticsReporting.Reports.List request = analyticsreporting.reports().list(parameters ...)}
   * </pre>
   *
   * @return the resource collection
   */
  public Reports reports() {
    return new Reports();
  }

  /**
   * The "reports" collection of methods.
   */
  public class Reports {

    /**
     * Returns the Analytics data.
     *
     * Create a request for the method "reports.batchGet".
     *
     * This request holds the parameters needed by the analyticsreporting server. After setting any
     * optional parameters, call the {@link BatchGet#execute()} method to invoke the remote operation.
     *
     * @param content the {@link com.google.api.services.analyticsreporting.v4.model.GetReportsRequest}
     * @return the request
     */
    public BatchGet batchGet(com.google.api.services.analyticsreporting.v4.model.GetReportsRequest content) throws java.io.IOException {
      BatchGet result = new BatchGet(content);
      initialize(result);
      return result;
    }

    public class BatchGet extends AnalyticsReportingRequest<com.google.api.services.analyticsreporting.v4.model.GetReportsResponse> {

      private static final String REST_PATH = "v4/reports:batchGet";

      /**
       * Returns the Analytics data.
       *
       * Create a request for the method "reports.batchGet".
       *
       * This request holds the parameters needed by the analyticsreporting server. After setting
       * any optional parameters, call the {@link BatchGet#execute()} method to invoke the remote
       * operation. <p> {@link
       * BatchGet#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
       * must be called to initialize this instance immediately after invoking the constructor. </p>
       *
       * @param content the {@link com.google.api.services.analyticsreporting.v4.model.GetReportsRequest}
       * @since 1.13
       */
      protected BatchGet(com.google.api.services.analyticsreporting.v4.model.GetReportsRequest content) {
        super(AnalyticsReporting.this, "POST", REST_PATH, content, com.google.api.services.analyticsreporting.v4.model.GetReportsResponse.class);
      }

      @Override
      public BatchGet set$Xgafv(java.lang.String $Xgafv) {
        return (BatchGet) super.set$Xgafv($Xgafv);
      }

      @Override
      public BatchGet setAccessToken(java.lang.String accessToken) {
        return (BatchGet) super.setAccessToken(accessToken);
      }

      @Override
      public BatchGet setAlt(java.lang.String alt) {
        return (BatchGet) super.setAlt(alt);
      }

      @Override
      public BatchGet setCallback(java.lang.String callback) {
        return (BatchGet) super.setCallback(callback);
      }

      @Override
      public BatchGet setFields(java.lang.String fields) {
        return (BatchGet) super.setFields(fields);
      }

      @Override
      public BatchGet setKey(java.lang.String key) {
        return (BatchGet) super.setKey(key);
      }

      @Override
      public BatchGet setOauthToken(java.lang.String oauthToken) {
        return (BatchGet) super.setOauthToken(oauthToken);
      }

      @Override
      public BatchGet setPrettyPrint(java.lang.Boolean prettyPrint) {
        return (BatchGet) super.setPrettyPrint(prettyPrint);
      }

      @Override
      public BatchGet setQuotaUser(java.lang.String quotaUser) {
        return (BatchGet) super.setQuotaUser(quotaUser);
      }

      @Override
      public BatchGet setUploadType(java.lang.String uploadType) {
        return (BatchGet) super.setUploadType(uploadType);
      }

      @Override
      public BatchGet setUploadProtocol(java.lang.String uploadProtocol) {
        return (BatchGet) super.setUploadProtocol(uploadProtocol);
      }

      @Override
      public BatchGet set(String parameterName, Object value) {
        return (BatchGet) super.set(parameterName, value);
      }
    }
  }

  /**
   * An accessor for creating requests from the UserActivity collection.
   *
   * <p>The typical use is:</p>
   * <pre>
   *   {@code AnalyticsReporting analyticsreporting = new AnalyticsReporting(...);}
   *   {@code AnalyticsReporting.UserActivity.List request = analyticsreporting.userActivity().list(parameters ...)}
   * </pre>
   *
   * @return the resource collection
   */
  public UserActivity userActivity() {
    return new UserActivity();
  }

  /**
   * The "userActivity" collection of methods.
   */
  public class UserActivity {

    /**
     * Returns User Activity data.
     *
     * Create a request for the method "userActivity.search".
     *
     * This request holds the parameters needed by the analyticsreporting server. After setting any
     * optional parameters, call the {@link Search#execute()} method to invoke the remote operation.
     *
     * @param content the {@link com.google.api.services.analyticsreporting.v4.model.SearchUserActivityRequest}
     * @return the request
     */
    public Search search(com.google.api.services.analyticsreporting.v4.model.SearchUserActivityRequest content) throws java.io.IOException {
      Search result = new Search(content);
      initialize(result);
      return result;
    }

    public class Search extends AnalyticsReportingRequest<com.google.api.services.analyticsreporting.v4.model.SearchUserActivityResponse> {

      private static final String REST_PATH = "v4/userActivity:search";

      /**
       * Returns User Activity data.
       *
       * Create a request for the method "userActivity.search".
       *
       * This request holds the parameters needed by the analyticsreporting server. After setting
       * any optional parameters, call the {@link Search#execute()} method to invoke the remote
       * operation. <p> {@link
       * Search#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
       * be called to initialize this instance immediately after invoking the constructor. </p>
       *
       * @param content the {@link com.google.api.services.analyticsreporting.v4.model.SearchUserActivityRequest}
       * @since 1.13
       */
      protected Search(com.google.api.services.analyticsreporting.v4.model.SearchUserActivityRequest content) {
        super(AnalyticsReporting.this, "POST", REST_PATH, content, com.google.api.services.analyticsreporting.v4.model.SearchUserActivityResponse.class);
      }

      @Override
      public Search set$Xgafv(java.lang.String $Xgafv) {
        return (Search) super.set$Xgafv($Xgafv);
      }

      @Override
      public Search setAccessToken(java.lang.String accessToken) {
        return (Search) super.setAccessToken(accessToken);
      }

      @Override
      public Search setAlt(java.lang.String alt) {
        return (Search) super.setAlt(alt);
      }

      @Override
      public Search setCallback(java.lang.String callback) {
        return (Search) super.setCallback(callback);
      }

      @Override
      public Search setFields(java.lang.String fields) {
        return (Search) super.setFields(fields);
      }

      @Override
      public Search setKey(java.lang.String key) {
        return (Search) super.setKey(key);
      }

      @Override
      public Search setOauthToken(java.lang.String oauthToken) {
        return (Search) super.setOauthToken(oauthToken);
      }

      @Override
      public Search setPrettyPrint(java.lang.Boolean prettyPrint) {
        return (Search) super.setPrettyPrint(prettyPrint);
      }

      @Override
      public Search setQuotaUser(java.lang.String quotaUser) {
        return (Search) super.setQuotaUser(quotaUser);
      }

      @Override
      public Search setUploadType(java.lang.String uploadType) {
        return (Search) super.setUploadType(uploadType);
      }

      @Override
      public Search setUploadProtocol(java.lang.String uploadProtocol) {
        return (Search) super.setUploadProtocol(uploadProtocol);
      }

      @Override
      public Search set(String parameterName, Object value) {
        return (Search) super.set(parameterName, value);
      }
    }
  }

  /**
   * Builder for {@link AnalyticsReporting}.
   *
   * <p>
   * Implementation is not thread-safe.
   * </p>
   *
   * @since 1.3.0
   */
  public static final class Builder extends com.google.api.client.googleapis.services.json.AbstractGoogleJsonClient.Builder {

    /**
     * Returns an instance of a new builder.
     *
     * @param transport HTTP transport, which should normally be:
     *        <ul>
     *        <li>Google App Engine:
     *        {@code com.google.api.client.extensions.appengine.http.UrlFetchTransport}</li>
     *        <li>Android: {@code newCompatibleTransport} from
     *        {@code com.google.api.client.extensions.android.http.AndroidHttp}</li>
     *        <li>Java: {@link com.google.api.client.googleapis.javanet.GoogleNetHttpTransport#newTrustedTransport()}
     *        </li>
     *        </ul>
     * @param jsonFactory JSON factory, which may be:
     *        <ul>
     *        <li>Jackson: {@code com.google.api.client.json.jackson2.JacksonFactory}</li>
     *        <li>Google GSON: {@code com.google.api.client.json.gson.GsonFactory}</li>
     *        <li>Android Honeycomb or higher:
     *        {@code com.google.api.client.extensions.android.json.AndroidJsonFactory}</li>
     *        </ul>
     * @param httpRequestInitializer HTTP request initializer or {@code null} for none
     * @since 1.7
     */
    public Builder(com.google.api.client.http.HttpTransport transport, com.google.api.client.json.JsonFactory jsonFactory,
        com.google.api.client.http.HttpRequestInitializer httpRequestInitializer) {
      super(
          transport,
          jsonFactory,
          DEFAULT_ROOT_URL,
          DEFAULT_SERVICE_PATH,
          httpRequestInitializer,
          false);
      setBatchPath(DEFAULT_BATCH_PATH);
    }

    /** Builds a new instance of {@link AnalyticsReporting}. */
    @Override
    public AnalyticsReporting build() {
      return new AnalyticsReporting(this);
    }

    @Override
    public Builder setRootUrl(String rootUrl) {
      return (Builder) super.setRootUrl(rootUrl);
    }

    @Override
    public Builder setServicePath(String servicePath) {
      return (Builder) super.setServicePath(servicePath);
    }

    @Override
    public Builder setBatchPath(String batchPath) {
      return (Builder) super.setBatchPath(batchPath);
    }

    @Override
    public Builder setHttpRequestInitializer(com.google.api.client.http.HttpRequestInitializer httpRequestInitializer) {
      return (Builder) super.setHttpRequestInitializer(httpRequestInitializer);
    }

    @Override
    public Builder setApplicationName(String applicationName) {
      return (Builder) super.setApplicationName(applicationName);
    }

    @Override
    public Builder setSuppressPatternChecks(boolean suppressPatternChecks) {
      return (Builder) super.setSuppressPatternChecks(suppressPatternChecks);
    }

    @Override
    public Builder setSuppressRequiredParameterChecks(boolean suppressRequiredParameterChecks) {
      return (Builder) super.setSuppressRequiredParameterChecks(suppressRequiredParameterChecks);
    }

    @Override
    public Builder setSuppressAllChecks(boolean suppressAllChecks) {
      return (Builder) super.setSuppressAllChecks(suppressAllChecks);
    }

    /**
     * Set the {@link AnalyticsReportingRequestInitializer}.
     *
     * @since 1.12
     */
    public Builder setAnalyticsReportingRequestInitializer(
        AnalyticsReportingRequestInitializer analyticsreportingRequestInitializer) {
      return (Builder) super.setGoogleClientRequestInitializer(analyticsreportingRequestInitializer);
    }

    @Override
    public Builder setGoogleClientRequestInitializer(
        com.google.api.client.googleapis.services.GoogleClientRequestInitializer googleClientRequestInitializer) {
      return (Builder) super.setGoogleClientRequestInitializer(googleClientRequestInitializer);
    }
  }
}
| googleapis/google-api-java-client-services | clients/google-api-services-analyticsreporting/v4/1.26.0/com/google/api/services/analyticsreporting/v4/AnalyticsReporting.java | Java | apache-2.0 | 16,867 |
package com.jmw.konfman.dao;
import java.util.Date;
import java.util.List;
import com.jmw.konfman.model.Reservation;
import com.jmw.konfman.model.Room;
import com.jmw.konfman.model.User;
/**
 * Data-access interface for {@link Reservation} persistence, retrieval by
 * user/room/time-frame, and conflict checking.
 */
public interface ReservationDao extends Dao {

    /**
     * Returns all reservations stored in the system.
     *
     * @return the reservations as a List
     */
    public List getReservations();

    /**
     * Gets all of a user's reservations regardless of date.
     * NOTE(review): the previous javadoc duplicated the "current" variant's
     * text; wording here is inferred from the method name — confirm against
     * the implementation.
     *
     * @param user the user whose reservations are sought
     * @return a list of reservations
     */
    public List getAllUserReservations(User user);

    /**
     * Gets user's current reservations (those which for today and in the future).
     *
     * @param user the user whose reservations are sought
     * @return a list of reservations
     */
    public List getCurrentUserReservations(User user);

    /**
     * Gets user's past reservations (those which are before today).
     *
     * @param user the user whose reservations are sought
     * @return a list of reservations
     */
    public List getPastUserReservations(User user);

    /**
     * Gets all of a room's reservations regardless of date.
     * NOTE(review): the previous javadoc duplicated the "current" variant's
     * text; wording here is inferred from the method name — confirm against
     * the implementation.
     *
     * @param room the room whose reservations are sought
     * @return a list of reservations
     */
    public List getAllRoomReservations(Room room);

    /**
     * Gets room's current reservations (those which for today and in the future).
     *
     * @param room the room whose reservations are sought
     * @return a list of reservations
     */
    public List getCurrentRoomReservations(Room room);

    /**
     * Gets room's past reservations (those which are before today).
     *
     * @param room the room whose reservations are sought
     * @return a list of reservations
     */
    public List getPastRoomReservations(Room room);

    /**
     * Gets room's reservations for a specific interval.
     *
     * @param reservation the template reservation with the properties to be searched for
     * @return a list of reservations
     */
    public List getIntervalReservations(Reservation reservation);

    /**
     * Gets a specific reservation specified by reservationId.
     *
     * @param reservationId the unique ID of the reservation sought
     * @return the reservation
     */
    public Reservation getReservation(Long reservationId);

    /**
     * Save changes to a reservation.
     *
     * @param reservation the reservation to be changed
     * @return false if the reservation was not saved due to a conflict
     */
    public boolean saveReservation(Reservation reservation);

    /**
     * Removes a reservation from the database.
     *
     * @param reservationId the ID of the reservation to remove
     */
    public void removeReservation(Long reservationId);

    /**
     * Determines if there is a conflict between this reservation and an existing one.
     *
     * @param reservation the new reservation
     * @return true if there is a conflict, false if there is no conflict
     */
    public boolean isConflict(Reservation reservation);
}
| NoraFarahin/konfman | src/main/java/com/jmw/konfman/dao/ReservationDao.java | Java | apache-2.0 | 2,751 |
<?php
/**
* ShopEx licence
*
* @copyright Copyright (c) 2005-2010 ShopEx Technologies Inc. (http://www.shopex.cn)
* @license http://ecos.shopex.cn/ ShopEx License
*/
class express_ctl_admin_delivery_printer extends desktop_controller{
public $workground = 'ectools_ctl_admin_order';
/**
 * Sets up the printer controller: disables response caching, resolves the
 * client DPI used by the print-template designer (falling back to 96), and
 * loads the print template model plus its attached background images.
 *
 * @param object $app the owning application instance
 */
public function __construct($app)
{
    parent::__construct($app);
    // Printing pages must never be served from a stale cache.
    header("cache-control: no-store, no-cache, must-revalidate");
    define('DPGB_TMP_MODE',1);
    define('DPGB_HOME_MODE',2);
    $this->pagedata['dpi'] = intval(app::get('b2c')->getConf('system.clientdpi'));
    if(!$this->pagedata['dpi']){
        // Default DPI when the shop has no client DPI configured.
        $this->pagedata['dpi'] = 96;
    }
    $this->model = $this->app->model('print_tmpl');
    // Image attachment model, used to fetch template background pictures.
    $this->o = app::get('image')->model('image_attach');
    $this->obj = $this;
}
/**
 * Renders the express print-template finder (list view) with "add template"
 * and "import template" actions; recycle bin enabled, tag/filter disabled.
 */
public function index()
{
    $this->finder('express_mdl_print_tmpl',array(
        'title'=>app::get('express')->_('快递单模板'),
        'actions'=>array(
            array('label'=>app::get('express')->_('添加模版'),'icon'=>'add.gif','target'=>'_blank','href'=>'index.php?app=express&ctl=admin_delivery_printer&act=add_tmpl'),
            array('label'=>app::get('express')->_('导入模版'),'icon'=>'add.gif','target'=>'dialog::{title:\''.app::get('express')->_('导入模版').'\'}','href'=>'index.php?app=express&ctl=admin_delivery_printer&act=import'),
        ),'use_buildin_set_tag'=>false,'use_buildin_recycle'=>true,'use_buildin_filter'=>false,
    ));
}
/**
 * Renders the print preview/designer page for a delivery ("express") order.
 *
 * Reads from POST: dly_tmpl_id (print template id), dly_center (delivery
 * centre id) and order (order summary including order_id). Builds two JSON
 * payloads for the page template: the stored template definition
 * (name/size/offset/background/items) and a test data set mapping each
 * printable field name to a concrete label taken from the current order.
 */
function do_print(){
    // Collect delivery-centre fields (name, regions, contacts, date parts) into $data.
    $this->get_delivery_info($_POST,$data);
    // Background image attached to the selected print template.
    $aData = $this->o->getList('image_id',array('target_id' => $_POST['dly_tmpl_id'],'target_type' => 'print_tmpl'));
    $image_id = $aData[0]['image_id'];
    $this->pagedata['bg_id'] = $image_id;
    $url = $this->show_bg_picture(1,$image_id);
    // addnew
    $data['order_id'] = $_POST['order']['order_id'];
    $data['order_print'] = $data['order_id'];
    // Load the full order (objects -> items -> products) and merge its
    // consignee / item details into $data.
    $oOrder = app::get('b2c')->model('orders');
    $subsdf = array('order_objects'=>array('*',array('order_items'=>array('*',array(':products'=>'*')))));
    $goodsItems = $oOrder->dump($data['order_id'],'*',$subsdf);
    $this->get_order_info($goodsItems,$data);
    $data['text'] = '自定义的内容';
    // NOTE(review): $xmltool and $mydata appear unused below — confirm and remove.
    $xmltool = kernel::single('site_utility_xml');
    $mydata['item'] = $data;
    $this->pagedata['prt_tmpl'] = $this->model->dump($_POST['dly_tmpl_id']);
    // Template definition consumed by the client-side print designer.
    $this->pagedata['templateData'] = json_encode(array(
        'name'=>$this->pagedata['prt_tmpl']['prt_tmpl_title'],
        'enable'=>($this->pagedata['prt_tmpl']['shortcut']=='true')?'1':'0',
        'size'=>array(
            'width'=>$this->pagedata['prt_tmpl']['prt_tmpl_width'],
            'height'=>$this->pagedata['prt_tmpl']['prt_tmpl_height'],
        ),
        'imgUrl'=>$url,
        'dpi'=>96,
        'offset'=>array(
            'x'=>$this->pagedata['prt_tmpl']['prt_tmpl_offsetx'],
            'y'=>$this->pagedata['prt_tmpl']['prt_tmpl_offsety'],
        ),
        'ptItem'=>json_decode($this->pagedata['prt_tmpl']['prt_tmpl_data'],true),
    ));
    // Field-name -> sample-label pairs used to preview the template with real
    // order values. NOTE(review): ship_addr and order_count each appear twice
    // in this list — presumably harmless, but confirm intent.
    $this->pagedata['testTemplateData'] = json_encode(array(
        array(
            'label'=>app::get('site')->getConf('site.name'),
            'data'=>'shop_name',
        ),
        array(
            'label'=>'√',
            'data'=>'tick',
        ),
        array(
            'label'=>$data['ship_name'],
            'data'=>'ship_name',
        ),
        array(
            'label'=>$data['ship_addr'],
            'data'=>'ship_addr',
        ),
        array(
            'label'=>$data['ship_tel'],
            'data'=>'ship_tel',
        ),
        array(
            'label'=>$data['ship_mobile'],
            'data'=>'ship_mobile',
        ),
        array(
            'label'=>$goodsItems['consignee']['r_time'],
            'data'=>'ship_time',
        ),
        array(
            'label'=>$data['ship_zip'],
            'data'=>'ship_zip',
        ),
        array(
            'label'=>$data['ship_area_0'],
            'data'=>'ship_area_0',
        ),
        array(
            'label'=>$data['ship_area_1'],
            'data'=>'ship_area_1',
        ),
        array(
            'label'=>$data['ship_area_2'],
            'data'=>'ship_area_2',
        ),
        array(
            'label'=>$data['ship_addr'],
            'data'=>'ship_addr',
        ),
        array(
            'label'=>$data['order_count'],
            'data'=>'order_count',
        ),
        array(
            'label'=>$data['order_memo'],
            'data'=>'order_memo',
        ),
        array(
            'label'=>$data['order_count'],
            'data'=>'order_count',
        ),
        array(
            'label'=>$data['order_weight'],
            'data'=>'order_weight',
        ),
        array(
            'label'=>$data['order_price'],
            'data'=>'order_price',
        ),
        array(
            'label'=>$data['text'],
            'data'=>'text',
        ),
        array(
            'label'=>$data['dly_area_0'],
            'data'=>'dly_area_0',
        ),
        array(
            'label'=>$data['dly_area_1'],
            'data'=>'dly_area_1',
        ),
        array(
            'label'=>$data['dly_area_2'],
            'data'=>'dly_area_2',
        ),
        array(
            'label'=>$data['dly_address'],
            'data'=>'dly_address',
        ),
        array(
            'label'=>$data['dly_tel'],
            'data'=>'dly_tel',
        ),
        array(
            'label'=>$data['dly_mobile'],
            'data'=>'dly_mobile',
        ),
        array(
            'label'=>$data['dly_zip'],
            'data'=>'dly_zip',
        ),
        array(
            'label'=>$data['date_y'],
            'data'=>'date_y',
        ),
        array(
            'label'=>$data['date_m'],
            'data'=>'date_m',
        ),
        array(
            'label'=>$data['date_d'],
            'data'=>'date_d',
        ),
        array(
            'label'=>$data['order_name'],
            'data'=>'order_name',
        ),
        array(
            'label'=>str_replace(' ', ' ', $data['order_name_a']),
            'data'=>'order_name_a',
        ),
        array(
            'label'=>str_replace(' ', ' ', $data['order_name_as']),
            'data'=>'order_name_as',
        ),
        array(
            // NOTE(review): '&nsbsp;' looks like a typo for '&nbsp;' — if so
            // this replacement never matches; confirm before changing.
            'label'=>str_replace('&nsbsp;', ' ', $data['order_name_ab']),
            'data'=>'order_name_ab',
        ),
        array(
            'label' => (!empty($data['dly_name']) ? $data['dly_name'] : ' '),
            'data' => 'dly_name',
        ),
        array(
            'label' => $data['order_id'],
            'data' => 'order_id',
        ),
    ));
    $this->pagedata['res_url'] = $this->app->res_url;
    $this->singlepage('admin/delivery/center/printer.html');
}
/**
 * Fills $data with delivery-centre related print fields: centre name,
 * ship-to and centre region segments, centre contact details, and the
 * timezone-adjusted current date parts.
 *
 * @param array $arr_post posted form values (delivery centre id, order info)
 * @param array $data     print data array, filled by reference
 */
private function get_delivery_info($arr_post,&$data)
{
    $centerModel = $this->app->model('dly_center');
    $center = $centerModel->dump($arr_post['dly_center']);

    $data['dly_name'] = $center['uname'];

    // Ship-to region is packed as "pkg:area0/area1/...:id"; expand the middle
    // part into ship_area_0..n.
    list($pkgPart, $shipRegions, $shipRegionId) = explode(':', $arr_post['order']['ship_area']);
    foreach (explode('/', $shipRegions) as $index => $segment) {
        $data['ship_area_' . $index] = $segment;
    }

    // Delivery-centre region uses the same packed format.
    if ($center['region']) {
        list($pkgPart, $centerRegions, $centerRegionId) = explode(':', $center['region']);
        foreach (explode('/', $centerRegions) as $index => $segment) {
            $data['dly_area_' . $index] = $segment;
        }
    }

    $data['dly_address'] = $center['address'];
    $data['dly_tel'] = $center['phone'] ? $center['phone'] : 0;
    $data['dly_mobile'] = $center['cellphone'] ? $center['cellphone'] : 0;
    $data['dly_zip'] = $center['zip'] ? $center['zip'] : 0;

    // Shift "now" from the server timezone into the operator's timezone.
    $localTime = time() + ($GLOBALS['user_timezone'] - SERVER_TIMEZONE) * 3600;
    $data['date_y'] = date('Y', $localTime);
    $data['date_m'] = date('m', $localTime);
    $data['date_d'] = date('d', $localTime);

    $data['order_memo'] = $_POST['order']['order_memo'];

    unset($data['ship_area']);
}
/**
 * Fill $data with order-level fields (member name, consignee, per-item print
 * rows via the goods-type services, totals) for the delivery print template.
 *
 * @param array $arr_order full order structure (consignee, order_objects, ...)
 * @param array &$data     template data array, filled in place
 * @return void
 */
private function get_order_info($arr_order,&$data)
{
// running totals over all goods items
$num = 0;
$weight = 0;
$math = kernel::single('ectools_math');
if ($arr_order['member_id'])
{
$oMember = app::get('b2c')->model('members');
// dump member plus the joined pam account row (login name lives there)
$aMem = $oMember->dump($arr_order['member_id'],'*',array(':account@pam'=>array('*')));
if(!$aMem){
$data['member_name'] = app::get('express')->_('非会员顾客!');
}
else{
$data['member_name'] = $aMem['pam_account']['login_name'];
}
}
else{
$data['member_name'] = app::get('express')->_('非会员顾客');
}
if ($arr_order)
{
$oProduct = app::get('b2c')->model('products');
$order_item = app::get('b2c')->model('order_items');
$data['ship_name'] = $arr_order['consignee']['name'];
$data['ship_addr'] = $arr_order['consignee']['addr'];
// fall back to 0 so the printed sheet never renders an empty field
$data['ship_tel'] = $arr_order['consignee']['telephone']?$arr_order['consignee']['telephone']:0;
$data['ship_mobile'] = $arr_order['consignee']['mobile']?$arr_order['consignee']['mobile']:0;
$data['ship_zip'] = $arr_order['consignee']['zip']?$arr_order['consignee']['zip']:0;
// only set the memo from the order if it was not already filled upstream
$data['order_memo'] || ( $data['order_memo'] = $arr_order['memo']?$arr_order['memo']:'订单缺省备注');
$i=0;
// 所有的goods type 处理的服务的初始化.
// Build a map of goods-type => handler service so each order object can be
// dispatched to the service that knows how to format its print rows.
$arr_service_goods_type_obj = array();
$arr_service_goods_type = kernel::servicelist('order_goodstype_operation');
foreach ($arr_service_goods_type as $obj_service_goods_type)
{
$goods_types = $obj_service_goods_type->get_goods_type();
$arr_service_goods_type_obj[$goods_types] = $obj_service_goods_type;
}
foreach ($arr_order['order_objects'] as $k=>$item)
{
if ($item['obj_type'] != 'goods')
{
if ($item['obj_type'] == 'gift')
{
// gifts are expanded into their individual order items
foreach ((array)$item['order_items'] as $key=> $val)
{
if (!$val['products'])
{
// product snapshot missing: re-read bn from the order_items table
// NOTE(review): spec_info is also filled with bn here — looks like a
// copy-paste; confirm whether spec_info should come from a spec column
$tmp = $order_item->getList('*', array('item_id'=>$val['item_id']));
$val['products']['bn'] = $tmp[0]['bn'];
$val['products']['spec_info'] = $tmp[0]['bn'];
}
$arr_service_goods_type_obj[$item['obj_type']]->get_default_dly_order_info($val,$data);
}
}
else
{
// other non-goods object types are handled at object granularity
$arr_service_goods_type_obj[$item['obj_type']]->get_default_dly_order_info($item,$data);
}
}
else
{
foreach ((array)$item['order_items'] as $key=> $val)
{
if ($val['item_type'] == "product" || $val['item_type'] == "ajunct")
{
// products are printed through the 'goods' handler
if ($val['item_type'] == "product")
$val['item_type'] = 'goods';
if (!$val['products'])
{
$tmp = $order_item->getList('*', array('item_id'=>$val['item_id']));
$val['products']['bn'] = $tmp[0]['bn'];
$val['products']['spec_info'] = $tmp[0]['bn'];
}
$arr_service_goods_type_obj[$val['item_type']]->get_default_dly_order_info($val,$data);
}
else
{
if (!$val['products'])
{
$tmp = $order_item->getList('*', array('item_id'=>$val['item_id']));
$val['products']['bn'] = $tmp[0]['bn'];
$val['products']['spec_info'] = $tmp[0]['bn'];
}
$arr_service_goods_type_obj[$val['item_type']]->get_default_dly_order_info($val,$data);
}
// totals accumulate over every item of a goods object, whatever its type
$weight = $math->number_plus(array($weight, $val['weight']));
$num = $math->number_plus(array($num, $val['quantity']));
/*elseif($val['item_type'] == "pkg")
{
$data['order_name'][$i] = array('name'=>$val['name']);
$data['order_name_a'][$i] = array('name'=>$val['name'], 'num'=>$val['quantity']);
$data['order_name_as'][$i] = array('name'=>$val['name'], 'num'=>$val['quantity'], 'spec'=>$val['products']['spec_info']);
$data['order_name_ab'][$i] = array('name'=>$val['name'], 'num'=>$val['quantity'], 'bn'=>$val['products']['bn']);
$i++;
}*/
}
}
}
}
$data['order_count'] = $num;
$data['order_weight'] = $weight;
$data['order_price'] = $arr_order['cur_amount'];
}
/**
 * Show the template editor pre-filled with defaults for a brand-new
 * express-sheet template.
 *
 * @param string|null $image_id optional background image id to preload
 * @return void (renders admin/printer/dly_printer_editor.html)
 */
public function add_tmpl($image_id = null)
{
    $this->_fontlist();
    // BUGFIX: the original called $this->model->dump($tmpl_id) with an
    // undefined $tmpl_id (notice + dump of nothing); a new template has no
    // existing row to load, so expose an empty 'tmpl' instead.
    $this->pagedata['tmpl'] = null;
    $this->pagedata['res_url'] = $this->app->res_url;
    $url = $this->show_bg_picture(1, $image_id);
    // default editor state: 240x158, no offset, no print items yet
    $this->pagedata['templateData'] = json_encode(array(
        'name' => '',
        'enable' => '1',
        'size' => array(
            'width' => '240',
            'height' => '158',
        ),
        'imgUrl' => $url,
        'dpi' => 96,
        'offset' => array(
            'x' => '0',
            'y' => '0',
        ),
        'ptItem' => array(),
    ));
    $this->pagedata['save_action'] = 'add_save';
    $this->singlepage('admin/printer/dly_printer_editor.html');
}
/**
* 添加快递单模版
* @param null
* @return null
*/
/**
 * 添加快递单模版
 * Persist a new express-sheet template posted from the editor, optionally
 * attaching a background image (either freshly uploaded or copied from an
 * existing template when duplicating).
 *
 * @return void (terminates the request through $this->end())
 */
public function add_save()
{
    $o = app::get('image')->model('image_attach');
    $this->begin('javascript:opener.finderGroup["' . $_POST['finder_id'] . '"].refresh();window.close();');
    if (!$_POST) {
        $this->end(false, app::get('express')->_('需要添加的信息不存在!'));
    }
    $tmpl_data = array();
    $tmpl_data['prt_tmpl_offsety'] = floatval($_POST['offset']['y']);
    $tmpl_data['prt_tmpl_offsetx'] = floatval($_POST['offset']['x']);
    $tmpl_data['shortcut'] = $_POST['enable'];
    $tmpl_data['prt_tmpl_title'] = $_POST['name'];
    $tmpl_data['prt_tmpl_height'] = $_POST['size']['height'];
    $tmpl_data['prt_tmpl_width'] = $_POST['size']['width'];
    // the editor posts booleans as the strings 'true'/'false'
    $ptItem = $_POST['ptItem'];
    foreach ($ptItem as $key => $val) {
        $ptItem[$key]['tilt'] = ($val['tilt'] == 'false') ? false : true;
        $ptItem[$key]['bold'] = ($val['bold'] == 'false') ? false : true;
    }
    $tmpl_data['prt_tmpl_data'] = json_encode($ptItem);
    $tpl_id = $this->model->insert($tmpl_data);
    if (!$tpl_id) {
        $this->end(false, app::get('express')->_('添加快递单模版失败!'));
    }
    // duplicating an existing template: reuse its background image id
    if (isset($_POST['prt_tmpl_id']) && $_POST['prt_tmpl_id'] != '') {
        $old_tpl_id = $_POST['prt_tmpl_id'];
        $aData = $o->getList('attach_id,image_id', array('target_id' => $old_tpl_id, 'target_type' => 'print_tmpl'));
        $_POST['tmp_bg'] = $aData[0]['image_id'];
    }
    if (isset($_POST['tmp_bg']) && $_POST['tmp_bg']) {
        // BUGFIX: the original referenced an undefined $attach_id here
        // (undefined-variable notice); for a newly inserted template there is
        // no existing attachment row, so always create a fresh one.
        $sdf = array(
            'attach_id' => '',
            'target_id' => $tpl_id,
            'target_type' => 'print_tmpl',
            'image_id' => $_POST['tmp_bg'],
            'last_modified' => time(),
        );
        if (!$o->save($sdf)) {
            $this->end(false, app::get('express')->_('添加快递单模版背景失败!'));
        }
    }
    $this->end(true, app::get('express')->_('添加快递单模版成功!'));
}
/**
* 修改快递单模版
* @param null
* @return null
*/
/**
 * 修改快递单模版
 * Save changes to an existing express-sheet template posted from the editor,
 * updating the template row and (if supplied) its background attachment.
 *
 * @return void (terminates the request through $this->end(); the same
 *               begin/end protocol as add_save — end() ends the response,
 *               as the other actions in this controller rely on)
 */
public function modify_save()
{
    $o = app::get('image')->model('image_attach');
    $this->begin('javascript:opener.finderGroup["' . $_POST['finder_id'] . '"].refresh();window.close();');
    if (!$_POST['prt_tmpl_id']) {
        $this->end(false, app::get('express')->_('要修改的快递单模版不存在!'));
    }
    $tmpl_data = array();
    $tmpl_data['prt_tmpl_id'] = $_POST['prt_tmpl_id'];
    $tmpl_data['prt_tmpl_offsety'] = floatval($_POST['offset']['y']);
    $tmpl_data['prt_tmpl_offsetx'] = floatval($_POST['offset']['x']);
    $tmpl_data['shortcut'] = $_POST['enable'];
    $tmpl_data['prt_tmpl_title'] = $_POST['name'];
    $tmpl_data['prt_tmpl_height'] = $_POST['size']['height'];
    $tmpl_data['prt_tmpl_width'] = $_POST['size']['width'];
    // the editor posts booleans as the strings 'true'/'false'
    $ptItem = $_POST['ptItem'];
    foreach ($ptItem as $key => $val) {
        $ptItem[$key]['tilt'] = ($val['tilt'] == 'false') ? false : true;
        $ptItem[$key]['bold'] = ($val['bold'] == 'false') ? false : true;
    }
    $tmpl_data['prt_tmpl_data'] = json_encode($ptItem);

    if (!$this->model->update($tmpl_data, array('prt_tmpl_id' => $_POST['prt_tmpl_id']))) {
        // BUGFIX: the original set $tpl_id = false and fell through to the
        // success message, leaving $attach_id undefined and reporting
        // "修改成功" even though nothing was saved.
        $this->end(false, app::get('express')->_('修改快递单模版失败!'));
    }
    $tpl_id = $_POST['prt_tmpl_id'];
    $aData = $o->getList('attach_id', array('target_id' => $tpl_id, 'target_type' => 'print_tmpl'));
    $attach_id = $aData[0]['attach_id'];

    if (isset($_POST['tmp_bg']) && $_POST['tmp_bg']) {
        // reuse the existing attachment row when there is one
        $sdf = array(
            'attach_id' => $attach_id ? $attach_id : '',
            'target_id' => $tpl_id,
            'target_type' => 'print_tmpl',
            'image_id' => $_POST['tmp_bg'],
            'last_modified' => time(),
        );
        if (!$o->save($sdf)) {
            $this->end(false, app::get('express')->_('修改快递单模版背景失败!'));
        }
    }
    $this->end(true, app::get('express')->_('修改快递单模版成功!'));
}
/**
* 显示编辑快递单模版的页面
* @param string 模版id
* @return null
*/
/**
 * 显示编辑快递单模版的页面
 * Load an existing template and show the editor with its saved state.
 *
 * @param string $tmpl_id 模版id
 * @return null
 */
public function edit_tmpl($tmpl_id)
{
$this->pagedata['tmpl'] = $this->model->dump($tmpl_id);
$this->pagedata['res_url'] = $this->app->res_url;
if($this->pagedata['tmpl']){
// background image is stored as an image_attach row keyed by template id
$aData = $this->o->getList('image_id',array('target_id' => $tmpl_id,'target_type' => 'print_tmpl'));
$image_id = $aData[0]['image_id'];
$this->_fontlist();
$url = $this->show_bg_picture(1,$image_id);
// the editor posts back to modify_save for existing templates
$this->pagedata['save_action'] = 'modify_save';
// rebuild the editor's JSON state from the stored template row
$this->pagedata['templateData'] = json_encode(array(
'name'=>$this->pagedata['tmpl']['prt_tmpl_title'],
'enable'=>($this->pagedata['tmpl']['shortcut']=='true')?'1':'0',
'size'=>array(
'width'=>$this->pagedata['tmpl']['prt_tmpl_width'],
'height'=>$this->pagedata['tmpl']['prt_tmpl_height'],
),
'imgUrl'=>$url,
'dpi'=>96,
'offset'=>array(
'x'=>$this->pagedata['tmpl']['prt_tmpl_offsetx'],
'y'=>$this->pagedata['tmpl']['prt_tmpl_offsety'],
),
'ptItem'=>json_decode($this->pagedata['tmpl']['prt_tmpl_data'],true),
));
$this->singlepage('admin/printer/dly_printer_editor.html');
}else{
echo "<div class='notice'>ERROR ID</div>";
}
}
/**
 * Open the editor with a copy of an existing template ("save as").
 * Identical to edit_tmpl except the form posts to add_save, so saving
 * creates a new template instead of overwriting $tmpl_id.
 *
 * @param string $tmpl_id id of the template to duplicate
 * @return null
 */
public function add_same($tmpl_id)
{
$this->pagedata['tmpl'] = $this->model->dump($tmpl_id);
$this->pagedata['res_url'] = $this->app->res_url;
// the original id is kept so add_save can copy the background attachment
$this->pagedata['tmpl_id'] = $tmpl_id;
if($this->pagedata['tmpl']){
//unset($this->pagedata['tmpl']['prt_tmpl_id']);
$aData = $this->o->getList('image_id',array('target_id' => $tmpl_id,'target_type' => 'print_tmpl'));
$image_id = $aData[0]['image_id'];
$this->_fontlist();
if($image_id){
$this->pagedata['image_id'] = $image_id;
}
$url = $this->show_bg_picture(1,$image_id);
$this->pagedata['tmpl_bg'] = $url;
// posting to add_save makes this a duplicate rather than an edit
$this->pagedata['save_action'] = 'add_save';
$tmpl = array(
'name'=>$this->pagedata['tmpl']['prt_tmpl_title'],
'enable'=>($this->pagedata['tmpl']['shortcut']=='true')?'1':'0',
'size'=>array(
'width'=>$this->pagedata['tmpl']['prt_tmpl_width'],
'height'=>$this->pagedata['tmpl']['prt_tmpl_height'],
),
'imgUrl'=>$url,
'dpi'=>96,
'offset'=>array(
'x'=>$this->pagedata['tmpl']['prt_tmpl_offsetx'],
'y'=>$this->pagedata['tmpl']['prt_tmpl_offsety'],
),
'ptItem'=>json_decode($this->pagedata['tmpl']['prt_tmpl_data'],true),
);
$this->pagedata['templateData'] = json_encode($tmpl);
$this->singlepage('admin/printer/dly_printer_editor.html');
}else{
echo "<div class='notice'>ERROR ID</div>";
}
}
/**
 * Render the print-preview page for the template currently in the editor.
 * The background image id comes either from a freshly uploaded tmp_bg or
 * from the stored attachment of an existing template.
 */
function print_test(){
$this->pagedata['dpi'] = 96;
$o = app::get('image')->model('image_attach');
if($_POST['tmp_bg']){
// unsaved background chosen in the editor takes precedence
$this->pagedata['bg_id'] = $_POST['tmp_bg'];
}else if($_POST['prt_tmpl_id']){
// fall back to the background already attached to the template
$tpl_id = $_POST['prt_tmpl_id'];
$aData = $o->getList('image_id',array('target_id' => $tpl_id,'target_type' => 'print_tmpl'));
$this->pagedata['bg_id'] = $aData[0]['image_id'];
}
$this->pagedata['res_url'] = $this->app->res_url;
$this->display('admin/printer/dly_print_test.html');
}
/**
 * Show the background-image upload dialog for the template editor.
 *
 * @param int $printer_id id of the printer/template being edited (0 = new)
 */
public function upload_bg($printer_id=0){
$this->pagedata['dly_printer_id'] = $printer_id;
$this->display('admin/printer/dly_printer_uploadbg.html');
}
// Show the .dtp template-package import dialog (upload handled by do_upload_pkg).
function import(){
$this->display('admin/printer/dly_printer_import.html');
}
/**
 * Callback after a background image was chosen: push the resolved image URL
 * and image id back into the opener's editor via an inline script.
 *
 * @return void (echoes a <script> fragment)
 */
public function do_upload_bg()
{
    $url = $this->show_bg_picture(1, $_POST['background']);
    // SECURITY FIX: $_POST['background'] comes straight from the client and
    // was previously echoed raw inside quotes in the inline script (XSS /
    // script breakout). json_encode produces a safely quoted JS string.
    echo '<script>
        window.pt.replaceBackground(' . json_encode($url) . ');
        window.pt.setBgID(' . json_encode($_POST['background']) . ');
        window.pt.dlg.close();
    </script>';
}
/**
 * Export a template as a shopex .dtp package (a tar with a serialized 'info'
 * entry plus an optional background.jpg) and stream it as a download.
 *
 * @param string $tmpl_id id of the template to export
 */
function download($tmpl_id){
$tmpl = $this->model->dump($tmpl_id);
$tar = kernel::single('base_tar');
// the full template row is serialized into the 'info' entry
$tar->addFile('info',serialize($tmpl));
$aData = $this->o->getList('image_id',array('target_id' => $tmpl_id,'target_type' => 'print_tmpl'));
$image_id = $aData[0]['image_id'];
if($bg = $this->show_bg_picture(1,$image_id)){
$tar->addFile('background.jpg',file_get_contents($bg));
}
#kernel::single('base_session')->close();
// download filename uses the template title, transcoded to the local charset
$charset = kernel::single('base_charset');
$name = $charset->utf2local($tmpl['prt_tmpl_title'],'zh');
@set_time_limit(0);
header("Expires: Mon, 26 Jul 1997 05:00:00 GMT");
header('Content-type: application/octet-stream');
header('Content-type: application/force-download');
header('Content-Disposition: attachment; filename="'.$name.'.dtp"');
$tar->getTar('output');
}
/**
 * Callback after a background upload completed: notify the editor window of
 * the new image via an inline script, or print the upload error.
 *
 * @param mixed  $rs   truthy upload result / storage id, falsy on failure
 * @param string $file uploaded file identifier (or error text on failure)
 */
public function done_upload_bg($rs,$file){
if($rs){
// NOTE(review): $rs/$file are interpolated unescaped into the URL and the
// inline script below — confirm they are server-generated only; if they can
// carry client input this needs escaping like do_upload_bg.
$url = 'index.php?app=express&ctl=admin_delivery_printer&act=show_bg_picture&p[0]='.$rs.'&p[1]='.$file;
echo '<script>
if($("dly_printer_bg")){
$("dly_printer_bg").value = "'.$file.'";
}else{
new Element("input",{id:"dly_printer_bg",type:"hidden",name:"tmp_bg",value:"__none__"}).inject("dly_printer_form");
}
window.printer_editor.dlg.close();
window.printer_editor.setPicture("'.$url.'");
</script>';
}else{
echo 'Error on upload:'.$file;
}
}
// Stream the raw bytes of a stored background image to the client and stop.
public function show_picture($mode, $image_id)
{
readfile($this->show_bg_picture($mode, $image_id));exit;
}
/**
 * Resolve a stored image id to its path via the storage service.
 *
 * @param mixed  $mode kept for interface compatibility; not used here
 * @param string $file image id understood by base_storager
 * @return string path/url of the image
 */
public function show_bg_picture($mode, $file)
{
    return kernel::single("base_storager")->image_path($file);
}
/**
 * Import an express-sheet template from an uploaded shopex .dtp package
 * ($_FILES['package']): unpack the tar, insert the serialized template row,
 * and attach the bundled background image if present.
 *
 * @return void (terminates the request through $this->end())
 */
function do_upload_pkg()
{
    $this->begin();
    $file = $_FILES['package'];
    // extension including the dot, e.g. ".dtp"
    $file_name = substr($file['name'], strrpos($file['name'], '.'));
    $extname = strtolower($file_name);
    $tar = kernel::single('base_tar');
    $target = DATA_DIR . '/tmp';
    if ($extname == '.dtp') {
        if ($tar->openTAR($file['tmp_name'], $target) && $tar->containsFile('info')) {
            if (!($info = unserialize($tar->getContents($tar->getFile('info'))))) {
                $this->end(false, app::get('express')->_('无法读取结构信息,模板包可能已损坏!'));
            }
            // force a fresh primary key so the import never collides
            $info['prt_tmpl_id'] = '';
            if ($tpl_id = $this->model->insert($info)) {
                if ($tar->containsFile('background.jpg')) { // 包含背景图
                    $image = app::get('image')->model('image');
                    // BUGFIX: the original mixed $image_id and $Image_id (PHP
                    // variables are case-sensitive), so gen_id()'s result was
                    // discarded and an undefined variable was passed to store().
                    $image_id = $image->gen_id();
                    file_put_contents(DATA_DIR . '/' . $tpl_id . '.jpg', $tar->getContents($tar->getFile('background.jpg')));
                    $image_id = $image->store(DATA_DIR . '/' . $tpl_id . '.jpg', $image_id);
                    unlink(DATA_DIR . '/' . $tpl_id . '.jpg');
                    $sdf = array(
                        'target_id' => $tpl_id,
                        'target_type' => 'print_tmpl',
                        'image_id' => $image_id,
                        'last_modified' => time(),
                    );
                    if (!($this->o->save($sdf))) {
                        $this->end(false, app::get('express')->_('模板包中图片有误!'));
                    } else {
                        $this->end(true, app::get('express')->_('上传成功!'));
                    }
                } else {
                    // BUGFIX: a valid package without a background image used
                    // to fall through without sending any response at all.
                    $this->end(true, app::get('express')->_('上传成功!'));
                }
            }
        } else {
            $this->end(false, app::get('express')->_('无法解压缩,模板包可能已损坏!'));
        }
    } else {
        $this->end(false, app::get('express')->_('必须是shopex快递单模板包(.dtp)'));
    }
}
/**
 * Build the font and print-element option lists used by the template editor
 * and expose them as JSON in $this->pagedata['printData'].
 *
 * @return void
 */
function _fontlist()
{
    // fonts always offered by the editor
    $default_font = array(
        array('label' => '宋体', 'data' => '宋体'),
        array('label' => '黑体', 'data' => '黑体'),
        array('label' => 'Arial', 'data' => 'Arial'),
        array('label' => 'Tahoma', 'data' => 'Tahoma'),
        array('label' => 'Times New Roman', 'data' => 'Times New Roman'),
        array('label' => 'Vrinda', 'data' => 'Vrinda'),
        array('label' => 'Verdana', 'data' => 'Verdana'),
        array('label' => 'Serif', 'data' => 'Serif'),
        array('label' => 'Cursive', 'data' => 'Cursive'),
        array('label' => 'Fantasy', 'data' => 'Fantasy'),
        array('label' => 'Sans-Serif', 'data' => 'Sans-Serif'),
    );
    foreach ($default_font as $ft_item) {
        $this->pagedata['printData']['fontItem'][] = $ft_item;
    }
    // extra fonts configured as "font1|font2|..."; ROBUSTNESS FIX: guard with
    // defined() so a missing PRINTER_FONTS constant no longer raises a notice
    // (fatal error on PHP 8).
    if (defined('PRINTER_FONTS') && PRINTER_FONTS) {
        $font = explode('|', PRINTER_FONTS);
        foreach ($font as $ft_item) {
            $this->pagedata['printData']['fontItem'][] = array(
                'label' => $ft_item,
                'data' => $ft_item,
            );
        }
    }
    // printable data elements exposed by the template model
    $elements = $this->model->getElements();
    foreach ((array)$elements as $key => $ele_item) {
        $this->pagedata['printData']['printItem'][] = array(
            'label' => $ele_item,
            'data' => $key,
        );
    }
    $this->pagedata['printData'] = json_encode($this->pagedata['printData']);
}
}
| liuguogen/Ecstore | app/express/controller/admin/delivery/printer.php | PHP | apache-2.0 | 30,570 |
/**
* Copyright (C) 2014 Microsoft Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.microsoft.reef.examples.nggroup.bgd.data;
import com.microsoft.reef.examples.nggroup.bgd.math.Vector;
import java.io.Serializable;
/**
 * Base interface for Examples for linear models.
 */
public interface Example extends Serializable {

  /**
   * Access to the label.
   *
   * @return the label
   */
  // IDIOM: interface methods are implicitly public abstract; the redundant
  // modifiers were dropped (Java Language Specification 9.4).
  double getLabel();

  /**
   * Computes the prediction for this Example, given the model w.
   * <p/>
   * w.dot(this.getFeatures())
   *
   * @param w the model
   * @return the prediction for this Example, given the model w.
   */
  double predict(Vector w);

  /**
   * Adds the current example's gradient to the gradientVector, assuming that
   * the gradient with respect to the prediction is gradient.
   *
   * @param gradientVector the vector to accumulate the gradient into
   * @param gradient the gradient with respect to the prediction
   */
  void addToGradient(Vector gradientVector, double gradient);
} | Microsoft-CISL/MLSS | BGD/src/main/java/com/microsoft/reef/examples/nggroup/bgd/data/Example.java | Java | apache-2.0 | 1,514 |
package Controller.Request;
import java.awt.Desktop;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.List;
import java.util.Properties;
import twitter4j.Paging;
import twitter4j.Status;
import twitter4j.TwitterException;
import twitter4j.TwitterFactory;
import twitter4j.User;
import twitter4j.auth.AccessToken;
import twitter4j.auth.RequestToken;
import Controller.GUIController;
import Model.AccountHandler;
import Model.DatabaseConnection;
import Model.TweetHandler;
import Model.TwitterConnection;
/**
 * Entry point for the requests the GUI issues against Twitter: drives the
 * interactive OAuth PIN login, persists the access token to
 * twitter4j.properties, and mirrors the user's timeline into a local database.
 */
public class RequestManager {
// OAuth tokens shared (statically) across the login flow.
public static RequestToken reqToken;
public static AccessToken accToken;
public GUIController guiController;
public AuthenticationRequest authRequest;
public SearchRequest searchRequest;
// Last timeline page fetched by getUserTimeLine().
public static List<Status> statuses;
/**
 * @param guiController controller driving the UI; stored for later use.
 */
public RequestManager(GUIController guiController) {
this.guiController = guiController;
initHandlers();
}
// Wires the singleton handlers into the static Request class.
private void initHandlers() {
Request.setAccountHandler(AccountHandler.getInstance());
Request.setTweetHandler(TweetHandler.getInstance());
}
/**
 * Runs the OAuth login: loads twitter4j.properties, and if no access token
 * is stored yet, walks the user through the PIN flow, saves the token back
 * to the properties file, and imports the user's timeline into SQLite.
 *
 * NOTE(review): always returns false, even on success — callers must not
 * rely on the return value. Exceptions are caught and only printed.
 *
 * @return false in all cases
 */
public boolean login(){
authRequest=AuthenticationRequest.getInstance();
File pFile = new File("twitter4j.properties");
InputStream inStream = null;
OutputStream outStream = null;
Properties properties = new Properties();
try {
// NOTE(review): Java passes references by value — the stream opened inside
// checkFileExistance is never assigned to this local inStream, so
// closeTheFile(inStream) below closes null and the real stream leaks.
checkFileExistance(pFile,inStream, properties);
closeTheFile(inStream);
checkConsumerKeyAndSecret(properties);
System.out.println("Consumer Key: " + properties.getProperty("oauth.consumerKey"));
System.out.println("Consumer Secret: " + properties.getProperty("oauth.consumerSecret"));
TwitterConnection.twitter = (new TwitterFactory()).getInstance();
TwitterConnection.setTwitter(TwitterConnection.twitter);
// Only run the PIN flow when no access token is stored yet.
if (properties.getProperty("oauth.accessToken") == null ||
properties.getProperty("oauth.accessTokenSecret") == null) {
TwitterConnection.setRequestToken(TwitterConnection.twitter);
BufferedReader buffReader = new BufferedReader(new InputStreamReader(System.in));
// Loop until the user supplies a PIN that yields an access token.
while(accToken== null){
System.out.println("Open the following URL to get a PIN to allow this application to access your account:");
System.out.println(reqToken.getAuthorizationURL());
gotoTokenURL(AuthenticationRequest.getLink(TwitterConnection.twitter));
accToken = AccountHandler.getAccesToken(accToken,buffReader);
printConsumerKeyAndSecret(properties);
// Persist the newly obtained token back into twitter4j.properties.
try {
outStream = new FileOutputStream(pFile);
properties.store(outStream, "twitter4j.properties");
outStream.close();
System.out.println("Stored new properties to file.");
} catch (IOException ioerror) {
System.out.println("Error: Could not save new properties.");
ioerror.printStackTrace();
System.exit(-1);
}
User user = TwitterConnection.twitter.verifyCredentials();
TwitterConnection.setCurrentUser(user);
// Import the user's timeline pages into the local database.
DatabaseConnection.dbConnector();
DatabaseConnection.createTable();
for(int i=1; i < 3 ;i++){//TODO later make 3, 16.
syncinsertValues(i);
}
}
}
DatabaseConnection.closeSQLite();
} catch (Exception e) {
e.printStackTrace();
}
return false;
}
// Serialized insert of one timeline page into the database.
private synchronized void syncinsertValues(int i) {
DatabaseConnection.insertValues(getUserTimeLine(i));
}
// Debug helper: print the consumer key/secret from the loaded properties.
public void printConsumerKeyAndSecret(Properties properties){
System.out.println("Consumer Key: " + properties.getProperty("oauth.consumerKey"));
System.out.println("Consumer Secret: " + properties.getProperty("oauth.consumerSecret"));
}
// Exits the process if the consumer key/secret are missing from properties.
public void checkConsumerKeyAndSecret(Properties properties){
if (properties.getProperty("oauth.consumerKey") == null ||
properties.getProperty("oauth.consumerSecret") == null) {
System.out.println("twitter4j.properties requires Consumer Key and Access Token Secret.");
System.exit(-1);
}
}
// Closes the given stream if non-null; see the pass-by-value note in login().
public void closeTheFile(InputStream inStream){
try{
if(inStream != null) inStream.close();
} catch (IOException ioerror) {
System.out.println("Error: Could not close twitter4j.properties.");
ioerror.printStackTrace();
}
}
/**
 * Loads the properties file, exiting the process if it does not exist.
 * NOTE(review): the stream assigned to the inStream parameter is not visible
 * to the caller (pass-by-value) and is never closed here — resource leak.
 */
public void checkFileExistance(File pFile, InputStream inStream,Properties properties) throws IOException{
if (pFile.exists()) {
inStream = new FileInputStream(pFile);
properties.load(inStream);
System.out.println("Properties file loaded.");
} else {
System.out.println("Error: twitter4j.properties file not found.");
System.exit(-1);
}
}
// Opens the OAuth authorization URL in the system browser.
public void gotoTokenURL(String accessTokenURL) {
try {
Desktop.getDesktop().browse(new URI(accessTokenURL));
} catch (UnsupportedOperationException uoerror) {
System.out.println("Encountered an unsupported operation exception.");
uoerror.printStackTrace();
} catch (IOException ioerror) {
System.out.println("Encountered an input/output exception.");
ioerror.printStackTrace();
} catch (URISyntaxException urierror) {
System.out.println("Encountered an URI syntax exception.");
urierror.printStackTrace();
}
}
/**
 * Fetches one page (200 statuses) of the authenticated user's timeline.
 * Also caches the result in the static 'statuses' field.
 *
 * @param i 1-based page number
 * @return the fetched statuses, or null if the Twitter call failed
 */
public static List<Status> getUserTimeLine(int i){
Paging page = new Paging (i, 200);//page number, number per page
try {
statuses=TwitterConnection.twitter.getUserTimeline(page);
return statuses;
} catch (TwitterException e) {
// TODO Auto-generated catch block
e.printStackTrace();
return null;
}
}
// Debug helper: print the text of each status to stdout.
public static void printTweets(List<Status> statues){
for(int i=0;i<statues.size();i++){
System.out.println(statues.get(i).getText());
}
}
} | TweetDeleter/TweetDeleterProject | src/Controller/Request/RequestManager.java | Java | apache-2.0 | 5,701 |
/*
Copyright The TestGrid Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Backing state for a test results table of a TestGrid dashboard tab.
// TestState() is updated and stored by test-focused update_server.py.
// Stored in GCS as "<test group name>".
// NOTE: Do NOT update this until you have updated the internal state.proto!
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.25.0
// protoc v3.11.4
// source: state.proto
package state
import (
config "github.com/GoogleCloudPlatform/testgrid/pb/config"
proto "github.com/golang/protobuf/proto"
timestamp "github.com/golang/protobuf/ptypes/timestamp"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// This is a compile-time assertion that a sufficiently up-to-date version
// of the legacy proto package is being used.
const _ = proto.ProtoPackageIsVersion4
type Property struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Property map[string]string `protobuf:"bytes,1,rep,name=property,proto3" json:"property,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
}
func (x *Property) Reset() {
*x = Property{}
if protoimpl.UnsafeEnabled {
mi := &file_state_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *Property) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Property) ProtoMessage() {}
func (x *Property) ProtoReflect() protoreflect.Message {
mi := &file_state_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Property.ProtoReflect.Descriptor instead.
func (*Property) Descriptor() ([]byte, []int) {
return file_state_proto_rawDescGZIP(), []int{0}
}
func (x *Property) GetProperty() map[string]string {
if x != nil {
return x.Property
}
return nil
}
// A metric and its values for each test cycle.
type Metric struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // Name of metric, such as duration
// Sparse encoding of values. Indices is a list of pairs of <index, count>
// that details columns with metric values. So given:
// Indices: [0, 2, 6, 4]
// Values: [0.1,0.2,6.1,6.2,6.3,6.4]
// Decoded 12-value equivalent is:
// [0.1, 0.2, nil, nil, nil, nil, 6.1, 6.2, 6.3, 6.4, nil, nil, ...]
Indices []int32 `protobuf:"varint,2,rep,packed,name=indices,proto3" json:"indices,omitempty"` // n=index of first value, n+1=count of filled values
Values []float64 `protobuf:"fixed64,3,rep,packed,name=values,proto3" json:"values,omitempty"` // only present for columns with a metric value
}
func (x *Metric) Reset() {
*x = Metric{}
if protoimpl.UnsafeEnabled {
mi := &file_state_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *Metric) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Metric) ProtoMessage() {}
func (x *Metric) ProtoReflect() protoreflect.Message {
mi := &file_state_proto_msgTypes[1]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Metric.ProtoReflect.Descriptor instead.
func (*Metric) Descriptor() ([]byte, []int) {
return file_state_proto_rawDescGZIP(), []int{1}
}
func (x *Metric) GetName() string {
if x != nil {
return x.Name
}
return ""
}
func (x *Metric) GetIndices() []int32 {
if x != nil {
return x.Indices
}
return nil
}
func (x *Metric) GetValues() []float64 {
if x != nil {
return x.Values
}
return nil
}
type UpdatePhaseData struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// The name for a part of the update cycle.
PhaseName string `protobuf:"bytes,1,opt,name=phase_name,json=phaseName,proto3" json:"phase_name,omitempty"`
// Time taken for a part of the update cycle, in seconds.
PhaseSeconds float64 `protobuf:"fixed64,2,opt,name=phase_seconds,json=phaseSeconds,proto3" json:"phase_seconds,omitempty"`
}
func (x *UpdatePhaseData) Reset() {
*x = UpdatePhaseData{}
if protoimpl.UnsafeEnabled {
mi := &file_state_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *UpdatePhaseData) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*UpdatePhaseData) ProtoMessage() {}
func (x *UpdatePhaseData) ProtoReflect() protoreflect.Message {
mi := &file_state_proto_msgTypes[2]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use UpdatePhaseData.ProtoReflect.Descriptor instead.
func (*UpdatePhaseData) Descriptor() ([]byte, []int) {
return file_state_proto_rawDescGZIP(), []int{2}
}
func (x *UpdatePhaseData) GetPhaseName() string {
if x != nil {
return x.PhaseName
}
return ""
}
func (x *UpdatePhaseData) GetPhaseSeconds() float64 {
if x != nil {
return x.PhaseSeconds
}
return 0
}
// Info on time taken to update test results during the last update cycle.
type UpdateInfo struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Metrics for how long parts of the update cycle take.
UpdatePhaseData []*UpdatePhaseData `protobuf:"bytes,1,rep,name=update_phase_data,json=updatePhaseData,proto3" json:"update_phase_data,omitempty"`
}
func (x *UpdateInfo) Reset() {
*x = UpdateInfo{}
if protoimpl.UnsafeEnabled {
mi := &file_state_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *UpdateInfo) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*UpdateInfo) ProtoMessage() {}
func (x *UpdateInfo) ProtoReflect() protoreflect.Message {
mi := &file_state_proto_msgTypes[3]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use UpdateInfo.ProtoReflect.Descriptor instead.
func (*UpdateInfo) Descriptor() ([]byte, []int) {
return file_state_proto_rawDescGZIP(), []int{3}
}
func (x *UpdateInfo) GetUpdatePhaseData() []*UpdatePhaseData {
if x != nil {
return x.UpdatePhaseData
}
return nil
}
// Info on a failing test row about the failure.
type AlertInfo struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Number of results that have failed.
FailCount int32 `protobuf:"varint,1,opt,name=fail_count,json=failCount,proto3" json:"fail_count,omitempty"`
// The build ID the test first failed at.
FailBuildId string `protobuf:"bytes,2,opt,name=fail_build_id,json=failBuildId,proto3" json:"fail_build_id,omitempty"`
// The time the test first failed at.
FailTime *timestamp.Timestamp `protobuf:"bytes,3,opt,name=fail_time,json=failTime,proto3" json:"fail_time,omitempty"`
// The test ID for the first test failure.
FailTestId string `protobuf:"bytes,4,opt,name=fail_test_id,json=failTestId,proto3" json:"fail_test_id,omitempty"`
// The build ID the test last passed at.
PassBuildId string `protobuf:"bytes,5,opt,name=pass_build_id,json=passBuildId,proto3" json:"pass_build_id,omitempty"`
// The time the test last passed at.
PassTime *timestamp.Timestamp `protobuf:"bytes,6,opt,name=pass_time,json=passTime,proto3" json:"pass_time,omitempty"`
// A snippet explaining the failure.
FailureMessage string `protobuf:"bytes,7,opt,name=failure_message,json=failureMessage,proto3" json:"failure_message,omitempty"`
// Link to search for build changes, internally a code-search link.
BuildLink string `protobuf:"bytes,8,opt,name=build_link,json=buildLink,proto3" json:"build_link,omitempty"`
// Text for option to search for build changes.
BuildLinkText string `protobuf:"bytes,9,opt,name=build_link_text,json=buildLinkText,proto3" json:"build_link_text,omitempty"`
// Text to display for link to search for build changes.
BuildUrlText string `protobuf:"bytes,10,opt,name=build_url_text,json=buildUrlText,proto3" json:"build_url_text,omitempty"`
// The build ID for the latest test failure. (Does not indicate the failure is
// 'over', just the latest test failure we found.)
LatestFailBuildId string `protobuf:"bytes,11,opt,name=latest_fail_build_id,json=latestFailBuildId,proto3" json:"latest_fail_build_id,omitempty"`
// The test ID for the latest test failure.
LatestFailTestId string `protobuf:"bytes,14,opt,name=latest_fail_test_id,json=latestFailTestId,proto3" json:"latest_fail_test_id,omitempty"`
// Maps (property name):(property value) for arbitrary alert properties.
Properties map[string]string `protobuf:"bytes,12,rep,name=properties,proto3" json:"properties,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
// A list of IDs for issue hotlists related to this failure.
HotlistIds []string `protobuf:"bytes,13,rep,name=hotlist_ids,json=hotlistIds,proto3" json:"hotlist_ids,omitempty"`
// Dynamic email list, route email alerts to these instead of the configured defaults.
EmailAddresses []string `protobuf:"bytes,15,rep,name=email_addresses,json=emailAddresses,proto3" json:"email_addresses,omitempty"`
}
// Reset restores x to the zero AlertInfo. When the protoimpl "unsafe"
// fast path is enabled, it also re-attaches the cached message info for
// this type (slot 4 of file_state_proto_msgTypes) to the fresh state.
func (x *AlertInfo) Reset() {
	*x = AlertInfo{}
	if protoimpl.UnsafeEnabled {
		mi := &file_state_proto_msgTypes[4]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String returns a human-readable rendering of the message via the
// protoimpl runtime.
func (x *AlertInfo) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *AlertInfo as a protobuf message type.
func (*AlertInfo) ProtoMessage() {}

// ProtoReflect returns a reflective view of the message. On the unsafe
// fast path it lazily stores the message info on first use; otherwise it
// falls back to the generic MessageOf wrapper.
func (x *AlertInfo) ProtoReflect() protoreflect.Message {
	mi := &file_state_proto_msgTypes[4]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Descriptor returns the gzipped file descriptor and this message's path
// within it.
//
// Deprecated: Use AlertInfo.ProtoReflect.Descriptor instead.
func (*AlertInfo) Descriptor() ([]byte, []int) {
	return file_state_proto_rawDescGZIP(), []int{4}
}

// GetFailCount returns the fail_count field; 0 when x is nil.
func (x *AlertInfo) GetFailCount() int32 {
	if x != nil {
		return x.FailCount
	}
	return 0
}

// GetFailBuildId returns the fail_build_id field; "" when x is nil.
func (x *AlertInfo) GetFailBuildId() string {
	if x != nil {
		return x.FailBuildId
	}
	return ""
}

// GetFailTime returns the fail_time field; nil when x is nil.
func (x *AlertInfo) GetFailTime() *timestamp.Timestamp {
	if x != nil {
		return x.FailTime
	}
	return nil
}

// GetFailTestId returns the fail_test_id field; "" when x is nil.
func (x *AlertInfo) GetFailTestId() string {
	if x != nil {
		return x.FailTestId
	}
	return ""
}

// GetPassBuildId returns the pass_build_id field; "" when x is nil.
func (x *AlertInfo) GetPassBuildId() string {
	if x != nil {
		return x.PassBuildId
	}
	return ""
}

// GetPassTime returns the pass_time field; nil when x is nil.
func (x *AlertInfo) GetPassTime() *timestamp.Timestamp {
	if x != nil {
		return x.PassTime
	}
	return nil
}

// GetFailureMessage returns the failure_message field; "" when x is nil.
func (x *AlertInfo) GetFailureMessage() string {
	if x != nil {
		return x.FailureMessage
	}
	return ""
}

// GetBuildLink returns the build_link field; "" when x is nil.
func (x *AlertInfo) GetBuildLink() string {
	if x != nil {
		return x.BuildLink
	}
	return ""
}

// GetBuildLinkText returns the build_link_text field; "" when x is nil.
func (x *AlertInfo) GetBuildLinkText() string {
	if x != nil {
		return x.BuildLinkText
	}
	return ""
}

// GetBuildUrlText returns the build_url_text field; "" when x is nil.
func (x *AlertInfo) GetBuildUrlText() string {
	if x != nil {
		return x.BuildUrlText
	}
	return ""
}

// GetLatestFailBuildId returns the latest_fail_build_id field; "" when x is nil.
func (x *AlertInfo) GetLatestFailBuildId() string {
	if x != nil {
		return x.LatestFailBuildId
	}
	return ""
}

// GetLatestFailTestId returns the latest_fail_test_id field; "" when x is nil.
func (x *AlertInfo) GetLatestFailTestId() string {
	if x != nil {
		return x.LatestFailTestId
	}
	return ""
}

// GetProperties returns the properties map; nil when x is nil.
func (x *AlertInfo) GetProperties() map[string]string {
	if x != nil {
		return x.Properties
	}
	return nil
}

// GetHotlistIds returns the hotlist_ids field; nil when x is nil.
func (x *AlertInfo) GetHotlistIds() []string {
	if x != nil {
		return x.HotlistIds
	}
	return nil
}

// GetEmailAddresses returns the email_addresses field; nil when x is nil.
func (x *AlertInfo) GetEmailAddresses() []string {
	if x != nil {
		return x.EmailAddresses
	}
	return nil
}
// Info on default test metadata for a dashboard tab.
// Fields mirror the TestMetadata message in state.proto; the protobuf
// struct tags carry the wire numbers and JSON names and must not change.
type TestMetadata struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	// Name of the test with associated test metadata.
	TestName string `protobuf:"bytes,1,opt,name=test_name,json=testName,proto3" json:"test_name,omitempty"`
	// Default bug component.
	BugComponent int32 `protobuf:"varint,2,opt,name=bug_component,json=bugComponent,proto3" json:"bug_component,omitempty"`
	// Default owner.
	Owner string `protobuf:"bytes,3,opt,name=owner,proto3" json:"owner,omitempty"`
	// Default list of cc's.
	Cc []string `protobuf:"bytes,4,rep,name=cc,proto3" json:"cc,omitempty"`
	// When present, only file a bug for failed tests with same error type.
	// Otherwise, always file a bug.
	ErrorType string `protobuf:"bytes,5,opt,name=error_type,json=errorType,proto3" json:"error_type,omitempty"`
}

// Reset restores x to the zero TestMetadata, re-attaching the cached
// message info (slot 5) when the unsafe fast path is enabled.
func (x *TestMetadata) Reset() {
	*x = TestMetadata{}
	if protoimpl.UnsafeEnabled {
		mi := &file_state_proto_msgTypes[5]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String returns a human-readable rendering of the message.
func (x *TestMetadata) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *TestMetadata as a protobuf message type.
func (*TestMetadata) ProtoMessage() {}

// ProtoReflect returns a reflective view of the message, lazily caching
// message info on the unsafe fast path.
func (x *TestMetadata) ProtoReflect() protoreflect.Message {
	mi := &file_state_proto_msgTypes[5]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Descriptor returns the gzipped file descriptor and this message's path
// within it.
//
// Deprecated: Use TestMetadata.ProtoReflect.Descriptor instead.
func (*TestMetadata) Descriptor() ([]byte, []int) {
	return file_state_proto_rawDescGZIP(), []int{5}
}

// GetTestName returns the test_name field; "" when x is nil.
func (x *TestMetadata) GetTestName() string {
	if x != nil {
		return x.TestName
	}
	return ""
}

// GetBugComponent returns the bug_component field; 0 when x is nil.
func (x *TestMetadata) GetBugComponent() int32 {
	if x != nil {
		return x.BugComponent
	}
	return 0
}

// GetOwner returns the owner field; "" when x is nil.
func (x *TestMetadata) GetOwner() string {
	if x != nil {
		return x.Owner
	}
	return ""
}

// GetCc returns the cc field; nil when x is nil.
func (x *TestMetadata) GetCc() []string {
	if x != nil {
		return x.Cc
	}
	return nil
}

// GetErrorType returns the error_type field; "" when x is nil.
func (x *TestMetadata) GetErrorType() string {
	if x != nil {
		return x.ErrorType
	}
	return ""
}
// TestGrid columns (also known as TestCycle).
// Fields mirror the Column message in state.proto; the protobuf struct
// tags carry the wire numbers and JSON names and must not change.
type Column struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	// Unique instance of the job, typically BUILD_NUMBER from prow or a guid
	Build string `protobuf:"bytes,1,opt,name=build,proto3" json:"build,omitempty"`
	// Name associated with the column (such as the run/invocation ID).No two
	// columns should have the same build_id and name. The name field allows the
	// display of multiple columns with the same build_id.
	Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
	// Milliseconds since start of epoch (python time.time() * 1000)
	Started float64 `protobuf:"fixed64,3,opt,name=started,proto3" json:"started,omitempty"`
	// Additional custom headers like commit, image used, etc.
	Extra []string `protobuf:"bytes,4,rep,name=extra,proto3" json:"extra,omitempty"`
	// Custom hotlist ids.
	HotlistIds string `protobuf:"bytes,5,opt,name=hotlist_ids,json=hotlistIds,proto3" json:"hotlist_ids,omitempty"`
	// An optional hint for the updater.
	Hint string `protobuf:"bytes,6,opt,name=hint,proto3" json:"hint,omitempty"`
	// Dynamic email list, route email alerts to these instead of the configured defaults.
	EmailAddresses []string `protobuf:"bytes,7,rep,name=email_addresses,json=emailAddresses,proto3" json:"email_addresses,omitempty"`
}

// Reset restores x to the zero Column, re-attaching the cached message
// info (slot 6) when the unsafe fast path is enabled.
func (x *Column) Reset() {
	*x = Column{}
	if protoimpl.UnsafeEnabled {
		mi := &file_state_proto_msgTypes[6]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String returns a human-readable rendering of the message.
func (x *Column) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *Column as a protobuf message type.
func (*Column) ProtoMessage() {}

// ProtoReflect returns a reflective view of the message, lazily caching
// message info on the unsafe fast path.
func (x *Column) ProtoReflect() protoreflect.Message {
	mi := &file_state_proto_msgTypes[6]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Descriptor returns the gzipped file descriptor and this message's path
// within it.
//
// Deprecated: Use Column.ProtoReflect.Descriptor instead.
func (*Column) Descriptor() ([]byte, []int) {
	return file_state_proto_rawDescGZIP(), []int{6}
}

// GetBuild returns the build field; "" when x is nil.
func (x *Column) GetBuild() string {
	if x != nil {
		return x.Build
	}
	return ""
}

// GetName returns the name field; "" when x is nil.
func (x *Column) GetName() string {
	if x != nil {
		return x.Name
	}
	return ""
}

// GetStarted returns the started field; 0 when x is nil.
func (x *Column) GetStarted() float64 {
	if x != nil {
		return x.Started
	}
	return 0
}

// GetExtra returns the extra field; nil when x is nil.
func (x *Column) GetExtra() []string {
	if x != nil {
		return x.Extra
	}
	return nil
}

// GetHotlistIds returns the hotlist_ids field; "" when x is nil.
// Note: on Column this is a single string, unlike AlertInfo.HotlistIds
// which is a repeated field.
func (x *Column) GetHotlistIds() string {
	if x != nil {
		return x.HotlistIds
	}
	return ""
}

// GetHint returns the hint field; "" when x is nil.
func (x *Column) GetHint() string {
	if x != nil {
		return x.Hint
	}
	return ""
}

// GetEmailAddresses returns the email_addresses field; nil when x is nil.
func (x *Column) GetEmailAddresses() []string {
	if x != nil {
		return x.EmailAddresses
	}
	return nil
}
// TestGrid rows (also known as TestRow)
// Fields mirror the Row message in state.proto; the protobuf struct tags
// carry the wire numbers and JSON names and must not change. Field 6 is
// reserved (see the descriptor), which is why the numbering skips it.
type Row struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	Name          string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // Display name, which might process id to append/filter info.
	Id            string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"`     // raw id for the row, such as the bazel target or golang package.
	// Results for this row, run-length encoded to reduce size/improve
	// performance. Thus (encoded -> decoded equivalent):
	// [0, 3, 5, 4] -> [0, 0, 0, 5, 5, 5, 5]
	// [5, 1] -> [5]
	// [1, 5] -> [1, 1, 1, 1, 1]
	// The decoded values are Result enums
	Results []int32 `protobuf:"varint,3,rep,packed,name=results,proto3" json:"results,omitempty"`
	// Test IDs for each test result in this test case.
	// Must be present on every column, regardless of status.
	CellIds []string `protobuf:"bytes,4,rep,name=cell_ids,json=cellIds,proto3" json:"cell_ids,omitempty"`
	// Short description of the result, displayed on mouseover.
	// Present for any column with a non-empty status (not NO_RESULT).
	Messages []string `protobuf:"bytes,5,rep,name=messages,proto3" json:"messages,omitempty"`
	// Names of metrics associated with this test case. Stored separate from
	// metric info (which may be omitted).
	Metric  []string  `protobuf:"bytes,7,rep,name=metric,proto3" json:"metric,omitempty"`
	Metrics []*Metric `protobuf:"bytes,8,rep,name=metrics,proto3" json:"metrics,omitempty"` // Numerical performance/timing data, etc.
	// Short string to place inside the cell (F for fail, etc)
	// Present for any column with a non-empty status (not NO_RESULT).
	Icons []string `protobuf:"bytes,9,rep,name=icons,proto3" json:"icons,omitempty"`
	// IDs for issues associated with this row.
	Issues []string `protobuf:"bytes,10,rep,name=issues,proto3" json:"issues,omitempty"`
	// An alert for the failure if there's a recent failure for this row.
	AlertInfo *AlertInfo `protobuf:"bytes,11,opt,name=alert_info,json=alertInfo,proto3" json:"alert_info,omitempty"`
	// Values of a user-defined property found in cells for this row.
	// TODO: Fold this into `properties` field.
	UserProperty []string `protobuf:"bytes,12,rep,name=user_property,json=userProperty,proto3" json:"user_property,omitempty"`
	// General key-value pairs associated with cells in this row.
	// Present for any column with a non-empty status (not NO_RESULT).
	Properties []*Property `protobuf:"bytes,13,rep,name=properties,proto3" json:"properties,omitempty"`
}

// Reset restores x to the zero Row, re-attaching the cached message info
// (slot 7) when the unsafe fast path is enabled.
func (x *Row) Reset() {
	*x = Row{}
	if protoimpl.UnsafeEnabled {
		mi := &file_state_proto_msgTypes[7]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String returns a human-readable rendering of the message.
func (x *Row) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *Row as a protobuf message type.
func (*Row) ProtoMessage() {}

// ProtoReflect returns a reflective view of the message, lazily caching
// message info on the unsafe fast path.
func (x *Row) ProtoReflect() protoreflect.Message {
	mi := &file_state_proto_msgTypes[7]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Descriptor returns the gzipped file descriptor and this message's path
// within it.
//
// Deprecated: Use Row.ProtoReflect.Descriptor instead.
func (*Row) Descriptor() ([]byte, []int) {
	return file_state_proto_rawDescGZIP(), []int{7}
}

// GetName returns the name field; "" when x is nil.
func (x *Row) GetName() string {
	if x != nil {
		return x.Name
	}
	return ""
}

// GetId returns the id field; "" when x is nil.
func (x *Row) GetId() string {
	if x != nil {
		return x.Id
	}
	return ""
}

// GetResults returns the (run-length encoded) results field; nil when x is nil.
func (x *Row) GetResults() []int32 {
	if x != nil {
		return x.Results
	}
	return nil
}

// GetCellIds returns the cell_ids field; nil when x is nil.
func (x *Row) GetCellIds() []string {
	if x != nil {
		return x.CellIds
	}
	return nil
}

// GetMessages returns the messages field; nil when x is nil.
func (x *Row) GetMessages() []string {
	if x != nil {
		return x.Messages
	}
	return nil
}

// GetMetric returns the metric (names) field; nil when x is nil.
func (x *Row) GetMetric() []string {
	if x != nil {
		return x.Metric
	}
	return nil
}

// GetMetrics returns the metrics field; nil when x is nil.
func (x *Row) GetMetrics() []*Metric {
	if x != nil {
		return x.Metrics
	}
	return nil
}

// GetIcons returns the icons field; nil when x is nil.
func (x *Row) GetIcons() []string {
	if x != nil {
		return x.Icons
	}
	return nil
}

// GetIssues returns the issues field; nil when x is nil.
func (x *Row) GetIssues() []string {
	if x != nil {
		return x.Issues
	}
	return nil
}

// GetAlertInfo returns the alert_info field; nil when x is nil.
func (x *Row) GetAlertInfo() *AlertInfo {
	if x != nil {
		return x.AlertInfo
	}
	return nil
}

// GetUserProperty returns the user_property field; nil when x is nil.
func (x *Row) GetUserProperty() []string {
	if x != nil {
		return x.UserProperty
	}
	return nil
}

// GetProperties returns the properties field; nil when x is nil.
func (x *Row) GetProperties() []*Property {
	if x != nil {
		return x.Properties
	}
	return nil
}
// A single table of test results backing a dashboard tab.
// Fields mirror the Grid message in state.proto; the protobuf struct tags
// carry the wire numbers and JSON names and must not change. Fields 3, 5,
// and 7 are reserved (see the descriptor), which is why the numbering has
// gaps.
type Grid struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	// A cycle of test results, not including the results. In the TestGrid client,
	// the cycles define the columns.
	Columns []*Column `protobuf:"bytes,1,rep,name=columns,proto3" json:"columns,omitempty"`
	// A test case with test results. In the TestGrid client, the cases define the
	// rows (and the results define the individual cells).
	Rows []*Row `protobuf:"bytes,2,rep,name=rows,proto3" json:"rows,omitempty"`
	// The latest configuration used to generate this test group.
	Config *config.TestGroup `protobuf:"bytes,4,opt,name=config,proto3" json:"config,omitempty"`
	// Seconds since epoch for last time this cycle was updated.
	LastTimeUpdated float64 `protobuf:"fixed64,6,opt,name=last_time_updated,json=lastTimeUpdated,proto3" json:"last_time_updated,omitempty"`
	// Stored info on previous timing for parts of the update cycle.
	UpdateInfo []*UpdateInfo `protobuf:"bytes,8,rep,name=update_info,json=updateInfo,proto3" json:"update_info,omitempty"`
	// Stored info on default test metadata.
	TestMetadata []*TestMetadata `protobuf:"bytes,9,rep,name=test_metadata,json=testMetadata,proto3" json:"test_metadata,omitempty"`
	// Clusters of failures for a TestResultTable instance.
	Cluster []*Cluster `protobuf:"bytes,10,rep,name=cluster,proto3" json:"cluster,omitempty"`
	// Most recent timestamp that clusters have processed.
	MostRecentClusterTimestamp float64 `protobuf:"fixed64,11,opt,name=most_recent_cluster_timestamp,json=mostRecentClusterTimestamp,proto3" json:"most_recent_cluster_timestamp,omitempty"`
}

// Reset restores x to the zero Grid, re-attaching the cached message info
// (slot 8) when the unsafe fast path is enabled.
func (x *Grid) Reset() {
	*x = Grid{}
	if protoimpl.UnsafeEnabled {
		mi := &file_state_proto_msgTypes[8]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String returns a human-readable rendering of the message.
func (x *Grid) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *Grid as a protobuf message type.
func (*Grid) ProtoMessage() {}

// ProtoReflect returns a reflective view of the message, lazily caching
// message info on the unsafe fast path.
func (x *Grid) ProtoReflect() protoreflect.Message {
	mi := &file_state_proto_msgTypes[8]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Descriptor returns the gzipped file descriptor and this message's path
// within it.
//
// Deprecated: Use Grid.ProtoReflect.Descriptor instead.
func (*Grid) Descriptor() ([]byte, []int) {
	return file_state_proto_rawDescGZIP(), []int{8}
}

// GetColumns returns the columns field; nil when x is nil.
func (x *Grid) GetColumns() []*Column {
	if x != nil {
		return x.Columns
	}
	return nil
}

// GetRows returns the rows field; nil when x is nil.
func (x *Grid) GetRows() []*Row {
	if x != nil {
		return x.Rows
	}
	return nil
}

// GetConfig returns the config field; nil when x is nil.
func (x *Grid) GetConfig() *config.TestGroup {
	if x != nil {
		return x.Config
	}
	return nil
}

// GetLastTimeUpdated returns the last_time_updated field; 0 when x is nil.
func (x *Grid) GetLastTimeUpdated() float64 {
	if x != nil {
		return x.LastTimeUpdated
	}
	return 0
}

// GetUpdateInfo returns the update_info field; nil when x is nil.
func (x *Grid) GetUpdateInfo() []*UpdateInfo {
	if x != nil {
		return x.UpdateInfo
	}
	return nil
}

// GetTestMetadata returns the test_metadata field; nil when x is nil.
func (x *Grid) GetTestMetadata() []*TestMetadata {
	if x != nil {
		return x.TestMetadata
	}
	return nil
}

// GetCluster returns the cluster field; nil when x is nil.
func (x *Grid) GetCluster() []*Cluster {
	if x != nil {
		return x.Cluster
	}
	return nil
}

// GetMostRecentClusterTimestamp returns the most_recent_cluster_timestamp
// field; 0 when x is nil.
func (x *Grid) GetMostRecentClusterTimestamp() float64 {
	if x != nil {
		return x.MostRecentClusterTimestamp
	}
	return 0
}
// A cluster of failures grouped by test status and message for a test results
// table.
// Fields mirror the Cluster message in state.proto; the protobuf struct
// tags carry the wire numbers and JSON names and must not change.
type Cluster struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	// Test status cluster grouped by.
	TestStatus int32 `protobuf:"varint,1,opt,name=test_status,json=testStatus,proto3" json:"test_status,omitempty"`
	// Error message or testFailureClassification string cluster grouped by.
	Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
	// ClusterRows that belong to this cluster.
	ClusterRow []*ClusterRow `protobuf:"bytes,3,rep,name=cluster_row,json=clusterRow,proto3" json:"cluster_row,omitempty"`
}

// Reset restores x to the zero Cluster, re-attaching the cached message
// info (slot 9) when the unsafe fast path is enabled.
func (x *Cluster) Reset() {
	*x = Cluster{}
	if protoimpl.UnsafeEnabled {
		mi := &file_state_proto_msgTypes[9]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String returns a human-readable rendering of the message.
func (x *Cluster) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *Cluster as a protobuf message type.
func (*Cluster) ProtoMessage() {}

// ProtoReflect returns a reflective view of the message, lazily caching
// message info on the unsafe fast path.
func (x *Cluster) ProtoReflect() protoreflect.Message {
	mi := &file_state_proto_msgTypes[9]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Descriptor returns the gzipped file descriptor and this message's path
// within it.
//
// Deprecated: Use Cluster.ProtoReflect.Descriptor instead.
func (*Cluster) Descriptor() ([]byte, []int) {
	return file_state_proto_rawDescGZIP(), []int{9}
}

// GetTestStatus returns the test_status field; 0 when x is nil.
func (x *Cluster) GetTestStatus() int32 {
	if x != nil {
		return x.TestStatus
	}
	return 0
}

// GetMessage returns the message field; "" when x is nil.
func (x *Cluster) GetMessage() string {
	if x != nil {
		return x.Message
	}
	return ""
}

// GetClusterRow returns the cluster_row field; nil when x is nil.
func (x *Cluster) GetClusterRow() []*ClusterRow {
	if x != nil {
		return x.ClusterRow
	}
	return nil
}
// Cells in a TestRow that belong to a specific Cluster.
// Fields mirror the ClusterRow message in state.proto; the protobuf
// struct tags carry the wire numbers and JSON names and must not change.
type ClusterRow struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	// Name of TestRow.
	DisplayName string `protobuf:"bytes,1,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"`
	// Index within row that belongs to Cluster (refer to columns of the row).
	Index []int32 `protobuf:"varint,2,rep,packed,name=index,proto3" json:"index,omitempty"`
}

// Reset restores x to the zero ClusterRow, re-attaching the cached
// message info (slot 10) when the unsafe fast path is enabled.
func (x *ClusterRow) Reset() {
	*x = ClusterRow{}
	if protoimpl.UnsafeEnabled {
		mi := &file_state_proto_msgTypes[10]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String returns a human-readable rendering of the message.
func (x *ClusterRow) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *ClusterRow as a protobuf message type.
func (*ClusterRow) ProtoMessage() {}

// ProtoReflect returns a reflective view of the message, lazily caching
// message info on the unsafe fast path.
func (x *ClusterRow) ProtoReflect() protoreflect.Message {
	mi := &file_state_proto_msgTypes[10]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Descriptor returns the gzipped file descriptor and this message's path
// within it.
//
// Deprecated: Use ClusterRow.ProtoReflect.Descriptor instead.
func (*ClusterRow) Descriptor() ([]byte, []int) {
	return file_state_proto_rawDescGZIP(), []int{10}
}

// GetDisplayName returns the display_name field; "" when x is nil.
func (x *ClusterRow) GetDisplayName() string {
	if x != nil {
		return x.DisplayName
	}
	return ""
}

// GetIndex returns the index field; nil when x is nil.
func (x *ClusterRow) GetIndex() []int32 {
	if x != nil {
		return x.Index
	}
	return nil
}
// File_state_proto is the compiled FileDescriptor for state.proto,
// populated at init time by the generated registration code.
var File_state_proto protoreflect.FileDescriptor
var file_state_proto_rawDesc = []byte{
0x0a, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74,
0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x16,
0x70, 0x62, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67,
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x7c, 0x0a, 0x08, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72,
0x74, 0x79, 0x12, 0x33, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x18, 0x01,
0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x2e,
0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x70,
0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x1a, 0x3b, 0x0a, 0x0d, 0x50, 0x72, 0x6f, 0x70, 0x65,
0x72, 0x74, 0x79, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18,
0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61,
0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
0x3a, 0x02, 0x38, 0x01, 0x22, 0x4e, 0x0a, 0x06, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, 0x12,
0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61,
0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x69, 0x6e, 0x64, 0x69, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20,
0x03, 0x28, 0x05, 0x52, 0x07, 0x69, 0x6e, 0x64, 0x69, 0x63, 0x65, 0x73, 0x12, 0x16, 0x0a, 0x06,
0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x01, 0x52, 0x06, 0x76, 0x61,
0x6c, 0x75, 0x65, 0x73, 0x22, 0x55, 0x0a, 0x0f, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x50, 0x68,
0x61, 0x73, 0x65, 0x44, 0x61, 0x74, 0x61, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x68, 0x61, 0x73, 0x65,
0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x68, 0x61,
0x73, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x68, 0x61, 0x73, 0x65, 0x5f,
0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0c, 0x70,
0x68, 0x61, 0x73, 0x65, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x22, 0x4a, 0x0a, 0x0a, 0x55,
0x70, 0x64, 0x61, 0x74, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x3c, 0x0a, 0x11, 0x75, 0x70, 0x64,
0x61, 0x74, 0x65, 0x5f, 0x70, 0x68, 0x61, 0x73, 0x65, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01,
0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x50, 0x68, 0x61,
0x73, 0x65, 0x44, 0x61, 0x74, 0x61, 0x52, 0x0f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x50, 0x68,
0x61, 0x73, 0x65, 0x44, 0x61, 0x74, 0x61, 0x22, 0xc1, 0x05, 0x0a, 0x09, 0x41, 0x6c, 0x65, 0x72,
0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1d, 0x0a, 0x0a, 0x66, 0x61, 0x69, 0x6c, 0x5f, 0x63, 0x6f,
0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x66, 0x61, 0x69, 0x6c, 0x43,
0x6f, 0x75, 0x6e, 0x74, 0x12, 0x22, 0x0a, 0x0d, 0x66, 0x61, 0x69, 0x6c, 0x5f, 0x62, 0x75, 0x69,
0x6c, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x66, 0x61, 0x69,
0x6c, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x49, 0x64, 0x12, 0x37, 0x0a, 0x09, 0x66, 0x61, 0x69, 0x6c,
0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f,
0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69,
0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x08, 0x66, 0x61, 0x69, 0x6c, 0x54, 0x69, 0x6d,
0x65, 0x12, 0x20, 0x0a, 0x0c, 0x66, 0x61, 0x69, 0x6c, 0x5f, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x69,
0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x66, 0x61, 0x69, 0x6c, 0x54, 0x65, 0x73,
0x74, 0x49, 0x64, 0x12, 0x22, 0x0a, 0x0d, 0x70, 0x61, 0x73, 0x73, 0x5f, 0x62, 0x75, 0x69, 0x6c,
0x64, 0x5f, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x70, 0x61, 0x73, 0x73,
0x42, 0x75, 0x69, 0x6c, 0x64, 0x49, 0x64, 0x12, 0x37, 0x0a, 0x09, 0x70, 0x61, 0x73, 0x73, 0x5f,
0x74, 0x69, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f,
0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d,
0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x08, 0x70, 0x61, 0x73, 0x73, 0x54, 0x69, 0x6d, 0x65,
0x12, 0x27, 0x0a, 0x0f, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x5f, 0x6d, 0x65, 0x73, 0x73,
0x61, 0x67, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x66, 0x61, 0x69, 0x6c, 0x75,
0x72, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x62, 0x75, 0x69,
0x6c, 0x64, 0x5f, 0x6c, 0x69, 0x6e, 0x6b, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x62,
0x75, 0x69, 0x6c, 0x64, 0x4c, 0x69, 0x6e, 0x6b, 0x12, 0x26, 0x0a, 0x0f, 0x62, 0x75, 0x69, 0x6c,
0x64, 0x5f, 0x6c, 0x69, 0x6e, 0x6b, 0x5f, 0x74, 0x65, 0x78, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28,
0x09, 0x52, 0x0d, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x4c, 0x69, 0x6e, 0x6b, 0x54, 0x65, 0x78, 0x74,
0x12, 0x24, 0x0a, 0x0e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x5f, 0x75, 0x72, 0x6c, 0x5f, 0x74, 0x65,
0x78, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x55,
0x72, 0x6c, 0x54, 0x65, 0x78, 0x74, 0x12, 0x2f, 0x0a, 0x14, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x74,
0x5f, 0x66, 0x61, 0x69, 0x6c, 0x5f, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x0b,
0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x46, 0x61, 0x69, 0x6c,
0x42, 0x75, 0x69, 0x6c, 0x64, 0x49, 0x64, 0x12, 0x2d, 0x0a, 0x13, 0x6c, 0x61, 0x74, 0x65, 0x73,
0x74, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x5f, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x0e,
0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x46, 0x61, 0x69, 0x6c,
0x54, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x3a, 0x0a, 0x0a, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72,
0x74, 0x69, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x41, 0x6c, 0x65,
0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65,
0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69,
0x65, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x68, 0x6f, 0x74, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x69, 0x64,
0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x68, 0x6f, 0x74, 0x6c, 0x69, 0x73, 0x74,
0x49, 0x64, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x5f, 0x61, 0x64, 0x64,
0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0e, 0x65, 0x6d,
0x61, 0x69, 0x6c, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x1a, 0x3d, 0x0a, 0x0f,
0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12,
0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65,
0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x95, 0x01, 0x0a, 0x0c,
0x54, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1b, 0x0a, 0x09,
0x74, 0x65, 0x73, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
0x08, 0x74, 0x65, 0x73, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x62, 0x75, 0x67,
0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05,
0x52, 0x0c, 0x62, 0x75, 0x67, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x12, 0x14,
0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6f,
0x77, 0x6e, 0x65, 0x72, 0x12, 0x0e, 0x0a, 0x02, 0x63, 0x63, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09,
0x52, 0x02, 0x63, 0x63, 0x12, 0x1d, 0x0a, 0x0a, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x74, 0x79,
0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x54,
0x79, 0x70, 0x65, 0x22, 0xc0, 0x01, 0x0a, 0x06, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x12, 0x14,
0x0a, 0x05, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x62,
0x75, 0x69, 0x6c, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01,
0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x74, 0x61, 0x72,
0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 0x73, 0x74, 0x61, 0x72, 0x74,
0x65, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x78, 0x74, 0x72, 0x61, 0x18, 0x04, 0x20, 0x03, 0x28,
0x09, 0x52, 0x05, 0x65, 0x78, 0x74, 0x72, 0x61, 0x12, 0x1f, 0x0a, 0x0b, 0x68, 0x6f, 0x74, 0x6c,
0x69, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x68,
0x6f, 0x74, 0x6c, 0x69, 0x73, 0x74, 0x49, 0x64, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x69, 0x6e,
0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x69, 0x6e, 0x74, 0x12, 0x27, 0x0a,
0x0f, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73,
0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0e, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x41, 0x64, 0x64,
0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x22, 0xe4, 0x02, 0x0a, 0x03, 0x52, 0x6f, 0x77, 0x12, 0x12,
0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61,
0x6d, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02,
0x69, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x03, 0x20,
0x03, 0x28, 0x05, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x19, 0x0a, 0x08,
0x63, 0x65, 0x6c, 0x6c, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07,
0x63, 0x65, 0x6c, 0x6c, 0x49, 0x64, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x6d, 0x65, 0x73, 0x73, 0x61,
0x67, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x6d, 0x65, 0x73, 0x73, 0x61,
0x67, 0x65, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x18, 0x07, 0x20,
0x03, 0x28, 0x09, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, 0x21, 0x0a, 0x07, 0x6d,
0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x07, 0x2e, 0x4d,
0x65, 0x74, 0x72, 0x69, 0x63, 0x52, 0x07, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x12, 0x14,
0x0a, 0x05, 0x69, 0x63, 0x6f, 0x6e, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x69,
0x63, 0x6f, 0x6e, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x69, 0x73, 0x73, 0x75, 0x65, 0x73, 0x18, 0x0a,
0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x69, 0x73, 0x73, 0x75, 0x65, 0x73, 0x12, 0x29, 0x0a, 0x0a,
0x61, 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b,
0x32, 0x0a, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x09, 0x61, 0x6c,
0x65, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x23, 0x0a, 0x0d, 0x75, 0x73, 0x65, 0x72, 0x5f,
0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c,
0x75, 0x73, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x12, 0x29, 0x0a, 0x0a,
0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b,
0x32, 0x09, 0x2e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x52, 0x0a, 0x70, 0x72, 0x6f,
0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x22, 0xee, 0x02,
0x0a, 0x04, 0x47, 0x72, 0x69, 0x64, 0x12, 0x21, 0x0a, 0x07, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e,
0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x07, 0x2e, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e,
0x52, 0x07, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x12, 0x18, 0x0a, 0x04, 0x72, 0x6f, 0x77,
0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x04, 0x2e, 0x52, 0x6f, 0x77, 0x52, 0x04, 0x72,
0x6f, 0x77, 0x73, 0x12, 0x22, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20,
0x01, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52,
0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x2a, 0x0a, 0x11, 0x6c, 0x61, 0x73, 0x74, 0x5f,
0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01,
0x28, 0x01, 0x52, 0x0f, 0x6c, 0x61, 0x73, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x55, 0x70, 0x64, 0x61,
0x74, 0x65, 0x64, 0x12, 0x2c, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x69, 0x6e,
0x66, 0x6f, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74,
0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x49, 0x6e, 0x66,
0x6f, 0x12, 0x32, 0x0a, 0x0d, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61,
0x74, 0x61, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x4d,
0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x0c, 0x74, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x74,
0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x22, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72,
0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x08, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72,
0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x41, 0x0a, 0x1d, 0x6d, 0x6f, 0x73,
0x74, 0x5f, 0x72, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72,
0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x01,
0x52, 0x1a, 0x6d, 0x6f, 0x73, 0x74, 0x52, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x43, 0x6c, 0x75, 0x73,
0x74, 0x65, 0x72, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x4a, 0x04, 0x08, 0x03,
0x10, 0x04, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x07, 0x10, 0x08, 0x22, 0x72,
0x0a, 0x07, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x65, 0x73,
0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a,
0x74, 0x65, 0x73, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65,
0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73,
0x73, 0x61, 0x67, 0x65, 0x12, 0x2c, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f,
0x72, 0x6f, 0x77, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x43, 0x6c, 0x75, 0x73,
0x74, 0x65, 0x72, 0x52, 0x6f, 0x77, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52,
0x6f, 0x77, 0x22, 0x45, 0x0a, 0x0a, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x6f, 0x77,
0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65,
0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e,
0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x02, 0x20, 0x03,
0x28, 0x05, 0x52, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x33,
}
// Guard and storage for lazy one-time GZIP compression of the raw descriptor.
var (
	file_state_proto_rawDescOnce sync.Once
	file_state_proto_rawDescData = file_state_proto_rawDesc
)
// file_state_proto_rawDescGZIP returns the GZIP-compressed form of the raw
// file descriptor, compressing it exactly once on first use.
func file_state_proto_rawDescGZIP() []byte {
	file_state_proto_rawDescOnce.Do(func() {
		file_state_proto_rawDescData = protoimpl.X.CompressGZIP(file_state_proto_rawDescData)
	})
	return file_state_proto_rawDescData
}
// Per-message reflection metadata slots (11 messages + 2 map-entry types).
var file_state_proto_msgTypes = make([]protoimpl.MessageInfo, 13)

// Go types referenced by this file, indexed by the numbers used in
// file_state_proto_depIdxs below.
var file_state_proto_goTypes = []interface{}{
	(*Property)(nil),            // 0: Property
	(*Metric)(nil),              // 1: Metric
	(*UpdatePhaseData)(nil),     // 2: UpdatePhaseData
	(*UpdateInfo)(nil),          // 3: UpdateInfo
	(*AlertInfo)(nil),           // 4: AlertInfo
	(*TestMetadata)(nil),        // 5: TestMetadata
	(*Column)(nil),              // 6: Column
	(*Row)(nil),                 // 7: Row
	(*Grid)(nil),                // 8: Grid
	(*Cluster)(nil),             // 9: Cluster
	(*ClusterRow)(nil),          // 10: ClusterRow
	nil,                         // 11: Property.PropertyEntry
	nil,                         // 12: AlertInfo.PropertiesEntry
	(*timestamp.Timestamp)(nil), // 13: google.protobuf.Timestamp
	(*config.TestGroup)(nil),    // 14: TestGroup
}
// Dependency indexes: each entry maps a cross-reference to an index in
// file_state_proto_goTypes. The bracketed trailing entries describe the
// sub-list layout consumed by the protoimpl type builder.
var file_state_proto_depIdxs = []int32{
	11, // 0: Property.property:type_name -> Property.PropertyEntry
	2,  // 1: UpdateInfo.update_phase_data:type_name -> UpdatePhaseData
	13, // 2: AlertInfo.fail_time:type_name -> google.protobuf.Timestamp
	13, // 3: AlertInfo.pass_time:type_name -> google.protobuf.Timestamp
	12, // 4: AlertInfo.properties:type_name -> AlertInfo.PropertiesEntry
	1,  // 5: Row.metrics:type_name -> Metric
	4,  // 6: Row.alert_info:type_name -> AlertInfo
	0,  // 7: Row.properties:type_name -> Property
	6,  // 8: Grid.columns:type_name -> Column
	7,  // 9: Grid.rows:type_name -> Row
	14, // 10: Grid.config:type_name -> TestGroup
	3,  // 11: Grid.update_info:type_name -> UpdateInfo
	5,  // 12: Grid.test_metadata:type_name -> TestMetadata
	9,  // 13: Grid.cluster:type_name -> Cluster
	10, // 14: Cluster.cluster_row:type_name -> ClusterRow
	15, // [15:15] is the sub-list for method output_type
	15, // [15:15] is the sub-list for method input_type
	15, // [15:15] is the sub-list for extension type_name
	15, // [15:15] is the sub-list for extension extendee
	0,  // [0:15] is the sub-list for field type_name
}
func init() { file_state_proto_init() }
// file_state_proto_init builds and registers this file's protobuf type
// metadata. Idempotent: returns immediately once File_state_proto is set.
func file_state_proto_init() {
	if File_state_proto != nil {
		return
	}
	// Without unsafe support (purego/appengine builds), install reflection
	// exporters so protoimpl can reach each message's internal bookkeeping
	// fields (state, sizeCache, unknownFields) by index.
	if !protoimpl.UnsafeEnabled {
		file_state_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*Property); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_state_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*Metric); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_state_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*UpdatePhaseData); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_state_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*UpdateInfo); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_state_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*AlertInfo); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_state_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*TestMetadata); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_state_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*Column); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_state_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*Row); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_state_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*Grid); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_state_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*Cluster); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_state_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*ClusterRow); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
	}
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: file_state_proto_rawDesc,
			NumEnums:      0,
			NumMessages:   13,
			NumExtensions: 0,
			NumServices:   0,
		},
		GoTypes:           file_state_proto_goTypes,
		DependencyIndexes: file_state_proto_depIdxs,
		MessageInfos:      file_state_proto_msgTypes,
	}.Build()
	File_state_proto = out.File
	// Drop construction-time data so it can be garbage collected.
	file_state_proto_rawDesc = nil
	file_state_proto_goTypes = nil
	file_state_proto_depIdxs = nil
}
| GoogleCloudPlatform/testgrid | pb/state/state.pb.go | GO | apache-2.0 | 49,570 |
/*
* Copyright 2014-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.pinpoint.model.transform;
import javax.annotation.Generated;
import com.amazonaws.SdkClientException;
import com.amazonaws.services.pinpoint.model.*;
import com.amazonaws.protocol.*;
import com.amazonaws.annotation.SdkInternalApi;
/**
 * Marshaller that binds a {@link DeleteApnsChannelRequest} onto the wire protocol.
 * Generated by the AWS Java SDK code generator - do not edit by hand.
 */
@Generated("com.amazonaws:aws-java-sdk-code-generator")
@SdkInternalApi
public class DeleteApnsChannelRequestMarshaller {

    // The application id is carried as the "application-id" URI path segment.
    private static final MarshallingInfo<String> APPLICATIONID_BINDING = MarshallingInfo.builder(MarshallingType.STRING)
            .marshallLocation(MarshallLocation.PATH).marshallLocationName("application-id").build();

    // The marshaller is stateless, so a single shared instance suffices.
    private static final DeleteApnsChannelRequestMarshaller instance = new DeleteApnsChannelRequestMarshaller();

    public static DeleteApnsChannelRequestMarshaller getInstance() {
        return instance;
    }

    /**
     * Marshall the given parameter object.
     *
     * @param deleteApnsChannelRequest request to marshall, must not be null
     * @param protocolMarshaller       target protocol marshaller
     * @throws SdkClientException if the request is null or marshalling fails
     */
    public void marshall(DeleteApnsChannelRequest deleteApnsChannelRequest, ProtocolMarshaller protocolMarshaller) {
        if (deleteApnsChannelRequest == null) {
            throw new SdkClientException("Invalid argument passed to marshall(...)");
        }
        try {
            protocolMarshaller.marshall(deleteApnsChannelRequest.getApplicationId(), APPLICATIONID_BINDING);
        } catch (Exception e) {
            throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
        }
    }
}
| jentfoo/aws-sdk-java | aws-java-sdk-pinpoint/src/main/java/com/amazonaws/services/pinpoint/model/transform/DeleteApnsChannelRequestMarshaller.java | Java | apache-2.0 | 2,053 |
/*
* Copyright 2015-2018 Igor Maznitsa.
*
* Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.igormaznitsa.mindmap.plugins.exporters;
import com.igormaznitsa.meta.annotation.MustNotContainNull;
import com.igormaznitsa.meta.common.utils.Assertions;
import com.igormaznitsa.mindmap.model.Topic;
import com.igormaznitsa.mindmap.model.logger.Logger;
import com.igormaznitsa.mindmap.model.logger.LoggerFactory;
import com.igormaznitsa.mindmap.plugins.api.AbstractExporter;
import com.igormaznitsa.mindmap.plugins.api.HasOptions;
import com.igormaznitsa.mindmap.plugins.api.PluginContext;
import com.igormaznitsa.mindmap.swing.panel.MindMapPanel;
import com.igormaznitsa.mindmap.swing.panel.MindMapPanelConfig;
import com.igormaznitsa.mindmap.swing.panel.Texts;
import com.igormaznitsa.mindmap.swing.panel.utils.ImageSelection;
import com.igormaznitsa.mindmap.swing.panel.utils.MindMapUtils;
import com.igormaznitsa.mindmap.swing.panel.utils.RenderQuality;
import com.igormaznitsa.mindmap.swing.services.IconID;
import com.igormaznitsa.mindmap.swing.services.ImageIconServiceProvider;
import com.igormaznitsa.mindmap.swing.services.UIComponentFactory;
import com.igormaznitsa.mindmap.swing.services.UIComponentFactoryProvider;
import java.awt.Component;
import java.awt.Toolkit;
import java.awt.datatransfer.Clipboard;
import java.awt.event.ActionEvent;
import java.awt.event.ActionListener;
import java.awt.image.BufferedImage;
import java.awt.image.RenderedImage;
import java.io.BufferedOutputStream;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import javax.imageio.ImageIO;
import javax.swing.BorderFactory;
import javax.swing.BoxLayout;
import javax.swing.Icon;
import javax.swing.JCheckBox;
import javax.swing.JComponent;
import javax.swing.JPanel;
import javax.swing.SwingUtilities;
import org.apache.commons.io.IOUtils;
/**
 * Exporter plugin that renders the current mind map into a PNG image and
 * writes it either to a file/stream or onto the system clipboard.
 *
 * <p>Two user options are offered: unfold all topics before rendering, and
 * draw the map background into the image.</p>
 */
public final class PNGImageExporter extends AbstractExporter {

  private static final Logger LOGGER = LoggerFactory.getLogger(PNGImageExporter.class);

  private static final UIComponentFactory UI_FACTORY = UIComponentFactoryProvider.findInstance();

  private static final Icon ICO = ImageIconServiceProvider.findInstance().getIconForId(IconID.POPUP_EXPORT_PNG);

  // Last chosen option states; reused to pre-select the checkboxes next time.
  private boolean flagExpandAllNodes = false;
  private boolean flagDrawBackground = true;

  public PNGImageExporter() {
    super();
  }

  /**
   * Builds the options panel shown in the export dialog: two checkboxes whose
   * state is mirrored into an {@link Options} holder via an action listener.
   *
   * @param context plugin context, must not be null
   * @return the assembled options panel
   */
  @Override
  @Nullable
  public JComponent makeOptions(@Nonnull final PluginContext context) {
    final Options options = new Options(flagExpandAllNodes, flagDrawBackground);
    final JPanel panel = UI_FACTORY.makePanelWithOptions(options);
    final JCheckBox checkBoxExpandAll = UI_FACTORY.makeCheckBox();
    checkBoxExpandAll.setSelected(flagExpandAllNodes);
    checkBoxExpandAll.setText(Texts.getString("PNGImageExporter.optionUnfoldAll"));
    checkBoxExpandAll.setActionCommand("unfold");
    final JCheckBox checkBoxDrawBackground = UI_FACTORY.makeCheckBox();
    checkBoxDrawBackground.setSelected(flagDrawBackground);
    checkBoxDrawBackground.setText(Texts.getString("PNGImageExporter.optionDrawBackground"));
    checkBoxDrawBackground.setActionCommand("back");
    panel.setLayout(new BoxLayout(panel, BoxLayout.Y_AXIS));
    panel.add(checkBoxExpandAll);
    panel.add(checkBoxDrawBackground);
    panel.setBorder(BorderFactory.createEmptyBorder(16, 32, 16, 32));
    // Keep the Options holder in sync with the checkboxes so the chosen values
    // are available later when makeImage() reads them back.
    final ActionListener actionListener = new ActionListener() {
      @Override
      public void actionPerformed(@Nonnull final ActionEvent e) {
        if (e.getSource() == checkBoxExpandAll) {
          options.setOption(Options.KEY_EXPAND_ALL, Boolean.toString(checkBoxExpandAll.isSelected()));
        }
        if (e.getSource() == checkBoxDrawBackground) {
          options.setOption(Options.KEY_DRAW_BACK, Boolean.toString(checkBoxDrawBackground.isSelected()));
        }
      }
    };
    checkBoxExpandAll.addActionListener(actionListener);
    checkBoxDrawBackground.addActionListener(actionListener);
    return panel;
  }

  /**
   * Renders the mind map to an image, first pulling the export options either
   * from a {@link HasOptions} component or by scanning the panel's checkboxes.
   * Side effect: updates the exporter's remembered option flags.
   *
   * @param context plugin context, must not be null
   * @param options options component; asserted non-null in the fallback branch
   * @return rendered image, may be null if rendering failed
   * @throws IOException if rendering fails with an I/O problem
   */
  @Nullable
  private BufferedImage makeImage(@Nonnull final PluginContext context, @Nullable final JComponent options) throws IOException {
    if (options instanceof HasOptions) {
      final HasOptions opts = (HasOptions) options;
      this.flagExpandAllNodes = Boolean.parseBoolean(opts.getOption(Options.KEY_EXPAND_ALL));
      this.flagDrawBackground = Boolean.parseBoolean(opts.getOption(Options.KEY_DRAW_BACK));
    } else {
      // Fallback: recover the option values directly from the checkboxes by
      // their action commands ("unfold" / "back").
      for (final Component compo : Assertions.assertNotNull(options).getComponents()) {
        if (compo instanceof JCheckBox) {
          final JCheckBox cb = (JCheckBox) compo;
          if ("unfold".equalsIgnoreCase(cb.getActionCommand())) {
            this.flagExpandAllNodes = cb.isSelected();
          } else if ("back".equalsIgnoreCase(cb.getActionCommand())) {
            this.flagDrawBackground = cb.isSelected();
          }
        }
      }
    }
    // Render with a copy of the panel config at 100% scale so on-screen zoom
    // does not affect the exported image.
    final MindMapPanelConfig newConfig = new MindMapPanelConfig(context.getPanelConfig(), false);
    newConfig.setDrawBackground(this.flagDrawBackground);
    newConfig.setScale(1.0f);
    return MindMapPanel.renderMindMapAsImage(context.getPanel().getModel(), newConfig, flagExpandAllNodes, RenderQuality.QUALITY);
  }

  /**
   * Renders the map and places the image on the system clipboard.
   * The clipboard interaction is posted onto the EDT.
   */
  @Override
  public void doExportToClipboard(@Nonnull final PluginContext context, @Nonnull final JComponent options) throws IOException {
    final BufferedImage image = makeImage(context, options);
    if (image != null) {
      SwingUtilities.invokeLater(new Runnable() {
        @Override
        public void run() {
          final Clipboard clipboard = Toolkit.getDefaultToolkit().getSystemClipboard();
          if (clipboard != null) {
            clipboard.setContents(new ImageSelection(image), null);
          }
        }
      });
    }
  }

  /**
   * Renders the map as PNG and writes it to the given stream, or - when the
   * stream is null - asks the user for a target file.
   *
   * @param context plugin context, must not be null
   * @param options options component, may be null
   * @param out     target stream; null means "prompt for a file". A caller
   *                provided stream is NOT closed here (only streams opened for
   *                a user-selected file are closed).
   * @throws IOException if rendering or writing fails
   */
  @Override
  public void doExport(@Nonnull final PluginContext context, @Nullable final JComponent options, @Nullable final OutputStream out) throws IOException {
    final RenderedImage image = makeImage(context, options);
    if (image == null) {
      // Interactive mode reports via dialog; stream mode signals the caller.
      if (out == null) {
        LOGGER.error("Can't render map as image");
        context.getDialogProvider().msgError(null, Texts.getString("PNGImageExporter.msgErrorDuringRendering"));
        return;
      } else {
        throw new IOException("Can't render image");
      }
    }
    // Encode fully into memory first so a failed render never produces a
    // partially written file.
    final ByteArrayOutputStream buff = new ByteArrayOutputStream(128000);
    ImageIO.write(image, "png", buff);//NOI18N
    final byte[] imageData = buff.toByteArray();
    File fileToSaveMap = null;
    OutputStream theOut = out;
    if (theOut == null) {
      fileToSaveMap = MindMapUtils.selectFileToSaveForFileFilter(
          context.getPanel(),
          context,
          this.getClass().getName(),
          Texts.getString("PNGImageExporter.saveDialogTitle"),
          null,
          ".png",
          Texts.getString("PNGImageExporter.filterDescription"),
          Texts.getString("PNGImageExporter.approveButtonText"));
      fileToSaveMap = MindMapUtils.checkFileAndExtension(context.getPanel(), fileToSaveMap, ".png");//NOI18N
      // fileToSaveMap is null when the user cancelled the dialog.
      theOut = fileToSaveMap == null ? null : new BufferedOutputStream(new FileOutputStream(fileToSaveMap, false));
    }
    if (theOut != null) {
      try {
        IOUtils.write(imageData, theOut);
      } finally {
        // Close only streams we opened ourselves; caller-supplied streams
        // remain the caller's responsibility.
        if (fileToSaveMap != null) {
          IOUtils.closeQuietly(theOut);
        }
      }
    }
  }

  @Override
  @Nullable
  public String getMnemonic() {
    return "png";
  }

  @Override
  @Nonnull
  public String getName(@Nonnull final PluginContext context, @Nullable Topic actionTopic) {
    return Texts.getString("PNGImageExporter.exporterName");
  }

  @Override
  @Nonnull
  public String getReference(@Nonnull final PluginContext context, @Nullable Topic actionTopic) {
    return Texts.getString("PNGImageExporter.exporterReference");
  }

  @Override
  @Nonnull
  public Icon getIcon(@Nonnull final PluginContext context, @Nullable Topic actionTopic) {
    return ICO;
  }

  @Override
  public int getOrder() {
    return 4;
  }

  /**
   * Key/value holder for the two export options, implementing the generic
   * {@link HasOptions} contract used by the options panel machinery.
   */
  private static class Options implements HasOptions {

    private static final String KEY_EXPAND_ALL = "expand.all";
    private static final String KEY_DRAW_BACK = "draw.back";

    private boolean expandAll;
    private boolean drawBack;

    private Options(final boolean expandAllNodes, final boolean drawBackground) {
      this.expandAll = expandAllNodes;
      this.drawBack = drawBackground;
    }

    @Override
    public boolean doesSupportKey(@Nonnull final String key) {
      return KEY_DRAW_BACK.equals(key) || KEY_EXPAND_ALL.equals(key);
    }

    @Override
    @Nonnull
    @MustNotContainNull
    public String[] getOptionKeys() {
      return new String[] {KEY_EXPAND_ALL, KEY_DRAW_BACK};
    }

    @Override
    @Nonnull
    public String getOptionKeyDescription(@Nonnull final String key) {
      if (KEY_DRAW_BACK.equals(key)) {
        return "Draw background";
      }
      if (KEY_EXPAND_ALL.equals(key)) {
        return "Unfold all topics";
      }
      return "";
    }

    @Override
    public void setOption(@Nonnull final String key, @Nullable final String value) {
      if (KEY_DRAW_BACK.equals(key)) {
        this.drawBack = Boolean.parseBoolean(value);
      } else if (KEY_EXPAND_ALL.equals(key)) {
        this.expandAll = Boolean.parseBoolean(value);
      }
    }

    @Override
    @Nullable
    public String getOption(@Nonnull final String key) {
      if (KEY_DRAW_BACK.equals(key)) {
        return Boolean.toString(this.drawBack);
      }
      if (KEY_EXPAND_ALL.equals(key)) {
        return Boolean.toString(this.expandAll);
      }
      return null;
    }
  }
}
| raydac/netbeans-mmd-plugin | mind-map/mind-map-swing-panel/src/main/java/com/igormaznitsa/mindmap/plugins/exporters/PNGImageExporter.java | Java | apache-2.0 | 10,309 |
#!/usr/bin/env python
import os
from MentalUs import create_app, db
from MentalUs.models import MTUser, MTScale, MTAnnouncement, \
MTExtendFields, MTUserExtendInfo, MTScaleResult, MTUnfinishedScale
from flask.ext.script import Manager, Shell
from flask.ext.migrate import Migrate, MigrateCommand
# Application bootstrap: this CLI always runs against the 'dev' configuration.
app = create_app('dev')
manager = Manager(app)
migrate = Migrate(app, db)
def generate_debug():
    """Populate every supported model with its debug fixture data."""
    debug_models = (
        MTUser,
        MTScale,
        MTAnnouncement,
        MTExtendFields,
        MTScaleResult,
        MTUnfinishedScale,
    )
    for model in debug_models:
        model.generate_debug()
def make_shell_context():
    """Build the namespace exposed inside the `manage.py shell` session."""
    return dict(
        MTUser=MTUser,
        MTScale=MTScale,
        MTAnnouncement=MTAnnouncement,
        MTExtendFields=MTExtendFields,
        MTUserExtendInfo=MTUserExtendInfo,
        MTScaleResult=MTScaleResult,
        MTUnfinishedScale=MTUnfinishedScale,
        db=db,
        generate_debug=generate_debug,
    )
# Register CLI commands: an interactive shell with the models preloaded, and
# the Flask-Migrate database commands under `python manage.py db`.
manager.add_command('shell', Shell(make_context=make_shell_context))
manager.add_command('db', MigrateCommand)
if __name__ == '__main__':
    manager.run()
// Fixture export returning a fixed greeting string.
export const foo = () => 'hello world';
| ampproject/rollup-plugin-closure-compiler | test/hashbang/fixtures/hashbang-banner.js | JavaScript | apache-2.0 | 49 |
/**
* Copyright (c) 2016 Rory Hool
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
**/
package com.roryhool.videoinfoviewer.atomfragments;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import android.app.ActionBar.LayoutParams;
import android.app.Activity;
import android.os.Bundle;
import android.os.Parcelable;
import android.support.v4.app.Fragment;
import android.support.v7.widget.LinearLayoutManager;
import android.support.v7.widget.RecyclerView;
import android.support.v7.widget.RecyclerView.LayoutManager;
import android.support.v7.widget.RecyclerView.ViewHolder;
import android.util.DisplayMetrics;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import android.view.animation.RotateAnimation;
import android.widget.CompoundButton;
import android.widget.CompoundButton.OnCheckedChangeListener;
import android.widget.ImageButton;
import android.widget.ImageView;
import android.widget.RelativeLayout;
import android.widget.TextView;
import com.coremedia.iso.IsoFile;
import com.coremedia.iso.boxes.Box;
import com.googlecode.mp4parser.AbstractContainerBox;
import com.roryhool.videoinfoviewer.Extras;
import com.roryhool.videoinfoviewer.R;
import com.roryhool.videoinfoviewer.VideoActivity;
import com.roryhool.videoinfoviewer.analytics.Analytics;
import com.roryhool.videoinfoviewer.data.Video;
import com.roryhool.videoinfoviewer.utils.AtomHelper;
import com.roryhool.videoinfoviewer.utils.IsoFileCache;
import com.roryhool.videoinfoviewer.utils.VideoCache;
import rx.Observable;
import rx.android.app.AppObservable;
import rx.android.schedulers.AndroidSchedulers;
import rx.schedulers.Schedulers;
import rx.subscriptions.CompositeSubscription;
/**
 * Fragment that shows the MP4 atom (box) structure of a video as a
 * collapsible tree rendered in a RecyclerView.
 *
 * <p>Per-atom expansion state and the layout manager's scroll state are
 * persisted through the fragment arguments / saved-instance bundle.</p>
 */
public class AtomStructureFragment extends Fragment {

   protected static final String EXTRA_LAYOUT_MANAGER_STATE = "com.roryhool.videoinfoviewer.atomfragments.AtomStructureFragment.EXTRA_LAYOUT_MANAGER_STATE";

   protected Video mVideo;

   protected IsoFile mIsoFile;

   protected RecyclerView mRecycler;

   protected View mProgressView;

   protected TextView mErrorText;

   protected LayoutManager mLayoutManager;

   protected AtomAdapter mAdapter;

   // All atoms in depth-first order.
   protected List<Atom> mAtoms = new ArrayList<>();

   // Subset of atoms currently visible (children of collapsed atoms excluded).
   protected List<Atom> mAtomsForAdapter = new ArrayList<>();

   // NOTE(review): this subscription appears never to be unsubscribed in this
   // class — verify lifecycle handling (e.g. onDestroyView) elsewhere.
   protected CompositeSubscription mSubscription = new CompositeSubscription();

   protected Parcelable mLayoutManagerState;

   /**
    * Inflates the fragment, restores any saved layout-manager state from the
    * arguments, and streams the top-level boxes of the ISO file into the
    * adapter on a background scheduler.
    */
   @Override
   public View onCreateView( LayoutInflater inflater, ViewGroup container, Bundle savedInstanceState ) {
      Analytics.logEvent( "App Action", "Opened Video in AtomStructureFragment" );
      View view = inflater.inflate( R.layout.fragment_atom_structure, container, false );
      mRecycler = (RecyclerView) view.findViewById( R.id.recycler );
      mProgressView = view.findViewById( R.id.progress );
      mErrorText = (TextView) view.findViewById( R.id.error_text );
      mVideo = getVideo( getArguments() );
      Bundle args = getArguments();
      if ( args != null ) {
         if ( args.containsKey( EXTRA_LAYOUT_MANAGER_STATE ) ) {
            mLayoutManagerState = args.getParcelable( EXTRA_LAYOUT_MANAGER_STATE );
         }
      }
      mIsoFile = mVideo.getIsoFile();
      if ( mIsoFile != null ) {
         mProgressView.setVisibility( View.VISIBLE );
         mAdapter = new AtomAdapter();
         mLayoutManager = new LinearLayoutManager( getActivity(), LinearLayoutManager.VERTICAL, false );
         mRecycler.setLayoutManager( mLayoutManager );
         mRecycler.setAdapter( mAdapter );
         // Process boxes off the main thread; results are delivered on the
         // main thread and collected into mAtoms/mAtomsForAdapter.
         mSubscription.add(
            AppObservable.bindFragment(
               this,
               Observable.from( mIsoFile.getBoxes() ) )
               .subscribeOn( Schedulers.io() )
               .observeOn( AndroidSchedulers.mainThread() )
               .subscribe(
                  box -> handleBox( box ),
                  throwable -> onBoxFailure( throwable ),
                  () -> onBoxesHandled()
               )
         );
      } else {
         mErrorText.setText( R.string.failed_to_load_atom_structure );
         Analytics.logEvent( "Failure", "Failed to load atom structure, null iso file" );
      }
      return view;
   }

   // Wraps a top-level box into an Atom and recursively collects its children.
   private void handleBox( Box box ) {
      Activity activity = getActivity();
      if ( activity != null ) {
         AtomHelper.logEventsForBox( activity, box );
      }
      Atom atom = new Atom( box, 0 );
      mAtoms.add( atom );
      mAtomsForAdapter.add( atom );
      // Children of a collapsed atom are collected but kept out of the
      // adapter list by passing a throwaway list.
      atom.addChildren( mAtoms, atom.isExpanded() ? mAtomsForAdapter : new ArrayList<>() );
   }

   private void onBoxFailure( Throwable throwable ) {
      throwable.printStackTrace();
   }

   // Pushes the collected atoms into the adapter and, if present, restores
   // the saved scroll state before hiding the progress indicator.
   private void onBoxesHandled() {
      mAdapter.setAtoms( mAtomsForAdapter );
      if ( mLayoutManagerState == null ) {
         mProgressView.setVisibility( View.GONE );
      } else {
         // Post so the restore runs after the adapter's data is laid out.
         mRecycler.post(
            () -> {
               if ( mLayoutManagerState != null ) {
                  mLayoutManager.onRestoreInstanceState( mLayoutManagerState );
               }
               mProgressView.setVisibility( View.GONE );
            } );
      }
   }

   /**
    * Saves only the COLLAPSED atoms (expanded is the default) plus the layout
    * manager state on top of the original arguments.
    */
   @Override
   public void onSaveInstanceState( Bundle outState ) {
      super.onSaveInstanceState( outState );
      outState.putAll( getArguments() );
      for ( Atom atom : mAtoms ) {
         if ( !atom.isExpanded() ) {
            outState.putBoolean( atom.getId(), atom.isExpanded() );
         }
      }
      if ( mLayoutManager != null ) {
         outState.putParcelable( EXTRA_LAYOUT_MANAGER_STATE, mLayoutManager.onSaveInstanceState() );
      }
   }

   // Resolves the Video object from the cache id stored in the bundle.
   public Video getVideo( Bundle bundle ) {
      return VideoCache.Instance().getVideoById( bundle.getInt( Extras.EXTRA_VIDEO_CACHE_ID ) );
   }

   // Atoms default to expanded unless a saved "collapsed" flag exists.
   public boolean getIsExpanded( Atom atom ) {
      Bundle args = getArguments();
      if ( args != null && args.containsKey( atom.getId() ) ) {
         return args.getBoolean( atom.getId() );
      }
      return true;
   }

   /**
    * Tree node wrapping an ISO box, its depth in the tree, its children and
    * its expansion state. The id encodes type/size/offset so it is stable
    * across recreations of the same file.
    */
   public class Atom {

      protected Box mBox;

      protected String mName;

      protected int mDepth;

      protected String mId;

      protected List<Atom> mChildAtoms = new ArrayList<>();

      protected boolean mExpanded = true;

      public Atom( Box box, int depth ) {
         mBox = box;
         mDepth = depth;
         mId = String.format( "%s-%d-%d", mBox.getType(), mBox.getSize(), mBox.getOffset() );
         mExpanded = getIsExpanded( this );
      }

      // Recursively wraps child boxes; collapsed subtrees are collected into
      // mAtoms but kept out of the adapter list via a throwaway list.
      public void addChildren( List<Atom> atoms, List<Atom> atomsForAdapter ) {
         if ( mBox instanceof AbstractContainerBox ) {
            AbstractContainerBox containerBox = (AbstractContainerBox) mBox;
            for ( Box childBox : containerBox.getBoxes() ) {
               Atom childAtom = new Atom( childBox, mDepth + 1 );
               atoms.add( childAtom );
               atomsForAdapter.add( childAtom );
               childAtom.addChildren( atoms, childAtom.isExpanded() ? atomsForAdapter : new ArrayList<Atom>() );
               mChildAtoms.add( childAtom );
            }
         }
      }

      public void toggleExpansion() {
         mExpanded = !mExpanded;
      }

      public List<Atom> getChildAtoms() {
         return mChildAtoms;
      }

      // Counts descendants currently visible; zero when this atom is collapsed.
      public int getVisibleChildCount() {
         int visibleChildCount = 0;
         if ( mExpanded ) {
            for ( Atom childAtom : mChildAtoms ) {
               visibleChildCount += 1;
               visibleChildCount += childAtom.getVisibleChildCount();
            }
         }
         return visibleChildCount;
      }

      public String getType() {
         return mBox.getType();
      }

      // Lazily resolves the human-readable name for the box type.
      public String getName() {
         if ( mName == null ) {
            mName = AtomHelper.getNameForType( mBox.getType() );
         }
         return mName;
      }

      public String getId() {
         return mId;
      }

      protected boolean isExpanded() {
         return mExpanded;
      }

      protected int getDepth() {
         return mDepth;
      }

      protected Box getBox() {
         return mBox;
      }

      protected int getChildCount() {
         return mChildAtoms.size();
      }
   }

   /**
    * View holder for one atom row: indents by depth, rotates the expand icon,
    * and handles expand/collapse clicks plus the info-button navigation.
    */
   public class AtomViewHolder extends ViewHolder implements OnCheckedChangeListener {

      protected AtomAdapter mAdapter;

      protected View mView;

      protected Atom mAtom;

      public AtomViewHolder( View view, AtomAdapter adapter ) {
         super( view );
         mView = view;
         mAdapter = adapter;
         mView.setOnClickListener( this::onClickAtom );
         ImageButton infoButton = (ImageButton) mView.findViewById( R.id.box_info_button );
         infoButton.setOnClickListener( this::onClickInfoButton );
      }

      // Binds atom data into the row; rows without children are not clickable
      // and hide the expand icon.
      public void bind( Atom atom ) {
         mAtom = atom;
         TextView typeView = (TextView) mView.findViewById( R.id.box_type );
         typeView.setText( mAtom.getType() );
         RelativeLayout root = (RelativeLayout) mView.findViewById( R.id.atom_root );
         root.setClickable( atom.getChildCount() == 0 ? false : true );
         // Indent 16dp per tree depth level.
         View paddingView = mView.findViewById( R.id.padding_view );
         paddingView.setLayoutParams( new RelativeLayout.LayoutParams( dpToPx( 16 * mAtom.getDepth() ), LayoutParams.MATCH_PARENT ) );
         TextView descriptionView = (TextView) mView.findViewById( R.id.box_description );
         String name = mAtom.getName();
         descriptionView.setText( name );
         final ImageView boxIcon = (ImageView) mView.findViewById( R.id.box_icon );
         boxIcon.setVisibility( atom.getChildCount() == 0 ? View.INVISIBLE : View.VISIBLE );
         if ( atom.getChildCount() > 0 ) {
            // Post so the icon has been measured before rotating around its center;
            // zero duration snaps it to the state matching mExpanded.
            boxIcon.post(
               new Runnable() {
                  @Override
                  public void run() {
                     int from = mAtom.isExpanded() ? -90 : 0;
                     int to = mAtom.isExpanded() ? 0 : -90;
                     RotateAnimation animation = new RotateAnimation( from, to, boxIcon.getWidth() / 2, boxIcon.getHeight() / 2 );
                     animation.setDuration( 0 );
                     animation.setFillAfter( true );
                     boxIcon.startAnimation( animation );
                  }
               } );
         }
      }

      // Converts dp to physical pixels using the display's x-axis dpi.
      public int dpToPx( int dp ) {
         DisplayMetrics displayMetrics = getResources().getDisplayMetrics();
         int px = Math.round( dp * ( displayMetrics.xdpi / DisplayMetrics.DENSITY_DEFAULT ) );
         return px;
      }

      // Toggles expansion: animates the icon and inserts/removes the visible
      // descendant rows from the adapter.
      protected void onClickAtom( View v ) {
         // Capture the visible-descendant count BEFORE toggling so a collapse
         // knows how many rows to remove.
         int visibleChildCount = mAtom.getVisibleChildCount();
         mAtom.toggleExpansion();
         visibleChildCount = mAtom.isExpanded() ? mAtom.getVisibleChildCount() : visibleChildCount;
         boolean isExpanded = mAtom.isExpanded();
         int from = isExpanded ? -90 : 0;
         int to = isExpanded ? 0 : -90;
         ImageView boxIcon = (ImageView) mView.findViewById( R.id.box_icon );
         RotateAnimation animation = new RotateAnimation( from, to, boxIcon.getWidth() / 2, boxIcon.getHeight() / 2 );
         animation.setDuration( 600 );
         animation.setFillAfter( true );
         boxIcon.startAnimation( animation );
         if ( isExpanded ) {
            int position = mAdapter.getItemPosition( mAtom );
            mAdapter.addItems( position + 1, mAtom.getChildAtoms() );
            mAdapter.notifyItemRangeInserted( position + 1, visibleChildCount );
         } else {
            int position = mAdapter.getItemPosition( mAtom );
            mAdapter.removeItems( position + 1, visibleChildCount );
         }
      }

      // Opens the detail fragment for this atom via the hosting VideoActivity.
      protected void onClickInfoButton( View v ) {
         Activity activity = getActivity();
         if ( activity instanceof VideoActivity ) {
            Bundle args = new Bundle();
            args.putInt( Extras.EXTRA_VIDEO_CACHE_ID, mVideo.CacheId );
            args.putInt( Extras.EXTRA_BOX_ID, IsoFileCache.Instance().cacheBox( mAtom.getBox() ) );
            VideoActivity videoActivity = (VideoActivity) activity;
            videoActivity.addFragmentToVideoTab( mVideo, AtomInfoFragment.class, args );
         }
      }

      // Required by OnCheckedChangeListener; intentionally a no-op.
      @Override
      public void onCheckedChanged( CompoundButton buttonView, boolean isChecked ) {

      }
   }

   /**
    * Flat-list adapter over the visible atoms, with helpers to insert and
    * remove whole (expanded) subtrees when an atom is toggled.
    */
   public class AtomAdapter extends RecyclerView.Adapter<AtomViewHolder> {

      protected List<Atom> mAtoms = new ArrayList<>();

      public void setAtoms( List<Atom> atoms ) {
         mAtoms = atoms;
         notifyDataSetChanged();
      }

      // Linear scan; returns -1 when the atom is not currently visible.
      public int getItemPosition( Atom atom ) {
         int position = 0;
         for ( Atom a : mAtoms ) {
            if ( a.equals( atom ) ) {
               return position;
            }
            position++;
         }
         return -1;
      }

      public void removeItems( int position, int count ) {
         Iterator<Atom> atomIterator = mAtoms.listIterator( position );
         for ( int i = position; i < position + count; i++ ) {
            atomIterator.next();
            atomIterator.remove();
         }
         notifyItemRangeRemoved( position, count );
      }

      // Recursively inserts atoms (and their expanded descendants) starting at
      // position; returns the index just past the last inserted item.
      public int addItems( int position, List<Atom> atoms ) {
         int i = position;
         for ( Atom atom : atoms ) {
            mAtoms.add( i, atom );
            i++;
            if ( atom.isExpanded() ) {
               i = addItems( i, atom.getChildAtoms() );
            }
         }
         return i;
      }

      @Override
      public AtomViewHolder onCreateViewHolder( ViewGroup viewGroup, int position ) {
         LayoutInflater inflater = LayoutInflater.from( viewGroup.getContext() );
         View root = inflater.inflate( R.layout.atom, viewGroup, false );
         return new AtomViewHolder( root, this );
      }

      @Override
      public void onBindViewHolder( AtomViewHolder atomViewHolder, int position ) {
         atomViewHolder.bind( mAtoms.get( position ) );
         atomViewHolder.mView.setTag( position );
      }

      @Override
      public int getItemCount() {
         return mAtoms.size();
      }
   }
}
| hoolrory/VideoInfoViewer | app/src/main/java/com/roryhool/videoinfoviewer/atomfragments/AtomStructureFragment.java | Java | apache-2.0 | 14,715 |
<?php
namespace Point\PointFinance\Http\Controllers;
use Point\Core\Models\Master\RoleUser;
use Point\Core\Traits\ValidationTrait;
use Point\Framework\Models\Journal;
use Point\Framework\Models\Master\Coa;
use Point\Framework\Models\Master\Person;
use Point\PointFinance\Models\Bank\Bank;
use Point\PointFinance\Models\Cash\Cash;
use Point\PointFinance\Models\Cash\CashCashAdvance;
use Point\PointFinance\Models\CashAdvance;
class ReportController extends Controller
{
use ValidationTrait;
public function index($type)
{
self::checkingPermission($type);
$view = view('point-finance::app.finance.point.report.report');
if ($type == 'cash') {
$view->list_coa = Coa::where('coa_category_id', 1)->active()->get();
} elseif ($type == 'bank') {
$view->list_coa = Coa::where('coa_category_id', 2)->active();
$view->list_coa = $view->list_coa->get();
}
$view->list_person = Person::active()->get();
$view->type = $type;
return $view;
}
public function checkingPermission($type)
{
if ($type == 'bank') {
access_is_allowed('read.point.finance.bank.report');
} elseif ($type == 'cash') {
access_is_allowed('read.point.finance.cash.report');
} else {
abort(404);
}
}
    /**
     * AJAX endpoint rendering the report detail partial for the requested
     * account (cash/bank), date range and optional subledger person.
     *
     * Returns the partial view, or a JSON error payload on CSRF failure.
     */
    public function _view()
    {
        // Reject requests without a valid CSRF token.
        if (!$this->validateCSRF()) {
            return response()->json($this->restrictionAccessMessage());
        }
        $type = \Input::get('type');
        $subledger = \Input::get('subledger_id');
        $coa_id = \Input::get('coa_id');
        // Both dates default to today when not supplied.
        $date_from = \Input::get('date_from') ? \Input::get('date_from') : date('d-m-Y');
        $date_to = \Input::get('date_to') ? \Input::get('date_to') : date('d-m-Y');
        // Build the detail view from the aggregated report data.
        $report = self::dataReport($type, $date_from, $date_to, $coa_id, $subledger);
        $view = view('point-finance::app.finance.point.report._detail');
        $view->list_report = $report['report'];
        $view->type = $type;
        // Total cash advance paid out up to the end of the period for this account.
        $view->total_cash_advance = CashAdvance::joinFormulir()->selectOriginal()->notArchived()->notCanceled()
            ->where('formulir.form_date', '<=', date_format_db($date_to, 'end'))
            ->where('is_payed', true)
            ->where('amount', '>', 0)
            ->where('coa_id', $coa_id)
            ->handedOver()
            ->sum('amount');
        // Portion of advances already consumed by cash transactions.
        // NOTE(review): unlike the other two sums this one is not filtered by
        // coa_id — confirm whether that is intentional.
        $view->total_cash_advance_used = CashCashAdvance::joinFormulir()->selectOriginal()->notArchived()->notCanceled()
            ->where('formulir.form_date', '<=', date_format_db($date_to, 'end'))
            ->where('cash_advance_amount', '>', 0)
            ->sum('cash_advance_amount');
        $view->total_cash_advance_remaining = CashAdvance::joinFormulir()->selectOriginal()->notArchived()->notCanceled()
            ->where('formulir.form_date', '<=', date_format_db($date_to, 'end'))
            ->where('is_payed', true)
            ->where('amount', '>', 0)
            ->where('coa_id', $coa_id)
            ->handedOver()
            ->sum('remaining_amount');
        // Debug logging of the computed totals.
        \Log::info('amount ' . $view->total_cash_advance);
        \Log::info('used ' . $view->total_cash_advance_used);
        // Opening balance = journal debits minus credits before the period.
        $view->opening_balance = $report['journal_debit'] - $report['journal_credit'];
        // Export links carry the current filter parameters through the query string.
        $view->url = url('finance/point/report/export/?type='.$type.'&subledger_id='.$subledger.'&coa_id='.$coa_id.'&date_from='.$date_from.'&date_to='.$date_to);
        $view->url_pdf = url('finance/point/report/export/pdf?type='.$type.'&subledger_id='.$subledger.'&coa_id='.$coa_id.'&date_from='.$date_from.'&date_to='.$date_to);
        // role_id 1 is treated as the administrator role.
        $roleUser = RoleUser::where('user_id', auth()->user()->id)->where('role_id', 1)->first();
        $isAdministrator = false;
        if ($roleUser) {
            $isAdministrator = true;
        }
        $view->isAdministrator = $isAdministrator;
        return $view;
    }
public function exportPDF()
{
$type = \Input::get('type');
$subledger = \Input::get('subledger_id');
$coa_id = \Input::get('coa_id');
$date_from = \Input::get('date_from') ? \Input::get('date_from') : date('d-m-Y');
$date_to = \Input::get('date_to') ? \Input::get('date_to') : date('d-m-Y');
// respon view
$report = self::dataReport($type, $date_from, $date_to, $coa_id, $subledger);
$opening_balance = $report['journal_debit'] - $report['journal_credit'];
$type = $type;
$list_report = $report['report'];
$pdf = \PDF::loadView('point-finance::app.finance.point.report.report-pdf', ['list_report' => $list_report, 'opening_balance' => $opening_balance, 'type' => $type]);
return $pdf->stream();
}
public static function dataReport($type, $date_from, $date_to, $coa_id, $subledger)
{
// payment type cash
$report_type = Cash::joinFormulir()->where('coa_id', $coa_id)->notArchived()->close()->selectOriginal()->orderByStandardAsc();
// payment type bank
if ($type == 'bank') {
$report_type = Bank::joinFormulir()->where('coa_id', $coa_id)->notArchived()->close()->selectOriginal()->orderByStandardAsc();
}
// getting data from Journal
$journal_debit = Journal::where('form_date', '<', \DateHelper::formatDB($date_from))
->where('coa_id', $coa_id)
->sum('debit');
$journal_credit = Journal::where('form_date', '<', \DateHelper::formatDB($date_from))
->where('coa_id', $coa_id)
->sum('credit');
// filter subledger
$report = $report_type
->whereBetween('form_date', array(\DateHelper::formatDB($date_from), \DateHelper::formatDB($date_to, 'end')))
->get();
if ($subledger) {
$report = $report_type
->whereBetween('form_date', array(\DateHelper::formatDB($date_from), \DateHelper::formatDB($date_to, 'end')))
->where('person_id', $subledger)
->get();
}
return [
'report' => $report,
'journal_debit' => $journal_debit,
'journal_credit' => $journal_credit
];
}
public function export()
{
$type = \Input::get('type');
$coa_id = \Input::get('coa_id');
$subledger_id = \Input::get('subledger_id');
$date_from = \Input::get('date_from');
$date_to = \Input::get('date_to');
\Excel::create($type. ' Report', function ($excel) use ($type, $coa_id, $subledger_id, $date_from, $date_to) {
# Sheet Data
$excel->sheet('Data', function ($sheet) use ($type, $coa_id, $subledger_id, $date_from, $date_to) {
$sheet->setWidth(array(
'A' => 10,
'B' => 25,
'C' => 25,
'D' => 25,
'E' => 25,
'F' => 25,
'G' => 25,
'H' => 25,
'I' => 25,
));
$sheet->setColumnFormat(array(
'H' => '#,##0.00',
'I' => '#,##0.00'
));
$title = strtoupper($type." REPORT FROM " . $date_from . " - " . $date_to);
$info_export = "DATE EXPORT ". \Carbon::now();
$sheet->cell('A1', function ($cell) use ($title) {
$cell->setValue($title);
});
$sheet->cell('A2', function ($cell) use ($info_export) {
$cell->setValue($info_export);
});
// MERGER COLUMN
$sheet->mergeCells('A4:I4', 'center');
$sheet->cell('A4', function ($cell) use ($type) {
// Set font
$cell->setFont(array(
'family' => 'Times New Roman',
'size' => '14',
'bold' => true
));
$cell->setValue(strtoupper($type.' REPORT'));
});
$sheet->mergeCells('A5:I5', 'center');
$sheet->cell('A5', function ($cell) use ($date_from, $date_to) {
// Set font
$cell->setFont(array(
'family' => 'Times New Roman',
'size' => '14',
'bold' => true
));
if ($date_from && $date_to) {
$cell->setValue('PERIOD : '. strtoupper(\DateHelper::formatView(date_format_db($date_from)) . ' TO ' . \DateHelper::formatView(date_format_db($date_to))));
} else {
$cell->setValue('PERIOD : '. strtoupper(\DateHelper::formatView(\Carbon::now())));
}
});
$sheet->cell('A6:I6', function ($cell) {
// Set font
$cell->setFont(array(
'family' => 'Times New Roman',
'size' => '12',
'bold' => true
));
});
// Generad table of content
$header = array(
array('NO', 'FORM DATE', 'FORM NUMBER', 'PERSON', 'ACCOUNT', 'ACCOUNT DESCRIPTION', 'NOTES', 'RECEIVED', 'DISBURSED')
);
$data_report = self::dataReport($type, $date_from, $date_to, $coa_id, $subledger_id);
$total_data = count($data_report['report']);
$total_row = 0;
$total_received = 0;
$total_disbursed = 0;
// $received = 0;
for ($i=0; $i < $total_data; $i++) {
foreach ($data_report['report'][$i]->detail as $report_detail) {
$total_row++;
$received = '0.00';
if ($data_report['report'][$i]->payment_flow == 'in') {
$received = $report_detail->amount;
$total_received += $report_detail->amount;
}
$disbursed = '0.00';
if ($data_report['report'][$i]->payment_flow == 'out') {
$disbursed = $report_detail->amount;
$total_disbursed += $report_detail->amount;
}
$coa = Coa::find($report_detail->coa_id);
array_push($header, [$total_row,
date_format_view($data_report['report'][$i]->formulir->form_date),
$data_report['report'][$i]->formulir->form_number,
$data_report['report'][$i]->person->codeName,
$coa->coa_number,
$coa->name,
$report_detail->notes_detail,
$received * 1,
$disbursed * 1
]);
}
}
$total_row = $total_row + 6;
$sheet->fromArray($header, null, 'A6', false, false);
$sheet->setBorder('A6:I'.$total_row, 'thin');
$next_row = $total_row + 1;
$sheet->cell('G'.$next_row, function ($cell) {
$cell->setValue('TOTAL');
$cell->setFont(array(
'family' => 'Times New Roman',
'size' => '12',
'bold' => true
));
});
$sheet->cell('H'.$next_row, function ($cell) use ($total_received) {
$cell->setValue($total_received);
});
$sheet->cell('I'.$next_row, function ($cell) use ($total_disbursed) {
$cell->setValue($total_disbursed);
});
$next_row = $next_row + 1;
$sheet->cell('G'.$next_row, function ($cell) {
$cell->setValue('OPENING BALANCE');
$cell->setFont(array(
'family' => 'Times New Roman',
'size' => '12',
'bold' => true
));
});
$sheet->cell('H'.$next_row, function ($cell) use ($data_report) {
$cell->setValue($data_report['journal_debit'] - $data_report['journal_credit']);
});
$next_row = $next_row + 1;
$sheet->cell('G'.$next_row, function ($cell) {
$cell->setValue('ENDING BALANCE');
$cell->setFont(array(
'family' => 'Times New Roman',
'size' => '12',
'bold' => true
));
});
$sheet->cell('H'.$next_row, function ($cell) use ($data_report, $total_received, $total_disbursed) {
$cell->setValue(($data_report['journal_debit'] - $data_report['journal_credit']) + $total_received - $total_disbursed);
});
});
})->export('xls');
}
}
| bgd-point/point-app-test | packages/point/point-finance/src/Http/Controllers/ReportController.php | PHP | apache-2.0 | 13,507 |
/* Copyright (c) The m-m-m Team, Licensed under the Apache License, Version 2.0
* http://www.apache.org/licenses/LICENSE-2.0 */
/**
 * Contains the API of the view for the search.
 * <a name="documentation"></a><h2>Search View API</h2>
 * This package contains the API of the view that presents the search engine as
 * a web application. It helps to enter search queries, perform them, present
 * the hits and show the details of a single hit.
 */
package net.sf.mmm.search.view.api;
| m-m-m/search | search/engine/api/src/main/java/net/sf/mmm/search/view/api/package-info.java | Java | apache-2.0 | 492 |
//
// This file was generated by the JavaTM Architecture for XML Binding(JAXB) Reference Implementation, v2.2.8-b130911.1802
// See <a href="http://java.sun.com/xml/jaxb">http://java.sun.com/xml/jaxb</a>
// Any modifications to this file will be lost upon recompilation of the source schema.
// Generated on: 2016.10.09 at 10:10:23 AM CST
//
package elong;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlElement;
import java.util.List;
import com.alibaba.fastjson.annotation.JSONField;
import javax.xml.bind.annotation.XmlType;
/**
* <p>Java class for NameValue complex type.
*
* <p>The following schema fragment specifies the expected content contained within this class.
*
* <pre>
* <complexType name="NameValue">
* <complexContent>
* <restriction base="{http://www.w3.org/2001/XMLSchema}anyType">
* <sequence>
* <element name="Name" type="{http://www.w3.org/2001/XMLSchema}string" minOccurs="0"/>
* <element name="Value" type="{http://www.w3.org/2001/XMLSchema}string" minOccurs="0"/>
* </sequence>
* </restriction>
* </complexContent>
* </complexType>
* </pre>
*
*
*/
@XmlAccessorType(XmlAccessType.FIELD)
@XmlType(name = "NameValue", propOrder = {
    "name",
    "value"
})
public class NameValue {

    /** The "Name" element; may be {@code null} (minOccurs="0" in the schema). */
    @JSONField(name = "Name")
    protected String name;

    /** The "Value" element; may be {@code null} (minOccurs="0" in the schema). */
    @JSONField(name = "Value")
    protected String value;

    /**
     * Returns the name, or {@code null} if it has not been set.
     */
    public String getName() {
        return name;
    }

    /**
     * Sets the name.
     *
     * @param value the new name; {@code null} is allowed
     */
    public void setName(String value) {
        name = value;
    }

    /**
     * Returns the value, or {@code null} if it has not been set.
     */
    public String getValue() {
        return value;
    }

    /**
     * Sets the value.
     *
     * @param value the new value; {@code null} is allowed
     */
    public void setValue(String value) {
        this.value = value;
    }
}
| leonmaybe/eLong-OpenAPI-JAVA-demo | src/elong/NameValue.java | Java | apache-2.0 | 2,402 |
#
# Cookbook Name:: nginx
# Attributes:: geoip
#
# Author:: Jamie Winsor (<jamie@vialstudios.com>)
#
# Copyright 2012-2013, Riot Games
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Where the GeoIP database files are installed on the node.
default['nginx']['geoip']['path'] = '/srv/geoip'
# Whether to also download and install the city-level database.
default['nginx']['geoip']['enable_city'] = true
# MaxMind GeoLite country database; checksum is nil because the upstream file
# changes regularly.
default['nginx']['geoip']['country_dat_url'] = 'https://geolite.maxmind.com/download/geoip/database/GeoLiteCountry/GeoIP.dat.gz'
default['nginx']['geoip']['country_dat_checksum'] = nil
# MaxMind GeoLite city database (used only when enable_city is true).
default['nginx']['geoip']['city_dat_url'] = 'https://geolite.maxmind.com/download/geoip/database/GeoLiteCity.dat.gz'
default['nginx']['geoip']['city_dat_checksum'] = nil
# Version of the MaxMind GeoIP C library to build from source.
default['nginx']['geoip']['lib_version'] = '1.6.3'
lib_version = node['nginx']['geoip']['lib_version'] # convenience variable for line length
default['nginx']['geoip']['lib_url'] = "https://github.com/maxmind/geoip-api-c/releases/download/v#{lib_version}/GeoIP-#{lib_version}.tar.gz"
default['nginx']['geoip']['lib_checksum'] = 'e483839a81a91c3c85df89ef409fc7b526c489e0355d537861cfd1ea9534a8f2'
| luishdez/nginx-cookbook | attributes/geoip.rb | Ruby | apache-2.0 | 1,602 |
/*
* Copyright (c) 2016, WSO2 Inc. (http://www.wso2.org) All Rights Reserved.
*
* WSO2 Inc. licenses this file to you under the Apache License,
* Version 2.0 (the "License"); you may not use this file except
* in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.wso2.carbon.device.mgt.common.push.notification;
import javax.xml.bind.annotation.XmlElement;
import javax.xml.bind.annotation.XmlRootElement;
import java.util.Map;
@XmlRootElement(name = "PushNotificationProviderConfiguration")
public class PushNotificationConfig {

    // Provider type identifier; required in the XML configuration.
    private String type;
    // Free-form provider properties keyed by name.
    // NOTE(review): unlike 'type' this field is package-private — confirm whether
    // same-package access is intended or the 'private' modifier was simply omitted.
    Map<String, String> properties;

    /**
     * Creates a configuration for a push-notification provider.
     *
     * @param type       provider type identifier
     * @param properties provider-specific properties keyed by name
     */
    public PushNotificationConfig(String type, Map<String, String> properties) {
        this.type = type;
        this.properties = properties;
    }

    @XmlElement(name = "Type", required = true)
    public String getType() {
        return type;
    }

    public Map<String, String> getProperties() {
        return properties;
    }

    // Convenience lookup for a single property.
    // NOTE(review): throws NullPointerException if the config was built with a
    // null properties map — confirm callers never do that.
    public String getProperty(String name) {
        return properties.get(name);
    }
}
| Shabirmean/carbon-device-mgt | components/device-mgt/org.wso2.carbon.device.mgt.common/src/main/java/org/wso2/carbon/device/mgt/common/push/notification/PushNotificationConfig.java | Java | apache-2.0 | 1,474 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package de.knightsoftnet.mtwidgets.client.ui.widget;
import de.knightsoftnet.mtwidgets.client.ui.widget.oracle.PhoneNumberCommonRestOracle;
import de.knightsoftnet.validators.client.rest.helper.FutureResult;
import de.knightsoftnet.validators.shared.data.ValueWithPos;
import de.knightsoftnet.validators.shared.data.ValueWithPosAndCountry;
import java.util.concurrent.ExecutionException;
/**
* phone number common suggest widget.
*
* @author Manfred Tremmel
*
*/
public class PhoneNumberCommonRestSuggestBox extends AbstractPhoneNumberRestSuggestBox {

    /**
     * Default constructor; wires in the common phone-number REST oracle.
     */
    public PhoneNumberCommonRestSuggestBox() {
        super(new PhoneNumberCommonRestOracle());
    }

    /**
     * Delegates formatting (with cursor position) to the "common" REST endpoint.
     */
    @Override
    public void formatValue(final ValueWithPosAndCountry<String> pkey,
        final FutureResult<ValueWithPos<String>> presult) throws ExecutionException {
        service.formatCommonWithPos(pkey, presult);
    }

    /**
     * Characters that are formatting decoration in a phone number rather than
     * digits: {@code + space / - ( )}.
     */
    @Override
    public boolean isFormatingCharacter(final char pcharacter) {
        switch (pcharacter) {
            case '+':
            case ' ':
            case '/':
            case '-':
            case '(':
            case ')':
                return true;
            default:
                return false;
        }
    }
}
| ManfredTremmel/gwt-bean-validators | gwt-mt-widgets-restygwt-jaxrs/src/main/java/de/knightsoftnet/mtwidgets/client/ui/widget/PhoneNumberCommonRestSuggestBox.java | Java | apache-2.0 | 1,928 |
/**
 * Support classes for working with annotated message-handling methods with
 * non-blocking, reactive contracts.
 *
 * <p>The whole package is declared null-safe via {@code @NonNullApi} and
 * {@code @NonNullFields}: parameters, return values and fields are non-null
 * unless explicitly annotated otherwise.
 */
@NonNullApi
@NonNullFields
package org.springframework.messaging.handler.annotation.reactive;

import org.springframework.lang.NonNullApi;
import org.springframework.lang.NonNullFields;
| spring-projects/spring-framework | spring-messaging/src/main/java/org/springframework/messaging/handler/annotation/reactive/package-info.java | Java | apache-2.0 | 307 |
package edu.harvard.mgh.lcs.sprout.forms.study.to;
import java.io.Serializable;
/**
 * Simple serializable name/value parameter transfer object.
 */
public class ParameterTO implements Serializable {

    private static final long serialVersionUID = -2079563494105436251L;

    /** Parameter name; {@code null} until set. */
    private String name;
    /** Parameter value; {@code null} until set. */
    private String value;

    public String getName() {
        return name;
    }

    public void setName(String newName) {
        name = newName;
    }

    public String getValue() {
        return value;
    }

    public void setValue(String newValue) {
        value = newValue;
    }
}
| stephenlorenz/sproutstudy | sproutStudy_ejb/src/main/java/edu/harvard/mgh/lcs/sprout/forms/study/to/ParameterTO.java | Java | apache-2.0 | 494 |
/*
* Copyright (C) 2018-2021 DiffPlug
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.diffplug.gradle.eclipse;
import com.diffplug.common.base.Box;
import com.diffplug.common.base.Errors;
import com.diffplug.gradle.FileMisc;
import com.diffplug.gradle.GoomphCacheLocations;
import com.diffplug.gradle.ZipMisc;
import com.diffplug.gradle.pde.EclipseRelease;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.util.HashMap;
import java.util.Map;
import java.util.Objects;
import java.util.function.Function;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import javax.xml.parsers.ParserConfigurationException;
import org.osgi.framework.Version;
import org.w3c.dom.Document;
import org.w3c.dom.Node;
import org.xml.sax.SAXException;
/** Maps eclipse jars to their mavenCentral artifact ids and versions based on their official release. */
public class MavenCentralMapping {
    // 4.6.2 was the first Eclipse release published to Maven Central.
    private static final EclipseRelease FIRST_ON_CENTRAL = EclipseRelease.official("4.6.2");
    // Maven groupIds used by the Eclipse projects on Central.
    private static final String PLATFORM = "org.eclipse.platform";
    private static final String JDT = "org.eclipse.jdt";
    private static final String PDE = "org.eclipse.pde";
    private static final String EMF = "org.eclipse.emf";
    private static final String ECF = "org.eclipse.ecf";
    private static final String ICU_BUNDLE_ID = "com.ibm.icu";

    /** Returns true for any of the Eclipse groupIds handled by this mapping. */
    public static boolean isEclipseGroup(String group) {
        return group.equals(PLATFORM) || group.equals(JDT) || group.equals(PDE) || group.equals(EMF) || group.equals(ECF);
    }

    /** Returns the MavenCentral groupId:artifactId appropriate for the given bundleId. */
    public static String groupIdArtifactId(String bundleId) {
        // Special cases first (ICU and the batch compiler publish under different
        // artifactIds), then prefix-match on the project groupIds; everything else
        // falls back to the platform group.
        if (ICU_BUNDLE_ID.equals(bundleId)) {
            return "com.ibm.icu:icu4j";
        } else if ("org.eclipse.jdt.core.compiler.batch".equals(bundleId)) {
            return JDT + ":ecj";
        } else if (bundleId.startsWith(JDT)) {
            return JDT + ":" + bundleId;
        } else if (bundleId.startsWith(PDE)) {
            return PDE + ":" + bundleId;
        } else if (bundleId.startsWith(EMF)) {
            return EMF + ":" + bundleId;
        } else if (bundleId.startsWith(ECF)) {
            return ECF + ":" + bundleId;
        } else {
            return PLATFORM + ":" + bundleId;
        }
    }

    /** Creates a map from a key defined by the keyExtractor function to its corresponding version in maven central. */
    static Map<String, String> parse(InputStream inputStream, Function<String, String> keyExtractor) throws ParserConfigurationException, SAXException, IOException {
        Map<String, String> map = new HashMap<>();
        DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
        DocumentBuilder db = dbf.newDocumentBuilder();
        Document doc = db.parse(inputStream);
        // p2 artifacts.xml layout: <repository><artifacts><artifact .../>...</artifacts>...
        Node artifacts = doc.getDocumentElement().getElementsByTagName("artifacts").item(0);
        for (int i = 0; i < artifacts.getChildNodes().getLength(); ++i) {
            Node artifact = artifacts.getChildNodes().item(i);
            if ("artifact".equals(artifact.getNodeName())) {
                String classifier = artifact.getAttributes().getNamedItem("classifier").getNodeValue();
                // Only OSGi bundles are mapped; features and binaries are skipped.
                if ("osgi.bundle".equals(classifier)) {
                    String bundleId = artifact.getAttributes().getNamedItem("id").getNodeValue();
                    String bundleVersion = artifact.getAttributes().getNamedItem("version").getNodeValue();
                    String key = keyExtractor.apply(bundleId);
                    String version = calculateMavenCentralVersion(bundleId, bundleVersion);
                    map.put(key, version);
                }
            }
        }
        return map;
    }

    // Drops the OSGi qualifier; icu4j additionally publishes "major.minor" when
    // its micro segment is 0.
    static String calculateMavenCentralVersion(String bundleId, String bundleVersion) {
        Version parsed = Version.parseVersion(bundleVersion);
        if (ICU_BUNDLE_ID.equals(bundleId) && parsed.getMicro() == 0) {
            return parsed.getMajor() + "." + parsed.getMinor();
        } else {
            return parsed.getMajor() + "." + parsed.getMinor() + "." + parsed.getMicro();
        }
    }

    /** Returns a map from every bundle-id to its corresponding 3-part version (the qualifier is dropped). */
    public static Map<String, String> bundleToVersion(EclipseRelease release) {
        return createVersionMap(release, Function.identity());
    }

    /** Returns a map from every groupId:artifactId to its corresponding version in maven central (the qualifier is dropped). */
    public static Map<String, String> groupIdArtifactIdToVersion(EclipseRelease release) {
        return createVersionMap(release, MavenCentralMapping::groupIdArtifactId);
    }

    private static final String ARTIFACTS_JAR = "artifacts.jar";

    // Downloads (or reuses a cached copy of) the release's artifacts.jar and
    // parses it into a key->version map using the given key extractor.
    private static Map<String, String> createVersionMap(EclipseRelease release, Function<String, String> keyExtractor) {
        // warn if the user is asking for a too-old version of eclipse, but go ahead and try anyway just in case
        if (release.version().compareTo(FIRST_ON_CENTRAL.version()) < 0) {
            throw new IllegalArgumentException(FIRST_ON_CENTRAL.version() + " was the first eclipse release that was published on maven central, you requested " + release);
        }
        if (!release.isXYZ()) {
            throw new IllegalArgumentException("Maven central mapping requires 'x.y.z' and does not support 'x.y'. Try " + release + ".0 instead of " + release);
        }
        File versionFolder = new File(GoomphCacheLocations.eclipseReleaseMetadata(), release.version().toString());
        FileMisc.mkdirs(versionFolder);
        File artifactsJar = new File(versionFolder, ARTIFACTS_JAR);
        // A zero-length file indicates a previously failed download; re-fetch it.
        if (artifactsJar.exists() && artifactsJar.length() > 0) {
            try {
                return parseFromFile(artifactsJar, keyExtractor);
            } catch (Exception e) {
                // Cached copy is corrupt: log, delete, and fall through to re-download.
                e.printStackTrace();
                System.err.println("Retrying download...");
                FileMisc.forceDelete(artifactsJar);
            }
        }
        return Errors.rethrow().get(() -> {
            FileMisc.download(release.updateSite() + "artifacts.jar", artifactsJar);
            return parseFromFile(artifactsJar, keyExtractor);
        });
    }

    // Reads artifacts.xml out of the jar and parses it; the Box works around the
    // callback-style ZipMisc.read API to return a value.
    private static Map<String, String> parseFromFile(File artifactsJar, Function<String, String> keyExtractor) throws IOException {
        Box.Nullable<Map<String, String>> value = Box.Nullable.ofNull();
        ZipMisc.read(artifactsJar, "artifacts.xml", input -> {
            value.set(Errors.rethrow().get(() -> parse(input, keyExtractor)));
        });
        return Objects.requireNonNull(value.get());
    }
}
| diffplug/goomph | src/main/java/com/diffplug/gradle/eclipse/MavenCentralMapping.java | Java | apache-2.0 | 6,641 |
<?
// Bitrix language file (ru): message shown when the information-block module is
// not installed. The Russian string below is user-facing text — do not translate.
$MESS ['SH_IBLOCK_MODULE_NOT_INSTALLED'] = "Модуль инфоблоков не установлен";
?> | SidiGi/bx-simple-bigdata | bigdata.products.list/lang/ru/class.php | PHP | apache-2.0 | 111 |
/*
* Copyright 2013 The Ehensin Project
*
* The Ehensin Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package com.ehensin.tunnel.client.event;
/**
 * Enumerates the event types used by the tunnel client.
 */
public enum EventTypeEnum {
    // Grading of a channel (semantics defined by the event consumers — not
    // visible from this file).
    CHANNEL_GRADE,
    // Grading of a tunnel.
    TUNNAL_GRADE,
    // Callback completion for an asynchronous invocation.
    ASYNC_CALLBACK,
    // Configuration has been updated.
    CONFIG_UPDATE
}
| ehensin/ehensin-tunnel | tunnel-client/src/main/java/com/ehensin/tunnel/client/event/EventTypeEnum.java | Java | apache-2.0 | 825 |
/*
* Copyright 2017 NAVER Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.navercorp.pinpoint.web.service.stat;
import com.navercorp.pinpoint.common.util.Assert;
import com.navercorp.pinpoint.web.dao.stat.SampledResponseTimeDao;
import com.navercorp.pinpoint.web.util.TimeWindow;
import com.navercorp.pinpoint.web.vo.stat.SampledResponseTime;
import com.navercorp.pinpoint.web.vo.stat.chart.StatChart;
import com.navercorp.pinpoint.web.vo.stat.chart.agent.ResponseTimeChart;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.stereotype.Service;
import java.util.ArrayList;
import java.util.List;
import java.util.Objects;
/**
* @author Taejin Koo
*/
@Service
public class ResponseTimeChartService implements AgentStatChartService {

    private final SampledResponseTimeDao sampledResponseTimeDao;

    public ResponseTimeChartService(@Qualifier("sampledResponseTimeDaoFactory") SampledResponseTimeDao sampledResponseTimeDao) {
        this.sampledResponseTimeDao = Objects.requireNonNull(sampledResponseTimeDao, "sampledResponseTimeDao");
    }

    /**
     * Builds the response-time chart for one agent over the given time window.
     *
     * @throws NullPointerException if {@code agentId} or {@code timeWindow} is null
     */
    @Override
    public StatChart selectAgentChart(String agentId, TimeWindow timeWindow) {
        // Consistently use java.util.Objects for both null checks (the original
        // mixed the project's Assert helper with Objects for no apparent reason).
        Objects.requireNonNull(agentId, "agentId");
        Objects.requireNonNull(timeWindow, "timeWindow");
        List<SampledResponseTime> sampledResponseTimes = this.sampledResponseTimeDao.getSampledAgentStatList(agentId, timeWindow);
        return new ResponseTimeChart(timeWindow, sampledResponseTimes);
    }

    /**
     * Returns the single response-time chart wrapped in a mutable list, as the
     * interface requires a list of charts.
     */
    @Override
    public List<StatChart> selectAgentChartList(String agentId, TimeWindow timeWindow) {
        StatChart agentStatChart = selectAgentChart(agentId, timeWindow);
        List<StatChart> result = new ArrayList<>(1);
        result.add(agentStatChart);
        return result;
    }
}
| suraj-raturi/pinpoint | web/src/main/java/com/navercorp/pinpoint/web/service/stat/ResponseTimeChartService.java | Java | apache-2.0 | 2,401 |
/**********************************************************************
Copyright (c) 2012-2014 Alexander Kerner. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
***********************************************************************/
package org.msqbat.fe.impl;
import java.util.ArrayList;
import java.util.List;
import net.sf.jranges.range.doublerange.RangeDouble;
import net.sf.jranges.range.doublerange.impl.FactoryRangeDoubleZeroPositive;
import net.sf.jranges.range.doublerange.impl.RangeDoubleUtils;
import net.sf.jranges.range.doublerange.impl.ZeroPositiveDoubleRange;
public class MALDIBoxes extends Binning {

    // Lazily computed cache of the split ranges; access is guarded by the
    // synchronized split() method.
    private transient List<RangeDouble> splitRanges;

    /**
     * Returns the overall range covered by this binning
     * (130.5655 .. 10254.6250; third constructor argument presumably the step —
     * TODO confirm against ZeroPositiveDoubleRange's contract).
     */
    public RangeDouble getRange() {
        return new ZeroPositiveDoubleRange(130.5655, 10254.6250, 1.0005);
    }

    @Override
    public List<RangeDouble> getRanges() {
        return split();
    }

    /**
     * Splits the overall range into 6 sub-ranges ("boxes"), computing the result
     * at most once and caching it.
     */
    public synchronized List<RangeDouble> split() {
        if (splitRanges == null) {
            splitRanges = new ArrayList<RangeDouble>(RangeDoubleUtils.split(getRange(), 6,
                    new FactoryRangeDoubleZeroPositive()));
        }
        return splitRanges;
    }
}
| MSqBAT/org.msqbat.fe | plugins/org.msqbat.fe.impl/src/org/msqbat/fe/impl/MALDIBoxes.java | Java | apache-2.0 | 1,672 |
/*
* Licensed to the Technische Universität Darmstadt under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The Technische Universität Darmstadt
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License.
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package de.tudarmstadt.ukp.clarin.webanno.api.annotation.adapter;
import static de.tudarmstadt.ukp.clarin.webanno.api.WebAnnoConst.COREFERENCE_RELATION_FEATURE;
import static de.tudarmstadt.ukp.clarin.webanno.api.WebAnnoConst.COREFERENCE_TYPE_FEATURE;
import static java.lang.System.currentTimeMillis;
import static java.util.Collections.emptyList;
import static org.apache.uima.fit.util.CasUtil.selectFS;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.function.Supplier;
import org.apache.commons.lang3.tuple.Pair;
import org.apache.uima.cas.CAS;
import org.apache.uima.cas.FeatureStructure;
import org.apache.uima.cas.Type;
import org.apache.uima.cas.text.AnnotationFS;
import org.apache.uima.fit.util.CasUtil;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.context.ApplicationEventPublisher;
import org.springframework.core.annotation.AnnotationAwareOrderComparator;
import de.tudarmstadt.ukp.clarin.webanno.api.AnnotationSchemaService;
import de.tudarmstadt.ukp.clarin.webanno.api.annotation.event.ChainLinkCreatedEvent;
import de.tudarmstadt.ukp.clarin.webanno.api.annotation.event.ChainLinkDeletedEvent;
import de.tudarmstadt.ukp.clarin.webanno.api.annotation.event.ChainSpanCreatedEvent;
import de.tudarmstadt.ukp.clarin.webanno.api.annotation.event.ChainSpanDeletedEvent;
import de.tudarmstadt.ukp.clarin.webanno.api.annotation.exception.AnnotationException;
import de.tudarmstadt.ukp.clarin.webanno.api.annotation.feature.FeatureSupportRegistry;
import de.tudarmstadt.ukp.clarin.webanno.api.annotation.layer.LayerSupportRegistry;
import de.tudarmstadt.ukp.clarin.webanno.api.annotation.model.AnnotatorState;
import de.tudarmstadt.ukp.clarin.webanno.api.annotation.model.VID;
import de.tudarmstadt.ukp.clarin.webanno.api.annotation.util.AnnotationComparator;
import de.tudarmstadt.ukp.clarin.webanno.api.annotation.util.WebAnnoCasUtil;
import de.tudarmstadt.ukp.clarin.webanno.model.AnnotationFeature;
import de.tudarmstadt.ukp.clarin.webanno.model.AnnotationLayer;
import de.tudarmstadt.ukp.clarin.webanno.model.SourceDocument;
import de.tudarmstadt.ukp.clarin.webanno.support.logging.LogMessage;
/**
* Manage interactions with annotations on a chain layer.
*/
public class ChainAdapter
extends TypeAdapter_ImplBase
{
private final Logger log = LoggerFactory.getLogger(getClass());
public static final String CHAIN = "Chain";
public static final String LINK = "Link";
public static final String FEAT_FIRST = "first";
public static final String FEAT_NEXT = "next";
private final List<SpanLayerBehavior> behaviors;
public ChainAdapter(LayerSupportRegistry aLayerSupportRegistry,
FeatureSupportRegistry aFeatureSupportRegistry,
ApplicationEventPublisher aEventPublisher, AnnotationLayer aLayer,
Supplier<Collection<AnnotationFeature>> aFeatures, List<SpanLayerBehavior> aBehaviors)
{
super(aLayerSupportRegistry, aFeatureSupportRegistry, aEventPublisher, aLayer, aFeatures);
if (aBehaviors == null) {
behaviors = emptyList();
}
else {
List<SpanLayerBehavior> temp = new ArrayList<>(aBehaviors);
AnnotationAwareOrderComparator.sort(temp);
behaviors = temp;
}
}
public AnnotationFS addSpan(SourceDocument aDocument, String aUsername, CAS aCas, int aBegin,
int aEnd)
throws AnnotationException
{
return handle(new CreateSpanAnnotationRequest(aDocument, aUsername, aCas, aBegin, aEnd));
}
public AnnotationFS handle(CreateSpanAnnotationRequest aRequest) throws AnnotationException
{
CreateSpanAnnotationRequest request = aRequest;
for (SpanLayerBehavior behavior : behaviors) {
request = behavior.onCreate(this, request);
}
AnnotationFS newSpan = createChainElementAnnotation(request);
publishEvent(new ChainSpanCreatedEvent(this, aRequest.getDocument(), aRequest.getUsername(),
getLayer(), newSpan));
return newSpan;
}
private AnnotationFS createChainElementAnnotation(CreateSpanAnnotationRequest aRequest)
{
// Add the link annotation on the span
AnnotationFS newLink = newLink(aRequest.getCas(), aRequest.getBegin(), aRequest.getEnd());
// The added link is a new chain on its own - add the chain head FS
newChain(aRequest.getCas(), newLink);
return newLink;
}
public int addArc(SourceDocument aDocument, String aUsername, CAS aCas, AnnotationFS aOriginFs,
AnnotationFS aTargetFs)
{
// Determine if the links are adjacent. If so, just update the arc label
AnnotationFS originNext = getNextLink(aOriginFs);
AnnotationFS targetNext = getNextLink(aTargetFs);
// adjacent - origin links to target
if (WebAnnoCasUtil.isSame(originNext, aTargetFs)) {
}
// adjacent - target links to origin
else if (WebAnnoCasUtil.isSame(targetNext, aOriginFs)) {
if (isLinkedListBehavior()) {
throw new IllegalStateException("Cannot change direction of a link within a chain");
}
else {
// in set mode there are no arc labels anyway
}
}
// if origin and target are not adjacent
else {
FeatureStructure originChain = getChainForLink(aCas, aOriginFs);
FeatureStructure targetChain = getChainForLink(aCas, aTargetFs);
AnnotationFS targetPrev = getPrevLink(targetChain, aTargetFs);
if (!WebAnnoCasUtil.isSame(originChain, targetChain)) {
if (isLinkedListBehavior()) {
// if the two links are in different chains then split the chains up at the
// origin point and target point and create a new link between origin and target
// the tail of the origin chain becomes a new chain
// if originFs has a next, then split of the origin chain up
// the rest becomes its own chain
if (originNext != null) {
newChain(aCas, originNext);
// we set originNext below
// we set the arc label below
}
// if targetFs has a prev, then split it off
if (targetPrev != null) {
setNextLink(targetPrev, null);
}
// if it has no prev then we fully append the target chain to the origin chain
// and we can remove the target chain head
else {
aCas.removeFsFromIndexes(targetChain);
}
// connect the rest of the target chain to the origin chain
setNextLink(aOriginFs, aTargetFs);
}
else {
// collect all the links
List<AnnotationFS> links = new ArrayList<>();
links.addAll(collectLinks(originChain));
links.addAll(collectLinks(targetChain));
// sort them ascending by begin and descending by end (default UIMA order)
links.sort(new AnnotationComparator());
// thread them
AnnotationFS prev = null;
for (AnnotationFS link : links) {
if (prev != null) {
// Set next link
setNextLink(prev, link);
// // Clear arc label - it makes no sense in this mode
// setLabel(prev, aFeature, null);
}
prev = link;
}
// make sure the last link terminates the chain
setNextLink(links.get(links.size() - 1), null);
// the chain head needs to point to the first link
setFirstLink(originChain, links.get(0));
// we don't need the second chain head anymore
aCas.removeFsFromIndexes(targetChain);
}
}
else {
// if the two links are in the same chain, we just ignore the action
if (isLinkedListBehavior()) {
throw new IllegalStateException(
"Cannot connect two spans that are already part of the same chain");
}
}
}
publishEvent(new ChainLinkCreatedEvent(this, aDocument, aUsername, getLayer(), aOriginFs));
// We do not actually create a new FS for the arc. Features are set on the originFS.
return WebAnnoCasUtil.getAddr(aOriginFs);
}
@Override
public void delete(SourceDocument aDocument, String aUsername, CAS aCas, VID aVid)
{
if (aVid.getSubId() == VID.NONE) {
deleteSpan(aDocument, aUsername, aCas, aVid.getId());
}
else {
deleteLink(aDocument, aUsername, aCas, aVid.getId());
}
}
private void deleteLink(SourceDocument aDocument, String aUsername, CAS aCas, int aAddress)
{
AnnotationFS linkToDelete = WebAnnoCasUtil.selectByAddr(aCas, AnnotationFS.class, aAddress);
// Create the tail chain
// We know that there must be a next link, otherwise no arc would have been rendered!
newChain(aCas, getNextLink(linkToDelete));
// Disconnect the tail from the head
setNextLink(linkToDelete, null);
publishEvent(
new ChainLinkDeletedEvent(this, aDocument, aUsername, getLayer(), linkToDelete));
}
private void deleteSpan(SourceDocument aDocument, String aUsername, CAS aCas, int aAddress)
{
Type chainType = CasUtil.getType(aCas, getChainTypeName());
AnnotationFS linkToDelete = WebAnnoCasUtil.selectByAddr(aCas, AnnotationFS.class, aAddress);
// case 1 "removing first link": we keep the existing chain head and just remove the
// first element
//
// case 2 "removing middle link": the new chain consists of the rest, the old chain head
// remains
//
// case 3 "removing the last link": the old chain head remains and the last element of the
// chain is removed.
// To know which case we have, we first need to find the chain containing the element to
// be deleted.
FeatureStructure oldChainFs = null;
AnnotationFS prevLinkFs = null;
chainLoop: for (FeatureStructure chainFs : selectFS(aCas, chainType)) {
AnnotationFS linkFs = getFirstLink(chainFs);
prevLinkFs = null; // Reset when entering new chain!
// Now we seek the link within the current chain
while (linkFs != null) {
if (WebAnnoCasUtil.isSame(linkFs, linkToDelete)) {
oldChainFs = chainFs;
break chainLoop;
}
prevLinkFs = linkFs;
linkFs = getNextLink(linkFs);
}
}
// Did we find the chain?!
if (oldChainFs == null) {
throw new IllegalArgumentException(
"Chain link with address [" + aAddress + "] not found in any chain!");
}
AnnotationFS followingLinkToDelete = getNextLink(linkToDelete);
if (prevLinkFs == null) {
// case 1: first element removed
setFirstLink(oldChainFs, followingLinkToDelete);
aCas.removeFsFromIndexes(linkToDelete);
// removed last element form chain?
if (followingLinkToDelete == null) {
aCas.removeFsFromIndexes(oldChainFs);
}
}
else if (followingLinkToDelete == null) {
// case 3: removing the last link (but not leaving the chain empty)
setNextLink(prevLinkFs, null);
aCas.removeFsFromIndexes(linkToDelete);
}
else if (prevLinkFs != null && followingLinkToDelete != null) {
// case 2: removing a middle link
// Set up new chain for rest
newChain(aCas, followingLinkToDelete);
// Cut off from old chain
setNextLink(prevLinkFs, null);
// Delete middle link
aCas.removeFsFromIndexes(linkToDelete);
}
else {
throw new IllegalStateException(
"Unexpected situation while removing link. Please contact developers.");
}
publishEvent(
new ChainSpanDeletedEvent(this, aDocument, aUsername, getLayer(), linkToDelete));
}
@Override
public String getAnnotationTypeName()
{
return getLayer().getName() + LINK;
}
public String getChainTypeName()
{
return getLayer().getName() + CHAIN;
}
/**
* Find the chain head for the given link.
*
* @param aCas
* the CAS.
* @param aLink
* the link to search the chain for.
* @return the chain.
*/
private FeatureStructure getChainForLink(CAS aCas, AnnotationFS aLink)
{
Type chainType = CasUtil.getType(aCas, getChainTypeName());
for (FeatureStructure chainFs : selectFS(aCas, chainType)) {
AnnotationFS linkFs = getFirstLink(chainFs);
// Now we seek the link within the current chain
while (linkFs != null) {
if (WebAnnoCasUtil.isSame(linkFs, aLink)) {
return chainFs;
}
linkFs = getNextLink(linkFs);
}
}
// This should never happen unless the data in the CAS has been created wrongly
throw new IllegalArgumentException("Link not part of any chain");
}
private List<AnnotationFS> collectLinks(FeatureStructure aChain)
{
List<AnnotationFS> links = new ArrayList<>();
// Now we seek the link within the current chain
AnnotationFS linkFs = (AnnotationFS) aChain
.getFeatureValue(aChain.getType().getFeatureByBaseName(getChainFirstFeatureName()));
while (linkFs != null) {
links.add(linkFs);
linkFs = getNextLink(linkFs);
}
return links;
}
/**
* Create a new chain head feature structure. Already adds the chain to the CAS.
*/
private FeatureStructure newChain(CAS aCas, AnnotationFS aFirstLink)
{
Type chainType = CasUtil.getType(aCas, getChainTypeName());
FeatureStructure newChain = aCas.createFS(chainType);
newChain.setFeatureValue(chainType.getFeatureByBaseName(getChainFirstFeatureName()),
aFirstLink);
aCas.addFsToIndexes(newChain);
return newChain;
}
/**
* Create a new link annotation. Already adds the chain to the CAS.
*/
private AnnotationFS newLink(CAS aCas, int aBegin, int aEnd)
{
Type linkType = CasUtil.getType(aCas, getAnnotationTypeName());
AnnotationFS newLink = aCas.createAnnotation(linkType, aBegin, aEnd);
aCas.addFsToIndexes(newLink);
return newLink;
}
/**
* Set the first link of a chain in the chain head feature structure.
*/
private void setFirstLink(FeatureStructure aChain, AnnotationFS aLink)
{
aChain.setFeatureValue(aChain.getType().getFeatureByBaseName(getChainFirstFeatureName()),
aLink);
}
/**
* Get the first link of a chain from the chain head feature structure.
*/
private AnnotationFS getFirstLink(FeatureStructure aChain)
{
return (AnnotationFS) aChain
.getFeatureValue(aChain.getType().getFeatureByBaseName(getChainFirstFeatureName()));
}
/**
* Get the chain link before the given link within the given chain. The given link must be part
* of the given chain.
*
* @param aChain
* a chain head feature structure.
* @param aLink
* a link.
* @return the link before the given link or null if the given link is the first link of the
* chain.
*/
private AnnotationFS getPrevLink(FeatureStructure aChain, AnnotationFS aLink)
{
AnnotationFS prevLink = null;
AnnotationFS curLink = getFirstLink(aChain);
while (curLink != null) {
if (WebAnnoCasUtil.isSame(curLink, aLink)) {
break;
}
prevLink = curLink;
curLink = getNextLink(curLink);
}
return prevLink;
}
/**
* Set the link following the current link.
*/
private void setNextLink(AnnotationFS aLink, AnnotationFS aNext)
{
aLink.setFeatureValue(aLink.getType().getFeatureByBaseName(getLinkNextFeatureName()),
aNext);
}
/**
* Get the link following the current link.
*/
private AnnotationFS getNextLink(AnnotationFS aLink)
{
return (AnnotationFS) aLink
.getFeatureValue(aLink.getType().getFeatureByBaseName(getLinkNextFeatureName()));
}
public boolean isLinkedListBehavior()
{
return getLayer().isLinkedListBehavior();
}
public String getLinkNextFeatureName()
{
return FEAT_NEXT;
}
public String getChainFirstFeatureName()
{
return FEAT_FIRST;
}
@Override
public void initialize(AnnotationSchemaService aSchemaService)
{
AnnotationFeature relationFeature = new AnnotationFeature();
relationFeature.setType(CAS.TYPE_NAME_STRING);
relationFeature.setName(COREFERENCE_RELATION_FEATURE);
relationFeature.setLayer(getLayer());
relationFeature.setEnabled(true);
relationFeature.setUiName("Reference Relation");
relationFeature.setProject(getLayer().getProject());
aSchemaService.createFeature(relationFeature);
AnnotationFeature typeFeature = new AnnotationFeature();
typeFeature.setType(CAS.TYPE_NAME_STRING);
typeFeature.setName(COREFERENCE_TYPE_FEATURE);
typeFeature.setLayer(getLayer());
typeFeature.setEnabled(true);
typeFeature.setUiName("Reference Type");
typeFeature.setProject(getLayer().getProject());
aSchemaService.createFeature(typeFeature);
}
@Override
public List<Pair<LogMessage, AnnotationFS>> validate(CAS aCas)
{
List<Pair<LogMessage, AnnotationFS>> messages = new ArrayList<>();
for (SpanLayerBehavior behavior : behaviors) {
long startTime = currentTimeMillis();
messages.addAll(behavior.onValidate(this, aCas));
log.trace("Validation for [{}] on [{}] took {}ms", behavior.getClass().getSimpleName(),
getLayer().getUiName(), currentTimeMillis() - startTime);
}
return messages;
}
@Override
public void select(AnnotatorState aState, AnnotationFS aAnno)
{
aState.getSelection().selectSpan(aAnno);
}
}
| webanno/webanno | webanno-api-annotation/src/main/java/de/tudarmstadt/ukp/clarin/webanno/api/annotation/adapter/ChainAdapter.java | Java | apache-2.0 | 20,058 |
package com.appsfs.sfs.activity;
import android.content.Context;
import android.content.Intent;
import android.os.Bundle;
import android.support.design.widget.NavigationView;
import android.support.v4.view.GravityCompat;
import android.support.v4.widget.DrawerLayout;
import android.support.v7.app.ActionBar;
import android.support.v7.app.AppCompatActivity;
import android.support.v7.widget.Toolbar;
import android.util.Log;
import android.view.LayoutInflater;
import android.view.Menu;
import android.view.MenuItem;
import android.view.MotionEvent;
import android.view.View;
import android.widget.ImageView;
import android.widget.TextView;
import android.widget.Toast;
import com.android.volley.Response;
import com.android.volley.VolleyError;
import com.appsfs.sfs.Objects.MyMarker;
import com.appsfs.sfs.R;
import com.appsfs.sfs.Utils.SFSPreference;
import com.appsfs.sfs.Objects.Shipper;
import com.appsfs.sfs.Objects.Shop;
import com.appsfs.sfs.Utils.Utils;
import com.appsfs.sfs.api.function.GetCodeOrder;
import com.appsfs.sfs.api.function.GetShipperOnline;
import com.appsfs.sfs.api.function.LogoutUser;
import com.appsfs.sfs.api.helper.AccessHeader;
import com.appsfs.sfs.api.helper.CustomRespond;
import com.appsfs.sfs.api.helper.RequestHelper;
import com.appsfs.sfs.api.sync.ShipperListSync;
import com.appsfs.sfs.api.sync.ShipperSync;
import com.appsfs.sfs.api.sync.UserSync;
import com.appsfs.sfs.database.DatabaseHelperShipper;
import com.appsfs.sfs.database.DatabaseHelperShop;
import com.appsfs.sfs.database.DatabaseHelperUser;
import com.appsfs.sfs.service.GPSService;
import com.google.android.gms.maps.CameraUpdate;
import com.google.android.gms.maps.CameraUpdateFactory;
import com.google.android.gms.maps.GoogleMap;
import com.google.android.gms.maps.OnMapReadyCallback;
import com.google.android.gms.maps.SupportMapFragment;
import com.google.android.gms.maps.model.BitmapDescriptorFactory;
import com.google.android.gms.maps.model.CameraPosition;
import com.google.android.gms.maps.model.LatLng;
import com.google.android.gms.maps.model.Marker;
import com.google.android.gms.maps.model.MarkerOptions;
import org.json.JSONException;
import org.json.JSONObject;
import java.lang.reflect.Array;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Objects;
/**
 * Main screen for a shop user: shows the shop and currently-online shippers on
 * a Google Map, and drives the navigation drawer (profile editing, order
 * creation, logout). Acts as its own Volley success/error listener.
 *
 * Created by longdv on 4/10/16.
 */
public class SFSShopMainActivity extends AppCompatActivity implements OnMapReadyCallback,Response.Listener<CustomRespond>,Response.ErrorListener {
    private DrawerLayout mDrawerLayout;
    private GoogleMap mMap;
    // Drawer header widgets
    private TextView mHeaderName;
    private View mHeaderView;
    // Shop position parsed from the stored user JSON in onCreate
    LatLng latLng = null;
    SFSPreference mSfsPreference;
    UserSync userSync;
    // Set from the GetShipperOnline response before the map is requested
    ShipperListSync shipperListSync;
    Marker marker;
    ArrayList<ShipperSync> mShipperSyncs;
    private ArrayList<MyMarker> mMyMarkersArray ;
    // Maps each map Marker to its data object for the custom info window
    private HashMap<Marker, MyMarker> mMarkersHashMap;
    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_main);
        Toolbar toolbar = (Toolbar) findViewById(R.id.toolbar);
        setSupportActionBar(toolbar);
        ActionBar actionBar = getSupportActionBar();
        actionBar.setHomeAsUpIndicator(R.drawable.ic_menu);
        actionBar.setDisplayHomeAsUpEnabled(true);
        mMarkersHashMap = new HashMap<Marker, MyMarker>();
        mMyMarkersArray = new ArrayList<MyMarker>();
        mSfsPreference = SFSPreference.getInstance(this);
        mDrawerLayout = (DrawerLayout) findViewById(R.id.drawer_layout);
        NavigationView navigationView = (NavigationView) findViewById(R.id.navigation_view);
        mHeaderView = navigationView.getHeaderView(0);
        mHeaderName = (TextView) mHeaderView.findViewById(R.id.tv_header);
        navigationView.setNavigationItemSelectedListener(new NavigationView.OnNavigationItemSelectedListener() {
            @Override
            public boolean onNavigationItemSelected(MenuItem menuItem) {
                menuItem.setChecked(true);
                mDrawerLayout.closeDrawers();
                int id = menuItem.getItemId();
                // Dispatch drawer items; every branch returns true (handled)
                switch (id) {
                    case R.id.navigation_item_edit_profile:
                        Intent i = new Intent(SFSShopMainActivity.this, EditProfileActivity.class);
                        startActivity(i);
                        return true;
                    case R.id.navigation_item_edit_information:
                        Intent i1 = new Intent(SFSShopMainActivity.this, EditShopInfomationActivity.class);
                        startActivity(i1);
                        return true;
                    case R.id.navigation_item_order:
                        // Requests a fresh order code; createOrder() is started
                        // from onResponse() once the code arrives
                        getNewCodeOrder();
                        return true;
                    case R.id.navigation_item_detail_order:
                        startDetailOrder();
                        return true;
                    case R.id.navigation_item_signout:
                        clickLogout();
                        return true;
                    default:
                        return true;
                }
            }
        });
        // Restore the logged-in user from preferences and kick off the
        // online-shipper request; onResponse() then loads the map.
        String json = mSfsPreference.getString("user_json","");
        try {
            userSync = new UserSync(new JSONObject(json));
            mHeaderName.setText(userSync.getPhone());
            latLng = new LatLng(userSync.getLatitude(),userSync.getLongitude());
            Log.d("","Locatoion " + latLng);
            new GetShipperOnline(SFSShopMainActivity.this,this,this).start();
        } catch (Exception e) {
            // NOTE(review): placeholder log tag and swallowed exception - the
            // activity continues with latLng == null if the stored JSON is bad
            Log.d("sabdjkasdk",e.getLocalizedMessage());
        }
    }
    // Opens the order-creation screen (called after an order code was fetched)
    private void createOrder() {
        Intent i2 = new Intent(SFSShopMainActivity.this, CreateOrdersActivity.class);
        startActivity(i2);
    }
    private void startDetailOrder() {
        Intent i3 = new Intent(SFSShopMainActivity.this, DetailOrdersActivity.class);
        startActivity(i3);
    }
    @Override
    public boolean onCreateOptionsMenu(Menu menu) {
        // Inflate the menu; this adds items to the action bar if it is present.
        getMenuInflater().inflate(R.menu.menu_main, menu);
        return true;
    }
    @Override
    public boolean onOptionsItemSelected(MenuItem item) {
        int id = item.getItemId();
        switch (id) {
            // Home (hamburger) icon opens the navigation drawer
            case android.R.id.home:
                mDrawerLayout.openDrawer(GravityCompat.START);
                return true;
        }
        return super.onOptionsItemSelected(item);
    }
    @Override
    public void onMapReady(GoogleMap googleMap) {
        mMap = googleMap;
        mMap.setMapType(GoogleMap.MAP_TYPE_NORMAL);
        // NOTE(review): this local shadows the 'marker' field declared above
        Marker marker = mMap.addMarker(new MarkerOptions().position(latLng));
        marker.setIcon(BitmapDescriptorFactory.fromResource(R.drawable.icon_shop));
        // Plot one marker per online shipper received earlier in onResponse()
        mShipperSyncs = shipperListSync.getListShipperSync();
        mMyMarkersArray = addMarkerArray(mShipperSyncs);
        plotMarkers(mMyMarkersArray);
        mMap.setOnMarkerClickListener(new GoogleMap.OnMarkerClickListener() {
            @Override
            public boolean onMarkerClick(Marker marker) {
                // Center on the tapped marker and show its info window
                CameraPosition cameraPosition = new CameraPosition.Builder()
                        .target(marker.getPosition())
                        .zoom(13)
                        .build();
                mMap.animateCamera(CameraUpdateFactory.newCameraPosition(cameraPosition),1000,null);
                marker.showInfoWindow();
                return true;
            }
        });
        // Initial camera: move to the shop, then animate zoom to level 13
        CameraUpdate center =
                CameraUpdateFactory.newLatLng(latLng);
        CameraUpdate zoom=CameraUpdateFactory.zoomTo(13.0f);
        mMap.moveCamera(center);
        mMap.animateCamera(zoom);
        CameraUpdate cameraUpdate = CameraUpdateFactory.newLatLngZoom(latLng, 13);
        mMap.animateCamera(cameraUpdate);
    }
    // Converts the synced shipper list into MyMarker data objects
    private ArrayList<MyMarker> addMarkerArray(ArrayList<ShipperSync> shipperList) {
        ArrayList<MyMarker> mMyMarkersArray = new ArrayList<MyMarker>();
        for (int i = 0; i < shipperList.size(); i++) {
            MyMarker myMarker = new MyMarker();
            LatLng latLngShops = new LatLng(shipperList.get(i).getLatitude(), shipperList.get(i).getLongitude());
            String nameShops = shipperList.get(i).getName();
            String phoneShops = shipperList.get(i).getPhoneNumber();
            myMarker.setLabelName(nameShops);
            myMarker.setLabelPhone(phoneShops);
            myMarker.setLatitude(latLngShops.latitude);
            myMarker.setLongitude(latLngShops.longitude);
            mMyMarkersArray.add(myMarker);
        }
        return mMyMarkersArray;
    }
    // Adds one map marker per shipper and wires the call-on-info-window-tap
    private void plotMarkers(ArrayList<MyMarker> markers) {
        if (markers.size() > 0) {
            for (final MyMarker myMarker : markers) {
                // Create user marker with custom icon and other options
                MarkerOptions markerOption = new MarkerOptions().position(new LatLng(myMarker.getLatitude(), myMarker.getLongitude()));
                markerOption.icon(BitmapDescriptorFactory.fromResource(R.drawable.ic_marker_shipper));
                marker = mMap.addMarker(markerOption);
                mMarkersHashMap.put(marker, myMarker);
                // NOTE(review): the adapter and the info-window click listener
                // are re-set on every loop iteration; only the last listener's
                // captured myMarker wins for the click callback - verify intent
                CustomWindowInfo customWindowInfo = new CustomWindowInfo(SFSShopMainActivity.this);
                mMap.setInfoWindowAdapter(customWindowInfo);
                mMap.setOnInfoWindowClickListener(new GoogleMap.OnInfoWindowClickListener() {
                    @Override
                    public void onInfoWindowClick(Marker marker) {
                        Utils.getInstance().callPhoneNumber(SFSShopMainActivity.this,myMarker.getLabelPhone());
                    }
                });
            }
        }
    }
    @Override
    public void onErrorResponse(VolleyError error) {
        // Network errors are silently ignored
    }
    /**
     * Single Volley success callback for all requests issued by this activity;
     * branches on the response's origin tag.
     */
    @Override
    public void onResponse(CustomRespond response) {
        if (response.getFrom().equalsIgnoreCase(LogoutUser.SIGN_OUT_USER)) {
            // Logout: clear session state and return to the entry screen
            AccessHeader.resetAccessHeader();
            mSfsPreference.putString("user_json", "");
            mSfsPreference.putInt("current_id_user", 0);
            Utils.getInstance().changeActivity(SFSShopMainActivity.this, MainActivity.class);
        } else if (response.getFrom().equalsIgnoreCase(GetCodeOrder.NEW_CODE_ORDER)) {
            // New order code: persist it, then open the order-creation screen
            try {
                mSfsPreference.putString("order_code", response.getData().getString("code"));
                createOrder();
            } catch (JSONException e) {
                // NOTE(review): getMessage() result is discarded - the
                // exception is effectively swallowed
                e.getMessage();
            }
        } else {
            // Otherwise: treat as the GetShipperOnline result
            try {
                shipperListSync = new ShipperListSync(response.getData());
                Log.e("TAG","respone: " + response);
            } catch (Exception e) {
                Log.d("shipper null",e.getLocalizedMessage());
            }
            /*Google map*/
            if (Utils.getInstance().checkNetworkState(SFSShopMainActivity.this) == true) {
                SupportMapFragment mapFragment = (SupportMapFragment) getSupportFragmentManager().findFragmentById(R.id.map);
                mapFragment.getMapAsync(this);
            } else {
                Utils.getInstance().showDiaglog(this,"NO INTERNET!","Please check your device's connection settings");
            }
        }
//        Utils.getInstance().changeActivity(SFSShopMainActivity.this, LoginActivity.class);
    }
    /** Custom info window showing the shipper's name, phone and a call icon. */
    private class CustomWindowInfo implements GoogleMap.InfoWindowAdapter {
        private LayoutInflater mInflater;
        MyMarker myMarker;
        public CustomWindowInfo(Context context) {
            mInflater = LayoutInflater.from(context);
        }
        class ViewHolder {
            TextView mName;
            TextView mPhone;
            ImageView imageViewCall;
        }
        @Override
        public View getInfoWindow(final Marker marker) {
            View v = mInflater.inflate(R.layout.info_window_layout, null);
            ViewHolder viewHolder = new ViewHolder();
            // Look up the data object associated with the tapped marker
            myMarker = mMarkersHashMap.get(marker);
            viewHolder.mName = (TextView) v.findViewById(R.id.tv_window_name);
            viewHolder.mPhone = (TextView) v.findViewById(R.id.tv_window_phone);
            viewHolder.imageViewCall = (ImageView) v.findViewById(R.id.iv_window_call);
            viewHolder.mName.setText(myMarker.getLabelName());
            viewHolder.mPhone.setText(myMarker.getLabelPhone());
            return v;
        }
        @Override
        public View getInfoContents(Marker marker) {
            // Returning null keeps the custom window from getInfoWindow()
            return null;
        }
    }
    // Fires the logout request and stops location tracking
    private void clickLogout() {
        LogoutUser logoutUser = new LogoutUser(SFSShopMainActivity.this,this,this);
        logoutUser.start();
        stopService(new Intent(SFSShopMainActivity.this, GPSService.class));
    }
    @Override
    protected void onDestroy() {
        super.onDestroy();
        if (mMap != null)
            mMap.clear();
    }
    private void getNewCodeOrder() {
        new GetCodeOrder(SFSShopMainActivity.this, this, this, userSync).start();
    }
}
| luongvietdung/SFS-Client | app/src/main/java/com/appsfs/sfs/activity/SFSShopMainActivity.java | Java | apache-2.0 | 13,142 |
package com.hlab.fabrevealmenu.view;
import android.annotation.SuppressLint;
import android.annotation.TargetApi;
import android.content.Context;
import android.content.res.TypedArray;
import android.graphics.Typeface;
import android.graphics.drawable.BitmapDrawable;
import android.os.Build;
import android.util.AttributeSet;
import android.view.LayoutInflater;
import android.view.Menu;
import android.view.MenuInflater;
import android.view.MenuItem;
import android.view.View;
import android.widget.FrameLayout;
import android.widget.LinearLayout;
import androidx.annotation.ColorRes;
import androidx.annotation.MenuRes;
import androidx.annotation.NonNull;
import androidx.appcompat.view.menu.MenuBuilder;
import androidx.cardview.widget.CardView;
import androidx.core.content.res.ResourcesCompat;
import androidx.core.view.ViewCompat;
import androidx.recyclerview.widget.RecyclerView;
import com.hlab.fabrevealmenu.R;
import com.hlab.fabrevealmenu.helper.AnimationHelper;
import com.hlab.fabrevealmenu.helper.Direction;
import com.hlab.fabrevealmenu.helper.OnFABMenuSelectedListener;
import com.hlab.fabrevealmenu.helper.ViewHelper;
import com.hlab.fabrevealmenu.model.FABMenuItem;
import java.util.ArrayList;
public class FABRevealMenu extends FrameLayout {
//Common constants
private final int FAB_STATE_COLLAPSED = 0;
private final int FAB_STATE_EXPANDED = 1;
private final int FAB_MENU_SIZE_NORMAL = 0;
private final int FAB_MENU_SIZE_SMALL = 1;
public OnFABMenuSelectedListener menuSelectedListener = null;
private Context mContext;
private View mCustomView;
private View mFab;
//attributes
@MenuRes
private int mMenuRes;
private int mMenuBackground;
private int mOverlayBackground;
private boolean mShowOverlay;
private int mMenuSize;
private Direction mDirection;
private boolean mShowTitle;
private boolean mShowIcon;
private int mTitleTextColor;
private int mTitleDisabledTextColor;
private int FAB_CURRENT_STATE = FAB_STATE_COLLAPSED;
private Typeface mMenuTitleTypeface;
private int mMenuCornerRadius;
private int mDuration;
//Views in the menu
private FrameLayout mOverlayLayout = null;
private LinearLayout mRevealView = null;
private RecyclerView mMenuView = null;
private boolean mEnableNestedScrolling = true;
private CardView mBaseView = null;
private FABMenuAdapter menuAdapter = null;
//Menu specific fields
private ArrayList<FABMenuItem> menuList = null;
//Helper class
private ViewHelper viewHelper;
private AnimationHelper animationHelper;
    /** Programmatic constructor (no XML attributes). */
    public FABRevealMenu(Context context) {
        super(context);
        initView(context, null);
    }
    /** XML-inflation constructor. */
    public FABRevealMenu(Context context, AttributeSet attrs) {
        super(context, attrs);
        initView(context, attrs);
    }
    /** XML-inflation constructor with a default style attribute. */
    public FABRevealMenu(Context context, AttributeSet attrs, int defStyleAttr) {
        super(context, attrs, defStyleAttr);
        initView(context, attrs);
    }
private void initView(Context context, AttributeSet attrs) {
mContext = context;
if (attrs != null) {
TypedArray a = context.obtainStyledAttributes(attrs, R.styleable.FABRevealMenu, 0, 0);
//background
mMenuBackground = a.getColor(R.styleable.FABRevealMenu_menuBackgroundColor, getColor(R.color.colorWhite));
mOverlayBackground = a.getColor(R.styleable.FABRevealMenu_overlayBackground, getColor(R.color.colorOverlayDark));
//menu
mMenuRes = a.getResourceId(R.styleable.FABRevealMenu_menuRes, -1);
//custom view
int customView = a.getResourceId(R.styleable.FABRevealMenu_menuCustomView, -1);
if (customView != -1)
mCustomView = LayoutInflater.from(context).inflate(customView, null);
//direction
mDirection = Direction.fromId(a.getInt(R.styleable.FABRevealMenu_menuDirection, 0));
//title
mTitleTextColor = a.getColor(R.styleable.FABRevealMenu_menuTitleTextColor, getColor(android.R.color.white));
mTitleDisabledTextColor = a.getColor(R.styleable.FABRevealMenu_menuTitleDisabledTextColor, getColor(android.R.color.darker_gray));
mShowTitle = a.getBoolean(R.styleable.FABRevealMenu_showTitle, true);
mShowIcon = a.getBoolean(R.styleable.FABRevealMenu_showIcon, true);
mShowOverlay = a.getBoolean(R.styleable.FABRevealMenu_showOverlay, true);
//size
mMenuSize = a.getInt(R.styleable.FABRevealMenu_menuSize, FAB_MENU_SIZE_NORMAL);
mMenuCornerRadius = a.getDimensionPixelSize(R.styleable.FABRevealMenu_menuCornerRadius, 10);
mDuration = a.getInteger(R.styleable.FABRevealMenu_duration, 500);
//Font
if (a.hasValue(R.styleable.FABRevealMenu_menuTitleFontFamily)) {
int fontId = a.getResourceId(R.styleable.FABRevealMenu_menuTitleFontFamily, -1);
if (fontId != -1)
mMenuTitleTypeface = ResourcesCompat.getFont(context, fontId);
}
a.recycle();
//helper initialization
viewHelper = new ViewHelper(context);
animationHelper = new AnimationHelper(mDuration);
//initialization
if (mMenuRes != -1) {
setMenu(mMenuRes);
} else if (mCustomView != null) {
setCustomView(mCustomView);
}
}
}
    /** @return the custom menu view, or null when a menu resource is in use. */
    public View getCustomView() {
        return mCustomView;
    }
    /**
     * Set custom view as menu
     *
     * @param view custom view; replaces any previously set menu resource
     */
    public void setCustomView(@NonNull View view) {
        // A custom view and a menu resource are mutually exclusive
        mMenuRes = -1;
        removeAllViews();
        mCustomView = view;
        mCustomView.setClickable(true);
        viewHelper.setLayoutParams(mCustomView);
        setUpView(mCustomView, false);
    }
    /**
     * Enable/disable nested scrolling for the internal RecyclerView; takes
     * effect the next time the menu view is (re)built.
     */
    public void setNestedScrollingEnabled(boolean enabled) {
        mEnableNestedScrolling = enabled;
    }
    /**
     * Set menu from menu xml
     *
     * @param menuRes menu xml resource; replaces any previously set custom view
     */
    public void setMenu(@MenuRes int menuRes) {
        mCustomView = null;
        mMenuRes = menuRes;
        removeAllViews();
        // MenuBuilder is a restricted support-library API but is the only way
        // to obtain a standalone Menu to inflate into, hence the suppression.
        @SuppressLint("RestrictedApi")
        Menu menu = new MenuBuilder(getContext());
        inflateMenu(menuRes, menu);
        setUpMenu(menu);
    }
    // Overridable hook that inflates the menu resource into the given Menu
    protected void inflateMenu(@MenuRes int menuRes, Menu menu) {
        new MenuInflater(getContext()).inflate(menuRes, menu);
    }
public void updateMenu() {
mCustomView = null;
removeAllViews();
if (menuList.size() > 0) {
setUpMenuView();
} else
setMenu(mMenuRes);
}
/**
* Set menu from list of items
*
* @param menuList list of items
*/
public void setMenuItems(ArrayList<FABMenuItem> menuList) throws NullPointerException {
this.menuList = menuList;
mMenuRes = -1;
mCustomView = null;
if (menuList == null)
throw new NullPointerException("Null items are not allowed.");
removeAllViews();
if (menuList.size() > 0) {
for (int i = 0; i < menuList.size(); i++) {
FABMenuItem item = menuList.get(i);
item.setId(i);
if (item.getIconDrawable() == null && item.getIconBitmap() != null) {
item.setIconDrawable(new BitmapDrawable(getResources(), item.getIconBitmap()));
}
}
}
setUpMenuView();
}
    // Converts an inflated Menu into the internal FABMenuItem list and builds
    // the menu view.
    @TargetApi(Build.VERSION_CODES.LOLLIPOP)
    private void setUpMenu(@NonNull Menu menu) throws IllegalStateException {
        menuList = new ArrayList<>();
        if (menu.size() > 0) {
            for (int i = 0; i < menu.size(); i++) {
                MenuItem item = menu.getItem(i);
                menuList.add(new FABMenuItem(item.getItemId(), item.getTitle().toString(), item.getIcon()));
            }
            setUpMenuView();
        } else
            throw new IllegalStateException("Menu resource not found.");
    }
    // Builds the RecyclerView + adapter for the current item list. Horizontal
    // directions (LEFT/RIGHT) use a grid sized by a minimum column width;
    // vertical directions use a plain list and may render circular items when
    // titles are hidden.
    private void setUpMenuView() {
        if (menuList != null && menuList.size() > 0) {
            mMenuView = viewHelper.generateMenuView(mEnableNestedScrolling);
            boolean isCircularShape = false;
            //set layout manager
            if (mDirection == Direction.LEFT || mDirection == Direction.RIGHT) {
                int minItemWidth = isMenuSmall() ? (int) mContext.getResources().getDimension(R.dimen.column_size_small) : (int) mContext.getResources().getDimension(R.dimen.column_size);
                int rowLayoutResId = isMenuSmall() ? R.layout.row_horizontal_menu_item_small : R.layout.row_horizontal_menu_item;
                mMenuView.setLayoutManager(new DynamicGridLayoutManager(mContext, minItemWidth, menuList.size()));
                menuAdapter = new FABMenuAdapter(this, menuList, rowLayoutResId, true, mTitleTextColor, mTitleDisabledTextColor, mShowTitle, mShowIcon, mDirection);
                if (mMenuTitleTypeface != null)
                    menuAdapter.setMenuTitleTypeface(mMenuTitleTypeface);
            } else {
                // Vertical menu: circular items only when titles are hidden
                isCircularShape = !mShowTitle;
                int rowLayoutResId = isMenuSmall() ? R.layout.row_vertical_menu_item_small : R.layout.row_vertical_menu_item;
                mMenuView.setLayoutManager(new DynamicGridLayoutManager(mContext, 0, 0));
                menuAdapter = new FABMenuAdapter(this, menuList, rowLayoutResId, isCircularShape, mTitleTextColor, mTitleDisabledTextColor, mShowTitle, mShowIcon, mDirection);
                if (mMenuTitleTypeface != null)
                    menuAdapter.setMenuTitleTypeface(mMenuTitleTypeface);
            }
            mMenuView.setAdapter(menuAdapter);
            // Min-width only matters when titles are shown as labels (not in
            // the circular-item variant)
            setUpView(mMenuView, mShowTitle && !isCircularShape);
        }
    }
/**
 * Assembles the menu view hierarchy: content view -> card base view ->
 * reveal container, plus an optional dimmed overlay added behind the
 * reveal container. Tapping the overlay closes the menu.
 *
 * @param mView         the menu content (item grid or custom view)
 * @param toSetMinWidth whether the card should enforce the minimum menu width
 */
private void setUpView(View mView, boolean toSetMinWidth) {
    mBaseView = viewHelper.generateBaseView(mMenuCornerRadius);
    mBaseView.setCardBackgroundColor(mMenuBackground);
    mRevealView = viewHelper.generateRevealView();
    // (Fixed) a redundant null assignment to mOverlayLayout was removed here.
    mOverlayLayout = viewHelper.generateOverlayView();
    if (mShowOverlay) {
        // The previous ternary on mShowOverlay was dead code inside this branch;
        // mOverlayBackground is applied directly.
        // NOTE(review): mOverlayBackground is treated as a raw color here but as a
        // @ColorRes id in setOverlayBackground() — confirm which one initialization uses.
        mOverlayLayout.setBackgroundColor(mOverlayBackground);
    }
    if (toSetMinWidth)
        mBaseView.setMinimumWidth(getResources().getDimensionPixelSize(isMenuSmall() ? R.dimen.menu_min_width_small : R.dimen.menu_min_width));
    //1.add menu view
    mBaseView.addView(mView);
    //2.add base view
    mRevealView.addView(mBaseView);
    //3.add overlay
    if (mOverlayLayout != null) {
        addView(mOverlayLayout);
    }
    //4.add reveal view
    addView(mRevealView);
    if (mOverlayLayout != null) {
        mOverlayLayout.setOnClickListener(v -> closeMenu());
    }
}
/**
 * Binds the floating action button that anchors this menu. Once the FAB has
 * been laid out, it is wired to open the menu on click and the reveal view is
 * aligned with it.
 *
 * @param fab the FAB (or any anchor view) to attach the menu to
 */
public void bindAnchorView(@NonNull View fab) {
    mFab = fab;
    // Defer until the FAB has been measured/laid out so alignment uses real bounds.
    mFab.post(() -> {
        ViewCompat.setTransitionName(mFab, "FAB");
        mFab.setOnClickListener(view -> showMenu());
        viewHelper.alignMenuWithFab(mFab, mRevealView, mDirection);
    });
}
// --- action methods --- //
/** Returns the menu item at {@code index}, or {@code null} if the adapter is not ready. */
public FABMenuItem getItemByIndex(int index) {
    return menuAdapter == null ? null : menuAdapter.getItemByIndex(index);
}
/** Returns the menu item with the given id, or {@code null} if the adapter is not ready. */
public FABMenuItem getItemById(int id) {
    return menuAdapter == null ? null : menuAdapter.getItemById(id);
}
/**
 * Remove menu item by id
 *
 * @param id item id to remove
 * @return true when an item with that id was found and removed
 */
public boolean removeItem(int id) {
    if (menuList == null) {
        return false;
    }
    for (int position = 0; position < menuList.size(); position++) {
        if (menuList.get(position).getId() != id) {
            continue;
        }
        menuList.remove(position);
        // Keep the grid layout manager's item count in sync with the list.
        ((DynamicGridLayoutManager) mMenuView.getLayoutManager()).updateTotalItems(menuList.size());
        if (menuAdapter != null) {
            menuAdapter.notifyItemRemoved(position);
            menuAdapter.notifyItemRangeChanged(position, menuList.size());
        }
        return true;
    }
    return false;
}
/** Asks the adapter to rebind the item with the given id, if the adapter exists. */
public void notifyItemChanged(int id) {
    if (menuAdapter == null) {
        return;
    }
    menuAdapter.notifyItemChangedById(id);
}
/** Registers the callback invoked when a menu item is selected. */
public void setOnFABMenuSelectedListener(OnFABMenuSelectedListener menuSelectedListener) {
    this.menuSelectedListener = menuSelectedListener;
}
/** @return true while the menu is expanded/visible. */
public boolean isShowing() {
    return FAB_CURRENT_STATE == FAB_STATE_EXPANDED;
}
/**
 * Show the menu
 *
 * @throws IllegalStateException if no FAB has been bound via {@link #bindAnchorView}
 */
public void showMenu() {
    if (mFab == null) {
        throw new IllegalStateException("FloatingActionButton not bound." +
                "Please, use bindAnchorView() to add your Fab button.");
    }
    // Only a collapsed menu can be opened; ignore repeated calls while expanded.
    if (FAB_CURRENT_STATE != FAB_STATE_COLLAPSED) {
        return;
    }
    FAB_CURRENT_STATE = FAB_STATE_EXPANDED;
    viewHelper.alignMenuWithFab(mFab, mRevealView, mDirection);
    animationHelper.revealMenu(this, mFab, mRevealView, false);
    if (mShowOverlay) {
        animationHelper.showOverlay(mOverlayLayout);
    }
}
/**
 * Close the menu
 *
 * @throws IllegalStateException if no FAB has been bound via {@link #bindAnchorView}
 */
public void closeMenu() throws IllegalStateException {
    if (mFab == null) {
        throw new IllegalStateException("FloatingActionButton not bound." +
                "Please, use bindAnchorView() to add your Fab button.");
    }
    // Only an expanded menu can be closed; ignore repeated calls while collapsed.
    if (FAB_CURRENT_STATE != FAB_STATE_EXPANDED) {
        return;
    }
    FAB_CURRENT_STATE = FAB_STATE_COLLAPSED;
    viewHelper.alignMenuWithFab(mFab, mRevealView, mDirection);
    animationHelper.revealMenu(this, mFab, mRevealView, true);
    if (mShowOverlay) {
        animationHelper.hideOverlay(mOverlayLayout);
    }
}
// Rebuilds the menu view from whichever source configured it, checked in
// priority order: menu resource, then custom view, then explicit item list.
private void recreateView() {
    if (mMenuRes != -1)
        updateMenu();
    else if (mCustomView != null)
        setCustomView(mCustomView);
    else if (menuList != null)
        setMenuItems(menuList);
}
/**
 * Resolves a color resource id to an ARGB color int, using the themed
 * overload on API 23+.
 *
 * Bug fix: the original computed the resolved color but discarded it and
 * returned {@code colorResId} (the resource id) instead of the color.
 *
 * @param colorResId color resource id to resolve
 * @return the resolved ARGB color value
 */
private int getColor(int colorResId) {
    if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.M)
        return getResources().getColor(colorResId, mContext.getTheme());
    else
        return getResources().getColor(colorResId);
}
// ---- getter setter --- //
/**
 * Sets the background color resource for the dimming overlay.
 * The field is stored even when the overlay is missing; a NullPointerException
 * is then thrown to signal that ShowOverlay was never enabled.
 *
 * @param mOverlayBackground overlay background color resource id
 * @throws NullPointerException if the overlay view was never created
 */
public void setOverlayBackground(@ColorRes int mOverlayBackground) throws NullPointerException {
    this.mOverlayBackground = mOverlayBackground;
    if (mOverlayLayout != null) {
        mOverlayLayout.setBackgroundColor(getColor(mOverlayBackground));
    } else
        throw new NullPointerException("Overlay view is not initialized/ set ShowOverlay to true");
}
/** Sets the menu card's background from a color resource id. */
public void setMenuBackground(@ColorRes int menuBackgroundRes) {
    mBaseView.setCardBackgroundColor(getColor(menuBackgroundRes));
}
/** @return whether the dimming overlay is shown behind the open menu. */
public boolean isShowOverlay() {
    return mShowOverlay;
}
/**
 * Enables/disables the dimming overlay, closing the menu first and then
 * rebuilding the view hierarchy on the next UI pass.
 */
public void setShowOverlay(boolean mShowOverlay) {
    this.mShowOverlay = mShowOverlay;
    closeMenu();
    post(this::recreateView);
}
// True when the small item size is active (see setSmallerMenu()/setNormalMenu()).
private boolean isMenuSmall() {
    return mMenuSize == FAB_MENU_SIZE_SMALL;
}
/**
 * Set small size for menu item; the menu view is rebuilt on the next UI pass.
 */
public void setSmallerMenu() {
    mMenuSize = FAB_MENU_SIZE_SMALL;
    post(this::recreateView);
}
/**
 * Set normal size for menu item; the menu view is rebuilt on the next UI pass.
 */
public void setNormalMenu() {
    mMenuSize = FAB_MENU_SIZE_NORMAL;
    post(this::recreateView);
}
/**
 * Shows or hides item titles. For vertical menus with titles the card keeps a
 * minimum width; otherwise the minimum width is relaxed. The change only takes
 * effect once an adapter exists; the menu is then closed and rebuilt.
 */
public void setTitleVisible(boolean mShowTitle) {
    this.mShowTitle = mShowTitle;
    if (menuAdapter != null) {
        if (mShowTitle && (mDirection == Direction.UP || mDirection == Direction.DOWN))
            mBaseView.setMinimumWidth(getResources().getDimensionPixelSize(R.dimen.menu_min_width));
        else
            // NOTE(review): WRAP_CONTENT (-2) is passed as a pixel minimum width here —
            // looks intended as "no minimum"; confirm setMinimumWidth treats it that way.
            mBaseView.setMinimumWidth(LayoutParams.WRAP_CONTENT);
        menuAdapter.setShowTitle(mShowTitle);
        closeMenu();
        post(this::recreateView);
    }
}
/** Sets the title text color resource and rebinds all items if an adapter exists. */
public void setMenuTitleTextColor(@ColorRes int mTitleTextColor) {
    this.mTitleTextColor = mTitleTextColor;
    if (menuAdapter == null) {
        return;
    }
    menuAdapter.setTitleTextColor(mTitleTextColor);
    menuAdapter.notifyDataSetChanged();
}
/** Sets the disabled-title text color resource and rebinds all items if an adapter exists. */
public void setMenuTitleDisabledTextColor(@ColorRes int mTitleDisabledTextColor) {
    this.mTitleDisabledTextColor = mTitleDisabledTextColor;
    if (menuAdapter == null) {
        return;
    }
    menuAdapter.setTitleDisabledTextColor(mTitleDisabledTextColor);
    menuAdapter.notifyDataSetChanged();
}
/** @return the direction in which the menu opens relative to the FAB. */
public Direction getMenuDirection() {
    return mDirection;
}
/**
 * Sets the menu's opening direction. The direction is always stored; the view
 * is only rebuilt when an adapter already exists.
 */
public void setMenuDirection(Direction mDirection) {
    this.mDirection = mDirection;
    if (menuAdapter == null) {
        return;
    }
    menuAdapter.setDirection(mDirection);
    post(this::recreateView);
}
/** Sets the typeface used for item titles; null values are ignored. */
public void setMenuTitleTypeface(Typeface mMenuTitleTypeface) {
    if (mMenuTitleTypeface == null) {
        return;
    }
    this.mMenuTitleTypeface = mMenuTitleTypeface;
    post(this::recreateView);
}
}
| HarinTrivedi/FABRevealMenu-master | fabrevealmenu/src/main/java/com/hlab/fabrevealmenu/view/FABRevealMenu.java | Java | apache-2.0 | 17,588 |
/*******************************************************************************
* Copyright (c) 2010 Haifeng Li
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
package smile.clustering;
import java.util.List;
import java.util.ArrayList;
import java.util.Arrays;
import smile.neighbor.Neighbor;
import smile.neighbor.RNNSearch;
import smile.neighbor.LinearSearch;
import smile.neighbor.CoverTree;
import smile.math.Math;
import smile.math.distance.Distance;
import smile.math.distance.Metric;
/**
* Density-Based Spatial Clustering of Applications with Noise.
* DBScan finds a number of clusters starting from the estimated density
* distribution of corresponding nodes.
* <p>
* DBScan requires two parameters: radius (i.e. neighborhood radius) and the
* number of minimum points required to form a cluster (minPts). It starts
* with an arbitrary starting point that has not been visited. This point's
* neighborhood is retrieved, and if it contains sufficient number of points,
* a cluster is started. Otherwise, the point is labeled as noise. Note that
* this point might later be found in a sufficiently sized radius-environment
* of a different point and hence be made part of a cluster.
* <p>
* If a point is found to be part of a cluster, its neighborhood is also
* part of that cluster. Hence, all points that are found within the
* neighborhood are added, as is their own neighborhood. This process
* continues until the cluster is completely found. Then, a new unvisited point
* is retrieved and processed, leading to the discovery of a further cluster
* of noise.
* <p>
* DBScan visits each point of the database, possibly multiple times (e.g.,
* as candidates to different clusters). For practical considerations, however,
* the time complexity is mostly governed by the number of nearest neighbor
* queries. DBScan executes exactly one such query for each point, and if
* an indexing structure is used that executes such a neighborhood query
* in O(log n), an overall runtime complexity of O(n log n) is obtained.
* <p>
* DBScan has many advantages such as
* <ul>
* <li> DBScan does not need to know the number of clusters in the data
* a priori, as opposed to k-means.
* <li> DBScan can find arbitrarily shaped clusters. It can even find clusters
* completely surrounded by (but not connected to) a different cluster.
* Due to the MinPts parameter, the so-called single-link effect
* (different clusters being connected by a thin line of points) is reduced.
* <li> DBScan has a notion of noise. Outliers are labeled as Clustering.OUTLIER,
* which is Integer.MAX_VALUE.
* <li> DBScan requires just two parameters and is mostly insensitive to the
* ordering of the points in the database. (Only points sitting on the
* edge of two different clusters might swap cluster membership if the
* ordering of the points is changed, and the cluster assignment is unique
* only up to isomorphism.)
* </ul>
* On the other hand, DBScan has the disadvantages of
* <ul>
* <li> In high dimensional space, the data are sparse everywhere
* because of the curse of dimensionality. Therefore, DBScan doesn't
* work well on high-dimensional data in general.
* <li> DBScan does not respond well to data sets with varying densities.
* </ul>
*
* <h2>References</h2>
* <ol>
* <li> Martin Ester, Hans-Peter Kriegel, Jorg Sander, Xiaowei Xu (1996-). A density-based algorithm for discovering clusters in large spatial databases with noise". KDD, 1996. </li>
* <li> Jorg Sander, Martin Ester, Hans-Peter Kriegel, Xiaowei Xu. (1998). Density-Based Clustering in Spatial Databases: The Algorithm GDBSCAN and Its Applications. 1998. </li>
* </ol>
*
* @param <T> the type of input object.
*
* @author Haifeng Li
*/
public class DBScan <T> extends PartitionClustering<T> {
    /**
     * Label for unclassified data samples.
     */
    private static final int UNCLASSIFIED = -1;
    /**
     * The minimum number of points required to form a cluster.
     */
    private double minPts;
    /**
     * The range of neighborhood.
     */
    private double radius;
    /**
     * Data structure for neighborhood search.
     */
    private RNNSearch<T,T> nns;
    /**
     * Constructor. Clustering the data. Note that this one could be very
     * slow because of brute force nearest neighbor search.
     * @param data the dataset for clustering.
     * @param distance the distance measure for neighborhood search.
     * @param minPts the minimum number of neighbors for a core data point.
     * @param radius the neighborhood radius.
     */
    public DBScan(T[] data, Distance<T> distance, int minPts, double radius) {
        this(data, new LinearSearch<T>(data, distance), minPts, radius);
    }
    /**
     * Constructor. Clustering the data. Using cover tree for nearest neighbor
     * search.
     * @param data the dataset for clustering.
     * @param distance the distance measure for neighborhood search.
     * @param minPts the minimum number of neighbors for a core data point.
     * @param radius the neighborhood radius.
     */
    public DBScan(T[] data, Metric<T> distance, int minPts, double radius) {
        this(data, new CoverTree<T>(data, distance), minPts, radius);
    }
    /**
     * Clustering the data.
     * @param data the dataset for clustering.
     * @param nns the data structure for neighborhood search.
     * @param minPts the minimum number of neighbors for a core data point.
     * @param radius the neighborhood radius.
     */
    public DBScan(T[] data, RNNSearch<T,T> nns, int minPts, double radius) {
        if (minPts < 1) {
            throw new IllegalArgumentException("Invalid minPts: " + minPts);
        }
        if (radius <= 0.0) {
            throw new IllegalArgumentException("Invalid radius: " + radius);
        }
        this.nns = nns;
        this.minPts = minPts;
        this.radius = radius;
        // k (cluster count), y (labels) and size are inherited from PartitionClustering.
        k = 0;
        int n = data.length;
        y = new int[n];
        Arrays.fill(y, UNCLASSIFIED);
        for (int i = 0; i < data.length; i++) {
            if (y[i] == UNCLASSIFIED) {
                // Seed a candidate cluster from point i's radius-neighborhood.
                List<Neighbor<T,T>> neighbors = new ArrayList<Neighbor<T,T>>();
                nns.range(data[i], radius, neighbors);
                if (neighbors.size() < minPts) {
                    // Not a core point (for now): mark as noise. It may still be
                    // absorbed into a cluster later via the OUTLIER check below.
                    y[i] = OUTLIER;
                } else {
                    y[i] = k;
                    // Cluster expansion: the list is intentionally extended with
                    // secondary neighborhoods while being iterated by index, so
                    // neighbors.size() grows during the loop. Duplicates are
                    // harmless — already-labeled points fail both checks below.
                    for (int j = 0; j < neighbors.size(); j++) {
                        if (y[neighbors.get(j).index] == UNCLASSIFIED) {
                            y[neighbors.get(j).index] = k;
                            Neighbor<T,T> neighbor = neighbors.get(j);
                            List<Neighbor<T,T>> secondaryNeighbors = new ArrayList<Neighbor<T,T>>();
                            nns.range(neighbor.key, radius, secondaryNeighbors);
                            // Only core points propagate the cluster further.
                            if (secondaryNeighbors.size() >= minPts) {
                                neighbors.addAll(secondaryNeighbors);
                            }
                        }
                        // Border points previously marked as noise join this cluster.
                        if (y[neighbors.get(j).index] == OUTLIER) {
                            y[neighbors.get(j).index] = k;
                        }
                    }
                    k++;
                }
            }
        }
        // size[0..k-1] = cluster sizes; size[k] = number of noise points.
        size = new int[k + 1];
        for (int i = 0; i < n; i++) {
            if (y[i] == OUTLIER) {
                size[k]++;
            } else {
                size[y[i]]++;
            }
        }
    }
    /**
     * Returns the parameter of minimum number of neighbors.
     */
    public double getMinPts() {
        return minPts;
    }
    /**
     * Returns the radius of neighborhood.
     */
    public double getRadius() {
        return radius;
    }
    /**
     * Cluster a new instance by majority vote among its radius-neighbors.
     * @param x a new instance.
     * @return the cluster label. Note that it may be {@link #OUTLIER}.
     */
    @Override
    public int predict(T x) {
        List<Neighbor<T,T>> neighbors = new ArrayList<Neighbor<T,T>>();
        nns.range(x, radius, neighbors);
        if (neighbors.size() < minPts) {
            return OUTLIER;
        }
        // label[0..k-1] count cluster votes; slot k counts noise votes.
        int[] label = new int[k + 1];
        for (Neighbor<T,T> neighbor : neighbors) {
            int yi = y[neighbor.index];
            if (yi == OUTLIER) yi = k;
            label[yi]++;
        }
        int c = Math.whichMax(label);
        if (c == k) c = OUTLIER;
        return c;
    }
    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder();
        sb.append(String.format("DBScan clusters of %d data points:\n", y.length));
        for (int i = 0; i < k; i++) {
            // Per-mille arithmetic renders one decimal place without floats.
            int r = (int) Math.round(1000.0 * size[i] / y.length);
            sb.append(String.format("%3d\t%5d (%2d.%1d%%)\n", i, size[i], r / 10, r % 10));
        }
        int r = (int) Math.round(1000.0 * size[k] / y.length);
        sb.append(String.format("Noise\t%5d (%2d.%1d%%)\n", size[k], r / 10, r % 10));
        return sb.toString();
    }
}
| arehart13/smile | core/src/main/java/smile/clustering/DBScan.java | Java | apache-2.0 | 9,726 |
/*
* Copyright 2011-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package com.amazonaws.services.rds.auth;
import static com.amazonaws.util.ValidationUtils.assertIsPositive;
import static com.amazonaws.util.ValidationUtils.assertStringNotEmpty;
/**
* Request object to get an auth token for IAM database authentication.
*
* @see RdsIamAuthTokenGenerator
*/
public class GetIamAuthTokenRequest {
    /** Endpoint host name of the database instance. */
    private final String hostname;
    /** Port the database listens on. */
    private final int port;
    /** Database user the token is generated for. */
    private final String userName;

    /**
     * @param hostname database endpoint host name; must be non-empty
     * @param port     database port; must be positive
     * @param userName database user name; must be non-empty
     */
    public GetIamAuthTokenRequest(String hostname, int port, String userName) {
        // Validation order determines which bad argument is reported first.
        this.hostname = assertStringNotEmpty(hostname, "hostname");
        this.port = assertIsPositive(port, "port");
        this.userName = assertStringNotEmpty(userName, "userName");
    }

    /** @return the database endpoint host name */
    public String getHostname() {
        return hostname;
    }

    /** @return the database port */
    public int getPort() {
        return port;
    }

    /** @return the database user name */
    public String getUserName() {
        return userName;
    }

    /** @return a fresh fluent {@link Builder} */
    public static Builder builder() {
        return new Builder();
    }

    /** Fluent builder; validation happens in the request constructor on {@link #build()}. */
    public static class Builder {
        private String hostname;
        private int port;
        private String userName;

        private Builder() {
        }

        public Builder hostname(String endpoint) {
            this.hostname = endpoint;
            return this;
        }

        public Builder port(int port) {
            this.port = port;
            return this;
        }

        public Builder userName(String userName) {
            this.userName = userName;
            return this;
        }

        public GetIamAuthTokenRequest build() {
            return new GetIamAuthTokenRequest(this.hostname, this.port, this.userName);
        }
    }
}
| jentfoo/aws-sdk-java | aws-java-sdk-rds/src/main/java/com/amazonaws/services/rds/auth/GetIamAuthTokenRequest.java | Java | apache-2.0 | 2,219 |
/*
* Copyright 2017-2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.gamelift.model.transform;
import javax.annotation.Generated;
import com.amazonaws.SdkClientException;
import com.amazonaws.Request;
import com.amazonaws.http.HttpMethodName;
import com.amazonaws.services.gamelift.model.*;
import com.amazonaws.transform.Marshaller;
import com.amazonaws.protocol.*;
import com.amazonaws.protocol.Protocol;
import com.amazonaws.annotation.SdkInternalApi;
/**
* ResumeGameServerGroupRequest Marshaller
*/
@Generated("com.amazonaws:aws-java-sdk-code-generator")
@SdkInternalApi
public class ResumeGameServerGroupRequestProtocolMarshaller implements Marshaller<Request<ResumeGameServerGroupRequest>, ResumeGameServerGroupRequest> {

    /** Static binding describing the operation's protocol, HTTP method, URI and identifiers. */
    private static final OperationInfo SDK_OPERATION_BINDING = OperationInfo.builder()
            .protocol(Protocol.AWS_JSON)
            .requestUri("/")
            .httpMethodName(HttpMethodName.POST)
            .hasExplicitPayloadMember(false)
            .hasPayloadMembers(true)
            .operationIdentifier("GameLift.ResumeGameServerGroup")
            .serviceName("AmazonGameLift")
            .build();

    private final com.amazonaws.protocol.json.SdkJsonProtocolFactory protocolFactory;

    public ResumeGameServerGroupRequestProtocolMarshaller(com.amazonaws.protocol.json.SdkJsonProtocolFactory protocolFactory) {
        this.protocolFactory = protocolFactory;
    }

    /**
     * Marshalls the request into an AWS JSON HTTP request using the static operation binding.
     */
    public Request<ResumeGameServerGroupRequest> marshall(ResumeGameServerGroupRequest resumeGameServerGroupRequest) {
        if (resumeGameServerGroupRequest == null) {
            throw new SdkClientException("Invalid argument passed to marshall(...)");
        }
        try {
            final ProtocolRequestMarshaller<ResumeGameServerGroupRequest> marshaller =
                    protocolFactory.createProtocolMarshaller(SDK_OPERATION_BINDING, resumeGameServerGroupRequest);
            marshaller.startMarshalling();
            ResumeGameServerGroupRequestMarshaller.getInstance().marshall(resumeGameServerGroupRequest, marshaller);
            return marshaller.finishMarshalling();
        } catch (Exception e) {
            // Wrap any failure so callers see a single SDK-level exception type.
            throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
        }
    }
}
| aws/aws-sdk-java | aws-java-sdk-gamelift/src/main/java/com/amazonaws/services/gamelift/model/transform/ResumeGameServerGroupRequestProtocolMarshaller.java | Java | apache-2.0 | 2,775 |
package org.reclipse.behavior.inference.automaton.symbols;
import org.reclipse.behavior.inference.automaton.AbstractSymbol;
import org.reclipse.behavior.inference.automaton.Token;
import org.reclipse.tracer.model.tracegraph.TGMethodCall;
/**
* @author lowende
* @author Last editor: $Author: mcp $
* @version $Revision: 4281 $ $Date: 2010-03-10 11:47:02 +0100 (Mi, 10 Mrz 2010) $
*/
public class Epsilon extends AbstractSymbol
{
   /** Unicode small Greek epsilon, used as this symbol's display text. */
   private static final String EPSILON_TEXT = "\u03B5";

   /**
    * Epsilon transitions consume no input, so this symbol never accepts a call.
    *
    * @throws UnsupportedOperationException always
    * @see org.reclipse.behavior.inference.automaton.AbstractSymbol#accept(org.reclipse.tracer.model.tracegraph.TGMethodCall,
    *      org.reclipse.behavior.inference.automaton.Token)
    */
   @Override
   public boolean accept(TGMethodCall methodCall, Token token)
   {
      throw new UnsupportedOperationException(
            "Epsilon symbol can not accept any input.");
   }

   /**
    * @return the epsilon character used to render this symbol
    */
   @Override
   public String getSymbolText()
   {
      return EPSILON_TEXT;
   }

   /**
    * Delegates to {@link #getSymbolText()} so subclasses stay consistent.
    */
   @Override
   public String toString()
   {
      return getSymbolText();
   }
}
| CloudScale-Project/StaticSpotter | plugins/org.reclipse.behavior.inference/src/org/reclipse/behavior/inference/automaton/symbols/Epsilon.java | Java | apache-2.0 | 1,149 |
/*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
/*
* This code was generated by https://github.com/googleapis/google-api-java-client-services/
* Modify at your own risk.
*/
package com.google.api.services.billingbudgets.v1beta1;
/**
* Service definition for CloudBillingBudget (v1beta1).
*
* <p>
* The Cloud Billing Budget API stores Cloud Billing budgets, which define a budget plan and the rules to execute as spend is tracked against that plan.
* </p>
*
* <p>
* For more information about this service, see the
* <a href="https://cloud.google.com/billing/docs/how-to/budget-api-overview" target="_blank">API Documentation</a>
* </p>
*
* <p>
* This service uses {@link CloudBillingBudgetRequestInitializer} to initialize global parameters via its
* {@link Builder}.
* </p>
*
* @since 1.3
* @author Google, Inc.
*/
@SuppressWarnings("javadoc")
public class CloudBillingBudget extends com.google.api.client.googleapis.services.json.AbstractGoogleJsonClient {
  // Note: Leave this static initializer at the top of the file.
  // Fails fast at class-load time if the google-api-client on the classpath is older
  // than 1.15, which this generated library requires.
  static {
    com.google.api.client.util.Preconditions.checkState(
        com.google.api.client.googleapis.GoogleUtils.MAJOR_VERSION == 1 &&
        com.google.api.client.googleapis.GoogleUtils.MINOR_VERSION >= 15,
        "You are currently running with version %s of google-api-client. " +
        "You need at least version 1.15 of google-api-client to run version " +
        "1.30.10 of the Cloud Billing Budget API library.", com.google.api.client.googleapis.GoogleUtils.VERSION);
  }
  /**
   * The default encoded root URL of the service. This is determined when the library is generated
   * and normally should not be changed.
   *
   * @since 1.7
   */
  public static final String DEFAULT_ROOT_URL = "https://billingbudgets.googleapis.com/";
  /**
   * The default encoded service path of the service. This is determined when the library is
   * generated and normally should not be changed.
   *
   * @since 1.7
   */
  public static final String DEFAULT_SERVICE_PATH = "";
  /**
   * The default encoded batch path of the service. This is determined when the library is
   * generated and normally should not be changed.
   *
   * @since 1.23
   */
  public static final String DEFAULT_BATCH_PATH = "batch";
  /**
   * The default encoded base URL of the service (root URL + service path). This is determined
   * when the library is generated and normally should not be changed.
   */
  public static final String DEFAULT_BASE_URL = DEFAULT_ROOT_URL + DEFAULT_SERVICE_PATH;
  /**
   * Constructor.
   *
   * <p>
   * Use {@link Builder} if you need to specify any of the optional parameters.
   * </p>
   *
   * @param transport HTTP transport, which should normally be:
   *        <ul>
   *        <li>Google App Engine:
   *        {@code com.google.api.client.extensions.appengine.http.UrlFetchTransport}</li>
   *        <li>Android: {@code newCompatibleTransport} from
   *        {@code com.google.api.client.extensions.android.http.AndroidHttp}</li>
   *        <li>Java: {@link com.google.api.client.googleapis.javanet.GoogleNetHttpTransport#newTrustedTransport()}
   *        </li>
   *        </ul>
   * @param jsonFactory JSON factory, which may be:
   *        <ul>
   *        <li>Jackson: {@code com.google.api.client.json.jackson2.JacksonFactory}</li>
   *        <li>Google GSON: {@code com.google.api.client.json.gson.GsonFactory}</li>
   *        <li>Android Honeycomb or higher:
   *        {@code com.google.api.client.extensions.android.json.AndroidJsonFactory}</li>
   *        </ul>
   * @param httpRequestInitializer HTTP request initializer or {@code null} for none
   * @since 1.7
   */
  public CloudBillingBudget(com.google.api.client.http.HttpTransport transport, com.google.api.client.json.JsonFactory jsonFactory,
      com.google.api.client.http.HttpRequestInitializer httpRequestInitializer) {
    // Delegate to the Builder-based constructor so both construction paths share setup.
    this(new Builder(transport, jsonFactory, httpRequestInitializer));
  }
  /**
   * Package-private constructor used by {@link Builder#build()}.
   *
   * @param builder builder carrying transport, JSON factory and initializers
   */
  CloudBillingBudget(Builder builder) {
    super(builder);
  }
  // Hook applied to every request created by this client; currently just the default behavior.
  @Override
  protected void initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest<?> httpClientRequest) throws java.io.IOException {
    super.initialize(httpClientRequest);
  }
  /**
   * An accessor for creating requests from the BillingAccounts collection.
   *
   * <p>The typical use is:</p>
   * <pre>
   *   {@code CloudBillingBudget billingbudgets = new CloudBillingBudget(...);}
   *   {@code CloudBillingBudget.BillingAccounts.List request = billingbudgets.billingAccounts().list(parameters ...)}
   * </pre>
   *
   * @return the resource collection
   */
  public BillingAccounts billingAccounts() {
    // Inner-class accessors are stateless; a new instance per call is intentional.
    return new BillingAccounts();
  }
/**
* The "billingAccounts" collection of methods.
*/
public class BillingAccounts {
    /**
     * An accessor for creating requests from the Budgets collection.
     *
     * <p>The typical use is:</p>
     * <pre>
     *   {@code CloudBillingBudget billingbudgets = new CloudBillingBudget(...);}
     *   {@code CloudBillingBudget.Budgets.List request = billingbudgets.budgets().list(parameters ...)}
     * </pre>
     *
     * @return the resource collection
     */
    public Budgets budgets() {
      // Inner-class accessors are stateless; a new instance per call is intentional.
      return new Budgets();
    }
/**
* The "budgets" collection of methods.
*/
public class Budgets {
      /**
       * Creates a new budget. See Quotas and limits for more information on the limits of the number of
       * budgets you can create.
       *
       * Create a request for the method "budgets.create".
       *
       * This request holds the parameters needed by the billingbudgets server. After setting any
       * optional parameters, call the {@link Create#execute()} method to invoke the remote operation.
       *
       * @param parent Required. The name of the billing account to create the budget in. Values are of the form
       *        `billingAccounts/{billingAccountId}`.
       * @param content the {@link com.google.api.services.billingbudgets.v1beta1.model.GoogleCloudBillingBudgetsV1beta1CreateBudgetRequest}
       * @return the request
       */
      public Create create(java.lang.String parent, com.google.api.services.billingbudgets.v1beta1.model.GoogleCloudBillingBudgetsV1beta1CreateBudgetRequest content) throws java.io.IOException {
        Create result = new Create(parent, content);
        // Apply the client-wide request initializer before returning the request.
        initialize(result);
        return result;
      }
      /**
       * Request object for "budgets.create". Generated boilerplate: the fluent
       * {@code setX} overrides below only narrow the return type to {@code Create}.
       */
      public class Create extends CloudBillingBudgetRequest<com.google.api.services.billingbudgets.v1beta1.model.GoogleCloudBillingBudgetsV1beta1Budget> {

        private static final String REST_PATH = "v1beta1/{+parent}/budgets";

        // Validates that `parent` looks like billingAccounts/{billingAccountId}.
        private final java.util.regex.Pattern PARENT_PATTERN =
            java.util.regex.Pattern.compile("^billingAccounts/[^/]+$");

        /**
         * Creates a new budget. See Quotas and limits for more information on the limits of the number of
         * budgets you can create.
         *
         * <p> {@link
         * Create#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
         * be called to initialize this instance immediately after invoking the constructor. </p>
         *
         * @param parent Required. The name of the billing account to create the budget in. Values are of the form
         *        `billingAccounts/{billingAccountId}`.
         * @param content the {@link com.google.api.services.billingbudgets.v1beta1.model.GoogleCloudBillingBudgetsV1beta1CreateBudgetRequest}
         * @since 1.13
         */
        protected Create(java.lang.String parent, com.google.api.services.billingbudgets.v1beta1.model.GoogleCloudBillingBudgetsV1beta1CreateBudgetRequest content) {
          super(CloudBillingBudget.this, "POST", REST_PATH, content, com.google.api.services.billingbudgets.v1beta1.model.GoogleCloudBillingBudgetsV1beta1Budget.class);
          this.parent = com.google.api.client.util.Preconditions.checkNotNull(parent, "Required parameter parent must be specified.");
          if (!getSuppressPatternChecks()) {
            com.google.api.client.util.Preconditions.checkArgument(PARENT_PATTERN.matcher(parent).matches(),
                "Parameter parent must conform to the pattern " +
                "^billingAccounts/[^/]+$");
          }
        }

        // --- covariant overrides of the standard query parameters --- //
        @Override
        public Create set$Xgafv(java.lang.String $Xgafv) {
          return (Create) super.set$Xgafv($Xgafv);
        }
        @Override
        public Create setAccessToken(java.lang.String accessToken) {
          return (Create) super.setAccessToken(accessToken);
        }
        @Override
        public Create setAlt(java.lang.String alt) {
          return (Create) super.setAlt(alt);
        }
        @Override
        public Create setCallback(java.lang.String callback) {
          return (Create) super.setCallback(callback);
        }
        @Override
        public Create setFields(java.lang.String fields) {
          return (Create) super.setFields(fields);
        }
        @Override
        public Create setKey(java.lang.String key) {
          return (Create) super.setKey(key);
        }
        @Override
        public Create setOauthToken(java.lang.String oauthToken) {
          return (Create) super.setOauthToken(oauthToken);
        }
        @Override
        public Create setPrettyPrint(java.lang.Boolean prettyPrint) {
          return (Create) super.setPrettyPrint(prettyPrint);
        }
        @Override
        public Create setQuotaUser(java.lang.String quotaUser) {
          return (Create) super.setQuotaUser(quotaUser);
        }
        @Override
        public Create setUploadType(java.lang.String uploadType) {
          return (Create) super.setUploadType(uploadType);
        }
        @Override
        public Create setUploadProtocol(java.lang.String uploadProtocol) {
          return (Create) super.setUploadProtocol(uploadProtocol);
        }

        /**
         * Required. The name of the billing account to create the budget in. Values are of the form
         * `billingAccounts/{billingAccountId}`.
         */
        @com.google.api.client.util.Key
        private java.lang.String parent;

        /** Required. The name of the billing account to create the budget in. Values are of the form
         `billingAccounts/{billingAccountId}`.
         */
        public java.lang.String getParent() {
          return parent;
        }

        /**
         * Required. The name of the billing account to create the budget in. Values are of the form
         * `billingAccounts/{billingAccountId}`.
         */
        public Create setParent(java.lang.String parent) {
          if (!getSuppressPatternChecks()) {
            com.google.api.client.util.Preconditions.checkArgument(PARENT_PATTERN.matcher(parent).matches(),
                "Parameter parent must conform to the pattern " +
                "^billingAccounts/[^/]+$");
          }
          this.parent = parent;
          return this;
        }

        @Override
        public Create set(String parameterName, Object value) {
          return (Create) super.set(parameterName, value);
        }
      }
      /**
       * Deletes a budget. Returns successfully if already deleted.
       *
       * Create a request for the method "budgets.delete".
       *
       * This request holds the parameters needed by the billingbudgets server. After setting any
       * optional parameters, call the {@link Delete#execute()} method to invoke the remote operation.
       *
       * @param name Required. Name of the budget to delete. Values are of the form
       *        `billingAccounts/{billingAccountId}/budgets/{budgetId}`.
       * @return the request
       */
      public Delete delete(java.lang.String name) throws java.io.IOException {
        Delete result = new Delete(name);
        // Apply the client-wide request initializer before returning the request.
        initialize(result);
        return result;
      }
public class Delete extends CloudBillingBudgetRequest<com.google.api.services.billingbudgets.v1beta1.model.GoogleProtobufEmpty> {

  private static final String REST_PATH = "v1beta1/{+name}";

  // Resource-name pattern checked client-side unless suppressed on the client.
  private final java.util.regex.Pattern NAME_PATTERN =
      java.util.regex.Pattern.compile("^billingAccounts/[^/]+/budgets/[^/]+$");

  /**
   * Deletes a budget. Returns successfully if already deleted.
   *
   * Create a request for the method "budgets.delete".
   *
   * This request holds the parameters needed by the billingbudgets server. After setting any
   * optional parameters, call the {@link Delete#execute()} method to invoke the remote operation.
   * <p> {@link
   * Delete#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
   * be called to initialize this instance immediately after invoking the constructor. </p>
   *
   * @param name Required. Name of the budget to delete. Values are of the form
   *        `billingAccounts/{billingAccountId}/budgets/{budgetId}`.
   * @since 1.13
   */
  protected Delete(java.lang.String name) {
    super(CloudBillingBudget.this, "DELETE", REST_PATH, null, com.google.api.services.billingbudgets.v1beta1.model.GoogleProtobufEmpty.class);
    this.name = com.google.api.client.util.Preconditions.checkNotNull(name, "Required parameter name must be specified.");
    if (!getSuppressPatternChecks()) {
      com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
          "Parameter name must conform to the pattern " +
          "^billingAccounts/[^/]+/budgets/[^/]+$");
    }
  }

  // Covariant overrides of the standard query parameters: each delegates to the
  // superclass and narrows the return type so calls chain fluently on Delete.

  @Override
  public Delete set$Xgafv(java.lang.String $Xgafv) {
    return (Delete) super.set$Xgafv($Xgafv);
  }

  @Override
  public Delete setAccessToken(java.lang.String accessToken) {
    return (Delete) super.setAccessToken(accessToken);
  }

  @Override
  public Delete setAlt(java.lang.String alt) {
    return (Delete) super.setAlt(alt);
  }

  @Override
  public Delete setCallback(java.lang.String callback) {
    return (Delete) super.setCallback(callback);
  }

  @Override
  public Delete setFields(java.lang.String fields) {
    return (Delete) super.setFields(fields);
  }

  @Override
  public Delete setKey(java.lang.String key) {
    return (Delete) super.setKey(key);
  }

  @Override
  public Delete setOauthToken(java.lang.String oauthToken) {
    return (Delete) super.setOauthToken(oauthToken);
  }

  @Override
  public Delete setPrettyPrint(java.lang.Boolean prettyPrint) {
    return (Delete) super.setPrettyPrint(prettyPrint);
  }

  @Override
  public Delete setQuotaUser(java.lang.String quotaUser) {
    return (Delete) super.setQuotaUser(quotaUser);
  }

  @Override
  public Delete setUploadType(java.lang.String uploadType) {
    return (Delete) super.setUploadType(uploadType);
  }

  @Override
  public Delete setUploadProtocol(java.lang.String uploadProtocol) {
    return (Delete) super.setUploadProtocol(uploadProtocol);
  }

  /**
   * Required. Name of the budget to delete. Values are of the form
   * `billingAccounts/{billingAccountId}/budgets/{budgetId}`.
   */
  @com.google.api.client.util.Key
  private java.lang.String name;

  /** Required. Name of the budget to delete. Values are of the form
   `billingAccounts/{billingAccountId}/budgets/{budgetId}`.
   */
  public java.lang.String getName() {
    return name;
  }

  /**
   * Required. Name of the budget to delete. Values are of the form
   * `billingAccounts/{billingAccountId}/budgets/{budgetId}`.
   */
  public Delete setName(java.lang.String name) {
    if (!getSuppressPatternChecks()) {
      com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
          "Parameter name must conform to the pattern " +
          "^billingAccounts/[^/]+/budgets/[^/]+$");
    }
    this.name = name;
    return this;
  }

  @Override
  public Delete set(String parameterName, Object value) {
    return (Delete) super.set(parameterName, value);
  }
}
/**
* Returns a budget. WARNING: There are some fields exposed on the Google Cloud Console that aren't
* available on this API. When reading from the API, you will not see these fields in the return
* value, though they may have been set in the Cloud Console.
*
* Create a request for the method "budgets.get".
*
* This request holds the parameters needed by the billingbudgets server. After setting any
* optional parameters, call the {@link Get#execute()} method to invoke the remote operation.
*
* @param name Required. Name of budget to get. Values are of the form
* `billingAccounts/{billingAccountId}/budgets/{budgetId}`.
* @return the request
*/
public Get get(java.lang.String name) throws java.io.IOException {
  // Construct the request, then let the client apply any configured
  // request initializers before handing it to the caller.
  Get request = new Get(name);
  initialize(request);
  return request;
}
public class Get extends CloudBillingBudgetRequest<com.google.api.services.billingbudgets.v1beta1.model.GoogleCloudBillingBudgetsV1beta1Budget> {

  private static final String REST_PATH = "v1beta1/{+name}";

  // Resource-name pattern checked client-side unless suppressed on the client.
  private final java.util.regex.Pattern NAME_PATTERN =
      java.util.regex.Pattern.compile("^billingAccounts/[^/]+/budgets/[^/]+$");

  /**
   * Returns a budget. WARNING: There are some fields exposed on the Google Cloud Console that
   * aren't available on this API. When reading from the API, you will not see these fields in the
   * return value, though they may have been set in the Cloud Console.
   *
   * Create a request for the method "budgets.get".
   *
   * This request holds the parameters needed by the billingbudgets server. After setting any
   * optional parameters, call the {@link Get#execute()} method to invoke the remote operation. <p>
   * {@link Get#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
   * must be called to initialize this instance immediately after invoking the constructor. </p>
   *
   * @param name Required. Name of budget to get. Values are of the form
   *        `billingAccounts/{billingAccountId}/budgets/{budgetId}`.
   * @since 1.13
   */
  protected Get(java.lang.String name) {
    super(CloudBillingBudget.this, "GET", REST_PATH, null, com.google.api.services.billingbudgets.v1beta1.model.GoogleCloudBillingBudgetsV1beta1Budget.class);
    this.name = com.google.api.client.util.Preconditions.checkNotNull(name, "Required parameter name must be specified.");
    if (!getSuppressPatternChecks()) {
      com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
          "Parameter name must conform to the pattern " +
          "^billingAccounts/[^/]+/budgets/[^/]+$");
    }
  }

  // HEAD variants are supported for GET requests; both delegate to the superclass.

  @Override
  public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
    return super.executeUsingHead();
  }

  @Override
  public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
    return super.buildHttpRequestUsingHead();
  }

  // Covariant overrides of the standard query parameters: each delegates to the
  // superclass and narrows the return type so calls chain fluently on Get.

  @Override
  public Get set$Xgafv(java.lang.String $Xgafv) {
    return (Get) super.set$Xgafv($Xgafv);
  }

  @Override
  public Get setAccessToken(java.lang.String accessToken) {
    return (Get) super.setAccessToken(accessToken);
  }

  @Override
  public Get setAlt(java.lang.String alt) {
    return (Get) super.setAlt(alt);
  }

  @Override
  public Get setCallback(java.lang.String callback) {
    return (Get) super.setCallback(callback);
  }

  @Override
  public Get setFields(java.lang.String fields) {
    return (Get) super.setFields(fields);
  }

  @Override
  public Get setKey(java.lang.String key) {
    return (Get) super.setKey(key);
  }

  @Override
  public Get setOauthToken(java.lang.String oauthToken) {
    return (Get) super.setOauthToken(oauthToken);
  }

  @Override
  public Get setPrettyPrint(java.lang.Boolean prettyPrint) {
    return (Get) super.setPrettyPrint(prettyPrint);
  }

  @Override
  public Get setQuotaUser(java.lang.String quotaUser) {
    return (Get) super.setQuotaUser(quotaUser);
  }

  @Override
  public Get setUploadType(java.lang.String uploadType) {
    return (Get) super.setUploadType(uploadType);
  }

  @Override
  public Get setUploadProtocol(java.lang.String uploadProtocol) {
    return (Get) super.setUploadProtocol(uploadProtocol);
  }

  /**
   * Required. Name of budget to get. Values are of the form
   * `billingAccounts/{billingAccountId}/budgets/{budgetId}`.
   */
  @com.google.api.client.util.Key
  private java.lang.String name;

  /** Required. Name of budget to get. Values are of the form
   `billingAccounts/{billingAccountId}/budgets/{budgetId}`.
   */
  public java.lang.String getName() {
    return name;
  }

  /**
   * Required. Name of budget to get. Values are of the form
   * `billingAccounts/{billingAccountId}/budgets/{budgetId}`.
   */
  public Get setName(java.lang.String name) {
    if (!getSuppressPatternChecks()) {
      com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
          "Parameter name must conform to the pattern " +
          "^billingAccounts/[^/]+/budgets/[^/]+$");
    }
    this.name = name;
    return this;
  }

  @Override
  public Get set(String parameterName, Object value) {
    return (Get) super.set(parameterName, value);
  }
}
/**
* Returns a list of budgets for a billing account. WARNING: There are some fields exposed on the
* Google Cloud Console that aren't available on this API. When reading from the API, you will not
* see these fields in the return value, though they may have been set in the Cloud Console.
*
* Create a request for the method "budgets.list".
*
* This request holds the parameters needed by the billingbudgets server. After setting any
* optional parameters, call the {@link List#execute()} method to invoke the remote operation.
*
* @param parent Required. Name of billing account to list budgets under. Values are of the form
* `billingAccounts/{billingAccountId}`.
* @return the request
*/
public List list(java.lang.String parent) throws java.io.IOException {
  // Construct the request, then let the client apply any configured
  // request initializers before handing it to the caller.
  List request = new List(parent);
  initialize(request);
  return request;
}
public class List extends CloudBillingBudgetRequest<com.google.api.services.billingbudgets.v1beta1.model.GoogleCloudBillingBudgetsV1beta1ListBudgetsResponse> {

  private static final String REST_PATH = "v1beta1/{+parent}/budgets";

  // Resource-name pattern checked client-side unless suppressed on the client.
  private final java.util.regex.Pattern PARENT_PATTERN =
      java.util.regex.Pattern.compile("^billingAccounts/[^/]+$");

  /**
   * Returns a list of budgets for a billing account. WARNING: There are some fields exposed on the
   * Google Cloud Console that aren't available on this API. When reading from the API, you will not
   * see these fields in the return value, though they may have been set in the Cloud Console.
   *
   * Create a request for the method "budgets.list".
   *
   * This request holds the parameters needed by the billingbudgets server. After setting any
   * optional parameters, call the {@link List#execute()} method to invoke the remote operation. <p>
   * {@link List#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
   * must be called to initialize this instance immediately after invoking the constructor. </p>
   *
   * @param parent Required. Name of billing account to list budgets under. Values are of the form
   *        `billingAccounts/{billingAccountId}`.
   * @since 1.13
   */
  protected List(java.lang.String parent) {
    super(CloudBillingBudget.this, "GET", REST_PATH, null, com.google.api.services.billingbudgets.v1beta1.model.GoogleCloudBillingBudgetsV1beta1ListBudgetsResponse.class);
    this.parent = com.google.api.client.util.Preconditions.checkNotNull(parent, "Required parameter parent must be specified.");
    if (!getSuppressPatternChecks()) {
      com.google.api.client.util.Preconditions.checkArgument(PARENT_PATTERN.matcher(parent).matches(),
          "Parameter parent must conform to the pattern " +
          "^billingAccounts/[^/]+$");
    }
  }

  // HEAD variants are supported for GET requests; both delegate to the superclass.

  @Override
  public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
    return super.executeUsingHead();
  }

  @Override
  public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
    return super.buildHttpRequestUsingHead();
  }

  // Covariant overrides of the standard query parameters: each delegates to the
  // superclass and narrows the return type so calls chain fluently on List.

  @Override
  public List set$Xgafv(java.lang.String $Xgafv) {
    return (List) super.set$Xgafv($Xgafv);
  }

  @Override
  public List setAccessToken(java.lang.String accessToken) {
    return (List) super.setAccessToken(accessToken);
  }

  @Override
  public List setAlt(java.lang.String alt) {
    return (List) super.setAlt(alt);
  }

  @Override
  public List setCallback(java.lang.String callback) {
    return (List) super.setCallback(callback);
  }

  @Override
  public List setFields(java.lang.String fields) {
    return (List) super.setFields(fields);
  }

  @Override
  public List setKey(java.lang.String key) {
    return (List) super.setKey(key);
  }

  @Override
  public List setOauthToken(java.lang.String oauthToken) {
    return (List) super.setOauthToken(oauthToken);
  }

  @Override
  public List setPrettyPrint(java.lang.Boolean prettyPrint) {
    return (List) super.setPrettyPrint(prettyPrint);
  }

  @Override
  public List setQuotaUser(java.lang.String quotaUser) {
    return (List) super.setQuotaUser(quotaUser);
  }

  @Override
  public List setUploadType(java.lang.String uploadType) {
    return (List) super.setUploadType(uploadType);
  }

  @Override
  public List setUploadProtocol(java.lang.String uploadProtocol) {
    return (List) super.setUploadProtocol(uploadProtocol);
  }

  /**
   * Required. Name of billing account to list budgets under. Values are of the form
   * `billingAccounts/{billingAccountId}`.
   */
  @com.google.api.client.util.Key
  private java.lang.String parent;

  /** Required. Name of billing account to list budgets under. Values are of the form
   `billingAccounts/{billingAccountId}`.
   */
  public java.lang.String getParent() {
    return parent;
  }

  /**
   * Required. Name of billing account to list budgets under. Values are of the form
   * `billingAccounts/{billingAccountId}`.
   */
  public List setParent(java.lang.String parent) {
    if (!getSuppressPatternChecks()) {
      com.google.api.client.util.Preconditions.checkArgument(PARENT_PATTERN.matcher(parent).matches(),
          "Parameter parent must conform to the pattern " +
          "^billingAccounts/[^/]+$");
    }
    this.parent = parent;
    return this;
  }

  /**
   * Optional. The maximum number of budgets to return per page. The default and maximum value
   * are 100.
   */
  @com.google.api.client.util.Key
  private java.lang.Integer pageSize;

  /** Optional. The maximum number of budgets to return per page. The default and maximum value are 100.
   */
  public java.lang.Integer getPageSize() {
    return pageSize;
  }

  /**
   * Optional. The maximum number of budgets to return per page. The default and maximum value
   * are 100.
   */
  public List setPageSize(java.lang.Integer pageSize) {
    this.pageSize = pageSize;
    return this;
  }

  /**
   * Optional. The value returned by the last `ListBudgetsResponse` which indicates that this
   * is a continuation of a prior `ListBudgets` call, and that the system should return the
   * next page of data.
   */
  @com.google.api.client.util.Key
  private java.lang.String pageToken;

  /** Optional. The value returned by the last `ListBudgetsResponse` which indicates that this is a
   continuation of a prior `ListBudgets` call, and that the system should return the next page of
   data.
   */
  public java.lang.String getPageToken() {
    return pageToken;
  }

  /**
   * Optional. The value returned by the last `ListBudgetsResponse` which indicates that this
   * is a continuation of a prior `ListBudgets` call, and that the system should return the
   * next page of data.
   */
  public List setPageToken(java.lang.String pageToken) {
    this.pageToken = pageToken;
    return this;
  }

  @Override
  public List set(String parameterName, Object value) {
    return (List) super.set(parameterName, value);
  }
}
/**
* Updates a budget and returns the updated budget. WARNING: There are some fields exposed on the
* Google Cloud Console that aren't available on this API. Budget fields that are not exposed in
* this API will not be changed by this method.
*
* Create a request for the method "budgets.patch".
*
* This request holds the parameters needed by the billingbudgets server. After setting any
* optional parameters, call the {@link Patch#execute()} method to invoke the remote operation.
*
* @param name Output only. Resource name of the budget. The resource name implies the scope of a budget. Values
* are of the form `billingAccounts/{billingAccountId}/budgets/{budgetId}`.
* @param content the {@link com.google.api.services.billingbudgets.v1beta1.model.GoogleCloudBillingBudgetsV1beta1UpdateBudgetRequest}
* @return the request
*/
public Patch patch(java.lang.String name, com.google.api.services.billingbudgets.v1beta1.model.GoogleCloudBillingBudgetsV1beta1UpdateBudgetRequest content) throws java.io.IOException {
  // Construct the request, then let the client apply any configured
  // request initializers before handing it to the caller.
  Patch request = new Patch(name, content);
  initialize(request);
  return request;
}
public class Patch extends CloudBillingBudgetRequest<com.google.api.services.billingbudgets.v1beta1.model.GoogleCloudBillingBudgetsV1beta1Budget> {

  private static final String REST_PATH = "v1beta1/{+name}";

  // Resource-name pattern checked client-side unless suppressed on the client.
  private final java.util.regex.Pattern NAME_PATTERN =
      java.util.regex.Pattern.compile("^billingAccounts/[^/]+/budgets/[^/]+$");

  /**
   * Updates a budget and returns the updated budget. WARNING: There are some fields exposed on the
   * Google Cloud Console that aren't available on this API. Budget fields that are not exposed in
   * this API will not be changed by this method.
   *
   * Create a request for the method "budgets.patch".
   *
   * This request holds the parameters needed by the billingbudgets server. After setting any
   * optional parameters, call the {@link Patch#execute()} method to invoke the remote operation.
   * <p> {@link
   * Patch#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
   * be called to initialize this instance immediately after invoking the constructor. </p>
   *
   * @param name Output only. Resource name of the budget. The resource name implies the scope of a budget. Values
   *        are of the form `billingAccounts/{billingAccountId}/budgets/{budgetId}`.
   * @param content the {@link com.google.api.services.billingbudgets.v1beta1.model.GoogleCloudBillingBudgetsV1beta1UpdateBudgetRequest}
   * @since 1.13
   */
  protected Patch(java.lang.String name, com.google.api.services.billingbudgets.v1beta1.model.GoogleCloudBillingBudgetsV1beta1UpdateBudgetRequest content) {
    super(CloudBillingBudget.this, "PATCH", REST_PATH, content, com.google.api.services.billingbudgets.v1beta1.model.GoogleCloudBillingBudgetsV1beta1Budget.class);
    this.name = com.google.api.client.util.Preconditions.checkNotNull(name, "Required parameter name must be specified.");
    if (!getSuppressPatternChecks()) {
      com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
          "Parameter name must conform to the pattern " +
          "^billingAccounts/[^/]+/budgets/[^/]+$");
    }
  }

  // Covariant overrides of the standard query parameters: each delegates to the
  // superclass and narrows the return type so calls chain fluently on Patch.

  @Override
  public Patch set$Xgafv(java.lang.String $Xgafv) {
    return (Patch) super.set$Xgafv($Xgafv);
  }

  @Override
  public Patch setAccessToken(java.lang.String accessToken) {
    return (Patch) super.setAccessToken(accessToken);
  }

  @Override
  public Patch setAlt(java.lang.String alt) {
    return (Patch) super.setAlt(alt);
  }

  @Override
  public Patch setCallback(java.lang.String callback) {
    return (Patch) super.setCallback(callback);
  }

  @Override
  public Patch setFields(java.lang.String fields) {
    return (Patch) super.setFields(fields);
  }

  @Override
  public Patch setKey(java.lang.String key) {
    return (Patch) super.setKey(key);
  }

  @Override
  public Patch setOauthToken(java.lang.String oauthToken) {
    return (Patch) super.setOauthToken(oauthToken);
  }

  @Override
  public Patch setPrettyPrint(java.lang.Boolean prettyPrint) {
    return (Patch) super.setPrettyPrint(prettyPrint);
  }

  @Override
  public Patch setQuotaUser(java.lang.String quotaUser) {
    return (Patch) super.setQuotaUser(quotaUser);
  }

  @Override
  public Patch setUploadType(java.lang.String uploadType) {
    return (Patch) super.setUploadType(uploadType);
  }

  @Override
  public Patch setUploadProtocol(java.lang.String uploadProtocol) {
    return (Patch) super.setUploadProtocol(uploadProtocol);
  }

  /**
   * Output only. Resource name of the budget. The resource name implies the scope of a
   * budget. Values are of the form `billingAccounts/{billingAccountId}/budgets/{budgetId}`.
   */
  @com.google.api.client.util.Key
  private java.lang.String name;

  /** Output only. Resource name of the budget. The resource name implies the scope of a budget. Values
   are of the form `billingAccounts/{billingAccountId}/budgets/{budgetId}`.
   */
  public java.lang.String getName() {
    return name;
  }

  /**
   * Output only. Resource name of the budget. The resource name implies the scope of a
   * budget. Values are of the form `billingAccounts/{billingAccountId}/budgets/{budgetId}`.
   */
  public Patch setName(java.lang.String name) {
    if (!getSuppressPatternChecks()) {
      com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
          "Parameter name must conform to the pattern " +
          "^billingAccounts/[^/]+/budgets/[^/]+$");
    }
    this.name = name;
    return this;
  }

  @Override
  public Patch set(String parameterName, Object value) {
    return (Patch) super.set(parameterName, value);
  }
}
}
}
/**
* Builder for {@link CloudBillingBudget}.
*
* <p>
* Implementation is not thread-safe.
* </p>
*
* @since 1.3.0
*/
public static final class Builder extends com.google.api.client.googleapis.services.json.AbstractGoogleJsonClient.Builder {

  /**
   * Returns an instance of a new builder.
   *
   * @param transport HTTP transport, which should normally be:
   *        <ul>
   *        <li>Google App Engine:
   *        {@code com.google.api.client.extensions.appengine.http.UrlFetchTransport}</li>
   *        <li>Android: {@code newCompatibleTransport} from
   *        {@code com.google.api.client.extensions.android.http.AndroidHttp}</li>
   *        <li>Java: {@link com.google.api.client.googleapis.javanet.GoogleNetHttpTransport#newTrustedTransport()}
   *        </li>
   *        </ul>
   * @param jsonFactory JSON factory, which may be:
   *        <ul>
   *        <li>Jackson: {@code com.google.api.client.json.jackson2.JacksonFactory}</li>
   *        <li>Google GSON: {@code com.google.api.client.json.gson.GsonFactory}</li>
   *        <li>Android Honeycomb or higher:
   *        {@code com.google.api.client.extensions.android.json.AndroidJsonFactory}</li>
   *        </ul>
   * @param httpRequestInitializer HTTP request initializer or {@code null} for none
   * @since 1.7
   */
  public Builder(com.google.api.client.http.HttpTransport transport, com.google.api.client.json.JsonFactory jsonFactory,
      com.google.api.client.http.HttpRequestInitializer httpRequestInitializer) {
    super(
        transport,
        jsonFactory,
        DEFAULT_ROOT_URL,
        DEFAULT_SERVICE_PATH,
        httpRequestInitializer,
        false);
    setBatchPath(DEFAULT_BATCH_PATH);
  }

  /** Builds a new instance of {@link CloudBillingBudget}. */
  @Override
  public CloudBillingBudget build() {
    return new CloudBillingBudget(this);
  }

  // Covariant overrides: each delegates to the superclass and narrows the
  // return type so configuration calls chain fluently on this Builder.

  @Override
  public Builder setRootUrl(String rootUrl) {
    return (Builder) super.setRootUrl(rootUrl);
  }

  @Override
  public Builder setServicePath(String servicePath) {
    return (Builder) super.setServicePath(servicePath);
  }

  @Override
  public Builder setBatchPath(String batchPath) {
    return (Builder) super.setBatchPath(batchPath);
  }

  @Override
  public Builder setHttpRequestInitializer(com.google.api.client.http.HttpRequestInitializer httpRequestInitializer) {
    return (Builder) super.setHttpRequestInitializer(httpRequestInitializer);
  }

  @Override
  public Builder setApplicationName(String applicationName) {
    return (Builder) super.setApplicationName(applicationName);
  }

  @Override
  public Builder setSuppressPatternChecks(boolean suppressPatternChecks) {
    return (Builder) super.setSuppressPatternChecks(suppressPatternChecks);
  }

  @Override
  public Builder setSuppressRequiredParameterChecks(boolean suppressRequiredParameterChecks) {
    return (Builder) super.setSuppressRequiredParameterChecks(suppressRequiredParameterChecks);
  }

  @Override
  public Builder setSuppressAllChecks(boolean suppressAllChecks) {
    return (Builder) super.setSuppressAllChecks(suppressAllChecks);
  }

  /**
   * Set the {@link CloudBillingBudgetRequestInitializer}.
   *
   * @since 1.12
   */
  public Builder setCloudBillingBudgetRequestInitializer(
      CloudBillingBudgetRequestInitializer cloudbillingbudgetRequestInitializer) {
    return (Builder) super.setGoogleClientRequestInitializer(cloudbillingbudgetRequestInitializer);
  }

  @Override
  public Builder setGoogleClientRequestInitializer(
      com.google.api.client.googleapis.services.GoogleClientRequestInitializer googleClientRequestInitializer) {
    return (Builder) super.setGoogleClientRequestInitializer(googleClientRequestInitializer);
  }
}
}
| googleapis/google-api-java-client-services | clients/google-api-services-billingbudgets/v1beta1/1.30.1/com/google/api/services/billingbudgets/v1beta1/CloudBillingBudget.java | Java | apache-2.0 | 41,775 |
/*
* Copyright 2017 Johannes Donath <johannesd@torchmind.com>
* and other copyright owners as documented in the project's IP log.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.torchmind.observable.primitive;
import com.torchmind.observable.AbstractObservable;
import com.torchmind.observable.listener.ValidationListener;
import edu.umd.cs.findbugs.annotations.Nullable;
/**
* Provides a observable implementation which simplifies access to short values.
*
* @author <a href="mailto:johannesd@torchmind.com">Johannes Donath</a>
*/
public class SimpleShortObservable extends AbstractObservable<Short> implements ShortObservable {

  // Primitive value reported by getValue() whenever the boxed value is null.
  private final short fallbackValue;

  public SimpleShortObservable(
      @Nullable ValidationListener<Short> validationListener, Short value, short fallbackValue) {
    super(validationListener, value);
    this.fallbackValue = fallbackValue;
  }

  public SimpleShortObservable(
      @Nullable ValidationListener<Short> validationListener, Short value) {
    this(validationListener, value, (short) 0);
  }

  public SimpleShortObservable(Short value) {
    this(null, value);
  }

  public SimpleShortObservable() {
    this(null);
  }

  /**
   * {@inheritDoc}
   */
  @Override
  public short getValue() {
    // Unbox defensively: substitute the configured fallback for a null value.
    Short boxed = this.get();
    return boxed == null ? this.fallbackValue : boxed;
  }

  /**
   * {@inheritDoc}
   */
  @Override
  public void setValue(short value) {
    this.set(value);
  }
}
| Torchmind/Observables | src/main/java/com/torchmind/observable/primitive/SimpleShortObservable.java | Java | apache-2.0 | 2,007 |
package com.lonepulse.icklebot.annotation.inject;
/*
* #%L
* IckleBot
* %%
* Copyright (C) 2013 Lonepulse
* %%
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* #L%
*/
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
 * <p>Initiates <b>Implicit Injection</b> on all the instance variables of the
 * annotated type.</p>
 *
 * @version 1.0.0
 * <br><br>
 * @author <a href="mailto:lahiru@lonepulse.com">Lahiru Sahan Jayasinghe</a>
 */
@Retention(RetentionPolicy.RUNTIME) // must survive to runtime for reflective injection
@Target(ElementType.TYPE)           // applicable to classes/interfaces only
public @interface InjectAll {}
| sahan/IckleBot | icklebot/src/main/java/com/lonepulse/icklebot/annotation/inject/InjectAll.java | Java | apache-2.0 | 1,139 |
#include "source/common/stats/allocator_impl.h"
#include <algorithm>
#include <cstdint>
#include "envoy/stats/sink.h"
#include "envoy/stats/stats.h"
#include "source/common/common/hash.h"
#include "source/common/common/lock_guard.h"
#include "source/common/common/logger.h"
#include "source/common/common/thread.h"
#include "source/common/common/thread_annotations.h"
#include "source/common/common/utility.h"
#include "source/common/stats/metric_impl.h"
#include "source/common/stats/stat_merger.h"
#include "source/common/stats/symbol_table.h"
#include "absl/container/flat_hash_set.h"
namespace Envoy {
namespace Stats {
const char AllocatorImpl::DecrementToZeroSyncPoint[] = "decrement-zero";
// Destructor: by this point every live stat must already have been released
// (their refcount-zero path erases them from counters_/gauges_), so the live
// sets must be empty.
AllocatorImpl::~AllocatorImpl() {
  ASSERT(counters_.empty());
  ASSERT(gauges_.empty());
#ifndef NDEBUG
  // Debug-only: re-insert "deleted" stats into the live sets so that the
  // ASSERTs inside removeFromSetLockHeld() (which run when those stats are
  // finally destroyed) can find and erase them.
  // Move deleted stats into the sets for the ASSERTs in removeFromSetLockHeld to function.
  for (auto& counter : deleted_counters_) {
    auto insertion = counters_.insert(counter.get());
    // Assert that there were no duplicates.
    ASSERT(insertion.second);
  }
  for (auto& gauge : deleted_gauges_) {
    auto insertion = gauges_.insert(gauge.get());
    // Assert that there were no duplicates.
    ASSERT(insertion.second);
  }
  for (auto& text_readout : deleted_text_readouts_) {
    auto insertion = text_readouts_.insert(text_readout.get());
    // Assert that there were no duplicates.
    ASSERT(insertion.second);
  }
#endif
}
#ifndef ENVOY_CONFIG_COVERAGE
void AllocatorImpl::debugPrint() {
Thread::LockGuard lock(mutex_);
for (Counter* counter : counters_) {
ENVOY_LOG_MISC(info, "counter: {}", symbolTable().toString(counter->statName()));
}
for (Gauge* gauge : gauges_) {
ENVOY_LOG_MISC(info, "gauge: {}", symbolTable().toString(gauge->statName()));
}
}
#endif
// Counter, Gauge and TextReadout inherit from RefcountInterface and
// Metric. MetricImpl takes care of most of the Metric API, but we need to cover
// symbolTable() here, which we don't store directly, but get it via the alloc,
// which we need in order to clean up the counter and gauge maps in that class
// when they are destroyed.
//
// We implement the RefcountInterface API to avoid weak counter and destructor overhead in
// shared_ptr.
// Shared base for refcounted stat implementations (counters, gauges, text
// readouts). Provides the Metric flag/used() plumbing and a hand-rolled
// refcount that coordinates with the allocator's lock so a stat is removed
// from the allocator's set atomically with its last release.
template <class BaseClass> class StatsSharedImpl : public MetricImpl<BaseClass> {
public:
  StatsSharedImpl(StatName name, AllocatorImpl& alloc, StatName tag_extracted_name,
                  const StatNameTagVector& stat_name_tags)
      : MetricImpl<BaseClass>(name, tag_extracted_name, stat_name_tags, alloc.symbolTable()),
        alloc_(alloc) {}

  ~StatsSharedImpl() override {
    // MetricImpl must be explicitly cleared() before destruction, otherwise it
    // will not be able to access the SymbolTable& to free the symbols. An RAII
    // alternative would be to store the SymbolTable reference in the
    // MetricImpl, costing 8 bytes per stat.
    this->clear(symbolTable());
  }

  // Metric
  SymbolTable& symbolTable() final { return alloc_.symbolTable(); }
  // A stat is "used" once any mutation sets the Used flag bit.
  bool used() const override { return flags_ & Metric::Flags::Used; }

  // RefcountInterface
  void incRefCount() override { ++ref_count_; }
  bool decRefCount() override {
    // We must, unfortunately, hold the allocator's lock when decrementing the
    // refcount. Otherwise another thread may simultaneously try to allocate the
    // same name'd stat after we decrement it, and we'll wind up with a
    // dtor/update race. To avoid this we must hold the lock until the stat is
    // removed from the map.
    //
    // It might be worth thinking about a race-free way to decrement ref-counts
    // without a lock, for the case where ref_count > 2, and we don't need to
    // destruct anything. But it seems preferable at to be conservative here,
    // as stats will only go out of scope when a scope is destructed (during
    // xDS) or during admin stats operations.
    Thread::LockGuard lock(alloc_.mutex_);
    ASSERT(ref_count_ >= 1);
    if (--ref_count_ == 0) {
      // Test hook: lets tests pause a thread exactly at the 1->0 transition.
      alloc_.sync().syncPoint(AllocatorImpl::DecrementToZeroSyncPoint);
      removeFromSetLockHeld();
      return true;
    }
    return false;
  }
  uint32_t use_count() const override { return ref_count_; }

  /**
   * We must atomically remove the counter/gauges from the allocator's sets when
   * our ref-count decrement hits zero. The counters and gauges are held in
   * distinct sets so we virtualize this removal helper.
   */
  virtual void removeFromSetLockHeld() ABSL_EXCLUSIVE_LOCKS_REQUIRED(alloc_.mutex_) PURE;

protected:
  AllocatorImpl& alloc_;

  // ref_count_ can be incremented as an atomic, without taking a new lock, as
  // the critical 0->1 transition occurs in makeCounter and makeGauge, which
  // already hold the lock. Increment also occurs when copying shared pointers,
  // but these are always in transition to ref-count 2 or higher, and thus
  // cannot race with a decrement to zero.
  //
  // However, we must hold alloc_.mutex_ when decrementing ref_count_ so that
  // when it hits zero we can atomically remove it from alloc_.counters_ or
  // alloc_.gauges_. We leave it atomic to avoid taking the lock on increment.
  std::atomic<uint32_t> ref_count_{0};

  // Bit set of Metric::Flags (e.g. Used); atomic so flag updates need no lock.
  std::atomic<uint16_t> flags_{0};
};
// Monotonic counter. Tracks a lifetime total (value_) plus a
// pending_increment_ delta that sinks drain via latch().
class CounterImpl : public StatsSharedImpl<Counter> {
public:
  CounterImpl(StatName name, AllocatorImpl& alloc, StatName tag_extracted_name,
              const StatNameTagVector& stat_name_tags)
      : StatsSharedImpl(name, alloc, tag_extracted_name, stat_name_tags) {}

  // Erases this counter from the allocator's live/sinked sets; called with
  // alloc_.mutex_ held when the refcount hits zero.
  void removeFromSetLockHeld() ABSL_EXCLUSIVE_LOCKS_REQUIRED(alloc_.mutex_) override {
    const size_t count = alloc_.counters_.erase(statName());
    ASSERT(count == 1);
    alloc_.sinked_counters_.erase(this);
  }

  // Stats::Counter
  void add(uint64_t amount) override {
    // Note that a reader may see a new value but an old pending_increment_ or
    // used(). From a system perspective this should be eventually consistent.
    value_ += amount;
    pending_increment_ += amount;
    flags_ |= Flags::Used;
  }
  void inc() override { add(1); }
  // Returns the delta accumulated since the previous latch() and resets it.
  uint64_t latch() override { return pending_increment_.exchange(0); }
  void reset() override { value_ = 0; }
  uint64_t value() const override { return value_; }

private:
  std::atomic<uint64_t> value_{0};
  // Amount added since the last latch(); drained by stat sinks.
  std::atomic<uint64_t> pending_increment_{0};
};
// Gauge implementation. The reported value is the sum of a locally written
// child_value_ and a parent_value_ supplied via setParentValue(); how the
// parent value is merged is governed by the ImportMode flags.
class GaugeImpl : public StatsSharedImpl<Gauge> {
public:
  GaugeImpl(StatName name, AllocatorImpl& alloc, StatName tag_extracted_name,
            const StatNameTagVector& stat_name_tags, ImportMode import_mode)
      : StatsSharedImpl(name, alloc, tag_extracted_name, stat_name_tags) {
    switch (import_mode) {
    case ImportMode::Accumulate:
      flags_ |= Flags::LogicAccumulate;
      break;
    case ImportMode::NeverImport:
      flags_ |= Flags::NeverImport;
      break;
    case ImportMode::Uninitialized:
      // Note that we don't clear any flag bits for import_mode==Uninitialized,
      // as we may have an established import_mode when this stat was created in
      // an alternate scope. See
      // https://github.com/envoyproxy/envoy/issues/7227.
      break;
    }
  }

  // The ABSL_EXCLUSIVE_LOCKS_REQUIRED attribute is placed before `override`
  // for consistency with CounterImpl and TextReadoutImpl (and because some
  // toolchains reject attributes after the virt-specifier).
  void removeFromSetLockHeld() ABSL_EXCLUSIVE_LOCKS_REQUIRED(alloc_.mutex_) override {
    const size_t count = alloc_.gauges_.erase(statName());
    ASSERT(count == 1);
    alloc_.sinked_gauges_.erase(this);
  }

  // Stats::Gauge
  void add(uint64_t amount) override {
    child_value_ += amount;
    flags_ |= Flags::Used;
  }
  void dec() override { sub(1); }
  void inc() override { add(1); }
  void set(uint64_t value) override {
    child_value_ = value;
    flags_ |= Flags::Used;
  }
  void sub(uint64_t amount) override {
    ASSERT(child_value_ >= amount);
    ASSERT(used() || amount == 0);
    child_value_ -= amount;
  }
  // Total observed value: local contribution plus the imported parent value.
  uint64_t value() const override { return child_value_ + parent_value_; }

  // Derives the import mode from the flag bits; NeverImport wins over
  // LogicAccumulate, and neither bit set means Uninitialized.
  ImportMode importMode() const override {
    if (flags_ & Flags::NeverImport) {
      return ImportMode::NeverImport;
    } else if (flags_ & Flags::LogicAccumulate) {
      return ImportMode::Accumulate;
    }
    return ImportMode::Uninitialized;
  }

  // Reconciles this gauge's import mode with a newly requested one. Only
  // transitions from Uninitialized are allowed (ASSERTed below).
  void mergeImportMode(ImportMode import_mode) override {
    ImportMode current = importMode();
    if (current == import_mode) {
      return;
    }
    switch (import_mode) {
    case ImportMode::Uninitialized:
      // mergeImportNode(ImportMode::Uninitialized) is called when merging an
      // existing stat with importMode() == Accumulate or NeverImport.
      break;
    case ImportMode::Accumulate:
      ASSERT(current == ImportMode::Uninitialized);
      flags_ |= Flags::LogicAccumulate;
      break;
    case ImportMode::NeverImport:
      ASSERT(current == ImportMode::Uninitialized);
      // A previous revision of Envoy may have transferred a gauge that it
      // thought was Accumulate. But the new version thinks it's NeverImport, so
      // we clear the accumulated value.
      parent_value_ = 0;
      flags_ &= ~Flags::Used;
      flags_ |= Flags::NeverImport;
      break;
    }
  }

  void setParentValue(uint64_t value) override { parent_value_ = value; }

private:
  std::atomic<uint64_t> parent_value_{0};
  std::atomic<uint64_t> child_value_{0};
};
// Text readout implementation: a mutex-guarded string value.
class TextReadoutImpl : public StatsSharedImpl<TextReadout> {
public:
  TextReadoutImpl(StatName name, AllocatorImpl& alloc, StatName tag_extracted_name,
                  const StatNameTagVector& stat_name_tags)
      : StatsSharedImpl(name, alloc, tag_extracted_name, stat_name_tags) {}

  // Erases this readout from the allocator's lookup and sink sets. The caller
  // must already hold alloc_.mutex_.
  void removeFromSetLockHeld() ABSL_EXCLUSIVE_LOCKS_REQUIRED(alloc_.mutex_) override {
    const size_t erased = alloc_.text_readouts_.erase(statName());
    ASSERT(erased == 1);
    alloc_.sinked_text_readouts_.erase(this);
  }

  // Stats::TextReadout
  void set(absl::string_view value) override {
    // Copy before locking so the string allocation happens outside mutex_.
    std::string copied(value);
    absl::MutexLock lock(&mutex_);
    value_ = std::move(copied);
    flags_ |= Flags::Used;
  }
  std::string value() const override {
    absl::MutexLock lock(&mutex_);
    return value_;
  }

private:
  mutable absl::Mutex mutex_;
  std::string value_ ABSL_GUARDED_BY(mutex_);
};
// Finds or creates the counter registered under `name`. Creation happens under
// mutex_, which also guards the critical 0->1 ref-count transition.
CounterSharedPtr AllocatorImpl::makeCounter(StatName name, StatName tag_extracted_name,
                                            const StatNameTagVector& stat_name_tags) {
  Thread::LockGuard lock(mutex_);
  // A name must not simultaneously identify a gauge or a text readout.
  ASSERT(gauges_.find(name) == gauges_.end());
  ASSERT(text_readouts_.find(name) == text_readouts_.end());
  const auto existing = counters_.find(name);
  if (existing != counters_.end()) {
    return CounterSharedPtr(*existing);
  }
  CounterSharedPtr counter(makeCounterInternal(name, tag_extracted_name, stat_name_tags));
  counters_.insert(counter.get());
  // Track the new counter for sink flushing when it matches the predicate.
  if (sink_predicates_ != nullptr && sink_predicates_->includeCounter(*counter)) {
    const auto inserted = sinked_counters_.insert(counter.get());
    ASSERT(inserted.second);
  }
  return counter;
}
// Finds or creates the gauge registered under `name`, applying `import_mode`
// only on first creation.
GaugeSharedPtr AllocatorImpl::makeGauge(StatName name, StatName tag_extracted_name,
                                        const StatNameTagVector& stat_name_tags,
                                        Gauge::ImportMode import_mode) {
  Thread::LockGuard lock(mutex_);
  // A name must not simultaneously identify a counter or a text readout.
  ASSERT(counters_.find(name) == counters_.end());
  ASSERT(text_readouts_.find(name) == text_readouts_.end());
  const auto existing = gauges_.find(name);
  if (existing != gauges_.end()) {
    return GaugeSharedPtr(*existing);
  }
  GaugeSharedPtr gauge(
      new GaugeImpl(name, *this, tag_extracted_name, stat_name_tags, import_mode));
  gauges_.insert(gauge.get());
  // Track the new gauge for sink flushing when it matches the predicate.
  if (sink_predicates_ != nullptr && sink_predicates_->includeGauge(*gauge)) {
    const auto inserted = sinked_gauges_.insert(gauge.get());
    ASSERT(inserted.second);
  }
  return gauge;
}
// Finds or creates the text readout registered under `name`.
TextReadoutSharedPtr AllocatorImpl::makeTextReadout(StatName name, StatName tag_extracted_name,
                                                    const StatNameTagVector& stat_name_tags) {
  Thread::LockGuard lock(mutex_);
  // A name must not simultaneously identify a counter or a gauge.
  ASSERT(counters_.find(name) == counters_.end());
  ASSERT(gauges_.find(name) == gauges_.end());
  const auto existing = text_readouts_.find(name);
  if (existing != text_readouts_.end()) {
    return TextReadoutSharedPtr(*existing);
  }
  TextReadoutSharedPtr text_readout(
      new TextReadoutImpl(name, *this, tag_extracted_name, stat_name_tags));
  text_readouts_.insert(text_readout.get());
  // Track the new readout for sink flushing when it matches the predicate.
  if (sink_predicates_ != nullptr && sink_predicates_->includeTextReadout(*text_readout)) {
    const auto inserted = sinked_text_readouts_.insert(text_readout.get());
    ASSERT(inserted.second);
  }
  return text_readout;
}
// Test-only probe: reports whether mutex_ is currently held by attempting a
// non-blocking acquisition and immediately releasing it on success.
bool AllocatorImpl::isMutexLockedForTest() {
  if (mutex_.tryLock()) {
    // We got the lock, so nobody else was holding it.
    mutex_.unlock();
    return false;
  }
  return true;
}
// Constructs a heap-allocated CounterImpl; ownership transfers to the caller
// (makeCounter wraps the raw pointer into a CounterSharedPtr).
Counter* AllocatorImpl::makeCounterInternal(StatName name, StatName tag_extracted_name,
const StatNameTagVector& stat_name_tags) {
return new CounterImpl(name, *this, tag_extracted_name, stat_name_tags);
}
// Invokes f_stat on every registered counter under mutex_; f_size, when
// non-null, first receives the total number of counters.
void AllocatorImpl::forEachCounter(SizeFn f_size, StatFn<Counter> f_stat) const {
  Thread::LockGuard lock(mutex_);
  if (f_size != nullptr) {
    f_size(counters_.size());
  }
  for (const auto& counter : counters_) {
    f_stat(*counter);
  }
}
// Invokes f_stat on every registered gauge under mutex_; f_size, when
// non-null, first receives the total number of gauges.
void AllocatorImpl::forEachGauge(SizeFn f_size, StatFn<Gauge> f_stat) const {
  Thread::LockGuard lock(mutex_);
  if (f_size != nullptr) {
    f_size(gauges_.size());
  }
  for (const auto& gauge : gauges_) {
    f_stat(*gauge);
  }
}
// Invokes f_stat on every registered text readout under mutex_; f_size, when
// non-null, first receives the total number of readouts.
void AllocatorImpl::forEachTextReadout(SizeFn f_size, StatFn<TextReadout> f_stat) const {
  Thread::LockGuard lock(mutex_);
  if (f_size != nullptr) {
    f_size(text_readouts_.size());
  }
  for (const auto& text_readout : text_readouts_) {
    f_stat(*text_readout);
  }
}
// Iterates the counters selected by the sink predicates; with no predicates
// installed, every counter is considered sinked.
void AllocatorImpl::forEachSinkedCounter(SizeFn f_size, StatFn<Counter> f_stat) const {
  if (sink_predicates_ == nullptr) {
    forEachCounter(f_size, f_stat);
    return;
  }
  Thread::LockGuard lock(mutex_);
  // Guard f_size against nullptr, matching forEachCounter's contract (the
  // original called it unconditionally only on this path).
  if (f_size != nullptr) {
    f_size(sinked_counters_.size());
  }
  for (auto counter : sinked_counters_) {
    f_stat(*counter);
  }
}
// Iterates the gauges selected by the sink predicates; with no predicates
// installed, every gauge is considered sinked.
void AllocatorImpl::forEachSinkedGauge(SizeFn f_size, StatFn<Gauge> f_stat) const {
  if (sink_predicates_ == nullptr) {
    forEachGauge(f_size, f_stat);
    return;
  }
  Thread::LockGuard lock(mutex_);
  // Guard f_size against nullptr, matching forEachGauge's contract (the
  // original called it unconditionally only on this path).
  if (f_size != nullptr) {
    f_size(sinked_gauges_.size());
  }
  for (auto gauge : sinked_gauges_) {
    f_stat(*gauge);
  }
}
// Iterates the text readouts selected by the sink predicates; with no
// predicates installed, every readout is considered sinked.
void AllocatorImpl::forEachSinkedTextReadout(SizeFn f_size, StatFn<TextReadout> f_stat) const {
  if (sink_predicates_ == nullptr) {
    forEachTextReadout(f_size, f_stat);
    return;
  }
  Thread::LockGuard lock(mutex_);
  // Guard f_size against nullptr, matching forEachTextReadout's contract (the
  // original called it unconditionally only on this path).
  if (f_size != nullptr) {
    f_size(sinked_text_readouts_.size());
  }
  for (auto text_readout : sinked_text_readouts_) {
    f_stat(*text_readout);
  }
}
// Installs the sink predicates (allowed exactly once) and rebuilds the three
// sinked-stat sets from the full registries against the new predicates.
void AllocatorImpl::setSinkPredicates(std::unique_ptr<SinkPredicates>&& sink_predicates) {
  Thread::LockGuard lock(mutex_);
  ASSERT(sink_predicates_ == nullptr);
  sink_predicates_ = std::move(sink_predicates);
  sinked_counters_.clear();
  sinked_gauges_.clear();
  sinked_text_readouts_.clear();
  // Add counters to the set of sinked counters. Use insert() uniformly across
  // the three stat types (the original mixed emplace() and insert()).
  for (auto& counter : counters_) {
    if (sink_predicates_->includeCounter(*counter)) {
      sinked_counters_.insert(counter);
    }
  }
  // Add gauges to the set of sinked gauges.
  for (auto& gauge : gauges_) {
    if (sink_predicates_->includeGauge(*gauge)) {
      sinked_gauges_.insert(gauge);
    }
  }
  // Add text_readouts to the set of sinked text readouts.
  for (auto& text_readout : text_readouts_) {
    if (sink_predicates_->includeTextReadout(*text_readout)) {
      sinked_text_readouts_.insert(text_readout);
    }
  }
}
// Moves a counter out of the live registry and sink set into the
// deleted-counters list, keeping it alive until ~AllocatorImpl.
void AllocatorImpl::markCounterForDeletion(const CounterSharedPtr& counter) {
  Thread::LockGuard lock(mutex_);
  const auto it = counters_.find(counter->statName());
  if (it == counters_.end()) {
    // Already marked for deletion; nothing to do.
    return;
  }
  ASSERT(counter.get() == *it);
  // Duplicates are ASSERTed in ~AllocatorImpl.
  deleted_counters_.emplace_back(*it);
  counters_.erase(it);
  sinked_counters_.erase(counter.get());
}
// Moves a gauge out of the live registry and sink set into the
// deleted-gauges list, keeping it alive until ~AllocatorImpl.
void AllocatorImpl::markGaugeForDeletion(const GaugeSharedPtr& gauge) {
  Thread::LockGuard lock(mutex_);
  const auto it = gauges_.find(gauge->statName());
  if (it == gauges_.end()) {
    // Already marked for deletion; nothing to do.
    return;
  }
  ASSERT(gauge.get() == *it);
  // Duplicates are ASSERTed in ~AllocatorImpl.
  deleted_gauges_.emplace_back(*it);
  gauges_.erase(it);
  sinked_gauges_.erase(gauge.get());
}
// Moves a text readout out of the live registry and sink set into the
// deleted-readouts list, keeping it alive until ~AllocatorImpl.
void AllocatorImpl::markTextReadoutForDeletion(const TextReadoutSharedPtr& text_readout) {
  Thread::LockGuard lock(mutex_);
  const auto it = text_readouts_.find(text_readout->statName());
  if (it == text_readouts_.end()) {
    // Already marked for deletion; nothing to do.
    return;
  }
  ASSERT(text_readout.get() == *it);
  // Duplicates are ASSERTed in ~AllocatorImpl.
  deleted_text_readouts_.emplace_back(*it);
  text_readouts_.erase(it);
  sinked_text_readouts_.erase(text_readout.get());
}
} // namespace Stats
} // namespace Envoy
| envoyproxy/envoy | source/common/stats/allocator_impl.cc | C++ | apache-2.0 | 17,160 |
"use strict";
// TypeScript-emitted helper: drives an async-function body (compiled to a
// generator) and adapts it to a Promise, resuming the generator whenever an
// awaited value settles. Do not edit by hand; regenerated by tsc.
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
    return new (P || (P = Promise))(function (resolve, reject) {
        // Resume the generator with the awaited result; surface sync throws.
        function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
        // Route an awaited rejection back into the generator as a thrown error.
        function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
        // Either finish the promise or wrap the yielded value and keep going.
        function step(result) { result.done ? resolve(result.value) : new P(function (resolve) { resolve(result.value); }).then(fulfilled, rejected); }
        step((generator = generator.apply(thisArg, _arguments || [])).next());
    });
};
Object.defineProperty(exports, "__esModule", { value: true });
const DatastoreOperation_1 = require("./DatastoreOperation");
const Core_1 = require("../Core");
const BasicUtils_1 = require("../utility/BasicUtils");
const Messaging_1 = require("../Messaging");
// Operation that evicts cached entities of one model from the Pebblebed cache
// store. Accepts raw ids (string / int) or complete datastore keys, singly or
// as arrays. Compiled output (tsc) — see the .ts source for edits.
class DatastoreFlush extends DatastoreOperation_1.DatastoreBaseOperation {
    constructor(model, idsOrKeys) {
        super(model);
        this.flushIds = [];
        // True when the caller supplied full datastore keys rather than ids.
        this.usingKeys = false;
        if (idsOrKeys != null) {
            // Normalize the input to an array.
            if (Array.isArray(idsOrKeys)) {
                this.flushIds = idsOrKeys;
            }
            else {
                this.flushIds = [idsOrKeys];
            }
            // Object entries are treated as datastore keys; primitives as ids.
            if (typeof this.flushIds[0] === "object") {
                if (this.flushIds[0].kind === this.kind) {
                    this.usingKeys = true;
                }
                else {
                    // Keys belonging to a different kind cannot be flushed here.
                    Messaging_1.throwError(Messaging_1.CreateMessage.OPERATION_KEYS_WRONG(this.model, "FLUSH IN CACHE"));
                }
            }
            else {
                // Validate / coerce each raw id against the model's id type.
                this.flushIds = this.flushIds.map(id => {
                    if (this.idType === "int" && BasicUtils_1.isNumber(id)) {
                        // Wrap numeric ids in the datastore module's int type.
                        return Core_1.default.Instance.dsModule.int(id);
                    }
                    else if (this.idType === "string" && typeof id === "string") {
                        if (id.length === 0) {
                            Messaging_1.throwError(Messaging_1.CreateMessage.OPERATION_STRING_ID_EMPTY(this.model, "FLUSH IN CACHE"));
                        }
                        return id;
                    }
                    // Id does not match the declared id type for this model.
                    Messaging_1.throwError(Messaging_1.CreateMessage.OPERATION_DATA_ID_TYPE_ERROR(this.model, "FLUSH IN CACHE", id));
                });
            }
        }
    }
    // Resolves the stored ids / keys to full datastore keys and asks the cache
    // store to evict them. Logs a warning (no throw) when no store is set.
    run() {
        return __awaiter(this, void 0, void 0, function* () {
            let flushKeys;
            if (this.usingKeys) {
                flushKeys = this.flushIds.map(this.augmentKey);
            }
            else {
                const baseKey = this.getBaseKey();
                flushKeys = this.flushIds.map(id => {
                    return this.createFullKey(baseKey.concat(this.kind, id));
                });
            }
            if (Core_1.default.Instance.cacheStore != null) {
                yield Core_1.default.Instance.cacheStore.flushEntitiesByKeys(flushKeys);
            }
            else {
                Messaging_1.warn(`Trying to flush some ids / keys of [${this.kind}] - but no Cache Store has been set on Pebblebed instance!`);
            }
        });
    }
}
exports.default = DatastoreFlush;
//# sourceMappingURL=DatastoreFlush.js.map | lostpebble/pebblebed | dist/operations/DatastoreFlush.js | JavaScript | apache-2.0 | 3,302 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.ode.bpel.common.evt;
import java.util.Properties;
import org.apache.commons.lang.BooleanUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.ode.bpel.evt.BpelEvent;
import org.apache.ode.bpel.iapi.BpelEventListener;
/**
 * Example implementation of the {@link BpelEventListener} interface.
 *
 * Dumps navigation events to a logging appender and optionally to stdout. To
 * use the DebugBpelEventListener add the following lines to your
 * ode-xxx.properties file: <code>
 * ode-jbi.event.listeners=org.apache.ode.bpel.common.evt.DebugBpelEventListener
 * debugeventlistener.dumpToStdOut=on/off
 * </code>
 *
 * @author Tammo van Lessen (University of Stuttgart)
 */
public class DebugBpelEventListener implements BpelEventListener {
    // Log under the concrete listener class rather than the BpelEventListener
    // interface, so logging configuration can target this listener directly
    // (the original passed the interface class, a copy-paste slip).
    private static final Log __log = LogFactory.getLog(DebugBpelEventListener.class);

    /** Configuration key toggling the additional stdout dump. */
    private static final String SYSOUT_KEY = "debugeventlistener.dumpToStdOut";

    // Whether events are echoed to stdout in addition to the debug log.
    private boolean _dumpToStdOut = false;

    /**
     * Logs each BPEL event at debug level and, when configured, echoes it to
     * stdout as well.
     */
    public void onEvent(BpelEvent bpelEvent) {
        if (__log.isDebugEnabled()) {
            __log.debug(bpelEvent.toString());
        }
        if (_dumpToStdOut) {
            System.out.println(bpelEvent.toString());
        }
    }

    /**
     * Reads the stdout toggle from the supplied configuration. BooleanUtils
     * accepts "on"/"off" as well as "true"/"false"; defaults to off.
     */
    public void startup(Properties configProperties) {
        if (configProperties != null) {
            _dumpToStdOut = BooleanUtils.toBoolean(configProperties
                    .getProperty(SYSOUT_KEY, "false"));
        }
    }

    /** No resources to release. */
    public void shutdown() {
    }
}
| TheRingbearer/HAWKS | ode/bpel-epr/src/main/java/org/apache/ode/bpel/common/evt/DebugBpelEventListener.java | Java | apache-2.0 | 2,241 |
<?php
// Contact-form mail relay: validates POSTed fields, sanitizes them against
// email header injection, and forwards the message. Echoes an error string and
// returns false on bad input so the AJAX caller can surface the failure.

// Check for empty fields and a syntactically valid email address.
if (empty($_POST['name']) ||
    empty($_POST['email']) ||
    empty($_POST['phone']) ||
    empty($_POST['message']) ||
    !filter_var($_POST['email'], FILTER_VALIDATE_EMAIL)) {
    echo "No arguments Provided!";
    return false;
}

// Strip CR/LF from values that end up in the Subject or mail headers, to
// block email header injection via crafted form input.
$strip_newlines = function ($value) {
    return str_replace(array("\r", "\n"), '', $value);
};

$name = $strip_newlines($_POST['name']);
$email_address = $strip_newlines($_POST['email']);
$phone = $strip_newlines($_POST['phone']);
$message = $_POST['message']; // Body text only; newlines are legitimate here.

// Create the email and send the message
$to = 'quickbytekenya@gmail.com'; // Destination inbox for the contact form.
$email_subject = "Website Contact Form: $name";
$email_body = "You have received a new message from your website contact form.\n\n"
    . "Here are the details:\n\nName: $name\n\nEmail: $email_address\n\nPhone: $phone\n\nMessage:\n$message";
$headers = "From: noreply@yourdomain.com\n"; // Envelope sender shown to the recipient.
$headers .= "Reply-To: $email_address";

// Report the mailer's actual result instead of unconditionally claiming
// success (the original always returned true, even when mail() failed).
return mail($to, $email_subject, $email_body, $headers);
?>