text stringlengths 14 5.77M | meta dict | __index_level_0__ int64 0 9.97k ⌀ |
|---|---|---|
require 'shellwords'
class Converter
  # Network helpers for fetching git trees and file contents from GitHub,
  # backed by an on-disk cache keyed by the branch's commit sha.
  module Network
    protected

    # Serializes cache writes from the download threads spawned in #read_files.
    # (Thread.exclusive was deprecated and has been removed in modern Rubies.)
    CACHE_MUTEX = Mutex.new

    # Returns the paths of all blob entries in +tree+ whose path matches
    # +file_re+. By default the tree for +dir+ is fetched from the GitHub API.
    def get_paths_by_type(dir, file_re, tree = get_tree(get_tree_sha(dir)))
      tree['tree'].select { |f| f['type'] == 'blob' && f['path'] =~ file_re }.map { |f| f['path'] }
    end

    # Fetches +files+ (an array of names) located under +path+ in the repo,
    # serving from the local cache where possible and downloading the rest
    # concurrently. Returns a hash of file name => content.
    def read_files(path, files)
      full_path = "https://raw.githubusercontent.com/#@repo/#@branch_sha/#{path}"
      contents = read_cached_files(path, files)
      # Log cache hits only when there actually are any (a bare `if
      # contents.keys` was always truthy, since keys returns an array).
      log_http_get_files contents.keys, full_path, true unless contents.empty?
      files -= contents.keys
      log_http_get_files files, full_path, false
      files.map do |name|
        Thread.start {
          # Kernel#open no longer accepts URLs (Ruby 3.0+); use URI.open.
          contents[name] = URI.open("#{full_path}/#{name}").read
          CACHE_MUTEX.synchronize { write_cached_files path, name => contents[name] }
        }
      end.each(&:join)
      contents
    end

    # Reads any of +files+ already present in the local cache under +path+.
    # Returns a hash of file name => content for the files found.
    def read_cached_files(path, files)
      full_path = "#@cache_path/#@branch_sha/#{path}"
      contents = {}
      if File.directory?(full_path)
        files.each do |name|
          path = "#{full_path}/#{name}"
          # File.exists? was removed in Ruby 3.2; File.exist? is the API.
          contents[name] = File.read(path, mode: 'rb') if File.exist?(path)
        end
      end
      contents
    end

    # Writes +files+ (hash of name => content) into the local cache under
    # +path+, creating directories as needed. Uses the same root as
    # #read_cached_files (the previous "./" prefix broke cache hits whenever
    # @cache_path was an absolute path).
    def write_cached_files(path, files)
      full_path = "#@cache_path/#@branch_sha/#{path}"
      files.each do |name, content|
        FileUtils.mkdir_p File.dirname(File.join(full_path, name))
        File.open("#{full_path}/#{name}", 'wb') { |f| f.write content }
      end
    end

    # Fetches +url+, caching the response body on disk keyed by the URL path.
    # Returns the body as a binary string.
    def get_file(url)
      cache_path = "./#@cache_path#{URI(url).path}"
      FileUtils.mkdir_p File.dirname(cache_path)
      if File.exist?(cache_path)
        log_http_get_file url, true
        File.read(cache_path, mode: 'rb')
      else
        log_http_get_file url, false
        content = URI.open(url).read
        File.open(cache_path, 'wb') { |f| f.write content }
        content
      end
    end

    # Resolves and memoizes the sha of the branch's latest commit, first
    # trying the local git repo, then falling back to `git ls-remote`.
    def get_branch_sha
      @branch_sha ||= begin
        # Escape the branch name everywhere it reaches a shell, consistent
        # with the already-escaped repo URL below.
        if @branch + "\n" == %x[git rev-parse #{Shellwords.escape @branch}]
          @branch
        else
          cmd = "git ls-remote #{Shellwords.escape "https://github.com/#@repo"} #{Shellwords.escape @branch}"
          log cmd
          result = %x[#{cmd}]
          raise 'Could not get branch sha!' unless $?.success? && !result.empty?
          result.split(/\s+/).first
        end
      end
    end

    # Returns the sha of the entry named +dir+ in +tree+ (default: the
    # memoized branch tree). Raises NoMethodError if +dir+ is not present.
    def get_tree_sha(dir, tree = get_trees)
      tree['tree'].find { |t| t['path'] == dir }['sha']
    end

    # Memoized top-level tree for the current branch sha.
    def get_trees
      @trees ||= get_tree(@branch_sha)
    end

    # Fetches the git tree object with the given +sha+ from the GitHub API.
    def get_tree(sha)
      get_json("https://api.github.com/repos/#@repo/git/trees/#{sha}")
    end

    # Fetches +url+ (through the cache) and parses the body as JSON.
    def get_json(url)
      JSON.parse get_file(url)
    end
  end
end
| {
"redpajama_set_name": "RedPajamaGithub"
} | 1,568 |
# Manages photo galleries belonging to either a Dog or a Group ("parent").
# Which parent applies is decided per-request from params (:dog_id vs
# :group_id). Legacy Rails style (before_filter, update_attributes) is kept
# to match the rest of the application.
class GalleriesController < ApplicationController
  before_filter :login_required
  before_filter :get_instance_vars
  before_filter :correct_user_required, :only => [ :new, :create, :edit, :update, :destroy ]
  before_filter :correct_user_for_deletion, :only => [ :destroy ]

  def show
    @body = "galleries"
    @gallery = Gallery.find(params[:id])
    @photos = @gallery.photos.paginate :page => params[:page]
  end

  def index
    @body = "galleries"
    @parent = parent
    @galleries = @parent.galleries.paginate :page => params[:page]
  end

  def new
    @gallery = parent.galleries.new
  end

  def create
    @gallery = parent.galleries.build(params[:gallery])
    respond_to do |format|
      if @gallery.save
        flash[:success] = "Gallery successfully created"
        format.html { redirect_to gallery_path(@gallery) }
      else
        format.html { render :action => "new" }
      end
    end
  end

  def edit
    @gallery = Gallery.find(params[:id])
  end

  def update
    @gallery = Gallery.find(params[:id])
    respond_to do |format|
      if @gallery.update_attributes(params[:gallery])
        flash[:success] = "Gallery successfully updated"
        format.html { redirect_to gallery_path(@gallery) }
      else
        # Re-render the edit form on validation failure (was "new", a
        # copy-paste bug from #create that showed the wrong template).
        format.html { render :action => "edit" }
      end
    end
  end

  def destroy
    # A parent must always keep at least one gallery.
    if parent.galleries.count == 1
      flash[:error] = "You can't delete the final gallery"
    elsif parent.galleries.find(params[:id]).destroy
      flash[:success] = "Gallery successfully deleted"
    else
      flash[:error] = "Gallery could not be deleted"
    end
    respond_to do |format|
      format.html { redirect_to parent_galleries_path }
    end
  end

  private

  # Blocks modification unless the current person owns the dog, or owns /
  # is an accepted member of the group.
  def correct_user_required
    if dog? && !current_person.dogs.include?(@dog)
      flash[:error] = "You are not the owner of this dog"
      redirect_to dog_galleries_path(@dog)
    elsif group? && !( @group.owner?(current_person) || Membership.accepted_by_person?(current_person, @group))
      flash[:error] = "You are not authorized to modify this group's galleries"
      redirect_to group_galleries_path(@group)
    end
  end

  # Only the gallery's own person may delete it.
  def correct_user_for_deletion
    @gallery = Gallery.find(params[:id])
    if @gallery.person != current_person
      flash[:error] = "You are not authorized to delete this gallery"
      redirect_to parent_galleries_path
    end
  end

  # Loads @dog or @group depending on which parent id is present.
  def get_instance_vars
    if dog?
      @dog = Dog.find(params[:dog_id])
    elsif group?
      @group = Group.find(params[:group_id])
    end
  end

  # Index path for the current parent's galleries.
  def parent_galleries_path
    if dog?
      dog_galleries_path(parent)
    elsif group?
      group_galleries_path(parent)
    end
  end

  # The owning record (@dog or @group) for this request.
  def parent
    if dog?
      @dog
    elsif group?
      @group
    end
  end

  def dog?
    !params[:dog_id].nil?
  end

  def group?
    !params[:group_id].nil?
  end
end
| {
"redpajama_set_name": "RedPajamaGithub"
} | 4,428 |
{"url":"https:\/\/topanswers.xyz\/tex?q=1734","text":"\u0928\u093f\u0930\u0902\u091c\u0928\nTry this MWE -\n\n\n\\documentclass{article}\n\\newif\\iftest\n\\AtEndDocument{%\n\\iftest\nHello world!\n\\else\nNot working :(\n\\fi\n}%\n\n\\begin{document}\n{\\testtrue}\n\\end{document}\n\n\nI can see that as \\testtrue is enclosed in a group, the final test done at the end of the document doesn't work, but I want it to work. So how to set a conditional to true value globally? i.e. even outside the local group.\nSkillmon\nJust for educational purposes, it is also possible to smuggle a conditional out of the current group using the primitive \\aftergroup. The result is not a global assignment, but just a local assignment that escapes the current group level to the next one.\n\nAnd again for educational purposes, also a loop that will smuggle the conditional to the top level (being a pretty slow version of global assignments; as I said this is for educational purposes, while the single level smuggling can be useful, this global smuggling should've been a global assignment from the start).\n\n\n\\documentclass[]{article}\n\n\\newif\\iftest\n\n\\newcommand\\evaltest{\\iftest Hello world!\\else Not working :(\\fi}\n\n\\newcommand\\smugglebool[1]\n{%\n\\ifnum\\currentgrouplevel>0\n\\expandafter\\aftergroup\n\\csname #1\\csname if#1\\endcsname true\\else false\\fi\\endcsname\n\\fi\n}\n\n\\makeatletter\n\\newcommand\\smuggleboolglobal[1]\n{%\n\\ifnum\\currentgrouplevel>\\z@\n\\aftergroup\\@smuggleboolglobal\n\\expandafter\\aftergroup\n\\csname #1\\csname if#1\\endcsname true\\else 
false\\fi\\endcsname\n\\fi\n}\n\\newcommand\\@smuggleboolglobal[1]\n{%\n#1%\n\\ifnum\\currentgrouplevel>\\z@\n\\aftergroup\\@smuggleboolglobal\n\\aftergroup#1%\n\\fi\n}\n\\makeatother\n\n\\begin{document}\n\\evaltest\n\n{\\testtrue\\smugglebool{test}}\\evaltest\n\n{{\\testfalse\\smugglebool{test}}}\\evaltest\n\n{{\\testfalse\\smuggleboolglobal{test}}}\\evaltest\n\\end{document}\n\n\u0928\u093f\u0930\u0902\u091c\u0928\nJust after posting the question, I found an answer here <https:\/\/tug.org\/pipermail\/texhax\/2014-November\/021434.html>.\n\n\n\\documentclass{article}\n\\newif\\iftest\n\\AtEndDocument{%\n\\iftest\nHello world!\n\\else\nNot working :(\n\\fi\n}%\n\n\\begin{document}\n{\\global\\testtrue}\n\\end{document}\n\n\nI still welcome alternative approaches. Don't hesitate to post more answers :)\n\nEnter question or answer id or url (and optionally further answer ids\/urls from the same question) from\n\nSeparate each id\/url with a space. No need to list your own answers; they will be imported automatically.","date":"2023-02-01 14:57:13","metadata":"{\"extraction_info\": {\"found_math\": true, \"script_math_tex\": 0, \"script_math_asciimath\": 0, \"math_annotations\": 0, \"math_alttext\": 0, \"mathml\": 0, \"mathjax_tag\": 0, \"mathjax_inline_tex\": 0, \"mathjax_display_tex\": 0, \"mathjax_asciimath\": 1, \"img_math\": 0, \"codecogs_latex\": 0, \"wp_latex\": 0, \"mimetex.cgi\": 0, \"\/images\/math\/codecogs\": 0, \"mathtex.cgi\": 0, \"katex\": 0, \"math-container\": 0, \"wp-katex-eq\": 0, \"align\": 0, \"equation\": 0, \"x-ck12\": 0, \"texerror\": 0, \"math_score\": 0.6249949932098389, \"perplexity\": 2332.6785387803084}, \"config\": {\"markdown_headings\": true, \"markdown_code\": true, \"boilerplate_config\": {\"ratio_threshold\": 0.18, \"absolute_threshold\": 10, \"end_threshold\": 15, \"enable\": true}, \"remove_buttons\": true, \"remove_image_figures\": true, \"remove_link_clusters\": true, \"table_config\": {\"min_rows\": 2, \"min_cols\": 3, 
\"format\": \"plain\"}, \"remove_chinese\": true, \"remove_edit_buttons\": true, \"extract_latex\": true}, \"warc_path\": \"s3:\/\/commoncrawl\/crawl-data\/CC-MAIN-2023-06\/segments\/1674764499946.80\/warc\/CC-MAIN-20230201144459-20230201174459-00719.warc.gz\"}"} | null | null |
# Mathematics for Engineers and Technologists
Huw Fox
Bill Bolton
# Table of Contents
Cover image
Title page
Copyright
Series Preface
Introduction: why mathematics?
Chapter 1: Functions
1.1 Introduction to functions
1.2 Linear functions
1.3 Quadratic functions
1.4 Inverse functions
1.5 Circular functions
Problems 1.5
1.6 Exponential functions
1.7 Log functions
Problems 1.7
1.8 Hyperbolic functions
Problems 1.8
Chapter 2: Vectors, phasors and complex numbers
2.1 Vectors
2.2 Phasors
2.3 Complex numbers
Problems 2.3
Chapter 3: Mathematical models
3.1 Modelling
3.2 Relating models and data
Chapter 4: Calculus
4.1 Differentiation
Problems 4.1
4.2 Integration
Problems 4.2
Chapter 5: Differential equations
5.1 Differential equations
5.2 First-order differential equations
5.3 Second-order differential equations
Chapter 6: Laplace transform
6.1 The Laplace transform
6.2 Solving differential equations
6.3 Transfer function
Chapter 7: Sequences and series
7.1 Sequences and series
7.2 Fourier series
Chapter 8: Logic gates
8.1 Logic gates
8.2 Boolean algebra
8.3 Logic gate systems
Chapter 9: Probability and statistics
9.1 Probability
9.2 Distributions
9.3 Experimental errors
Solutions to problems
Index
# Copyright
Butterworth-Heinemann
An imprint of Elsevier Science
Linacre House, Jordan Hill, Oxford, OX2 8DP
225 Wildwood Avenue, Woburn, MA 01801-2041
First published 2002
Copyright © 2002, Huw Fox and Bill Bolton. All rights reserved
The right of Huw Fox and Bill Bolton to be identified as the authors of this work has been asserted in accordance with the Copyright, Designs and Patents Act 1988
No part of this publication may be reproduced in any material form (including photocopying or storing in any medium by electronic means and whether or not transiently or incidentally to some other use of this publication) without the written permission of the copyright holder except in accordance with the provisions of the Copyright Licensing Agency Ltd, 90 Tottenham Court Road, London, England W1T 4LP. Applications for the copyright holder's written permission to reproduce any part of this publication should be addressed to the publishers
**British Library Cataloguing in Publication Data**
A catalogue record for this book is available from the British Library
ISBN 0 7506 5544 5
For information on all Butterworth-Heinemann publications visit our website at www.bh.com
Printed and bound in Great Britain
# Series Preface
'There is a time for all things: for shouting, for gentle speaking, for silence; for the washing of pots and the writing of books. Let now the pots go black, and set to work. It is hard to make a beginning, but it must be done' – Oliver Heaviside, _Electromagnetic Theory_ , Vol 3 (1912), Ch. 9 'Waves from moving sources – Adagio, andante, Allegro Moderato.'
Oliver Heaviside was one of the greatest engineers of all time, ranking alongside Faraday and Maxwell in his field. As can be seen from the above excerpt from a seminal work, he appreciated the need to communicate to a wider audience. He also offered the advice 'So be rigorous: that will cover a multitude of sins. And do not frown.' The series of books that this prefaces takes up Heaviside's challenge but in a world which is quite different to that being experienced just a century ago.
With the vast range of books already available covering many of the topics developed in this series, what is this series offering which is unique? I hope the next few paragraphs help to answer that; certainly no one involved in this project would give up their time to bring these books to fruition if they had not thought that the series is both unique and valuable.
The motivation for this series of books was born out of the desire of the UK's Engineering Council to increase the number of incorporated engineers graduating from Higher Education establishments, and the Institution of Incorporated Engineers' (IIE) aim to provide enhanced services to those delivering Incorporated Engineering Courses. However, what has emerged from the project should prove of great value to a very wide range of courses within the UK and internationally – from Foundation Degrees or Higher Nationals through to first year modules for traditional 'Chartered' degree courses. The reason why these books will appeal to such a wide audience is that they present the core subject areas for engineering studies in a lively, student-centred way, with key theory delivered in real world contexts, and a pedagogical structure that supports independent learning and classroom use.
Despite the apparent waxing of 'new' technologies and the waning of 'old' technologies, engineering is still fundamental to wealth creation. Sitting alongside these are the new business focused, information and communication dominated, technology organisations. Both facets have an equal importance in the health of a nation and the prospects of individuals. In preparing this series of books, we have tried to strike a balance between traditional engineering and developing technology.
The philosophy is to provide a series of complementary texts which can be tailored to the actual courses being run – allowing the flexibility for course designers to take into account 'local' issues, such as areas of particular staff expertise and interest, while being able to demonstrate the depth and breadth of course material referenced to a common framework. The series is designed to cover material in the core texts which approximately corresponds to the first year of study with module texts focusing on individual topics to second and final year level. While the general structure of each of the texts is common, the styles are quite different, reflecting best practice in their areas.
Another set of factors which we have taken into account in designing this series is the reduction in contact hours between staff and students, the evolving responsibilities of both parties and the way in which advances in technology are changing the way study can be, and is, undertaken. As a result, the lecturers' support material which accompanies these texts, is paramount to delivering maximum benefit to the student.
It is with these thoughts of Voltaire that I leave the reader to embark on the rigours of study:
'Work banishes those three great evils: boredom, vice and poverty'
Alistair Duffy
Series Editor
De Montfort University, Leicester, UK
Further information on the IIE Textbook Series is available from bhmarketing@repp.co.uk
www.bh.com/iie
Please send book proposals to:rachel.hudson@repp.co.uk
**Other titles currently available in the IIE Textbook Series**
_Mechanical Engineering Systems_ | 0 7506 5213 6
---|---
_Business Skills for Engineers and Technologies_ | 0 7506 5211 1
_Design Engineering_ | 0 7506 5211 X
_Technology of Engineering Materials_ | 0 7506 5643 3
_Systems for Planning and Control_ |
_in Manufacturing_ | 0 7506 4977 1
# Introduction: why mathematics?
Mathematics is an essential tool for the engineer and technologist. As an illustration, consider a number of simple situations:
• _**A beam**_
A uniform horizontal beam rests on supports at each end and loads placed at its mid point. How does the deflection of the beam from the horizontal at its mid point depend on the applied load? Can we develop a mathematical relationship which will enable us to predict the deflection for a given load? We might need such a relationship in order to be able to consider the design of a simple plank bridge across a stream, or the elements in a more complex truss bridge.
See **Chapter 1** for a discussion of mathematical relationships; when one quantity is dependent on another it is said to be a function of it.
We might develop such a relationship by conducting an experiment in which we measure the deflections for a number of loads and so develop an empirical relationship which fits the results. This could involve plotting a graph between the force and deflection and from the 'shape' of the graph determining a relationship. This requires an understanding of graphs and, in particular, straight line graphs. If the graph between two quantities is not a straight line then engineers use 'tricks' to persuade the graph to become straight line because straight line graphs enable relationships to be most easily discerned.
See **Chapter 3** for the determination of relationships from graphs by 'persuading' them to become straight line graphs.
We might, however, develop the relationship from a consideration of how beams behave when subject to loads and so end up with a more general relationship which we can apply to other beams. In developing such a relationship we would use algebra, i.e. the quantities such as force and deflection are represented by letters such as _F_ and _y_ , and so we need to be able to manipulate algebraic expressions. In fact, the basic expression for elasticity involves a differential equation, i.e. an equation involving terms concerned with rates of change, and so to derive the relationship for the deflection we need to be able to solve such an equation.
See **Chapters 4** and **5** for a discussion of calculus and the solution of differential equations.
• _**An oscillation**_
A loaded vertical spring is set in oscillation; what are the factors determining the frequency of the oscillation? We might carry out an experiment involving different loads on a spring, and also try a number of different springs. The relationship between frequency and load can be determined by plotting a graph. However, if we just plot frequency against load we obtain a non-linear graph and it is not easy to see the relationship. If we plot the logarithm of the frequency against the logarithm of the load then a straight line graph is obtained and the relationship can easily be discerned.
See **Chapter 1** for a discussion of logarithms and log graphs.
See **Chapter 1** for an introduction to phasors and **Chapter 2** for a more detailed consideration.
Alternatively, a more general relationship might be found from the theory of oscillations. We might develop the theory by considering a model of oscillations in which the variation of displacement with time is represented by how the vertical height of a rotating radius varies with time when the radius rotates with a constant angular velocity.
See **Chapter 6** for a discussion of how oscillations can be represented by differential equations and **Chapter 7** for how we can use the Laplace transforms to simplify the handling of such equations.
The relationships devised for this simple system of an oscillating spring can, however, provide a basic introduction to the consideration of much more complex oscillations.
• _**An a.c. electrical circuit**_
A simple electrical circuit is set up with an a.c. voltage being applied to a series circuit of a capacitor and a resistor. To develop a theory which enables the circuit current to be determined by different values of capacitance and resistance and also for different frequency alternating currents, an approach based on a consideration of phasors is generally used. A phasor is used to model in its length the amplitude or root-mean-square value of a voltage or current and by its initial angle the phase. By using such models, the analysis of a.c. circuits is simplified and we do not need differential equations. The phasors can be drawn and added or subtracted graphically. However, an algebraic method is to use complex numbers.
See **Chapter 1** for an introduction to phasors and **Chapter 2** for a more detailed consideration.
See **Chapter 7** for a discussion of the Fourier series.
We might have the complication with such a circuit that the alternating voltage is not sinusoidal. In such a case we can deal with the circuit analysis by representing the signal as a series of sinusoidal terms.
• _**Programmable logic controller (PLC)**_
PLCs are widely used for control systems. Such controllers can be easily programmed to carry out such operations as switching on motors when sensor A and sensor B both give ON signals or perhaps when either sensor A or sensor B gives an ON signal. Thus a central heating system controller needs to switch the pump motor on when either the temperature sensor on the hot water tank gives an ON signal or the room temperature sensor gives an ON signal. Such control systems require a consideration of basic logic systems.
See **Chapter 8** for a discussion of logic gates.
• _**Measurements**_
Engineering involves making measurements. With any measurement there is inevitably some associated error. To estimate and handle such errors in calculations based on the measurements, we need an understanding of basic statistics.
See **Chapter 9** for a discussion of statistics and the handling of errors.
In considering the elasticity of a beam we form mathematical models of the real world situation. Thus, in the case of the beam, we make a number of simplifying assumptions, such as the deflections are small, the beam is thin and of uniform cross-section. With such simplifications we can produce a model. Real beams may behave differently because the assumptions are not valid but our mathematical model provides working relationship. In some situations we develop what are obviously models of systems. For example, we might represent the behaviour of a car suspension and wheel as a mass with a spring and a damper. In considering a motor we might be able to represent it by the model of an inductor in series with a resistor and a source of e.m.f. for the back e.m.f. of the motor.
See **Chapter 3** for an introductory discussion of mathematical models.
As the above examples illustrate, we need mathematics to be able to solve engineering problems. Mathematics, however, is not a tool that you can pick up and use without an understanding of the principles behind its development and its limitations. Thus, in this book, the principles behind the mathematics are explored and the book is more than just a collection of 'cookbook techniques' which can be used for particular situations. Such a 'cookbook' approach presents problems if you encounter, as engineers inevitably do, a new situation. The aim of this book is:
_To enable the reader to understand the principles of the mathematics and acquire the ability to use it in engineering._
With that aim in mind, we hope you will enjoy the book and wish you well in your studies.
1
# Functions
Summary
Engineers, whether electrical, electronic, mechatronic or mechanical, are concerned with expressing relationships between physical quantities clearly and unambiguously. This might be the relationship between the displacement of an oscillating object and time, or perhaps the amplitude of an a.c. voltage and time. This chapter is about how we can represent such relationships in mathematical terms, taking the opportunity to revise some basic mathematics in the process. This does not mean that it is not important to clearly explain in words what relationships there are between quantities but rather to supplement the written word by using a system that is both clear, unambiguous, and internationally understood, thereby removing the possibility of misinterpretation.
Objectives
By the end of this chapter, the reader should be able to:
• understand the concept of a function for relating quantities within engineering disciplines;
• use functions, and their notation, to describe relationships;
• manipulate and evaluate algebraically simple function expressions, including inverses;
• use graphs to express functions;
• express cyclic functions in terms of sine and cosine functions;
• know, and use, the relationships between sine, cosine and tangent ratios;
• describe waveforms in the general format _R_ sin ( _ωt_ ± α);
• use exponential and logarithmic functions;
• use hyperbolic functions.
## 1.1 Introduction to functions
As you embark on studying engineering, whether electrical, electronic, mechatronic or mechanical, it will become apparent that equations are used to describe relationships between physical quantities and are more clear and unambiguous than the written word on its own. This does not mean that it is not important to clearly explain relationships in words but rather there is a need to supplement the written word by using equations. In any discussion of relationships between physical quantities, the term function is likely to be encountered. So what is a function?
Key point
Quantities which vary, like the force and extension in the case of the spring system, are called _variables_ and the _dependent variable_ , the extension in this case, is the one whose values depend on the values of the _independent variable_ or _argument_ , in this case the force.
Key point
A function is a relationship which has for each value of the independent variable a unique value of the dependent variable.
#### So what is a function?
Let's commence our explanation of the term _function_ by discussing a simple example.
We are all familiar with springs, whether they are those we find inside some pens, the springs holding the tremolo block in position on a classic Fender stratocaster guitar or the suspension springs on a car. Consider, therefore, a simple vertical spring dangling from a clamp with a mass pan attached to its lower end (Figure 1.1). Using such a system we can perform a simple experiment, adding masses to the pan and determining the relationship between the force exerted by the masses and the resulting extension of the spring. The extension is measured from the datum line of the system when in stable equilibrium before we start adding masses and recording extensions. The results of such an experiment might be of the form shown in Table 1.1.
Table 1.1
Force-extension values for simple spring example
Figure 1.1 Simple spring with mass pan, the figure indicating the two physical quantities, namely the applied force and the resulting extension, that we are interested in determining the relationship between. Note, adding masses of 120 g results in force increments of about 1.2 N.
From Table 1.1, we can assume that there is some relationship or connection between the values of the force _F_ acting on the spring and the corresponding observed extension _e_ values. We can say:
[1]
Statement [1] is an inferred relationship between an observed measurement _e_ and a varied quantity, in this case the applied force _F_. We can therefore call _F_ the _independent_ quantity, sometimes referred to as the _argument_ , from which a _dependent_ result is obtained. We can restate statement [1] as:
[2]
A _function_ is a relationship which has for each value of the independent variable a unique value of the dependent variable. Statement [2] can be written as:
This means exactly the same thing as statement [2] but is just easier and more concise to write. The ' _f_ ' simply is shorthand for 'function of'. Note that _f_ ( _F_ ) does not mean a variable _f_ multiplied by _F_. When we are dealing with a number of different functions it is customary to use different letters for the function label, e.g. we might use _y_ = _f_ ( _x_ ) and _z_ = _g_ ( _x_ ).
Key points
With _y_ = _f_ ( _x_ ) we have each value of _y_ associated with an _x_ value. We can plot such data points on a graph having _x_ and _y_ axes, the _y_ -axis running vertically and the _x_ -axis running horizontally and at 90° to the _y_ -axis (Figure 1.2). The point of intersection of the two axes is called the _origin_ ; at this point _y_ has the value 0 and _x_ = 0. Values of _x_ which are positive are plotted to the right of the origin, negative values to the left. Positive values of _y_ are plotted upwards from the origin, negative values downwards.
Figure 1.2 Graph axes
#### Tables, graphs and equations to define functions
The statement _e_ = _f_ ( _F_ ) merely tells us that the extension _e_ is a function of the applied force _F_ and does not describe the actual relationship between them. The data in Table 1.1 is one way we describe the relationship. If we know the force is 3.6 N then the extension must be 0.03 m.
However, a pattern can be seen from an inspection of the results in Table 1.1: if we double the applied force then the resulting extension doubles, if we treble the applied force the extension trebles and if we quadruple the applied force the extension quadruples. We can say, at least over the range of values we have observed:
and we can write this as:
[3]
The symbol ∝ simply means (or is shorthand for) 'proportional to'.
We can also see how the extension depends on the applied force by plotting a graph. If we plot a graph of the force values in Table 1.1 against the corresponding extension values we obtain the straight line graph shown in Figure 1.3. The straight line passes through the origin. This is a characteristic of relationships when one quantity is directly proportional to the other. The graph is thus one way of defining the functional relationship.
Figure 1.3 Force–extension graph for the spring experiment values given in Table 1.1
We can use the graph in Figure 1.3 to determine an equation relating the extension to the applied force and so give another way of defining the functional relationship. The graph is a straight line and so has a constant slope (or gradient), the slope being defined in the same way as we define the slope of a road, i.e. as the change in vertical height of the line over a given horizontal distance (Figure 1.4). We can compute the slope by choosing a pair of values of force and extension, e.g. force 6.0 N, extension 0.05 m; force 1.2 N, extension 0.01 m, and so obtain:
Since the straight line passes through the origin, this tells us that the force in newtons is, over the interval that has been considered, 120 times the extension in metres. We can write this as:
[4]
The constant term, i.e. the 120 N/m, is called the _constant of proportionality_ linking _F_ to the corresponding _e_ values.
Figure 1.4 Generalised straight line graph with the slope being
To summarise: we can define the functional relationship between two variables by:
• a set of results (as in Table 1.1),
• a graph (as in Figure 1.3),
• an equation (as in equation [4]).
Note that to define a function _y_ = _f_ ( _x_ ) completely, we must define the range of values of _x_ over which the definition is true. This is called the _domain_ of the function.
Example
If _y_ is a function of _x_ and the relationship is defined by the equation _y_ = 4 _x_ 2, what is the value of _y_ when _x_ = 2?
We have:
and so:
Therefore, _y_ = 16 when _x_ = 2. Substituting _x_ = 2 into the original functional equation, we can write:
Example
If _y_ is a function of _x_ and the relationship is defined by the equation _y_ = 12 _x_ 2 \+ 3 _x_ \+ 6, what is the value of _y_ when _x_ = 1?
We have:
and so:
Therefore, _y_ = 21 when _x_ = 1. Substituting _x_ = 1 into the original functional equation, we can write:
#### Equations and functions
Functions may, as mentioned earlier, be defined using equations. Equations give the instructions for calculating the dependent variable of functions for values of the independent variable. For example, for an ohmic resistor the potential difference _V_ across it is a function of the current _I_ through it, i.e.
The equation defining the functional relationship (Ohm's law) is _V_ = _RI_ , where _R_ is the constant of proportionality connecting the variable _V_ with the variable _I_ , thereby defining their unique relationship. So given a value for the current we can use the equation to obtain a value of the potential difference. Thus, when _R_ = 10 Ω and _I_ = 2 A we have:
For an object freely falling from rest, the distance fallen _s_ is a function of the time _t_ for which it has been falling, i.e. _s_ = _f_ ( _t_ ). The defining equation is _s_ = ½ _at_ 2, where _a_ , the acceleration, is a constant. The acceleration is the acceleration due to gravity _g_ and so we can write the defining equation as _s_ = ½ _gt_ 2. Thus, given a value for the time we can use the equation to obtain a value for the distance fallen. If we assume that _g_ has a value of 9.8 m/s2, then for a time of 3 s;
Note that a function may be defined by several equations, with each giving the instructions for calculating the dependent variable for different values of the independent variable. For example, for the voltage signal shown in Figure 1.5, a so-called _step voltage_ , we have _v_ = _f_ ( _t_ ) and the relationship
Figure 1.5 Step voltage
The value of _v_ = _f_ (1) is thus 0, while the value of _v_ = _f_ (4) is 2 V.
Functions may of course get quite complex! For example, the natural frequency of transverse vibration of a cantilever is described by the equation:
The frequency of the cantilever is a function of _E, I, m_ and _L_. So, in functional notation we can write:
Example
If we have _y_ as a function of _x_ and described by the relationship _y_ = _x_ 2, what are the values of (a) _f_ (0), (b) _f_ (2)?
(a) The function is described by _y_ = _f_ ( _x_ ) = _x_ 2. Thus _f_ (0) is the value of the function when _x_ = 0 and so is 0.
(b) _f_ (2) is the value of the function when _x_ = 2 and so is 4.
Example
Determine the values of (a) _f_ (2), (b) _f_ (4) if we have _y_ as a function of _x_ and defined by:
(a) The value of the function at _x_ = 2 is given by the first relationship as 1.
(b) The value of the function at _x_ = 4 is given by the second relationship as _y_ = 2(4 − 3) + 1 = 3.
### 1.1.1 Combinations of functions
Many of the functions encountered in engineering and science can be considered to be combinations of other functions. Suppose we have the function We can think of the function _f_ ( _x_ ) as resulting from the combination of two functions _g_ ( _x_ ) and _h_ ( _x_ ). One of the functions takes an input of _x_ and gives an output of _x_ 2 and the other takes an input of _x_ and gives an output of 2 _x_. The two outputs are then added and we have _f_ ( _x_ ) = _g_ ( _x_ ) + _h_ ( _x_ ). Figure 1.6 illustrates this.
Figure 1.6 Combination of functions g(x) and h(x)
Another way we can combine functions is by applying them in sequence. For example, if we have _h_ ( _x_ ) = 2 _x_ and _g_ ( _x_ ) = _x_ 2, then suppose we have the arrangement shown in Figure 1.7. The input of _x_ to the _g_ function box results in an output of _x_ 2. The _h_ function box takes its input and doubles it. Thus for an input of _x_ 2 we have an output of 2 _x_ 2, thus _f_ ( _x_ ) = _h_ { _g_ ( _x_ )} = 2 _x_ 2. Note that the order of the function boxes is important.
Figure 1.7 Combination of functions to give the function f(x)
### Problems 1.1
1. If we have _y_ as a function of _x_ and defined by the equation _y_ = 2 _x_ \+ 3, what are the values of (a) _f_ (0), (b) _f_ (1)?
2. If we have _y_ as a function of _x_ and defined by the equation _y_ = _x_ 2 \+ _x_ , what are the values of (a) _f_ (0), (b) _f_ (2)?
3. If _y_ is a function of _x_ defined by the following equations, find the values of _f_ (0) and _f_ (1).
(a) _y_ = _x_ 2 \+ 3,
(b) _y_ = _x_ \+ 4,
(c) _y_ = ( _x_ \+ 1)2 − 3
4. Determine the values of (a) _f_ (0.5), (b) _f_ (2) if we have _y_ as a function of _x_ and defined by:
5. Determine the values of (a) _f_ (1) (b) _f_ (3) if we have _y_ as a function of _t_ and defined by:
6. The voltage in an electrical circuit is supplied by a constant voltage source of 10 V. If the voltage is switched on after time _t_ = 2 s, state the equations defining the step voltage at any time _t_.
7. Sketch the periodic waveform described by the following equations:
8. The period of oscillation _T_ of a simple pendulum is a function of the length _L_ of the pendulum, being defined by the equation
where _g_ is the acceleration due to gravity. What are the values of (a) _f_ (1), (b) _f_ (10) if _g_ can be taken as 10 m/s2?
9. The velocity _v_ in metres per second of a moving object is a function of the time _t_ in seconds, being defined by v = 2 + 5 _t_. What are the values of (a) _f_ (0), (b) _f_ (1)?
10. If _g_ (x) = 2 _x_ and _h_ ( _x_ ) = _x_ \+ 1, what are (a) _g_ ( _x_ ) + _h_ ( _x_ ), (b) _g_ { _h_ ( _x_ )}, (c) _h_ { _g_ ( _x_ )}?
11. If _f_ ( _x_ ) = _x_ 2 \+ 1, _g_ ( _x_ ) = 3 _x_ and _h_ ( _x_ ) = 3 _x_ \+ 2, determine: (a) _f_ ( _x_ ) + _g_ ( _x_ ), (b) _f_ { _g_ ( _x_ )}, (c) _g_ { _f_ (x)}, (d) _f_ ( _x_ ) − _h_ ( _x_ ), (e) _f_ {h( _x_ )}.
## 1.2 Linear functions
Key points
Linear functions are ones that provide a linear or straight line relationship between two variables when plotted as a conventional graph of one variable against the other. They can be defined by the general equation _y_ = _mx_ \+ _c_ , where _m_ is the gradient and _c_ the intercept with the _y_ axis.
This section is about a form of functions that is very commonly encountered in engineering, namely linear functions. Quite simply, linear functions are ones that provide a linear or straight line relationship between two variables when plotted as a conventional graph of one variable against the other.
The potential difference _V_ across a resistor is a function of the current _I_ through it. If the resistor obeys Ohm's law then _V_ = _RI_ , the potential difference is proportional to the current. If the current is doubled then the potential difference is doubled, if the current is trebled the potential difference is trebled. This means that a graph of _V_ plotted against _I_ is a straight line graph passing through the origin. _Gradient_ is defined as the change in _y_ value divided by the change in _x_ value. Thus, for all straight line graphs passing through the origin (Figure 1.8), the gradient is constant and given by gradient _m_ = _y/x_. Hence the equation of such a straight line is of the form:
[5]
where _m_ is the gradient of the line. _Only_ when we have such a relationship is _y_ directly proportional to _x_.
Figure 1.8 Straight line graph with y directly proportional to x; the straight line thus passes through the origin
Straight line graphs which do not pass through the origin (Figure 1.9) have a gradient, change in _y_ value divided by change in _x_ value, given by _m_ = ( _y_ − _c_ )/ _x_ , where _c_ is the value of _y_ when _x_ = 0, i.e. the intercept of the straight line with the _y_ -axis. Thus, such lines have the equation:
[6]
This is the equation which defines a straight line and is termed a _linear equation_. It is important to realise that with _c_ ≠ 0 that _y_ is _not proportional_ to _x_.
Figure 1.9 Straight line graph not passing through the origin
The gradient _m_ of a straight line graph may be positive or negative. The gradient may also have a value of zero and this is a line parallel to the _x_ -axis. The intercept _c_ may be positive or negative, or zero.
Example
State the gradients and intercepts of the graphs of the following equations: (a) _y_ = 2 _x_ \+ 3, (b) _y_ = 2 − _x_ , (c) _y_ = _x_ − 2.
(a) This has a gradient of +2 and an intercept with the _y_ -axis of +3. A positive gradient means that _y_ increases as _x_ increases.
(b) This has a gradient of −1 and an intercept with the _y_ -axis of +2. A negative gradient means that _y_ decreases as _x_ increases.
(c) This has a gradient of +1 and an intercept with the _y_ -axis of −2.
Example
During a test to find how the power of a CNC lathe varied with depth of cut, the following results were obtained:
Use a graph to show that the function connecting the quantities _d_ and _P_ is of the form _y_ = _mx_ \+ _c_. Use this function to calculate the depth of cut when the power is 1 W.
Figure 1.10 shows the graph with _P_ on the _y_ -axis and _d_ on the _x_ -axis. The graph line represents the line of best fit through all the points and may therefore be prone to some error. Because it is a straight line, the function is of the form _y_ = _mx_ \+ _c_ and so we have:
The slope _m_ of the graph is about 0.27 and the point where the line would intercept the _P_ axis when _d_ = 0 is about 0.76. The function is thus:
We can check the integrity of the above equation by substituting values from the table of observed results, say _d_ = 2.03 mm. This gives:
If we now refer back to the table of results we see that this is close to what is given there, i.e. 1.32 W.
Figure 1.10 Graph of power– depth of cut
To finally answer the question regarding the depth of cut to be expected when the power is 1 W:
Hence _d_ is about 0.89 mm.
Example
A pressure measurement system using a piezoelectric transducer is set up as represented by the system diagram of Figure 1.11. As the input pressure signal is altered, corresponding output readings are taken off the oscilloscope screen and the results shown below were obtained:
Use a graph to show that the function connecting the quantities output voltage θo to the pressure θi is of the form _y_ = _mx_ \+ _c_. The static sensitivity of such a measurement system may be defined as the change in output signal divided by the change in the corresponding input signal. Determine the static sensitivity.
Figure 1.11 Pressure measurement system
Figure 1.12 shows the graph obtained by plotting the above values. The graph line represents the line of best fit through all the points and may therefore be prone to some error. Because it is a straight line, the function is of the form _y_ = _mx_ \+ _c_ and so we have:
From the graph, the approximate slope _m_ is 350/5.5 = 63.6 mV/bar. The line passes through the origin and so _c_ = 0. Hence:
The static sensitivity _K_ is:
The symbol Δ in front of a quantity is used to indicate an increment of that quantity. But this is just the slope of the graph and so the static sensitivity is 63.6 mV/bar.
Figure 1.12 Graph of output voltage-input pressure for pressure measurement system
### Problems 1.2
1. State which of the following will give a straight line graph and, if so, whether it passes through the origin:
(a) A graph of the extension of a spring plotted against the applied load when the extension is proportional to the applied load.
(b) A graph of the resistance _R_ of a length of resistance wire plotted against the temperature _t_ when _R_ = _R_ 0(1 + _at_ ), with _R_ 0 and _a_ being constants.
(c) A graph of the distance _d_ travelled by a car plotted against time _t_ when _d_ = 10 + 4 _t_ 2.
(d) A graph is plotted of the pressure _p_ of a gas against its volume _v_ , the pressure being related to the volume by Boyle's law, i.e. _pv_ = a constant.
2. Determine the straight line equations for the following data if linear functional relationships can be assumed:
(a) The current _i_ and time _t_ over a period of time if at the beginning of the time we have _i_ = 2 A and _t_ = 0 s and at the end we have _i_ = 3 A and _t_ = 2 s.
(b) The extension e of a strip of material as a function of its length _L_ when subject to constant stress, given that:
## 1.3 Quadratic functions
Key point
Quadratic functions have defining equations in which the highest power of the variable is 2.
A _linear function_ is one where the equation defining the function is of the form _y_ = _mx_ \+ _c_. The highest power of a variable is 1. This is only one type of function. Here we look at another form, the quadratic function, and examine its defining equation.
The term _quadratic function_ is used for a function _y_ = _f_ ( _x_ ) where the defining equation has the general form:
[7]
where _a, b_ and _c_ are constants. The highest power of the variable is 2.
Quadratic equations occur often in engineering. An example of such an equation in engineering occurs with the e.m.f. _E_ of a thermocouple which can often be described by:
where _t_ is the temperature and _a_ and _b_ are constants. Other examples occur in the relationships for the bending moment _M_ for bending beams, such as that for a cantilever propped at its free end:
where _x_ is the distance from the free end of a cantilever of length _L_ and _w_ the distributed load per unit length.
The linear equation and the quadratic equation are just two examples of what are termed _polynomials_. A polynomial is the term used for any equation involving powers of the variable which are positive integers. Such powers can be 1, 2, 3, 4, 5, etc. For example, _x_ 4 \+ 4 _x_ 3 \+ 2 _x_ 2 \+ 5 _x_ \+ 2 = 0 is a polynomial with the highest power being 4.
### 1.3.1 Factors and roots
Key point
We can solve a quadratic equation by:
1. Factoring the quadratic.
2. Setting each factor equal to 0.
3. Solving the resulting linear equations.
To factorise a number means to write it as the product of smaller numbers. Thus, for example, we can factor 12 to give 12 = 3 × 4. The 3 and 4 are factors of 12. If the 3 and the 4 are multiplied together then 12 is obtained. To _factorise a polynomial_ means to write it as the product of simpler polynomials. Thus for the quadratic expression _x_ 2 \+ 5 _x_ \+ 6 we can write:
( _x_ \+ 2) and ( _x_ \+ 3) are factors. If the two factors are multiplied together then the _x_ 2 \+ 5 _x_ \+ 6 is obtained. Note that, in general:
[8]
If we have _u_ × _v_ = 0 then we must have either _u_ = 0 or _v_ = 0 or both _u_ and _v_ are 0. This is because 0 times any number is 0. Thus if we have the quadratic equation _x_ 2 \+ 5 _x_ \+ 6 = 0 and rewrite it as ( _x_ \+ 2)( _x_ \+ 3) = 0, then we must have either _x_ \+ 2 = 0, or _x_ \+ 3 = 0 or both equal to 0. This means that the solutions to the quadratic equation are the solutions of these two linear equations, i.e. _x_ = −2 and _x_ = −3. These values are called the _roots_ of the equation. We can check these values by substituting them into the quadratic equation. Thus for _x_ = −2 we have 4 − 10 + 6 = 0 and thus 0 = 0. For _x_ = −3 we have 9 − 15 + 6 = 0 and thus 0 = 0.
Example
Factorise and hence solve the quadratic equation _x_ 2 − 3 _x_ \+ 2 = 0.
To factorise this equation we need to find the two numbers which when multiplied together will give 2 and which when added together will give −3.
If we multiply −1 and −2 we obtain 2 and the addition of −1 and −2 gives −3. Thus we can write:
The solutions are thus given by _x_ − 1 = 0, i.e. _x_ = 1, and _x_ − 2 = 0, i.e. _x_ = 2.
We can check these values by substituting them into the original equation, _x_ 2 − 3 _x_ \+ 2. Thus, for _x_ = 1 we have 1 − 3 + 2 = 0 and so 0 = 0. For _x_ = 2 we have 4 − 6 + 2 = 0 and so 0 = 0.
Key point
The procedure for determining the roots of a quadratic equation by completing the square can be summarised as:
1. Put the equation in the form _x_ 2 \+ _ax_ = _b_.
2. Determine the value of (a/2).
3. Add ( _a_ /2)2 to both sides of the equation to give:
4. Hence obtain the equation:
5. Determine the two roots by taking the square root of both sides of the equation, i.e.
#### Completing the square
Consider the equation _x_ 2 \+ 6 _x_ \+ 9 = 0. This equation can be factorised to give ( _x_ \+ 3)( _x_ \+ 3) = 0, i.e. ( _x_ \+ 3)2 = 0. It is a perfect square, both the roots being the same. Now consider the equation _x_ 2 \+ 6 _x_ \+ 2 = 0. What are the factors? We can rewrite the equation as:
If we add 9 to both sides of the equation then we obtain
The left-hand side of the equation has been made into a perfect square by the adding of the 9. Thus we can write:
This means that _x_ \+ 3 must be one of the square roots of 7, i.e.
The plus or minus is because every positive number has two square roots, one positive and one negative. Thus we have _x_ \+ 3 = ±√7. Hence:
The two solutions are thus
This method of determining the roots of a quadratic equation is known as _completing the square_. In the above discussion the left-hand side of the equation was made into a perfect square by the adding of 9. How do we determine what number to add in order to make a perfect square? Any expression of the form _x_ 2 \+ _ax_ becomes a perfect square when we add ( _a/_ 2)2, since:
[9]
Thus for _x_ 2 \+ 6 _x_ we have _a_ = 6 and so ( _a_ /2) = 3; we add 32 = 9.
The above rule for completing the square only works if the coefficient of _x_ 2, i.e. the number in front of _x_ 2, is 1. However, if this is not the case we can simply divide throughout by that coefficient in order to make it 1.
Example
Use the method of completing the square to solve the quadratic equation _x_ 2 \+ 10 _x_ − 4 = 0.
The quadratic equation can be written as:
Adding (10/2)2 = 25, to both sides of the equation gives:
Thus:
Hence, _x_ \+ 5 = ±√29 = ±5.39 and the solutions of the quadratic equation are _x_ = +5.39 − 5 = 0.39 and _x_ = −5.39 − 5 = −10.39.
We can check these values by substituting them in the equation _x_ 2 \+ 10 _x_ − 4 = 0. Thus, for _x_ = 0.39 we have 0.392 \+ 3.9 − 4 = 0.05, which, because of the rounding used to limit the number of decimal places in determining the root, is effectively zero. For the other solution of _x_ = −10.39 we have (−10.39)2 \+ 103.9 − 4 = 0.05, which, because of the rounding used to limit the number of decimal places in determining the root, is effectively zero.
#### The quadratic formula
Consider the quadratic equation _ax_ 2 \+ _bx_ \+ _c_ = 0. To obtain the roots by completing the square method, we divide throughout by _a_ to give:
This can be written as:
To make the left-hand side of the equation a perfect square we must add ( _b_ /2 _a_ )2 to both sides of the equation. Hence:
and so:
Taking the square root of both sides of the equation gives
Thus:
and so we have the _general formula for the solution of a quadratic equation_ :
[10]
Consider the following three situations:
• If we have ( _b_ 2 − 4 _ac_ ) > 0, then the square root is of a positive number. There are then _two distinct roots_ which are said to be _real_.
• If we have ( _b_ 2 − 4 _ac_ ) = 0, then the square root is zero and the formula gives just one value for _x_. Since a quadratic equation must have two roots, we say that the equation has _two coincident real roots_.
• If we have ( _b_ 2 − 4 _ac_ ) < 0, then the square root is of a negative number. A new type of number has to be invented to enable such expressions to be solved. The number is referred to as a _complex number_ and the roots are said to be _imaginary_ (the roots in 1 and 2 above are said to be _real_ ). Such numbers are discussed later in this book.
Key point
The general formula for the solution of a quadratic equation is:
[10]
Example
Determine, if they exist as real roots, the roots of the following quadratic equations:
(a) 4 _x_ 2 − 7 _x_ \+ 3,
(b) _x_ 2 − 4 _x_ \+ 4,
(c) _x_ 2 \+ 2 _x_ \+ 4.
(a) Using the general quadratic formula [10], here we have _a_ = 4, _b_ = −7 and _c_ = 3. Therefore:
Therefore, _x_ = 1 or _x_ = 0.75 defines the two roots of the equation. We can now represent the function as:
and this, when multiplied out, gives _x_ 2 − 1.75 _x_ \+ 0.75 and which when multiplied by 4 gives the original equation 4 _x_ 2 − 7 _x_ \+ 3.
(b) Using the general quadratic formula [10] gives:
Therefore, we have two roots with _x_ = 2. We may now rewrite the equation in the form ( _x_ − 2)( _x_ − 2). This, when multiplied out, gives _x_ 2 − 4 _x_ \+ 4.
(c) Using the general quadratic formula [10] gives:
Since the term inside the square root is negative, we have _no real roots_.
Example
Figure 1.13 shows a simple cantilever of length _L_ , propped at its free end. It can be shown that the bending moment _M_ of this type of cantilever is a function of the distance _x_ measured from the fixed end of the beam, thus _M_ = _f_ ( _x_ ). The defining equation for the function is:
where _W_ is the distributed load in newtons per unit length. Using this quadratic formula, determine the positions along the beam at which the bending moment is zero (in engineering called the _points of contraflexure_ ).
Figure 1.13 Propped cantilever
When _M_ = 0, we have:
We can solve this by using the general quadratic formula [10]. Firstly, we can simplify the expression by multiplying through by 8 and taking the _W_ term out as a factor:
and so the equation becomes 4 _x_ 2 − 5 _Lx_ \+ _L_ 2 = 0. Thus:
Hence _x_ = _L_ /4 or _x_ = _L_. The bending moment is thus zero at two locations (it has two points of contraflexure), i.e. at _L_ /4 from the fixed end or at the extreme right-hand end of the beam when _x_ = _L_.
Example
The distance _s_ in metres moved by a vehicle over a period of time _t_ seconds is defined by the equation _s_ = _ut_ \+ ½ _at_ 2, with _a_ being the constant acceleration and _u_ the initial velocity. Assuming the vehicle commences motion with an initial velocity of 5 m/s and covers 84 m with a constant acceleration of 2 m/s2, calculate the time over which this occurs.
Substituting the values in the equation gives:
Writing the equation in the general format:
Thus:
and so _t_ = −12 s or _t_ = 7 s. Since we cannot have a negative time, the only acceptable answer is _t_ = 7 s.
The solution may be checked by substituting into the original equation _t_ 2 \+ 5 _t_ − 84 = 0, when _t_ = 7 s we have 72 \+ 5(7) − 84 = 0. Since this is true, our solution holds.
Example
The total surface area _A_ of a cylinder of radius _r_ and height _h_ is given by the equation _A_ = 2 _πr_ 2 \+ 2 _πrh_. If _h_ = 6 cm, what will be the radius required to give a surface area of 88/7 cm2? Take π as 22/7.
Putting the numbers in the equation gives
Multiplying throughout by 7 and dividing by 44 gives
Hence we can write
and so:
Hence the solutions are _r_ = −6.32 cm and _r_ = 0.32 cm. The negative solution has no physical significance. Hence the solution is a radius of 0.32 cm.
We can check this value of 0.32 cm by substitution in the equation 2 = _r_ 2 \+ 6 _r_. Hence 0.10 + 1.92 = 2.02, which is effectively 2 bearing in mind the rounding of the root value to two decimal places that has occurred.
### Problems 1.3
1. Determine, if they exist, the real roots of the following quadratic functions:
(a) _x_ 2 \+ 2 _x_ − 4,
(b) _x_ 2 \+ 3 _x_ \+ 1,
(c) _x_ 2 − 2 _x_ − 1,
(d) _x_ 2 \+ _x_ \+ 2.
2. The e.m.f. _E_ of a thermocouple is a function of the temperature _T_ , being given by _E_ = −0.02 _T_ 2 \+ 6 _T_. The e.m.f. is in μV and the temperature in °C. Determine the temperatures at which the e.m.f. will be 200 μV.
3. When a ball is thrown vertically upwards with an initial velocity _u_ from an initial height _h_ 0, the height _h_ of the ball is a function of the time _t_ , being given by _h_ − _h_ 0 = _ut_ − 4.9 _t_ 2. Determine the times for which the height is 1 m, if _u_ = 4 m/s and _h_ 0 = 0.5 m.
4. The deflection _y_ of a simply supported beam of length _L_ when subject to an impact load of _mg_ dropped from a height _h_ on its centre is obtained by equating the total energy released by the falling load with the strain energy acquired, i.e.
Hence obtain an expression for the deflection _y_.
5. The height _h_ risen by an object, after a time _t_ , when thrown vertically upwards with an initial velocity _u_ is given by the equation where _g_ is the acceleration due to gravity. Solve the quadratic equation for _t_ if _u_ = 100 m/s, _h_ = 150 m and _g_ = 9.81 m/s2.
6. A rectangle has one side 3 cm longer than the other. What will be the dimensions of the rectangle if the diagonals have to have lengths of 10 cm? Hint: let one of the sides have a length _x_ , then the other side has a length of 3 + _x_. The Pythagoras theorem can then be used.
## 1.4 Inverse functions
So far, in the treatment of a function we have started with a value of the independent variable _x_ and used the function to find the corresponding value of the dependent variable _y_ (Figure 1.14(a)). However, suppose we are given a value for _y_ and want to find _x_ (Figure 1.14(b)). For example, we might have distance _s_ as a function of time _t_ , e.g. _s_ = 2 _t_. Given a value of the independent variable _t_ we can use the function to determine _s_. Suppose though that we are given a value of the dependent variable _s_ and have to determine the corresponding _t_ value? With the given equation we can rearrange it to give _t_ = _s_ /2. The function from _t_ to _s_ is _f_ ( _t_ ), the function from _s_ to _t_ is a different function _g_ ( _s_ ).
Figure 1.14 (a) y = f(x), (b) x = g(y)
Figure 1.15(a) shows some values for the _s_ = _f_ ( _t_ ) function described by the equation _s_ = 2 _t_. Figure 1.15(b) shows the function obtained by reversing the arrows, i.e. starting with time values deducing the corresponding distance values. This figure represents the _inverse_ relationship.
Figure 1.15 For (a) function s = 2t, (b) function t = s/2
Note that there is a simple point of significance here: if we use _s_ = 2 _t_ to calculate a value for _s_ given a value of _t_ and then use the inverse by taking that value of _t_ to calculate a value of _s_ , we end up back where we started with our original value of _s_. This leads to a method of specifying an inverse function. Consider the arrangement shown in Figure 1.16. Here the _g_ function system box operates on the output from the _f_ function box in order to undo the work of the _f_ box. Because the _g_ function is undoing the work of the _f_ function it is said to be the _inverse_ of _f_. We may, therefore, write:
[11]
This equation [11] is used to define an inverse function:
Figure 1.16 x = g{f(x)}
_If f is a function of x then the function g which satisfies g{f(x)} = x for all values of x in the domain of f is called the inverse of f_.
With regard to notation: the inverse of a function _f_ of _x_ is written as _f_ −1( _x_ ). Note that _f_ −1( _x_ ) does _not_ mean 1/ _f_ ( _x_ ), it is simply the notation to indicate the inverse function (the −1 not indicating a power −1!). _f_ −1( _x_ ) takes an input which is some function of _x_ and inverts it to give an output of _x_. Thus the above definition gives:
[12]
Key point
If f is a function of _x_ then the function g which satisfies g{f( _x_ )} = _x_ for all values of _x_ in the domain of f is called the inverse of f.
Key point
A function can only have an inverse if there is a one-to-one relationship between the input to a function and its output. Some functions have inverses for just some part of their domain. For example, the function _y_ = _f_ ( _x_ ) = _x_ 2 with an input of +1 or −1 gives _y_ = +1. Thus if we take the inverse we do not know whether the result should be +1 or −1, unless we place some restriction on the domain, e.g. restrict it to just positive values of _x_.
As an illustration, consider a function _f_ which adds 2, i.e. we have _f_ ( _x_ ) = _x_ \+ 2 (Figure 1.17(a)). Then the inverse is a function that subtracts 2 in order to undo the action of the _f_ function (Figure 1.17(b)). Thus, _f_ −1( _x_ ) = _x_ − 2 and so if we put into the function _x_ \+ 2 we obtain ( _x_ \+ 2) − 2.
Figure 1.17 (a) The function which adds 2, (b) the inverse function which subtracts 2
As a further illustration, consider a function _f_ which multiplies by 3, i.e. we have _f_ ( _x_ ) = 3 _x_ (Figure 1.18(a)). Then the inverse is a function that divides by 3 in order to undo the action of the _f_ function (Figure 1.18(b)). Thus _f −_1( _x_ ) = _x_ /3.
Figure 1.18 (a) The function which multiplies by 3, (b) the inverse function which divides by 3
As another illustration, consider a function which multiplies by 3 and then adds 2, i.e. _f_ ( _x_ ) = 3 _x_ \+ 2. Here we have operated on the input _x_ to the function twice, initially we multiplied the _x_ by 3 and then we added the number 2 (Figure 1.19(a)). To arrive back at the original input, the inverse must do two things (the reverse of the operations just detailed), namely first subtract 2 and then divide through by 3 (Figure 1.19(b)). Note that you must undo things in the reverse order to which they were done with the function _f_.
Figure 1.19 (a) The functions which multiply by 3 and then add 2, (b) the inverse functions which subtract 2 and then divide by 3
The above illustrations are rather basic functions. We will investigate more complex ones later. However, the basic rules still apply and, once understood, will provide a solid foundation from which to build more complex relationships.
Example
If _f_ ( _x_ ) = 2 _x_ , what is the inverse function?
The initial function _f_ ( _x_ ) = 2 _x_ multiplies by 2. Therefore, to reverse the process we simply divide by 2. Thus, _f_ −1( _x_ ) = _x_ /2.
Example
If _f_ ( _x_ ) = 2 _x_ \+ 3, what is the inverse function?
_f_ ( _x_ ) = 2 _x_ \+ 3 involves doubling the input and then adding 3. The inverse is thus subtracting 3 from the input and then halving. Thus the inverse function is:
### 1.4.1 Graphs of _f_ and _f_ −1
We can use the above rules for a function and its inverse to find the graph of an inverse function from a graph of the function. Consider the graph of _y_ = _f_ ( _x_ ) shown in Figure 1.20(a). This is the graph described by the equation _y_ = _x_ 2. What is the graph of the inverse function _f_ −1( _x_ )? This will be the graph of _y_ = √ _x_ (Figure 1.20(b)) since the function √ _x_ is what we need to apply to undo the function _x_ 2.
Figure 1.20 (a) y= f(x). (b) y = f−1(x)
If we examine the two graphs we find that the inverse _f_ −1 is just the reflection of the graph of _f_ in the line _y_ = _x_ (Figure 1.21). This is true for any function when it possesses an inverse.
Figure 1.21 The inverse as a reflection of function in line y = x
### Problems 1.4
1. Determine the inverses of:
(a) _f_ ( _x_ ) = 5 _x_ − 3,
(b) _f_ ( _x_ ) = 4 + _x_ ,
(c) _f_ ( _x_ ) = _x_ 3,
(d) _f_ ( _x_ ) = 2 _x_ 3 − 1.
2. Does the function _f_ ( _x_ ) = _x_ 2, have an inverse for all real values of _x_?
3. For each of the following functions, restrict the domain so that there is an inverse and then determine it:
(a) _f_ ( _x_ ) = ( _x_ − 1)2,
(b) _f_ ( _x_ ) = ( _x_ \+ 1)2 − 4.
## 1.5 Circular functions
This section focuses on the so-called circular or trigonometric functions. Such functions are widely used in engineering. Thus in describing oscillations, whether mechanical, such as a vibrating beam, or electrical, such as an alternating current, the equation used to define the quantity which fluctuates with time is likely to involve a trigonometric function.
As an illustration, consider the mechanical oscillation of a mass on the end of a spring when it just vibrates up-and-down after the mass is given a vertical displacement (Figure 1.22). With very little damping, the mass will oscillate up-and-down for quite some time. Figure 1.23 shows how the displacement varies with time.
Figure 1.22 Oscillation of a mass on the end of a vertical spring
Figure 1.23 Displacement y variation with time for the oscillating mass when damping is virtually absent
If we look at the situation when there is noticeable damping present, then the displacement variation with time looks more like Figure 1.24. The difference between this graph and Figure 1.23 is that, though we have a similar form of graph, the effect of the damping is that the amplitude decreases with time.
Figure 1.24 Displacement y variation with time for the oscillating mass when damping is noticeable
We can derive an equation to represent the variation of displacement with time in the absence of damping by using a simple model. Suppose we draw a circle with a radius OA equal to the amplitude of the oscillation, i.e. the maximum displacement, and consider a point P moving round the circle with a constant angular velocity ω (Figure 1.25) and starting from the horizontal. The vertical projection of the rotating radius OP gives a displacement-angle graph. Since the radius OP is rotating with a constant angular velocity, the angle rotated is proportional to the time. The result is a graph which replicates that of the undamped oscillating mass.
Figure 1.25 The vertical projection AB of the rotating radius gives a displacement-time graph
Key points
The convention used for angles is that they are referenced from zero degrees and when measured in an anticlockwise direction are termed positive angles. One unit for angles is degrees (Figure 1.26(a)). However, when angles occur in equations in engineering, it is usual to describe them in radians. One complete rotation of a radius is a rotation through 360°.
Figure 1.26 Conventions for angles in degrees and radians
One radian is the angle swept so that the arc formed is the same length as the radius, hence since one complete sweep of a radius is an arc length of 2 _πr_ , then one complete rotation is 2π radians (Figure 1.26(b)). Thus 360° = 2π radians (rad) and so 1 rad = 360/2π = 57.3°.
Hence:
Since AB/OA = sin θ we can write:
where AB is the vertical height of the line at some instant of time, OA being its length. The maximum value of AB will be OA and occur when θ = 90°. But a constant angular velocity ω means that in a time _t_ the angle θ covered is _ωt_. Thus the vertical projection AB of the rotating line will vary with time and is described by the equation:
If _y_ is the displacement of the oscillating mass and _A_ the amplitude of its oscillation, the equation can be written as:
[13]
This type of oscillation is called _simple harmonic motion_.
It is usual to give angular velocities in units of radians per second, an angular rotation through 360° being a rotation through 2π radians. Since the periodic time _T_ is the time taken for one cycle of a waveform, then _T_ is the time taken for OA to complete one revolution, i.e. 2π radians. Thus:
The frequency _f_ is 1 _/T_ and so ω = 2 _πf_. Because ω is just 2π times the frequency, it is often called the _angular frequency_. The frequency _f_ has units of hertz (Hz) or cycles per second and thus the angular frequency has units of s−1. We can thus write the above equation as:
[14]
We can use a similar model to describe alternating current; in this case the rotating radius OP is called a _phasor_. Thus the current _i_ is related to its maximum value _I_ by:
For the damped oscillation, the amplitude decreases with time so we must figure out how to quantify this 'decay' and link it somehow with the basic sine wave function of the undamped system. As we will later discover, the damped oscillation is actually described by a combination of a sine function and an exponential function.
Key points
In _y_ = _A_ sin _ωt_ , _A_ is the amplitude, ω the angular frequency.
Angular frequency ω = 2 _πf_.
Period _T_ = 1/ _f_.
Key points
The definitions used to define the trigonometric ratios in terms of a right-angled triangle (Figure 1.28) are:
Figure 1.28 Defining trig. ratios
#### The circular functions
We can define the circular functions, i.e. the sine, cosine and tangent, in terms of the rotation of a radial arm of length _A_ (it often represents the amplitude) in a circular path (Figure 1.27). Thus, we can define the sine of an angle as:
Figure 1.27 Defining circular functions
But, with _b_ = _y_ 1 − 0, then sin θ = _y_ 1 _/A_ and so:
[15]
This relationship now enables us to calculate the vertical side of the triangle O _x_ 1P, or the _y_ -coordinate of point P.
Likewise, we can define the cosine of an angle as:
But, with _c_ = _x_ 1 − 0, then cos θ = _x_ 1 _/A_ and so:
[16]
This relationship now enables us to calculate the horizontal side of the triangle O _x_ 1P, or the _x_ -coordinate of point P.
We can define the tangent of an angle in terms of the gradient of the line OP as:
[17]
Using equations [15] and [16] we can write equation [17] as:
[18]
With reference to Figure 1.27, as the point P moves around the circle, so the angle θ changes. The trigonometrical ratios can be defined in terms of the angles in a right-angled triangle. However, the above definitions allow us to define them for all angles, not just those which are 90° or less. Because they are defined in terms of a circle, they are termed _circular functions_.
Consider the motion of a point P around a unit radius circle (Figure 1.29). P0 is the initial position of the point and P the position to which it has rotated. The radial arm OP in moving from OP0 has swept out an angle θ. The angle θ is measured between the radial arm and the OP0 axis as a positive angle when the arm rotates in an anticlockwise direction. Since the circle has a unit radius, to obtain for angles up to 90° the same result as the trigonometric ratios defined in terms of the right-angled triangle, the perpendicular height NP defines the sine of the angle P0OP and the horizontal distance ON defines the cosine of the angle P0OP.
Figure 1.29 Circular functions
Consider the circular rotations for angles in each quadrant (note: the first quadrant is angles 0° to 90°, the second quadrant 90° to 180°, the third quadrant 180° to 270°, the fourth quadrant 270° to 360°):
1. _Angles between 0 and 90°_
When the radial arm OP is in the first quadrant (Figure 1.30) with 0 ≤ θ < π/2, 0 ≤ θ < 90°, both NP and ON are positive. Thus both the sine and the cosine of angle θ are positive. Since the tangent is NP/ON then the tangent of angle θ is positive. For example, sin 30° = +0.5, cos 30° = +0.87 and tan 30° = +0.58.
Figure 1.30 First quadrant
2. _Angles between 90° and 180°_
When the radial arm OP moves into the second quadrant (Figure 1.31) with π/2 ≤ θ < π, 90° ≤ θ < 180°, NP is positive and ON negative. Thus the sine of angle θ is positive and the cosine negative. Since the tangent is NP/ON then the tangent of angle θ is negative. For example, sin 120° = +0.87, cos 120° = −0.50 and tan 120° = −1.73.
Figure 1.31 Second quadrant
3. _Angles between 180° and 270°_
When the radial arm moves into the third quadrant (Figure 1.32) with π ≤ θ < 3π/2, 180° ≤ θ < 270°, NP is negative and ON negative. Thus the sine of angle θ is negative and the cosine negative. Since the tangent is NP/ON then the tangent of angle θ is positive. For example, sin 210° = −0.5, cos 210° = −0.87 and tan 210° = +0.58.
Figure 1.32 Third quadrant
4. _Angles between 270° and 360°_
When the radial arm is in the fourth quadrant (Figure 1.33) with 3π/2 ≤ _θ <_ 2π, 270° ≤ θ < 360°, NP is negative and ON positive. Thus the sine of angle θ is negative and the cosine positive. Since the tangent is NP/ON then the tangent of angle θ is negative. For example, sin 300° = −0.87, cos 300° = +0.5 and tan 300° = −1.73.
Figure 1.33 Fourth quadrant
We can now summarise with Figure 1.34 as an aid to memory.
Figure 1.34 Circular functions in the four quadrants
For angles greater than 2π rad (360°), the radial arm OP simply rotates more than one revolution. Negative angles are interpreted as a clockwise movement of the radial arm from OP0.
Key point
Mathematicians call the sine function an 'odd' function. An _odd_ function is defined as one which has:
An _even_ function has:
#### Cyclic functions
A cyclic function is one which repeats itself on a cyclic period. Thus, if we have a function _y_ = _f_ ( _x_ ) which is cyclic and repeats itself after a time _T_ , then:
[19]
_T_ is termed the _periodic time_ and is the time taken to complete one cycle. Hence, if the frequency is _f_ then _f_ cycles are completed each second and so _T_ = 1/ _f_.
As the arm OP in Figure 1.35 rotates round-and-round its circular path, the value of its vertical projection NP is cyclic and generates the sine graph shown. Since the graph describes a periodic function of period 2π, then:
[20]
where _n_ = 0, ±1, ±2, etc.
Figure 1.35 Graph of y = sin θ
Note that if OP rotates in a clockwise direction, i.e. the negative direction, then as θ is negative, this generates the sine function continued to the left of the origin O into the negative region (Figure 1.36). For negative values of θ, the sine function has the same values as the positive values except for a change in sign:
To obtain the graph of cos θ as the radial arm OP rotates round-and-round its circular path, we read off the values of its horizontal projection ON. Figure 1.37 shows the result. Since the graph describes a periodic function of period 2π, then:
Figure 1.36 Sine graph for negative angles
Figure 1.37 Graph of y = cos θ
[21]
where _n_ = 0, ±1, ±2, etc.
Note that the graph of _y_ = sin θ is the same as that of _y_ = cos θ moved ½π to the right, while that of _y_ = cos θ is the same as _y_ = sin θ moved ½π to the left, i.e. sin θ = cos (θ − ½π) and cos θ = sin (θ + ½π).
In the projections of the radial arm OP to generate the sine or cosine graphs, we have let OP have the value of 1. If we consider a radial arm of length _A_ , we have the same function but multiplied by _A_ , i.e. _y_ = _A_ sin θ. The amplitude of the waveform is changed. To illustrate this look at the following functions and their graphs as plotted to the same scale and on the same axes (Figure 1.38): _y_ = 1 sin θ with amplitude _A_ = 1, _y_ = 4 sin θ with amplitude _A_ = 4 and _y_ = 0.5 sin θ with amplitude _A_ = 0.5.
Figure 1.38 Effect of changing A in y = A sin θ, only the amplitude of the graph waves is changed
In engineering, we often encounter functions of the general form:
[22]
φ is the initial angle we start the rotating radial arm OP at and, as a consequence, φ is the angle by which the sine or cosine graph is moved to the left when positive and to the right when negative. It defines a phase shift of the complete waveform. Figure 1.39 illustrates this by showing the effect of a phase shift of π/3, i.e. 60°.
Figure 1.39 Graph of y = sin (θ + π/3), showing the effect of a phase shift of π/3, i.e. 60°, as being to shift the graph to the left by that amount
Now consider the graph of the function _y_ = tan _x_. For the radial arm OP rotating in a circle, the tangent is PN/OP (Figure 1.40). But if we draw a tangent to the circle at P0 then, for a unit radius circle the tangent of the angle is P0M. When the radius arm has moved to an angle between 90° and 180° then the tangent is P0M1. The graph describes a periodic function which repeats itself every period of π (not every 2π as for a sine or cosine function). Thus:
[23]
for _n_ = 0, ±1, ±2, etc.
Figure 1.40 y = tan θ
Example
Draw graphs of _y_ = cos θ and _y_ = cos _2θ_ on the same axis and comment on how they differ.
A simple way to sketch the graphs is to formulate a table for values between θ = 0° and θ = 360° for cos θ and cos 2θ, then plot the respective curves. So we have:
Figure 1.41 shows the resulting graphs.
Figure 1.41 Graphs of y = cos θ and y = cos 2 θ
Example
Sketch the function _y_ = 5 sin (θ + 30°) for values of θ between 0° and 360°.
The equation indicates that the waveform has an amplitude of 5 and a phase shift of +30°. Figure 1.42 shows the form of the function.
Figure 1.42 Graph of y = 5 sin (θ + 30°)
Maths in action
To illustrate a simple mechanical application, consider a piston head moving cyclically without damping, i.e. without friction, and represented by the spring-mass system shown in Figure 1.43. The spring represents the restoring or elastic driving force acting on the piston head.
Figure 1.43 Representation of a piston head
We can find some basic properties of the system if we apply cyclic functions. We assume that the motion can be represented by the y-component of a rotating radial arm which rotates with a constant angular velocity and so is given by _y_ = _A_ sin θ, where _y_ is the vertical displacement at a time _t_. Since θ = _ωt_ , we can write:
From mechanical theory, the angular frequency ω is:
where _k_ is the spring constant and _m_ the mass. Hence ω = √(144/4) = 6 rad/s and so _y_ = _A_ sin 6 _t_. We thus have y at a maximum when sin 6 _t_ = 1, i.e. when 6 _t_ = 1.57 and so _t_ = 0.26 s.
If the maximum displacement, i.e. amplitude, of the piston is 0.1 m, we have _y_ = 0.1 sin 6 _t_. Thus, at _t_ = 3 s we have _y_ = 0.1 sin 6(3) = 0.1 sin 18. Since 18 rad = 1031° then _y_ = 0.1 (−0.75) = −0.075 m. The minus sign indicates that the displacement is in the upward direction from the datum line.
#### Phasors
A sinusoidal alternating current can be represented by the equation _i_ = _I_ sin _ωt_ , where _i_ is the current at time _t_ and _I_ the maximum current. In a similar way we can write for a sinusoidal alternating voltage _v_ = _V_ sin _ωt_ , where _v_ is the voltage at time _t_ and _V_ the maximum voltage. Thus we can think of an alternating current and voltage in terms of a model in which the instantaneous value of the current or voltage is represented by the vertical projection of a line rotating in an anticlockwise direction with a constant angular velocity. The term _phasor_ , being an abbreviation of the term phase vector, is used for such rotating lines. The length of the phasor can represent the maximum value of the sinusoidal waveform (or the generally more convenient root-mean-square value, the maximum value is proportional to the root-mean-square value). The line representing a phasor is drawn with an arrowhead at the end that rotates and is drawn in its position at time _t_ = 0, i.e. the phasor represents a frozen view of the rotating line at one instant of time of _t_ = 0 (Figure 1.44).
Figure 1.44 Representing a phasor by an arrow-headed line
Alternating currents or voltages do not always start with zero values at time _t_ = 0 and can be represented in general by:
[24]
The phasor for such alternating currents or voltages is represented by a phasor (Figure 1.45) at an angle φ to the reference line, this line being generally taken as being the horizontal. The angle φ is termed the _phase angle_. We can describe such a phasor by merely stating its magnitude and phase angle (the term used is polar coordinates). Thus 2∠40° A describes a phasor with a magnitude, represented by its length, of 2 A and with a phase angle of 40°.
Figure 1.45 Phasor with phase angle φ
In discussing alternating current circuits we often have to consider the relationship between an alternating current through a component and the alternating voltage across it. If we take the alternating voltage as the reference and consider it to be represented by a horizontal voltage phasor, then the current may have some value at that time and so be represented by another phasor at some angle φ. There is said to be a _phase difference_ of φ between the current and the voltage. If φ has a positive value then the current is said to be _leading_ the voltage, if a negative value then _lagging_ the voltage (Figure 1.46).
Figure 1.46 (a) Current leading voltage, (b) current lagging voltage
Example
A sinusoidal voltage has a maximum value of 10 V and a frequency of 100 Hz. If the voltage has a phase angle of 30°, what will be the instantaneous voltage at times of (a) _t_ = 0, (b) _t_ = 0.5 ms?
The equation for the sinusoidal voltage will be:
The term 2 _πft_ , i.e. _ωt_ , is in radians. Thus, for consistency, we should express φ in radians. An angle of 30° is π/6 radians. Thus:
It should be noted that it is quite common in engineering to mix the units of radians and degrees in such expressions. Thus you might see:
However, when carrying out calculations involving the terms in the bracket there must be consistency of the units.
(a) When _t_ = 0 then: _v_ = 10 sin π/6 = 5 V.
(b) When _t_ = 0.5 ms then:
and so _v_ = 10 sin 0.838 = 7.43 V.
### 1.5.2 Manipulating circular functions
Often in working through engineering problems, it is necessary to rearrange circular functions in a different format. This section looks at how we can do this.
Key points
The cosecant, secant and cotangent ratios are defined as the reciprocals of the sine, cosine and tangent:
Key points
#### Trigonometric relationships
For the right-angled triangle shown in Figure 1.47, the _Pythagoras theorem_ gives AB² + BC² = AC². Dividing both sides of the equation by AC² gives:
Figure 1.47 Right-angled triangle
Hence:
[25]
Dividing this equation by cos² θ gives:
[26]
and dividing equation [25] by sin² θ gives:
[27]
Example
Simplify
#### Trigonometric ratios of sums of angles
It is often useful to express the trigonometric ratios of angles such as _A_ \+ _B_ or _A_ − _B_ in terms of the trigonometric ratios of _A_ and _B_. In such situations the relationships shown in Key points prove useful.
As an illustration of how we can derive such relationships, consider the two right-angled triangles OPQ and OQR shown in Figure 1.48:
Figure 1.48 Compound angle
Hence:
[28]
If we replace _B_ by − _B_ we obtain:
[29]
If in equation [28] we replace _A_ by (π/2 − _A_ ) we obtain:
[30]
If in equation [30] we replace _B_ by − _B_ we obtain:
[31]
We can obtain tan ( _A_ \+ _B_ ) by dividing sin ( _A_ \+ _B_ ) by cos ( _A_ \+ _B_ ) and likewise tan ( _A_ − _B_ ) by dividing sin ( _A_ − _B_ ) by cos ( _A_ − _B_ ). By adding or subtracting equations from above we obtain the relationships such as 2 sin _A_ cos _B_.
If, in the above relationships for the sums of angles _A_ and _B_ we let _B_ = _A_ we obtain the double-angle equations shown in Key points.
Key points
Example
Solve the equation cos 2 _x_ \+ 3 sin _x_ = 2.
Using equation [46] for cos 2 _x_ gives:
This can be rearranged as:
Hence sin _x_ = ½ or 1. For angles between 0° and 90°, _x_ = 30° or 90°.
Example
In an alternating current circuit, the instantaneous voltage _v_ is given by _v_ = 5 sin _ωt_ and the instantaneous current _i_ by _i_ = 10 sin ( _ωt_ − π/6). Find an expression for the instantaneous power _P_ at a time _t_ given _P_ = _vi_.
As _P_ = _vi_ we have:
Using 2 sin _A_ sin _B_ = cos ( _A_ − _B_ ) − cos ( _A_ \+ _B_ ) gives:
Key points
with:
with:
Sometimes it is useful to write an equation of the form _a_ sin θ + _b_ cos θ in the form _R_ sin (θ − _a_ ). We can do this by using the trigonometric formula for compound angles, e.g. equation [29] for sin ( _A_ − _B_ ). Thus:
Hence, we require:
Therefore, comparing coefficients of the sin θ terms:
and comparing coefficients of the cos θ terms:
Dividing these two equations gives
[32]
This leads us to be able to describe the angle _a_ by the right-angled triangle shown in Figure 1.49. Hence:
Figure 1.49 Right-angled triangle
[33]
Thus:
[34]
The Key points show other relationships which can be derived in a similar way.
Example
Express 3 cos θ + 4 sin θ in the form (a) _R_ cos (θ − _a_ ), (b) _R_ sin (θ + _a_ ).
(a) We can derive it by using the double-angle formula cos ( _A_ − _B_ ) = cos _A_ cos _B_ \+ sin _A_ sin _B_. Thus:
Thus 3 = _R_ cos _a_ and 4 = _R_ sin _a_. Hence tan a = 4/3 and so a = 53.1° or 0.93 rad. _R_ = √(3² + 4²) = 5. Hence:
(b) We can derive it by directly using the double-angle relationship sin ( _A_ \+ _B_ ) = sin _A_ cos _B_ \+ cos _A_ sin _B_. Thus:
Thus 3 = _R_ sin _a_ and 4 = _R_ cos _a_. Hence tan a = 3/4 and so a = 36.9° or 0.64 rad. _R_ = √(3² + 4²) = 5. Hence:
Example
Express 6 sin _ωt_ − 2.5 cos _ωt_ in the form _R_ sin ( _ωt_ \+ _a_ ).
Using the double angle formula sin ( _A_ \+ _B_ ) = sin _A_ cos _B_ \+ cos _A_ sin _B_ :
Comparing coefficients of sin _ωt_ gives 6 = _R_ cos _a_ and of cos _ωt_ gives −2.5 = _R_ sin _a_. Thus tan _a_ = −2.5/6. The negative sign for the sine and the tangent means that the angle must be in the fourth quadrant (see Figure 1.34). Hence a = −0.39 rad. _R_ = √(6² + 2.5²) = 6.5 and so:
Thus, by subtracting the waveform 2.5 cos _ωt_ from 6 sin _ωt_ we end up with a waveform of amplitude 6.5 and a phase shift of −0.39 rad.
Example
Two sinusoidal alternating voltages of _v_ 1 = 1.25 sin _ωt_ and _v_ 2 = 1.60 cos _ωt_ are combined. Show that the result is a voltage of _v_ = 2 sin ( _ωt_ \+ 52°).
Using sin ( _A_ \+ _B_ ) = sin _A_ cos _B_ \+ cos _A_ sin _B_ , then:
With the accuracy to which the result was quoted, the case is proved.
#### Adding phasors
Often in alternating current circuits we need to add the voltages across two components in series. We must take account of the possibility that the two voltages may not be in phase, despite having the same frequency since they are supplied by the same source. This means that if we consider the phasors, they will rotate with the same angular velocity but may have different lengths and start with a phase angle between them. Consider one of the voltages to have an amplitude _V_ 1 and zero phase angle (Figure 1.50(a)) and the other an amplitude _V_ 2 and a phase difference of φ from the first voltage (Figure 1.50 (b)). We can obtain the sum of the two by adding the two graphs, point-by-point, to obtain the result shown in Figure 1.50(c). Thus at the instant of time indicated in the figures, the two voltages are _v_ 1 and _v_ 2. Hence the total voltage is _v_ = _v_ 1 \+ _v_ 2. We can repeat this for each instant of time and hence end up with the graph shown in Figure 1.50(c).
Figure 1.50 Adding two sinusoidal signals of the same frequency
However, exactly the same result is obtained by adding the two phasors by means of the _parallelogram rule_ of vectors. If we place the tails of the arrows representing the two phasors together and complete a parallelogram, then the diagonal of that parallelogram drawn from the junction of the two tails represents the sum of the two phasors. Figure 1.50(c) shows such a parallelogram and the resulting phasor with magnitude _V_.
If the phase angle between the two phasors of sizes _V_ 1 and _V_ 2 is 90°, as in Figure 1.51, then the resultant can be calculated by the use of the Pythagoras theorem as having a size _V_ of:
Figure 1.51 Adding two phasors
[35]
and is at a phase angle φ relative to the phasor for _V_ 1 of:
[36]
Key points
If _y_ = sin _x_ then _x_ = sin−1 _y_ , when −π/2 ≤ _x_ ≤ π/2.
If _y_ = cos _x_ then _x_ = cos−1 _y_ , when 0 ≤ _x_ ≤ π.
If _y_ = tan _x_ then _x_ = tan−1 _y_ , when −π/2 ≤ _x_ ≤ π/2.
Example
Two sinusoidal alternating voltages are described by the equations of _v_ 1 = 10 sin _ωt_ volts and _v_ 2 = 15 sin ( _ωt_ \+ π/2) volts. Determine the sum of these voltages.
Figure 1.52 shows the phasor diagram for the two voltages. The angle between the phasors is π/2, i.e. 90°. We could determine the sum from a scale drawing or by calculation using the Pythagoras theorem. Thus:
Hence the magnitude of the sum of the two voltages is 18.0 V. The phase angle is given by:
Figure 1.52 Example
Hence φ = 56.3° or 0.983 rad. Thus the sum is an alternating voltage described by a phasor of amplitude 18.0 V and phase angle 56.3° (or 0.983 rad). This alternating voltage is thus described by:
### 1.5.4 The inverse circular functions
If sin _x_ = 0.8 what is the value of _x_? This requires the inverse being obtained. There is an inverse if the function is one-to-one or restrictions imposed to give this state of affairs. However, the function _y_ = sin _x_ gives many values of _x_ for the same value of _y_. To obtain an inverse we have to restrict the domain of the function to −π/2 to +π/2. With that restriction _y_ = sin _x_ has an inverse. The inverse function is denoted as sin−1 _x_ (sometimes also written as arcsin _x_ ). Note that the −1 is _not_ a power here but purely notation to indicate the inverse. If sin _x_ = 0.8 then the value of _x_ that gives this sine is the inverse and so _x_ = sin−1 0.8, i.e. _x_ = 53°. Figure 1.53 shows the graphs for sin _x_ and its inverse function. In a similar way we can define inverses for cosines and tangents.
Figure 1.53 sin x and its inverse
## Problems 1.5
1. State the amplitude and phase angle (with respect to _y_ = 5 sin θ) of the function _y_ = 5 sin (θ + 30°).
2. A cyclic function used to describe a rotating radius (phasor) is defined by the equation _y_ = 4 sin 3 _t_. What is the amplitude and the angular frequency of the function?
3. State the amplitude, period and phase angle for the following cyclic functions:
(a) 2 sin (5 _t_ \+ 1),
(b) 6 cos 3 _t_ ,
(c) 5 cos
(d) 2 cos ( _t_ − 0.6)
4. State the amplitude, period and phase angle for the following cyclic functions:
(a) 6 sin (2 _t_ \+ 1),
(b) 2 cos 9 _t_ ,
(c) 5 cos
(d) 2 cos ( _t_ − 0.2),
(e) 5 sin (4 _t_ \+ π/8),
(f) ½ sin ( _t_ − π/0.6)
5. The potential difference across a component in an electrical circuit is given by the equation _v_ = 40 sin 40 _πt_. Deduce the maximum value of the potential difference and its frequency.
6. A sinusoidal voltage has a maximum value of 1 V and a frequency of 1 kHz. If the voltage has a phase angle of 60°, what will be the instantaneous voltage at times of
(a) _t_ = 0,
(b) _t_ = 0.5 ms?
7. A sinusoidal alternating current has an instantaneous value _i_ at a time _t_ , in seconds, given by _i_ = 100 sin (200 _πt_ − 0.25) mA. Determine (a) the maximum current, (b) the frequency, (c) the phase angle.
8. A sinusoidal alternating voltage has an instantaneous value _v_ at a time _t_ , in seconds, given by _v_ = 12 sin (100 _πt_ \+ 0.5) volts. Determine (a) the maximum voltage, (b) the frequency, (c) the phase angle.
9. What is the value of _v_ , when _t_ = 30 μs, for an amplitude-modulated radio wave with a voltage _v_ in volts which varies with time _t_ in seconds and is defined by the equation _v_ = 50(1 + 0.02 sin 2400 _πt_ ) sin (200 × 10³ _πt_ ).
10. Show that sin ( _A_ \+ _B_ \+ _C_ ) = cos _A_ cos _B_ cos _C_ (tan _A_ \+ tan _B_ \+ tan _C_ − tan _A_ tan _B_ tan _C_ ).
11. Find the values of _x_ between 0 and 360° which satisfy the condition 8 cos _x_ \+ 9 sin _x_ = 7.25.
12. Write 5 sin θ + 4 cos θ in the forms (a) _R_ sin (θ − α), (b) _R_ cos (θ + α).
13. Express _W_ (sin α + μ cos α) in the form _R_ cos (α − β) giving the values of _R_ and tan β. Also show that the maximum value of the expression is W√(1 + μ2) and that this occurs when tan α = 1/μ.
14. Write the following functions in the form _R_ sin (ω _t_ \+ α):
(a) 3 sin _ωt_ \+ 4 cos _ωt_ ,
(b) 4.6 sin _ωt_ − 7.3 cos _ωt,_
(c) −2.7 sin _ωt_ − 4.1 cos _ωt_
15. Express 3 sin θ + 5 cos θ in the form _R_ sin (θ + α) with α measured in degrees.
16. Write the following functions in the form _R_ sin ( _ωt_ ± a):
(a) 4 sin _ωt_ − 3 cos _ωt_ ,
(b) −7 sin _ωt_ \+ 4 cos _ωt_ ,
(c) −3 sin _ωt_ − 6 cos _ωt_
17. The currents in two parallel branches of a circuit are 10 sin _ωt_ milliamps and 20 sin ( _ωt_ \+ π/2) milliamps. What is the total current entering the parallel arrangement?
18. The voltage across a component in a circuit is 5.0 sin _ωt_ volts and across another component in series with it 2.0 sin ( _ωt_ \+ π/6) volts. Determine the total voltage across both components.
19. The sinusoidal alternating voltage across a component in a circuit is 50√2 sin ( _ωt_ \+ 40°) volts and across another component in series with it 100√2 sin ( _ωt_ − 30°) volts. What is the total voltage across the two components?
20. The currents in two parallel branches of a circuit are 4√2 sin _ωt_ amps and 6√2 sin ( _ωt_ − π/3) amps. What is the total current entering the parallel arrangement?
21. Determine the value in radians of:
(a) sin−1 0.74,
(b) cos−1 0.10,
(c) tan−1 0.80,
(d) sin−1 0.40
## 1.6 Exponential functions
There are many situations in engineering where we are concerned with functions which grow or decay with time, e.g.
• The variation with time of the temperature of a cooling object.
• The variation with time of the charge on a capacitor when it is being charged and when it is being discharged.
• The variation with time of the current in a circuit containing inductance when the current is first switched on and then when it is switched off.
• The decay with time of the radioactivity of a radioactive isotope.
This section is about the equations we can use to describe such growth or decay.
#### Exponentials
In general, we can describe growth and decay processes by an equation of the form:
[37]
where _a_ is some constant called the _base_ , and _y_ the value of the quantity at a time _t_. Thus, for growth, we might have 2 _t_ , 3 _t_ , 4 _t_ , etc. and for decay 2− _t_ , 3− _t_ , 4− _t_ , etc. We could write equations for growth or decay processes with different values of the base. However, we usually standardise the base to one particular value. The most widely used form of equation is e _x_ , where e is a constant with the value 2.718 281 828... Whenever an engineer refers to an exponential change he or she is almost invariably referring to an equation written in terms of e _x_. Why choose this strange number 2.718... for the base? The reason is linked to the properties of expressions written in this way. For _y_ = e _x_ , the rate of change of _y_ with _x_ , i.e. the slope of a graph of _y_ against _x_ , is equal to e _x_ (this is discussed in more detail in the chapter concerned with differentiation):
[38]
and there are many engineering situations where this property occurs.
A simple illustration of the above is given if we take a strip of paper and cut it into half, throwing away one of the halves. We then take the half strip and cut it into half, throwing away one of the halves. If we keep on repeating this procedure we obtain the graph shown in Figure 1.54(a). This is an exponential decay in the length of the paper. Now look at the change in length per tear, i.e. the 'gradient' of the graph, Figure 1.54(b). We have the same exponential function. A similar type of relationship exists in the discharge of a charged capacitor. The charge on the capacitor decreases exponentially with time and the rate of change of charge, i.e. the current, follows the same exponential decay.
Figure 1.54 An 'exponential decay'
The following shows the values of e _x_ and e− _x_ for various values of _x_ and Figure 1.55 the resulting graphs
Figure 1.55 y = ex and y = e−x
The e _x_ graph describes a growth curve, the e− _x_ a decay curve. Note that both graphs have _y_ = 1 when _x_ = 0.
In a more general form we can write the exponential equation in the form _y_ = e _kx_ , or _y_ = e− _kx_ , where _k_ is some constant. This constant _k_ determines how fast _y_ changes with _x_. The following data illustrates this:
The bigger _k_ is the faster _y_ decreases, or increases, with _x_.
When _x_ = 0 then for _y_ = e _kx_ , or _y_ = e− _kx_ , _y_ = e0 and so _y_ = 1. This is thus the value of _y_ that occurs when _x_ is zero. Since we may often have an initial value other than 1, we write the equation in the form:
[39]
where _A_ is the initial value of _y_ at _x_ = 0. For example, for the discharging of a capacitor in an electrical circuit we have, for the charge _q_ on the capacitor at a time _t_ , the equation:
[40]
When _t_ = 0 then _q_ = _Q_ 0. The constant _k_ is 1/ _CR_. The bigger the value of _CR_ the smaller the value of 1/ _CR_ and so the slower the rate at which the capacitor becomes discharged.
One form of equation involving exponentials that is quite common is of the form:
[41]
When _x_ = 0 then e0 = 1 and so _y_ = _A_ − _A_ = 0. The initial value is thus 0. As _x_ increases then e− _kx_ decreases from 1 towards 0, eventually becoming zero when _x_ is infinite. Thus the value of _y_ increases as _x_ increases. When _x_ is very large then e− _kx_ becomes virtually 0 and so _y_ becomes equal to _A_. Figure 1.56 shows the graph. It shows a quantity _y_ which increases rapidly at first and then slows down to become eventually _A_.
Figure 1.56 y = A − A e−kx
For example, for a capacitor which starts with zero charge on its plates and is then charged we have the equation:
[42]
When _t_ = 0 then e0 = 1 and so _q_ = _Q_ 0 − _Q_ 0 = 0. As _t_ increases, so the value of e− _t_ /CR decreases and so _q_ becomes more and more equal to _Q_ 0.
Example
For an object cooling according to Newton's law, the temperature θ of the object varies with time _t_ according to the equation θ = θ0 e− _kt_ , where θ0 and _k_ are constants. (a) Explain why this equation represents a quantity which is decreasing with time. (b) What is the value of the temperature at _t_ = 0? (c) How will the rates at which the object cools change if in one instance _k_ = 0.01 and in another _k_ = 0.02 (the units of _k_ are per °C)?
(a) If we assume that _t_ and _k_ will only have positive values, then the − _kt_ means that the power is negative and so the temperature decreases with time.
(b) When _t_ = 0 then e− _kt_ = 1 and so θ = θ0. Thus θ0 is the initial value at the time _t_ = 0.
(c) Doubling the value of _k_ means that the object will cool faster, in fact it will cool twice as fast.
Example
The current _i_ in amperes in an electrical circuit varies with time _t_ according to the equation _i_ = 10(1 − e− _t_ /0.4). What will be (a) the initial value of the current when _t_ = 0, (b) the final value of the current at infinite time?
(a) When _t_ = 0 then e− _t_ /0.4 = e0 = 1. Thus _i_ = 10(1 − 1) = 0.
(b) When _t_ becomes very large then e− _t_ /0.4 becomes 0. Thus we have _i_ = 10(1 − 0) and so the current becomes 10 A.
Maths in action
Time constant
Consider the discharging of a charged capacitor through a resistance (Figure 1.57). The voltage _v C_ across the capacitor varies with time _t_ according to the equation _v C_ = _V_ e− _t_ / _RC_ , where _V_ is the initial potential difference across the capacitor at time _t_ = 0. Suppose we let τ = _RC_ , calling τ the _time constant_ for the circuit. Thus, _v C_ = _V_ e− _t_ /τ. The time taken for _v C_ to drop from _V_ to 0.5 _V_ is thus given by:
Thus in a time of 0.693τ the voltage will drop to half its initial voltage. The time taken to drop to 0.25 _V_ is given by:
Figure 1.57 Discharge of a charged capacitor
Thus in a time of 1.386τ the voltage will drop to one-quarter of its initial voltage. This is twice the time taken to drop to half the voltage. This is a characteristic of a decaying exponential graph: if _t_ is the time taken to reach half the steady-state value, then in 2 _t_ it will reach one-quarter, in 3 _t_ it will reach one-eighth, etc. In each of these time intervals it reduces its value by a half (Figure 1.58).
Figure 1.58 Voltage across the capacitor
Discharge of a capacitor
_Time_ | _V C_
---|---
0 | _V_
0.7 _T_ | 0.5 _V_
1.4 _T_ | 0.25 _V_
2.1 _T_ | 0.125 _V_
2.8 _T_ | 0.0625 _V_
3.5 _T_ | 0.03125 _V_
When _t_ = 1τ then _v_ C = _V_ e−1 = 0.632 _V_. Thus in a time equal to the time constant the voltage across the capacitor drops to 63.2% of the initial voltage. When _t_ = 2τ then _v_ C = _V_ e−2 = 0.135 _V_. Thus the voltage across the capacitor drops to 13.5% of the initial voltage. When _t_ = 3τ then _v_ C = _V_ e−3 = 0.050 _V_. Thus the voltage across the capacitor drops to 5.0% of the initial voltage.
Now consider the growth of the charge on an initially uncharged capacitor when a voltage is switched across it (Figure 1.59). The time constant τ is _RC_. Thus:
What time will be required for _v C_ to reach 0.5 _V_?
Thus in a time of 0.693τ the voltage will reach half its steady-state voltage. The time taken to reach 0.75 _V_ is given by:
Figure 1.59 Charging a capacitor
Thus in a time of 1.386τ the voltage will reach three-quarters of its steady-state value. This is twice the time taken to reach half the steady-state voltage. This is a characteristic of exponential graphs: if _t_ is the time taken to reach half the steady-state value, then in 2 _t_ it will reach three-quarters, in 3 _t_ it will reach seven-eighths, etc. In each successive time interval of 0.7τ the p.d. across the capacitor reduces its value by a half (Figure 1.60).
Figure 1.60 Voltage across the capacitor
Growth of the p.d. across _C_
_Time_ | _V C_
---|---
0 | 0
0.7 _T_ | 0.5 _V_
1.4 _T_ | 0.75 _V_
2.1 _T_ | 0.875 _V_
2.8 _T_ | 0.938 _V_
3.5 _T_ | 0.969 _V_
When _t_ = 1τ then _v C_ = _V_ (1 − e−1) = 0.632 _V_. Thus in a time equal to the time constant the voltage across the capacitor rises to 63.2% of the steady-state voltage. When _t_ = 2τ then _v C_ = _V_ (1 − e−2) = 0.865 _V_. Thus the voltage across the capacitor rises to 86.5% of the steady-state voltage. When _t_ = 3τ then _v C_ = _V_ (1 − e−3) = 0.950 _V_. Thus the voltage across the capacitor rises to 95.0% of the steady-state voltage.
#### Damped oscillations
In Section 1.5 we considered the vertical oscillations of a mass on the end of a spring (Figure 1.22) with Figure 1.24 showing how the vertical displacement of the mass can be described by a sinusoidal oscillation with an amplitude which decays with time. In the absence of damping the displacement is described by:
where the amplitude _A_ is a constant. With the damped oscillation we replace the constant _A_ by a term involving exponential decay, i.e.
[43]
with _C_ being a constant and ζ a damping term called the _damping factor_. At zero time the exponential term has the value 1 and so _C_ is the initial amplitude. As the time increases so the exponential term becomes smaller and smaller and the amplitude term thus decreases.
Key points
where _a_ and _b_ are bases.
### 1.6.1 Manipulating exponentials
The techniques used for the manipulation of exponentials are the same as those for manipulating powers. The following examples illustrate this.
Example
Simplify the following:
(a) e2 _t_ e4 _t_ ,
(b) (e2 _t_ )−3,
(c)
(d)
(e)
(a)
(b)
(c)
(d)
(e) Bringing the fraction to a common denominator:
Alternatively we could take the reciprocals of each term and write the equation as:
### Problems 1.6
1. The number _N_ of radioactive atoms in a sample of radioactive material decreases with time _t_ and is described by the equation _N_ = _N_ 0 e− _λt_ , where _N_ 0 and λ are constants. (a) Explain why this equation represents a quantity which is decreasing with time. (b) What will be the number of radioactive atoms at time _t_ = 0? (c) For a radioactive material that decreases only very slowly with time, will λ have a large or smaller value than with a radioactive material which decreases quickly with time?
2. The length _L_ of a rod of material increases from some initial length with the temperature θ above that at which the initial length is measured and is described by the equation _L_ = _L_ 0 e _αθ_ , where _L_ 0 and α are constants. (a) Explain why the equation represents a quantity which increases with temperature. (b) What will be the length of the rod when θ = 0? (c) What will be the effect of a material having a higher value of α than some other material?
3. For an electrical circuit involving inductance, the current in amperes is related to the time _t_ by the equation _i_ = 3(1 − e−10 _t_ ). What is the value of the current when (a) _t_ = 0, and (b) _t_ is very large?
4. What are the values of _y_ in the following equations when (i) _x_ = 0, (ii) _x_ is very large, i.e. infinite?
(a)
(b)
(c)
(d)
(e)
(f)
(g)
(h)
(i)
5. The voltage, in volts, across a capacitor is given by 20 e−0.1 _t_ , where _t_ is the time in seconds. Determine the voltage when _t_ is (a) 1 s, (b) 10 s.
6. The atmospheric pressure _p_ is related to the height _h_ above the ground at which it is measured by the equation _p_ = _p_ 0 e− _h/c_ , where _c_ is a constant and _p_ 0 the pressure at ground level where _h_ = 0. Determine the pressure at a height of 1000 m if _p_ 0 is 1.01 × 105 Pa and c = 70 000 (unit m).
7. The current _i_ , in amperes, in a circuit involving an inductor in series with a resistor when a voltage is _E_ is applied to the circuit at time _t_ = 0 is given by the equation
If _R_ / _L_ has the value 2 Ω/H (the ohm per henry actually being the same unit as s−1), what is the current when (a) _t_ = 0, (b) _t_ = 1 s?
8. The voltage _v_ across a resistor in series with an inductor when a voltage _E_ is applied to the circuit at time _t_ = 0 is given by the equation _v_ = _E_ (1 − e− _t_ / _T_ ). where _T_ is the so-called time constant of the circuit. If _T_ = 0.5 s, what is the voltage when (a) _t_ = 0, (b) _t_ = 1 s?
9. The charge _q_ on _a_ discharging capacitor is related to the time _t_ by the equation _q_ = Q0 e− _t_ / _CR_ , where _Q_ 0 is the charge at _t_ = 0, _R_ is the resistance in the circuit and C the capacitance. Determine the charge on a capacitor after a time of 0.2 s if initially the charge was 1 μC (1 μC = 10−6 C), _R_ is 1 MΩ and _C_ is 4 μF. Note that with the units in seconds (s), coulombs (C), ohms (Ω) and farads (F), the resulting charge will be in coulombs.
10. The current _i_ , in amperes, in a circuit with an inductor in series with a resistor is given by the equation _i_ = 4(1 − e−10 _t_ ), where the time _t_ is in seconds. Determine the current when (a) _t_ = 0, (b) _t_ = 0.05 s, (c) _t_ = 0.10 s, (d) _t_ = 0.15 s, (e) _t_ = infinity.
11. The voltage _v_ , in volts, across a capacitor after a time _t_ , in seconds, is given by the equation _v_ = 10 e− _t_ /3. Determine the value of the voltage _v_ after 2 s.
12. The resistance _R_ , in ohms, of an electrical conductor at a temperature of θ°C is given by the equation _R_ = _R_ 0 e _αθ_. Determine the resistance at a temperature of 1000°C if _R 0_ is 5000 Ω and α is 1.2 × 10−4 (unit per °C).
13. The current _i_ , in amperes, in an electrical circuit varies with time _t_ and is given by the equation _i_ = 2(1 − e−10 _t_ ). Determine the current after times of (a) 0.1 s, (b) 0.2 s, (c) 0.3 s.
14. The amount _N_ of a radioactive material decays with time _t_ and is given by the equation _N_ = _N_ 0 e−0.7 _t_ , where _t_ is in years. If at time _t_ = 0 the amount of radioactive material is 1 g, what will be the amount after five years?
15. The atmospheric pressure _p_ , in pascals, varies with the height _h_ , in kilometres, above sea level according to the equation _p_ = _p_ 0 e−0.15 _h_. If the pressure at sea level is 105 Pa, what will be the pressure at heights of (a) 1 km, (b) 2 km?
16. The voltage _v_ , in volts, across an inductor in an electrical circuit varies with time _t_ , in milliseconds, according to the equation _v_ = 200 e− _t_ /10. Determine the voltage after times of (a) 0.1 ms, (b) 0.5 ms.
17. When the voltage _E_ to a circuit consisting of an inductor in series with a resistor is switched off, the voltage across the inductor varies with time _t_ according to the equation _v_ = − _E_ e− _t_ / _T_ , where _T_ is the time constant of the circuit. If _T_ = 2 s, determine the voltage when (a) _t_ = 0, (b) _t_ = 1 s.
18. When a voltage _E_ is applied to a circuit consisting of a capacitor in series with a resistor at time _t_ = 0, the voltage _v_ across the capacitor varies with time according to the equation _v_ = _E_ (1 − e− _t_ / _T_ ), where _T_ is the time constant of the circuit. If _T_ = 0.1 s, determine the voltage when (a) _t_ = 0, (b) _t_ = 0.1 s.
19. The temperature θ, in °C, of a cooling object varies with time _t_ , in minutes, according to θ = 200 e−0.04 _t_. Determine the temperature when (a) _t_ = 0, (b) _t_ = 10 minutes, (c) _t_ is infinite.
20. Under one set of conditions the amplitude _A_ of the oscillations of a system varies with time _t_ according to the equation _A_ = _A_ 0 e _kt_. Under other conditions the amplitude varies according to the equation _y_ = _A_ 0 e− _kt_. If _k_ is a positive number, how do the oscillations differ?
21. Simplify the following:
(a) e3e5,
(b) e3te5t,
(c) e−5te3t,
(d) (e−4 _t_ )3,
(e) (1 + e2t)2,
(f)
(g)
(h)
(i)
## 1.7 Log functions
Consider the function _y_ = 2 _x_. If we are given a value of _x_ then we can determine the corresponding value of _y_. However, suppose we are given a value of _y_ and asked to find the value of _x_ that could have produced it. The inverse function is called the _logarithm function_ and is defined, for _y_ = _a x_ and _a_ > 0, as:
[44]
This is stated as 'log to base _a_ of _y_ equals _x_ '. Thus, if we take an input of _x_ to a function _f_ ( _x_ ) = _a x_ and then follow it by the inverse function _f_ −1( _x_ ) = loga( _x_ ), as in Figure 1.61, then because it is an inverse we obtain _x_ and so:
[45]
Figure 1.61 f(x)f−1(x)= x
Key points
The defining equation for logs is:
Most logarithms use base 10 or base e. Logarithms to base 10 are often just written as log or lg, the base 10 being then understood. Logarithms to base e are termed _natural logarithms_ and often just written as ln.
While logarithms can be to any base, most logarithms use base 10 or base e. Logarithms to base 10 are often just written as log or lg, the base 10 being then understood. Logarithms to base e are termed _natural logarithms_ and often just written as ln. Figure 1.62 shows the graph of _y_ = e _x_ and its inverse of the natural logarithm function.
Figure 1.62 The exponential and its inverse of the natural logarithm function
Since _a A+B_ = _a AaB_ then:
[46]
[47]
Since then:
[48]
Since _a_ 1 = _a_ then loga _a_ = 1.
Sometimes there is a need to change from one base to another, e.g. loga _x_ to log _b_ _x_. Let _u_ = log _b_ _x_ then _b u_= _x_ and so taking logarithms to base _a_ of both sides gives loga _b u_ = loga _x_ and so _u_ log _a_ _b_ = log _a_ _x_. Since _u_ = log _b_ _x_ then (log _b_ _x_ )(log _a_ _b_ ) = loga _x_ and so:
[49]
Example
Write in terms of lg _a_ , lg _b_ and lg _c_.
We have:
Hence:
Example
Simplify (a) lg _x_ \+ lg _x_ 3, (b) 3 ln _x_ \+ ln(1/ _x_ ).
(a) lg _x_ \+ lg _x_ 3 = lg( _x_ × _x_ 3) = lg _x_ 4
(b) 3 ln _x_ \+ ln(1/ _x_ ) = ln _x_ 3 \+ ln(1/ _x_ ) = ln( _x_ 3/ _x_ ) = ln _x_ 2
Example
Solve for _x_ the equation 22 _x_ −1 = 12.
Taking logarithms of both sides of the equation gives:
Hence:
Thus _x_ = 2.29.
#### The decibel
The power gain of a system is the ratio of the output power to the input power. If we have, say, three systems in series (Figure 1.63) then the power gain of each system is given by:
Figure 1.63 Systems in series
The overall power gain of the system is _P_ 4/ _P_ 1 and is the product of the individual gains, i.e.
[50]
Taking logarithms gives:
[51]
We thus can add the log ratio of the powers. This log of the power ratio was said to be the power ratio in units of the _bel_ , named in honour of Alexander Graham Bell:
[52]
Thus the overall power gain in bels can be determined by simply adding together the power gains in bels of each of the series systems. The bel is an inconveniently large quantity and thus the _decibel_ is used:
[53]
A power gain of 3 dB is thus a power ratio of 2.0.
#### Log graphs
When a graph is a straight line then the relationship between the two variables can be stated as being of the form _y_ = _mx_ \+ _c_ and we can easily determine the constants _m_ and c from the graph and hence obtain the relationship. However, if we have a relationship of the form _y_ = _ax b_, where _a_ and _b_ are constants, then a plot of _y_ against _x_ gives a non-linear graph from which it is not easy to determine _a_ and _b_. However, we can write the equation as:
[54]
A graph of lg _y_ against lg _x_ will thus be a straight line graph with a gradient of _b_ and an intercept of lg _a_. Likewise, if we have the relationship _y_ = _a_ e _bx_ then, taking logarithms to base e:
[55]
A graph of ln _y_ against _x_ will give a straight line graph with a gradient of _b_ and an intercept of ln _a_.
To avoid having to take the logarithms of quantities, it is possible to use special graph paper which effectively takes the logarithms for you. Figure 1.64 shows the form taken by log-linear and log-log graph paper. On a logarithmic scale, the distance between 1 and 10 is the same as between 10 and 100, each of these distances being termed a cycle.
Figure 1.64 (a) Log-linear and (b) log-log graph paper
Key point
Before actually plotting graphs, or creating spreadsheets to plot graphs, it is useful to first sketch the 'form' that the graph might be expected to have in order to get the 'feel' of what the actual plotted graph should look like.
Example
It is believed that the relationship between _y_ and _x_ for the following data is of the form _y_ = a e _bx_. Show that this is the case and determine, using log-linear graph paper, the values of _a_ and _b_.
Taking logarithms to base e gives ln _y_ = _bx_ \+ ln _a_. We thus require log-linear graph paper. The _y_ -axis, which is the ln axis, has to range from ln 5.53 = 1.7 to ln 8.24 = 2.1 and so just one cycle from 1 to 10 is required. Figure 1.65 shows the resulting graph. The graph is a straight line and so the relationship is valid. The gradient is
The intercept with the _y_ -axis, i.e. _x_ = 0 ordinate, is at 5.
Figure 1.65 Example
Thus the required equation is _y_ = 5 e0.10 _x_.
Example
The relationship between power _P_ (in watts), the e.m.f. _E_ (in volts) and the resistance _R_ (in ohms) is thought to be of the form _P_ = _E n/R_. In a test in which _R_ was kept constant, the following measurements were recorded:
Determine whether the above relationship is true (or approximately so) and determine the values for _n_ and _R_.
Taking ln of both sides of the equation gives:
So, if the relationship is true, a graph of ln _P_ against ln _E_ should be a straight line. The values of ln _P_ and ln _E_ are:
Figure 1.66 shows the plot. From the graph we obtain an intercept on the _y_ -axis of −2.3 and a gradient of about 2.
Figure 1.66 Example
We thus have −2.3 = − ln _R_ and so:
ln _R_ = 2.3
and _R_ = 9.9, or 10 when rounded up. With _n_ = 2 we thus have:
We can test that this is valid by choosing any two results from the test, e.g. _E_ = 5 V, _P_ = 2.5 W and substituting them into the equation. With _E_ = 5 V the equation gives _P_ = 5²/10 = 2.5 W and so the test confirms the equation.
Maths in action
Radioactive materials, e.g. uranium 235, decay and the mass of that isotope decreases with time. The rate of decay of the isotope is proportional to the mass of isotope present:
where λ is a constant called the decay constant. If _m_ 0 is the mass at time _t_ = 0 and mass _m_ the mass at time _t_ , then the following relationship can be derived from the above equation:
Taking ln gives:
A graph of ln _m_ plotted against _t_ will be a straight line graph of slope −λ and intercept +ln _m_ 0.
## Problems 1.7
1. Simplify (a) 2 lg _x_ \+ log _x_ 2, (b) ln 2 _x_ 3 − ln(4/ _x_ 2).
2. Write the following in terms of lg _a_ , lg _b_ and lg _c_ :
(a)
(b)
3. Solve for _x_ the equations: (a) 3 _x_ = 300, (b) 102−3 _x_ = 6000, (c) 72 _x_ +1 = 43− _x_.
4. The following data indicates how the voltage _v_ across a component in an electrical circuit varies with time _t_. It is considered that the relationship between _V_ and _t_ might be of the form _v_ = _V_ e− _bt_. Show that this is so and determine the values of _V_ and _b_.
5. A hot object cools with time. The following data shows how the temperature θ of the object varies with time _t_. The relationship between θ and _t_ is expected to be of the form θ = _a_ e− _bx_. Show that this is so and determine the values of _a_ and _b_.
6. The rate of flow _Q_ of water over a V-shaped notch weir was measured for different heights _h_ of the water above the point of the V and the following data obtained. The relationship between _Q_ and _h_ is thought to be of the form _Q_ = _ah b_. Show that this is so and determine the values of _a_ and _b_.
7. The amplitude _A_ of oscillation of a pendulum decreases with time _t_ and gives the following data. Show that the relationship is of the form _A_ = _a_ e _bt_ and determine the values of a and b.
8. The tension _T_ and _T_ 0 in the two sides of a belt driving a pulley and in contact with the pulley over an angle of θ is given by the equation _T_ = _T_ 0 e _μθ_. Determine the values of _T_ 0 and μ for the following data:
9. In an electrical circuit, the current _i_ in mA occurring when an 8.3 μF capacitor is being discharged varies with time _t_ in ms as shown in the following table:
If _I_ and _T_ are constants, with _I_ being the initial current in mA, show that the above results are connected by the equation _i_ = _I_ e− _t_ / _T_ and determine _I_ and _T_.
10. The pressure _P_ at a height _h_ above ground level is given by _P_ = _P_ 0 e− _h_ / _c_ , where _P_ 0 is the pressure at ground level and _c_ is a constant. When _P_ 0 is 1.013 × 105 Pa and the pressure at a height of 1570 m is 9.871 × 104 Pa, determine graphically the value of _c_.
## 1.8 Hyperbolic functions
Key points
When we want to describe the curve a rope hangs in we use, what is termed, an hyperbolic function. The sine, cosine and tangent are termed circular functions because their definition is associated with a circle. In a similar way, the sinh (pronounced sinch or shine), cosh (pronounced cosh) and tanh (pronounced than or tanch) are _hyperbolic functions_ associated with a hyperbola. Sinh is a contracted form of 'hyperbolic sine', cosh of 'hyperbolic cosine' and tanh of 'hyperbolic tangent'. Figure 1.67 shows the comparison of the circular and hyperbolic functions. The hyperbolic functions are defined as:
Figure 1.67 (a) Circular functions, (b) hyperbolic functions
[56]
[57]
[58]
Also we have sech _x_ = 1/cosh _x_ , cosech _x_ = 1/sinh _x_ and coth _x_ = 1/tanh _x_.
Example
Determine, using a calculator, the values of (a) cosh 3, (b) sinh 3.
Some calculators have hyperbolic functions so that they can be evaluated by the simple pressing of a key, with others you will have to evaluate the exponentials.
(a) Evaluating the exponentials:
(b) Evaluating the exponentials:
### 1.8.1 Graphs of hyperbolic functions
Since cosh _x_ is the average value of e _x_ and e− _x_ we can obtain a graph of cosh _x_ as a function of _x_ by plotting the e _x_ and e− _x_ graphs and taking the average value. Figure 1.68 illustrates this. Note that unlike cos _x_ , cosh _x_ is not a periodic function. At _x_ = 0, cosh _x_ = 1. The curve is symmetrical about the _y_ -axis, i.e. cosh (− _x_ ) = cosh _x_ and is termed an even function.
Figure 1.68 cosh x
To obtain the graph of sinh _x_ from those of e _x_ and e− _x_ , at a particular value of _x_ we subtract the second from the first and then take half the resulting value. Figure 1.69 illustrates this. Note that unlike sin _x_ , sinh _x_ is not a periodic function. When _x_ = 0, sinh _x_ = 0. The curve is symmetrical about the origin, i.e. sinh(− _x_ ) = −sinh _x_ , and is said to be an odd function.
Figure 1.69 sinh x
Figure 1.70 shows the graph of tanh _x_ , obtained by taking values of e _x_ and e− _x_ and calculating values of tanh _x_ for particular values of _x_. Unlike tan _x_ , tanh _x_ is not periodic. When _x_ = 0, tanh _x_ = 0. All the values of tanh _x_ lie between −1 and +1. As _x_ tends to infinity, tanh _x_ tends to 1. As _x_ tends to minus infinity, tanh _x_ tends to −1. The curve is symmetrical about the origin, i.e. tanh(− _x_ ) = −tanh _x_ , and is said to be an odd function.
Figure 1.70 y = tanh x
Maths in action
Hyperbolic functions and suspended cables
Often it is necessary for engineers to analyse frameworks in order to test for their integrity, i.e. safety and ability to function as designed under a range of conditions. The design engineer needs to formulate a mathematical 'model' which will accurately represent the real system when built. Such problems often involve hyperbolic functions as the following example shows.
Consider a uniform cable which is suspended from two fixing points A and B and which hangs under its own weight (Figure 1.71). Point A is higher than point B and the cable has a uniform weight μ per unit length.
Figure 1.71 Sagging cable
By drawing free-body diagrams for the forces involved on an element of the cable and considering its equilibrium we can arrive at a differential equation (see chapter 4 for a discussion of such equations), which when solved leads to the equation for the gradient a distance _x_ from P:
_T_ 0 is the horizontal component of the tension in the cable at P. This equation can then give (by integration, see chapter 4) the height _y_ above P of the cable at distance _x_ as:
where _k_ is a constant. Since _x_ = 0 when _y_ = 0, we can put these values in the equation and obtain _k_ = − _T_ 0/μ. Thus:
This is the equation of the curve of the cable, known as a catenary. For a full analysis of the system, see the companion book in this series: _Mechanical Engineering Systems_ by R. Gentle, P. Edwards and W. Bolton
## Problems 1.8
1. Determine, using a calculator, the values of (a) sinh 2, (b) cosh 5, (c) tanh 2, (d) sinh(−2), (e) cosech 1.4, (f) sech 0.8.
2. A flexible cable suspended between two horizontal points hangs in the form of a catenary (Figure 1.72), the equation of the curve being given by _y_ = c[cosh( _x_ /c) − 1], where _y_ is the sag of the cable, _x_ the horizontal distance from the midpoint to one end of the cable and c is a constant. Determine the sag of a cable when _c_ = 20 and 2 _x_ = 16 m.
Figure 1.72 Problem 2
3. The speed _v_ of a surface wave on a liquid is given by:
where _g_ is the acceleration due to gravity, λ the wavelength of the waves, γ the surface tension, ρ the density and _h_ the depth of the water. What will the speed approximately be for (a) shallow water waves when _h_ /λ tends to zero, (b) deep water waves when _h_ /λ tends to infinity?
2
# Vectors, phasors and complex numbers
Summary
Vectors are means by which engineers describe quantities which need both a direction and a magnitude specified if their effects are to be ascertained. Vectors play a strong part in the formulation and analysis of mechanical systems, both static and dynamic. Phasors are a means by which sinusoidal alternating voltages and currents can be specified in terms of a rotating radius and an angle, they behaving like vectors. This chapter looks at how we can work with such quantities, considering both vector algebra and complex numbers.
Objectives
By the end of this chapter, the reader should be able to:
• add and subtract vector quantities;
• use vector components to add and subtract vectors;
• use phasors to describe sinusoidal alternating voltages and currents;
• represent phasors by polar notation and be able to work with quantities expressed in this way;
• represent phasors by complex numbers and work with quantities expressed in this way.
## 2.1 Vectors
Key points
A scalar quantity is defined by purely its magnitude; a vector quantity has to have both its magnitude and direction defined.
If we talk of the mass of this book then we quote just a number, this being all that is needed to give a specification of its mass. However, if we quote a force then in order to fully describe the force we need to specify both its size and the direction in which it acts. Quantities which are fully specified by a statement of purely size are termed _scalars_. Quantities for which we need to specify both size and direction in order to give a full specification are termed _vectors_. Examples of scalar quantities are mass, distance, speed, work and energy. Examples of vector quantities are displacement, velocity, acceleration and force.
To specify a vector we need to specify its magnitude and direction. Thus, we can represent it by a line segment AB (Figure 2.1) with a length which represents the magnitude of the vector and a direction, indicated by the arrow on the segment, which represents the direction of the vector. We can denote this vector representation as
the arrow indicating the direction of the line segment being from A to B. Note that:
One of the vectors is directed from A to B while the other is directed from B to A. An alternative notation is often used, lower case bold notation **a** being used in print, or underlining _a_ in writing. With this notation, if we write **a** or _a_ from the vector from A to B then the vector from B to A is represented as − **a** or − _a_ , the minus sign being used to indicate the vector is in the opposite direction.
Figure 2.1 Representing a vector
The length of the line segment represents the _magnitude_ of the vector. This is indicated by the notation:
#### Unit vector
A vector which is defined as having a magnitude of 1 is termed a _unit vector_ , such a vector often being denoted by the symbol â.
#### Like vectors
Two vectors are equal if they have the same magnitude and direction. Thus the vectors in Figure 2.2 are equal, even if their locations differ. A vector is only defined in terms of its magnitude and direction, its location is not used in its specification. Thus, for Figure 2.2, we can write:
Figure 2.2 Equal vectors
#### Multiplication of vectors by a number
If a vector is multiplied by a positive real number _k_ then the result is another vector with the same direction but with a magnitude that is _k_ times the original magnitude. This is multiplication of a vector by a scalar.
[1]
We can consider a vector a with magnitude | **a** | as being a unit vector, i.e. a vector with a magnitude 1, multiplied by the magnitude | **a** | (note that the magnitude | **a** | is a scalar), i.e.
[2]
Key points
The _triangle rule_ can be stated as: to add two vectors **a** and **b** we place the tail of the line segment representing one vector at the head of the line segment representing the other and the line that forms the third side of the triangle represents the vector sum of **a** and **b**.
The _parallelogram_ rule can be stated as: to add two vectors **a** and **b** we place the tails of the line segments representing the vectors together and then draw lines parallel to them to complete a parallelogram, the diagonal of the parallelogram drawn from the initial junction of the two tails represents the vector sum of a and b
Maths in action
In the vector analysis of a mechanical system, we can write Newton's 2nd Law of Motion as a vector equation:
where **F** is the resultant force acting on a system and a is the resulting acceleration. The equation is a vector equation since the direction of the acceleration must be in the same direction as the force; both the force and the acceleration are vector quantities. Newton's 1st Law contains the principle of equilibrium of forces and is used in the following section concerned with the addition and subtraction of vectors; we have to bother about both magnitude and direction to consider equilibrium. Newton's 3rd Law is basic to our understanding of force, stating that forces always occur in pairs with equal in magnitude but opposite in direction forces.
### 2.1.1 Adding and subtracting vectors
Consider the following situation involving displacement vectors. An aeroplane flies 100 km due west, then 60 km in a north-westerly direction. What is the resultant displacement of the aeroplane from its start point? If the initial displacement vector is **a** and the second displacement vector is **b** , then what is required is the vector sum **a** \+ **b**.
One way we can determine the sum of two vectors involves the _triangle rule_ and is shown in Figure 2.3(a). Note that **a** and **b** have directions that go in one sense round the triangle and the sum **a** \+ **b** has a direction in the opposite sense. An alternative way of determining the sum involves the _parallelogram rule_ and is shown in Figure 2.3(b).
Figure 2.3 (a) Triangle rule, (b) parallelogram rules
Subtraction of vector **b** from **a** is carried out by adding − **b** to **a** :
[3]
The addition of **a** and − **b** is carried out using the triangle (Figure 2.4(b)) or parallelogram rules (Figure 2.4(c)). Note that, whatever rule we use, the vector **a** − **b** can be represented by the vector from the end point of **b** to the end point of **a** (Figure 2.4(d)), the vector from the end point of **a** to the end point of **b** being **b** − **a** (Figure 2.4(e)).
Figure 2.4 ( _a_ ) The vectors, ( _b_ ) subtraction by the triangle rule, ( _c_ ) subtraction by the parallelogram rule ( _d_ ) **a** − **b** , ( _e_ ) **b** − **a**
The triangle rule for the addition of vectors can be extended to the addition of any number of vectors. If the vectors are represented in magnitude and direction by the sides of a _polygon_ then their sum is represented in magnitude and direction by the line segment used to close the polygon (Figure 2.5). Essentially what we are doing is determining the sum of vector 1 and vector 2 using the triangle, then adding to this sum vector 3 by a further triangle and repeating this for all the vectors.
Figure 2.5 Polygon of vectors
If we have a number of vectors and the vectors give a closed triangle or polygon, then, since the line segment needed to close the figure has zero length, the sum of the vectors must be a vector with no magnitude. This is a statement of equilibrium.
Example
An object is acted on by two forces, one of which has a size of 10 N and acts horizontally and the other a size of 20 N which acts vertically. Determine the resultant force.
Figure 2.6 shows the vectors and the use of the parallelogram rule to determine the sum. We can calculate, using the Pythagoras theorem, the diagonal as having a size of √(20² \+ 10²) = 22.4 N. It is at an angle θ to the horizontal force, with θ = tan−1 (20/10) = 63.4°.
Figure 2.6 Example
Example
Determine the resultant velocity if we have velocities of 10 m/s acting horizontally to the right and −10 m/s acting vertically upwards.
This problem requires the addition of two vectors, Figure 2.7 showing the vectors and the use of the parallelogram rule to determine the sum. A −10 m/s vector upwards is the same as a +10 m/s vector downwards. Hence, the magnitude of the sum, i.e. the diagonal of the parallelogram, is given by the Pythagoras theorem as √(10² + 10²) = 14.1 m/s and it is at an angle below the horizontal of θ where θ = tan⁻¹ (10/10) = 45°.
Figure 2.7 Example
Example
For the triangle ABC (Figure 2.8) if **a** is the vector from A to B and **b** the vector from B to C, express the vector from C to A in terms of **a** and **b**.
Figure 2.8 Example
Using the triangle rule:
Since , then we have: .
### 2.1.2 Components
In mechanics a common technique to aid in the solution of problems is to replace a single vector by two components which are at right angles to each other, generally in the horizontal and the vertical directions. Then we can sum all the horizontal components, sum all the vertical components, and are then left with the simple problem of determining the resultant of two vectors at right angles to each other.
For the vector **a** in Figure 2.9 we have **h** and **v** as the horizontal and vertical components. Thus for the magnitudes we must have:
Figure 2.9 Resolution of a vector into two components
[4]
[5]
Example
Express a force of 10 N at 40° to the horizontal in terms of horizontal and vertical components.
Horizontal component = 10 cos 40° = 7.7 N
Vertical component = 10 sin 40° = 6.4 N
Example
Determine the resultant force acting on the bracket shown in Figure 2.10 due to the three forces indicated.
Figure 2.10 Example
For the 3 kN force we have:
horizontal component = 3.0 cos 60° = 1.5 kN
vertical component = 3.0 sin 60° = 2.6 kN
For the 2.0 kN force we have:
horizontal component = 2.0 cos 30° = 1.7 kN
vertical component = 2.0 sin 30° = 1.0 kN
For the 5.0 kN force we have:
horizontal component = 5.0 cos 70° = 1.7 kN
vertical component = −5.0 sin 70° = −4.7 kN
The minus sign is because this force is acting downwards and in the opposite direction to the other vertical components which we have taken as being positive. All the horizontal components are in the same direction. Thus:
sum of horizontal components = 1.5 + 1.7 + 1.7 = 4.9 kN
sum of vertical components = 2.6 + 1.0 − 4.7 = −1.1 kN
Figure 2.11 shows how we can use the parallelogram rule to find the resultant with these two components. Since the two components are at right angles to each other, the resultant can be calculated using the Pythagoras theorem. Thus, the magnitude of the resultant is:
Figure 2.11 Example
The resultant is at an angle θ downwards from the horizontal given by:
Thus θ = 12.7°.
Key points
The term _position vector_ is used for a vector that emanates from or is directed towards a particular point. Vectors for which the location is not significant are termed _free vectors._
A unit vector may be formed by dividing a vector by its magnitude.
#### Components in terms of unit vectors
A useful way of tackling problems involving summing vectors by considering their components is to write them in terms of unit vectors. Consider the _x_ - _y_ plane shown in Figure 2.12. Point P has the coordinates ( _x_ , _y_ ) and is joined to the origin O by the line OP. This line from O to P can be considered to be a vector **r** anchored at O and specifying a position, being defined by its two components **a** and **b** along the _x_ and _y_ directions with:
If we define **i** to be a unit vector along the _x_ -axis then **a** = _a_ **i** , where _a_ is the magnitude of the **a** vector. If we define **j** to be a unit vector along the y-axis then **b** = _b_ **j** , where _b_ is the magnitude of the **b** vector. Thus:
But _a_ is the _x_ -coordinate of P and _b_ the _y_ -coordinate of P. Thus we can write:
[6]
For example, we might specify a position vector as 3 **i** \+ 2 **j**. This would mean a position vector from the origin to a point with the coordinates (3, 2).
Figure 2.12 Components
The magnitude of the vector r is given by the Pythagoras theorem as:
[7]
If α and β are the angles the vector **r** makes with the _x_ \- and _y_ -axes, then:
[8]
These are known as the _direction cosines_ of **r**.
Example
If **r = 4i + 7j** determine | **r** | and the angle **r** makes with the _x_ -axis.
The angle with the _x_ -axis is given by:
Thus the angle is 60.4°.
Example
Figure 2.13 shows three forces _F_ 1, _F_ 2 and _F_ 3 all acting at a single point A on a wall bracket. In order to calculate the pulling force on the bracket at the wall, so that it can be safely connected to the wall when under load, determine the size of the force components of _F_ 1, _F_ 2 and _F_ 3 in the _x_ and _y_ directions.
Figure 2.13 Example
The components of _F_ 1 in the _x_ and _y_ directions are:
The components of _F_ 2 in the _x_ and _y_ directions are (the vector forms the hypotenuse of a 3-4-5 triangle):
The components of F3 in the _x_ and _y_ directions are, with α = tan−1 (0.2/0.4) = 26.6°:
Alternatively, the size of the components of _F_ 3 may be obtained by writing _F_ 3 as a magnitude times a unit vector **r** AB in the direction of A to B. The position vector is 0.2 **i** −0.4 **j** and its magnitude is √(0.2² + 0.4²) = 0.447. Thus the unit vector is (0.2 **i** − 0.4 **j** )/0.447 and so:
Example
In a structural test, a 500 N force was applied to a vertical pole, as shown in Figure 2.14. (a) Write the 500 N force in terms of the unit vectors **i** and **j** and identify its _x_ and _y_ components. (b) Determine the components of the 500 N force along the _x_ 1 and _y_ 1 directions, (c) Determine the components of the 500 N force along the _x_ and _y_ 1 directions.
Figure 2.14 Example
(a) **F** = (500 cos 60°) **i** − (500 sin 60°) **j** = 250 **i** − 433 **j**
Thus the vector components are **F** _x_ = 250 **i** N and **F** _y_ = −433 **j** N.
(b) Axis _y_ 1 is at 90° to _x_ 1 and so, since the 500 N is in the _x_ 1 direction we have the component in the _x_ 1 direction as 500 N and in the _y_ 1 direction as 0.
(c) Here the required directions are not at right angles to each other and so we determine them by using the parallelogram rule. Figure 2.15 shows the parallelogram. If we use the sine rule:
Hence the size of the _x_ component is 1000 N.
Figure 2.15 Example
Hence the size of the _y_ 1 component is 866 N. The two components are thus 1000 N and −866 N.
Key point
The _sine rule_ : For a triangle, the length of a side _a_ divided by the sine of the opposite angle _A_ equals the length of side _b_ divided by the sine of its opposite angle _B_.
#### Addition and subtraction of vectors
Consider the addition of the two position vectors and shown in Figure 2.16, P having the coordinates ( _x_ 1, _y_ 1) and Q the coordinates ( _x_ 2, _y_ 2). Thus:
We can obtain the sum by the use of the parallelogram rule as . R has the coordinates ( _x_ 1 \+ _x_ 2, _y_ 1 + _y_ 2). Thus:
[9]
Key point
Adding or subtracting position vectors is achieved by adding or subtracting their respective co-ordinates.
Figure 2.16 Adding position vectors
Example
If **a** = 2 **i** \+ 4 **j** and **b** = 3 **i** \+ 5 **j** , determine (a) **a** \+ **b** , (b) **a** − **b** , (c) **a** \+ 2 **b**.
(a) **a** \+ **b** = (2 + 3) **i** \+ (4 + 5) **j** = 5 **i** \+ 8 **j**
(b) **a** − **b** = (2 − 3) **i** \+ (4 − 5) **j** = −1 **i** \+ −1 **j**
(c) **a** \+ 2 **b** = (2 + 6) **i** \+ (4 + 10) **j** = 8 **i** \+ 14 **j**
Example
ABCD is a square. If forces of magnitudes 1 N, 2 N and 3 N act parallel to AB, BC and CD respectively, in the directions indicated by the order of the letters, determine the magnitude and direction of the resultant force.
Figure 2.17 shows the directions of the forces. Expressing the forces in terms of unit vector components then the force parallel to AB is 1 **i** , parallel to BC is 2 **j** and that parallel to CD is −3 **i**. Thus the resultant is 1 **i** \+ 2 **j** −3 **i** = −2 **i** \+ 2 **j** N. This will have a magnitude √[(−2)² + 2²] = 2.8 N at an angle of tan⁻¹ (2/−2) = 135° to AB.
Figure 2.17 Example
Example
Forces of 5 **i** − 5 **j** N and −1 **i** \+ 3 **j** N act on a particle of mass 2 kg. Determine the resulting acceleration.
The resultant force is 5 **i** − 5 **j** − 1 **i** \+ 3 **j** = 4 **i** − 2 **j** N. Thus:
Hence **a** = 2 **i** − 1 **j** m/s² and so the acceleration has a magnitude of √[2² + (−1)²] = 2.2 m/s² and is at an angle of tan⁻¹ (−1/2) = −26.6° to the **i** direction.
#### Vectors in space
Here we extend the consideration of components to three dimensions (Figure 2.18). A vector r from O to P, with coordinates ( _x_ , _y_ , _z_ ), is then defined by its vector components in the three mutually perpendicular directions _x_ , _y_ and _z_. If **i** , **j** and **k** are the unit vectors in the directions _x_ , _y_ and _z_ , then:
Figure 2.18 Vector in space
[10]
The magnitude of **r** is given by:
[11]
The direction of a vector in three dimensions is determined by the angles it makes with the three axes, _x_ , _y_ and _z_ , i.e. the angles α, β and γ. With ( _x_ , _y_ , _z_ ) the coordinates of the position vector:
[12]
These are termed the _direction cosines_. As with the two-dimensional case, the basic rule for position vectors is: _adding or subtracting position vectors is achieved by adding or subtracting their respective coordinates_.
Example
Determine the magnitude and the direction cosines of the vector r = 2 **i** \+ 3 **j** \+ 6 **k**.
The direction cosines are:
Example
If **a** = 2 **i** \+ 3 **j** \+ 4 **k** and **b** = 3 **i** − 2 **j** \+ 1 **k** , determine (a) **a** \+ **b** , (b) **a** − **b** , (c) **a** \+ 2 **b**.
(a) **a** \+ **b** = (2 + 3) **i** \+ (3 − 2) **j** \+ (4 + 1) **k** = 5 **i** \+ 1 **j** \+ 5 **k**
(b) **a** − **b** = (2 − 3) **i** \+ (3 + 2) **j** \+ (4 − 1) **k** = −1 **i** \+ 5 **j** \+ 3 **k**
(c) **a** \+ 2 **b** = (2 + 6) **i** \+ (3 − 4) **j** \+ (4 + 2) **k** = 8 **i** − 1 **j** \+ 6 **k**
### Problems 2.1
1. If vector **a** is a velocity of 3 m/s in a north-westerly direction and **b** a velocity of 5 m/s in a westerly direction, determine: (a) **a** \+ **b** , (b) **a** − **b** , (c) **a** − 2 **b**.
2. If vector **a** is a displacement of 5 m in a northerly direction and **b** a displacement of 12 m in an easterly direction, determine: (a) **a** \+ **b** , (b) **a** − **b** , (c) **b** − a, (d) **a** \+ 2 **b**.
3. ABCD is a quadrilateral. Determine the single vector which is equivalent to:
(a)
(b)
(c)
4. If O, A, B, C and D are five points on a plane and represents the vector **a** , the vector **b** , the vector **a** \+ 2 **b** , and the vector 2 **a** − **b** , express (a) (b) (c) and (d) in terms of **a** and **b**.
5. ABCD is a square. A force of 6 N acts along AB, 5 N along BC, 7 N along DB and 9 N along CA. Determine the resultant force.
6. Determine the vector sums of:
(a)
(b)
(c)
(d)
7. A point is acted on by two forces, a force of 6 N acting horizontally and a force of 4 N at 20° to the horizontal. Determine the resultant components of the forces in the vertical and horizontal directions.
8. For the following vectors determine their magnitudes and angles to the _x_ -axis: (a) **r** = 2 **i** \+ 3 **j** , (b) **r** = 5 **i** \+ 2 **j** , (c) **r** = 3 **i** \+ 3 **j**.
9. If **a** = −2 **i** \+ 3 **j** and **b** = 6 **i** \+ 3 **j** , determine: (a) **a** \+ **b** , (b) **a** − **b** , (c) **a** \+ 2 **b**.
10. If **a** = 5 **i** \+ 2 **j** and **b** = 2 **i** \+ 3 **j** , determine: (a) **a** \+ **b** , (b) **a** − **b** , and (c) **a** − 2 **b**.
11. If **a** = 6 **i** \+ 3 **j** , **b** = −2 **i** \+ 3 **j** and **c** = 5 **i** − 4 **j** , determine: (a) **a** \+ **b** \+ **c** , (b) **a** − **b** − **c** , (c) **a** \+ 2 **b** − 3 **c**.
12. Determine the magnitude and direction cosines of: (a) **a** = 3 **i** \+ 7 **j** − 4 **k** , (b) **a** = 2 **i** \+ 3 **j** \+ 5 **k** , (c) **a** = −3 **i** \+ 5 **j** \+ 2 **k**.
13. The position vectors of points P and Q are 2 **i** \+ 3 **j** − 5 **k** and 4 **i** − 2 **j** \+ 2 **k** respectively. Determine the length and direction cosines of the vector joining P and Q.
14. For a robot arm involving rigid links connected by flexible joints (Figure 2.19), the link vectors can be represented by **a** = 10 **i** \+ 12 **j** \+ 1 **k** , **b** = 5 **i** − 2 **j** \+ 8 **k** and **c** = 2 **i** \+ 1 **j** − 4 **k**. Determine the position vector of the tip of the robot from O and the length of each link.
Figure 2.19 Problem 14
15. Determine the angle made by the vector **v** = −5 **i** \+ 12 **j** with the positive sense of the _x_ -axis.
16. A force is specified by the vector **F** = 60 **i** − 60 **j** \+ 30 **k**. Calculate the angles made by **F** with the _x_ , _y_ and _z_ axes.
## 2.2 Phasors
A convenient way of specifying a phasor is, what is termed, by _polar notation_. Thus a phasor of length _V_ and phase angle φ can be represented by _V∠φ_. Although the length of a phasor when described in the way shown in Figure 1.25 represents the maximum value of the quantity, it is more usual to specify the length as representing the root-mean-square value. The root-mean-square value is the maximum value divided by √2 and so is just a scaled version of the one drawn using the maximum value. This is because in electrical circuit work we are more usually concerned with the root-mean-square current or voltage than the maximum values.
Key point
Polar notation is when quantities such as phasors are described by their size and an angle in the form _V∠ϕ_.
Key point
Note that there is a difference between a phasor diagram and a vector diagram. A phasor diagram represents the phasors at one instant of time, a vector diagram represents the vectors without regard to time. Otherwise the mathematics of handling vectors is applicable to phasors.
When we are working in the time domain, i.e. the current or voltage is described by a function as time as in _v_ = _V_ sin _ωt_ , and want to find, say, the sum of two voltages at some instant of time we just add the voltages. Thus, if we have a voltage across one component described by _v_ 1 = _V_ 1 sin ( _ωt_ \+ φ1) and across a series component by _v_ 2 = _V_ 2 sin ( _ωt_ \+ ϕ2), then the sum of the two voltages is:
This equation describes how the voltage sum varies with time.
When we are working with phasors and want to find the phasor representing the sum of two phasors we have to add the phasors in the same way that vector quantities are added. Thus if we have a voltage across one component described by _V_ 1 _∠φ_ 1 and across a series component by _V_ 2 _∠ϕ_ 2, then the phasor representing the sum of the two voltages is that indicated in Figure 2.20. While we can draw such diagrams for simple situations and obtain the resultant phasor graphically, a more useful technique is to describe a phasor by a complex number and use the techniques for manipulating complex numbers. In the next section we discuss complex numbers and consider their application to electrical circuit analysis in terms of phasors.
Figure 2.20 Adding phasors
## 2.3 Complex numbers
If we square the real number +2 we obtain +4, if we square the real number −2 we obtain +4. Thus the square root of +4 is ±2. But what is the square root of −4? To give an answer we need another form of number. If we invent a number j = √−1 (mathematicians often use i rather than j but engineers and scientists generally use j to avoid confusion with i used for current in electrical circuits), then we can write √−4 = √−1 × √4 = ±j2. Thus the solution of the equation _x_ ² + 4 = 0 is _x_ = ±j2.
Key points
Numbers which are multiples of j, where j = √−1, are termed _imaginary_.
The term _complex number_ is used for a combination by addition or subtraction of a real number and a purely imaginary number.
Key point
j = √−1 and thus j² = −1. Since j³ can be written as j² × j then j³ = −j. Since j⁴ can be written as j² × j² then j⁴ = +1.
The solution of a quadratic equation of the form _ax_ 2 \+ _bx_ \+ _c_ = 0 is given by the formula:
Thus if we want to solve the quadratic equation _x_ 2 − 4 _x_ \+ 13 = 0 then:
We can represent √−9 as √−1 × √+9 = j3. Thus the solution can be written as 2 ± j3, a combination of a real and either plus or minus an imaginary number. Such a pair of roots is known as a _conjugate pair_ (see later in this section).
The term complex number is used for the sum of a real number and an imaginary number. Thus a complex number _z_ can be written as _z_ = _a_ \+ **j** _b_ , where _a_ is the real part of the complex number and _b_ the imaginary part.
Example
Solve the equation _x_ 2 − 4 _x_ \+ 5 = 0.
#### The Argand diagram
The effect of multiplying a real number by (−1) is to move the point from one side of the origin to the other. Figure 2.21 illustrates this for (+2) being multiplied by (−1). We can think of the positive number line radiating out from the origin being rotated through 180° to its new position after being multiplied by (−1). But (−1) = j². Thus, multiplication by j² is equivalent to a 180° rotation. Multiplication by j⁴ is a multiplication by (+1) and so is equivalent to a rotation through 360°. On this basis it seems reasonable to take a multiplication by j to be equivalent to a rotation through 90° and a multiplication by j³ a rotation through 270°. This concept of multiplication by j as involving a rotation is the basis of the use of complex numbers to represent phasors in alternating current circuits.
Figure 2.21
The above discussion leads to a diagram, called the _Argand diagram_ , which we use to represent complex numbers. Since rotation by 90° from the _x_ -axis on a graph gives the _y_ -axis, the _y_ -axis is used for imaginary numbers and the _x_ -axis for real numbers (Figure 2.22). Figure 2.22 shows how we represent the complex numbers 3 + j2 and −2 − j3 on such a diagram. The line joining the number to the origin is taken as the graphical representation of the complex number.
Figure 2.22 Argand diagram
#### Modulus and argument
If the complex number _z_ = _a_ \+ j _b_ is represented on an Argand diagram by the line OP, as in Figure 2.23, then the length _r_ of the line OP is called the _modulus_ of the complex number and its inclination θ to the real number axis is termed the _argument_ of the complex number. The length of the line is denoted by | _z_ | or modulus _z_ and the argument by θ or arg _z_.
Figure 2.23 Modulus and argument
Key point
We can specify a complex number on an Argand diagram in terms of its Cartesian coordinates as _z_ = _a_ \+ j _b_ , or its polar coordinates _z_ = _r_ ∠θ.
Using Pythagoras' theorem:
[13]
and, since tan θ = _b_ / _a_ :
[14]
Since _a_ = _r_ cos θ and _b_ = _r_ sin θ, we can write a complex number _z_ as:
[15]
Thus we can specify a complex number by either stating its location on an Argand diagram in terms of its _Cartesian coordinates a_ and _b_ or by specifying the modulus, | _z_ | = _r_ , and the argument θ. These are termed its _polar coordinates_. The specification in polar coordinates can be written as:
[16]
Example
Determine the modulus and argument of the complex number 2 + j2.
In polar form the complex number could be written as 2.8 ∠45°.
Example
Write the complex number −2 + j2 in polar form.
If we sketch an Argand diagram (Figure 2.24) for this complex number we can see that the number is in the second quadrant. The argument is thus −45° + 180° = 135°. In polar form the complex number could be written as 2.8 ∠135°.
Figure 2.24 Example
Example
Write the complex number 10 ∠60° in Cartesian form.
### 2.3.1 Manipulation of complex numbers
Addition, subtraction, multiplication and division can be carried out on complex numbers in either the Cartesian form or the polar form. Addition and subtraction is easiest when they are in the Cartesian form and multiplication and division easiest when they are in the polar form.
For two complex numbers to be equal, their real parts must be equal and their imaginary parts equal. On an Argand diagram the two numbers then describe the same line. Thus 2 + j3 is _not_ equal to 3 + j2 as Figure 2.25 shows.
Figure 2.25 2 + j3 and 3 + j2
#### Addition and subtraction
To add complex numbers we add the real parts and add the imaginary parts:
[17]
Key point
To add complex numbers, add the real parts and add the imaginary parts. To subtract complex numbers, subtract the real parts and subtract the imaginary parts.
On an Argand diagram, this method of adding two complex numbers is the same as the vector addition of two vectors using the parallelogram of vectors, the line representing each complex number being treated as a vector (Figure 2.26).
Figure 2.26 Addition of complex numbers
To subtract complex numbers we subtract the real parts and subtract the imaginary parts:
[18]
On an Argand diagram, this method of subtracting two complex numbers is the same as the vector subtraction of two vectors. To subtract a vector quantity you reverse its direction and then add it using the parallelogram of vectors (Figure 2.27).
Figure 2.27 Subtraction of complex numbers
Example
With **_z_** 1 = 4 + j2 and **_z_** 2 = 3 + j5, determine (a) **_z_** 1 \+ **_z_** 2, (b) **_z_** 1 − **_z_** 2.
(a) **_z_** 1 \+ **_z_** 2 = (4 + 3) + j(2 + 5) = 7 + j7
(b) **_z_** 1 − **_z_** 2 = (4 − 3) + j(2 − 5) = 1 − j3
#### Multiplication
Consider the multiplication of the two complex numbers in Cartesian form, _z_ 1 = _a_ \+ j _b_ and _z_ 2 = _c_ \+ j _d_. The product _z_ is given by:
[19]
Now consider the multiplication of the two complex numbers in polar form, _z_ 1 = | _z_ 1|∠θ1 and _z_ 2 = | _z_ 2|∠θ2. Using equation [5] we can write:
Thus the product _z_ is given by:
Using the equations for cos ( _A_ \+ _B_ ) and sin ( _A_ \+ _B_ ), [28] and [29] from chapter 1:
[20]
Hence we can write for the complex numbers in polar form
[21]
Example
Multiply the two complex numbers 2 − j3 and 4 + j1.
Key point
The magnitude of the product of two complex numbers in polar form is the product of the magnitudes of the two numbers and its argument is the sum of the arguments of the two numbers.
Example
Multiply the two complex numbers 3∠40° and 2∠70°.
#### Complex conjugate
If _z_ = _a_ \+ j _b_ then the term _complex conjugate_ is used for the complex number given by _z_ * = _a_ − j _b_. The imaginary part of the complex number changes sign to give the conjugate, conjugates being denoted as z*. Figure 2.28 shows an Argand diagram with a complex number and its conjugate. The complex conjugate is the mirror image of the original complex number.
Figure 2.28 A complex number and its conjugate
Consider now the product of a complex number and its conjugate:
[22]
The product of a complex number and its conjugate is a real number.
Example
What is the conjugate of the complex number 2 + j4?
The complex conjugate is 2 − j4.
#### Division
Consider the division of _z_ 1 = _a_ \+ j _b_ by _z_ 2 = _c_ \+ j _d_ , i.e.
To divide one complex number by another we have to convert the denominator into a real number. This can be done by multiplying it by its conjugate. Thus:
[23]
Now consider the division of the two complex numbers when in polar form, _z_ 1 = | _z_ 1|∠θ1, and _z_ 2 = | _z_ 2|∠θ2:
Making the denominator into a real number by multiplying it by its conjugate:
But cos² θ₂ + sin² θ₂ = 1 (chapter 3, equation [32]) and so:
Using the equations for cos ( _A_ − _B_ ) and sin ( _A_ − _B_ ), [31] and [29] from chapter 1:
[24]
We can express this as:
[25]
Key point
To divide two complex numbers in polar form, we divide their magnitudes and subtract their arguments.
Example
Divide 1 + j2 by 1 + j1.
Example
Divide 4∠40° by 2∠30°.
### 2.3.2 Representing phasors by complex numbers
A complex number _z_ = _a_ \+ j _b_ can be represented on an Argand diagram by a line (Figure 2.29) of length | _z_ | at an angle θ. Thus we can describe a phasor used to represent, say, a sinusoidal voltage, by a complex number in this Cartesian form as:
Figure 2.29 Complex number
[26]
An alternative way of describing a complex number, and hence a phasor, is in polar notation, i.e. the length of the phasor and its angle to some reference axis. Thus we can describe it as:
[27]
where _V_ is the magnitude of the phasor and θ its phase angle. The magnitude | _z_ | of a complex number _z_ and its argument θ are given by:
[28]
Since _a_ = | _z_ | cos θ and _b_ = | _z_ | sin θ, then:
[29]
Thus if we have the voltage across one component described by _V_ ∠φ then we can write this as:
[30]
Example
Describe the signal _v_ = 12 sin (314 _t_ \+ π/4) V by a phasor.
The phasor has a magnitude, when expressed as the maximum value, of 12 and argument π/4. Thus we can describe it as 12∠π/4 V, or by using equation [29] as 12 cos π/4 + j12 sin π/4 = 8.49 + j8.49 V. If using root-mean-square values then we would have 8.49∠π/4 r.m.s.V or 6 + j6 r.m.s.V.
#### Adding or subtracting phasors
If we have the voltage across one component described by _V_ 1∠ϕ1 then we can write: **V** 1 = _V_ 1(cos ϕ1 \+ j sin ϕ1). If we have the voltage across a series component described by _V_ 2∠ϕ2, then: **V** 2 = _V_ 2(cos ϕ2 \+ j sin ϕ2). The phasor for the sum of the two voltages is then obtained by adding the two complex numbers. Thus:
[31]
Subtraction is carried out in a similar manner. Since adding or subtracting complex numbers is easier when they are in Cartesian form rather than polar form, when phasors are to be added or subtracted they should be put in Cartesian form.
Key point
Since adding or subtracting complex numbers is easier when they are in Cartesian form rather than polar form, when phasors are to be added or subtracted they should be put in Cartesian form.
Example
A circuit has three components in series. If the voltages across each component are described by phasors 4 V, j2 V and 3 + j4 V, what is the voltage phasor describing the voltage across the three components?
Since the components are in series, the resultant phasor voltage is described by the phasor:
Example
A circuit has two components in series. If the voltages across each component are described by phasors 4∠60° V and 2∠30° V, what is the voltage phasor describing the voltage across the two components?
For adding complex numbers it is simplest to convert the phasors into Cartesian notation. Thus:
If we want this phasor in polar notation then:
Thus the phasor is 5.81∠50° V.
#### Multiplication or division of phasors
Multiplication or division of complex numbers can be carried out when they are in either Cartesian form or polar form, being easiest when they are in polar form. Thus, if we have a voltage across a component described by **V** = _V_ ∠ϕ and the current by **I** = _I_ ∠θ then the product of the two phasors is:
[32]
If the voltage and current were in Cartesian form, i.e. in the form **V** = _a_ \+ j _b_ and **I** = _c_ \+ j _d_ then the product is:
[33]
For division, if we have a voltage across a component described by **V** = _V_ ∠φ and the current by **I** = _I_ ∠θ then:
[34]
If the voltage and current were in Cartesian form, i.e. in the form **V** = _a_ \+ j _b_ and **I** = _c_ \+ j _d_ then:
[35]
Key point
Multiplication or division of complex numbers is easiest when they are in polar form.
Example
If phasor **V** is represented by 10∠30° and **I** by 2∠45°, determine **VI** and **V** / **I**.
#### Kirchhoff's laws and phasors
_Kirchhoff's laws_ apply to the voltages and currents in a circuit at any instant of time. Thus the voltage law that the sum of the voltages taken round a closed loop is zero means that, with alternating voltages having values of _v_ 1, _v_ 2, _v_ 3, etc. at the same instant of time:
and so, if these voltages are sinusoidal:
We can consider each of these sinusoidal voltages to be the vertical projection of the phasor describing it. Thus we must have:
Kirchhoff's voltage law can thus be stated as: _the sum of the phasors of all the voltages around a closed loop is zero_. Kirchhoff's current law can be stated as the sum of all the currents at a node is zero, i.e. the current entering a junction equals the current leaving it. In a similar way we can state this law for sinusoidal currents as: _the sum of the phasors of the currents at a node is zero, i.e. the sum of the phasors for currents entering a junction equals that for those leaving it_.
Example
A circuit has two components in parallel. If the currents through the components can be described by the phasors 2 + j4 A and 4 + j1 A, what is the phasor describing the current entering the junction?
Using Kirchhoff's current law we must have: the phasor for current entering junction = phasor sum for currents leaving the junction. Hence:
Example
For the a.c. circuit shown in Figure 2.30, determine the unknown voltage.
Figure 2.30 Example
Using Kirchhoff's voltage law, and writing the phasors in Cartesian notation:
Thus:
or in polar notation:
Key points
Impedance is described by a complex number but is not a phasor since it does not describe a sinusoidally varying quantity, it describes a line on an Argand diagram but not one that rotates with an angular velocity. Hence, in this book bold print is not used for it. In some textbooks, however, it is written in bold print because it is complex.
#### Impedance
The term _impedance Z_ is defined as the ratio of the phasor voltage across a component to the phasor current through it:
[36]
Thus if we have **V** = _V_ ∠θ and **I** = _I_ ∠ϕ then:
If we have impedances connected in series (Figure 2.31), then Kirchhoff's voltage law gives:
Figure 2.31 Impedances in series
Dividing by the phasor current, the current being the same through each:
Hence the total impedance _Z_ is the sum of the impedances of the three impedances:
[37]
Consider the parallel connection of impedances (Figure 2.32). Kirchhoff's current law gives:
Figure 2.32 Impedances in parallel
Dividing by the phasor voltage, the voltage being the same for each impedance:
Thus the total impedance _Z_ is given by:
[38]
Example
If the voltage across a component is 4 sin _ωt_ V and the current through it 2 sin( _ωt_ − 30°) A, what is its impedance?
Using equation [36] with the phasors in polar notation:
Example
What is the total impedance of a circuit with impedances of 2 + j5 Ω, 1 − j3 Ω and 4 + j1 Ω in series?
Example
What is the total impedance of impedances 4∠30° Ω in parallel with 2∠(−20°) Ω.
Hence _Z_ = 1.339∠(−23.3°) Ω.
#### Circuit elements
For a _pure resistor_ the current through it is in phase with the voltage across it. Thus for a voltage phasor of _V_ ∠0° we must have a current phasor of _I_ ∠0° and so the impedance of the circuit element is:
The impedance is the real number _V/I_ which is the resistance _R_.
For a _pure capacitance_ the current leads the voltage by 90°. Thus for a voltage phasor of _V_ ∠0° we must have a current phasor of _I_ ∠90° and so the impedance of the circuit element is:
The impedance is thus −j( _V/I_ ) and is just an imaginary quantity. The term _capacitive reactance X C_ is used for the ratio of the maximum, or r.m.s., voltage and current and thus for a pure capacitance:
[39]
For a _pure inductance_ the current lags the voltage by 90°. Thus for a voltage phasor of _V_ ∠0° we must have a current phasor of _I_ ∠(−90°) and so the impedance of the circuit element is:
The impedance is thus j( _V/I_ ) and is just an imaginary quantity. The term _inductive reactance X_ L is used for the ratio of the maximum, or r.m.s., voltage and current and thus for a pure inductance:
[40]
Example
Determine the impedance of a 100 Ω resistance in series with a capacitive reactance of 5 Ω.
Example
Express in Cartesian and polar notation, the impedance of each of the following circuits at a frequency of 50 Hz:
(a) a resistance of 20 Ω in series with an inductance of 0.1 H,
(b) a resistance of 50 Ω in series with a capacitance of 40 μF.
Also calculate the size of the current in each case and its phase relative to an applied voltage of 230 V at 50 Hz.
(a) With 50 Hz we have ω = 2 _πf_ = 2π × 50 = 314.16 rad/s. Thus:
Converting this to polar notation gives | _z_ | = √(20² + 31.42²) = 37.25 Ω. The phase is tan⁻¹ ( _X_ L/ _R_ ) = tan⁻¹ (31.42/20) = 57.52° and so the impedance is 37.25∠57.52° Ω.
The current **I** = **V** / **Z** and so is:
(b) The capacitance reactance _X_ c = 1 _/ωC_ and so:
Converting this to polar notation | _Z_ | = √(50² + 79.58²) = 93.98 Ω. The phase is tan⁻¹( _X_ C/ _R_ ) = tan⁻¹(−79.58/50) = −57.85° and so the impedance is 93.98∠−57.85° Ω.
The current **I** = **V** / **Z** and so is:
Example
Calculate the resistance and the series inductance or capacitance for each of the following impedances if the frequency is 50 Hz: (a) _Z_ = 10 + j15 Ω, (b) _Z_ = −j80 Ω, (c) _Z_ = 50∠30° Ω, (d) _Z_ = 120∠−60° Ω.
(a) Comparing this with _Z_ = _R_ \+ j _ωL_ , then _R_ = 10 Ω and _X_ L = 15 Ω. Since _X_ L = _ωL_ then _L_ = 15/314 = 0.048 H. (b) Here _R_ = 0 and the capacitive reactance _X_ C = 80 Ω. Since _X_ C = 1/ _ωC_ then _C_ = 1/(314 × 80) = 39.8 × 10⁻⁶ F or 39.8 μF. (c) This gives in Cartesian notation (see equation [29]) _Z_ = 50 cos 30° + j50 sin 30° = 43.3 + j25 Ω. We can compare this with _Z_ = _R_ \+ j _X_ L and so _R_ = 43.3 Ω and _X_ L = 25 Ω. Since _X_ L = _ωL_ then _L_ = 25/314 = 0.080 H. (d) This gives in Cartesian notation (see equation [29]) _Z_ = 120 cos(−60°) + j120 sin(−60°) = 60 − j104 Ω. We can compare this with _Z_ = _R_ − j _X_ C and so _R_ = 60 Ω and _X_ C = 104 Ω. Since _X_ C = 1/ _ωC_ then _C_ = 1/(314 × 104) = 30.7 μF.
## Problems 2.3
1. Simplify
(a) j7,
(b) j8,
(c) j2 × j,
(d) j5/j3.
2. Solve the following equations:
(a) _x_ 2 \+ 16 = 0,
(b) _x_ 2 \+ 4 _x_ − 5 = 0,
(c) 2 _x_ 2 − 2 _x_ \+ 3 = 0
3. Express the following complex numbers in polar form:
(a) −4 + j,
(b) −3 − j4,
(c) 3,
(d) −j6,
(e) 1 + j,
(f) 3−j2
4. Express the following complex numbers in Cartesian form:
(a) 5∠120°,
(b) 10∠45°,
(c) 6∠180°,
(d) 2.8∠76°,
(e) 2(cos 30° + j sin 30°),
(f) 3(cos 60° − j sin 60°)
5. If _z_ 1 = 3 + j2 and _z_ 2 = −2 + j4, determine the values of:
(a) _z_ 1 \+ _z_ 2,
(b) _z_ 1 − _z_ 2,
(c) _z_ 1 _z_ 2,
(d)
(e)
6. Evaluate the following:
(a) (2 + j3) + (3 − j5),
(b) (−4 − j6) + (2 + j5),
(c) (2 + j2) − (3 − j5),
(d) (2 + j4) − (1 + j4),
(e) 4(3 + j2),
(f) j2(3 + j5),
(g) (1 − j2)(3 + j4),
(h) (2 + j2)(3 − j3),
(i) (1 + j2)(4 − j3),
(j)
(k)
(l)
(m)
7. If _z_ 1 = 10∠20°, _z_ 2 = 2∠40° and _z_ 3 = 5∠60°, evaluate the following:
(a) _z_ 1 _z_ 2,
(b) _z_ 1 _z_ 3,
(c)
(d)
(e)
(f)
8. Describe the following signals by phasors written in both polar and Cartesian forms, taking the magnitude to represent the maximum value:
(a) 10 sin (2π50t − π/6),
(b) 10 sin (314 _t_ \+ 150°),
(c) 22 sin (628 _t_ \+ π/4)
9. Determine, in both Cartesian and polar forms, the sum of the following phasors:
(a) 4∠0° and 3∠60°,
(b) 2 + j3 and −4 + j4,
(c) 4∠π/3 and 2∠π/6
10. If phasors **A** , **B** and **C** are represented by **A** = 10∠30°, **B** = 2.5∠60° and **C** = 2∠45° determine:
(a) **AB** ,
(b) **AC** ,
(c) **A(B + C)** ,
(d) **A/B** ,
(e) **B/C** ,
(f) **C/(A + B)**
11. If _v_ 1 = 10 sin _ωt_ and _v_ 2 = 20 sin ( _ωt_ \+ 60°), what is (a) the phasor describing the sum of the two voltages and (b) its time-domain equation?
12. If the voltage across a component is 5 sin (314 _t_ \+ π/6) **V** and the current through it 0.2 sin (314 _t_ \+ π/3) **A** , what is its impedance?
13. A voltage of 100 **V** is applied across a circuit of impedance 40 + j30 Ω, what is, in polar notation, the current taken?
14. Determine, in Cartesian form, the total impedances of:
(a) 10 Ω in series with 2 − j5 Ω,
(b) 100∠30° Ω in series with 100∠60° Ω,
(c) 20∠30° Ω in series with 15∠(−10°) Ω,
(d) 20∠30° Ω in parallel with 6∠(−90°) Ω,
(e) 10 Ω in parallel with −j2 Ω,
(f) j40 Ω in parallel with j20 Ω
15. Determine, in Cartesian form, the impedance of:
(a) a resistance of 5 Ω in series with an inductive reactance of 2 Ω,
(b) a resistance of 50 Ω in series with a capacitive reactance of 10 Ω,
(c) a resistance of 2 Ω in series with an inductive reactance of 5 Ω and a capacitive reactance of 4 Ω,
(d) three elements in parallel, a resistance of 2 Ω, an inductive reactance of 10 Ω and a capacitive reactance of 5 Ω,
(e) an inductive reactance of 500 Ω in parallel with a capacitive reactance of 100 Ω
3
# Mathematical models
Summary
Engineers frequently have to devise and use mathematical models for systems. Mathematical modelling is the activity by which a problem involving the real-world is translated into mathematics to form a model which can then be used to provide information about the original real problem. Such mathematical models are essential in the design-to-test phase, in particular forming the benchmark by which a computer generated simulation can be measured prior to manufacturing a prototype. This chapter is an introduction to mathematical modelling for engineering systems, later chapters involving more detailed consideration of models.
Objectives
By the end of this chapter, the reader should be able to:
• understand what is meant by a mathematical model and how such models are formulated;
• devise mathematical models for simple systems.
## 3.1 Modelling
Consider some real problems:
• Tall buildings are deflected by strong winds, can we devise a model which can be used to predict the amount of deflection of a building for particular wind strengths?
• Cars have suspension systems, can we devise a model which can be used to predict how a car will react when driven over a hump in the road?
• When a voltage is connected to a d.c. electrical motor, can we devise a model which will predict how the torque developed by the motor will depend on the voltage?
• Can we devise a model to enable the optimum shaft to be designed for a power transmission system connecting a motor to a load?
• Can we design a model to enable an appropriate transducer to be selected as part of the monitoring/activation circuit for the safe release of an air bag in a motor vehicle under crash conditions?
• Can we design models which will enable failure to be predicted in static and dynamic systems?
• Can we design models to enable automated, robotic controlled systems to be designed?
Key point
With reference to Figure 3.1 and point 5, together with any reconsideration of the problem, quite often this involves the formulation of a computer model. The aim is to use the computer model to 'speed' up the analysis of the mathematical model in order to compare the results with collected data from the real world. Such software models therefore have to be benchmarked against the mathematical model and real world data to be validated prior to being used.
Figure 3.1 The processes involved in devising a model
Such questions as those above are encountered by design and manufacturing system engineers daily. Quite often it is the accuracy of a mathematical model that will determine the success or otherwise of a new design. Such models also greatly help to reduce the design-test-evaluation-manufacture process lead time.
### 3.1.1 How do we devise models?
The tactics adopted to devise models involve a number of stages which can be summarised by the block diagram of Figure 3.1. The first stage involves identifying what the real problem is and then identifying what factors are important and what assumptions can be made. These assumptions are used in order to simplify the model and enable an initial model to be formulated which we can check as being a reasonable approximation. Only when we are confident with the model do we build in further considerations to make the models even more accurate! For example, when modelling mechanical systems, we often initially ignore friction and the consequential heat generation; however, as the model is refined we have to consider such effects and adjust the initial 'ball park' model in order for its application to the real world to be valid. This stage will generally involve collecting data. The next stage is then to formulate a model. An essential part of this is to translate verbal statements into mathematical relationships. When solutions are then produced from the model, they need to be compared with the real world and, if necessary the entire cycle repeated.
#### Example of formulating a model
As an illustration, consider how we might approach the problem we started this section with:
Tall buildings are deflected by strong winds, can we devise a model which can be used to predict the amount of deflection of a building for particular wind strengths?
The simplest form we might consider is that of a tall building which is subject to wind pressure over its entire height, the building being anchored at the ground but free to deflect at its top (Figure 3.2). If we assume that the wind pressure gives a uniform loading over the entire height of the building and does not fluctuate, then we might consider the situation is rather like the deflection of the free end of a cantilever when subject to a uniformly distributed load (Figure 3.3). For such a beam the deflection _y_ is given by:
[1]
where _L_ is the length of the beam, _w_ the load per unit length, _E_ the modulus of elasticity and _I_ the second moment of area. The modulus of elasticity is a measure of the stiffness of the material and the second moment of area for a rectangular section is _bd_ 3/12, with _b_ being the breadth and _d_ the depth. This would suggest that a stiff structure would deflect less and also one with a large cross-section would deflect less. Hence we might propose a model of the form:
[2]
with _p_ being the wind pressure and related to the wind velocity, _H_ the height of the building, _b_ its breadth and _d_ its depth. Thus, a short, squat building will be less deflected than a tall slender one of the same building materials.
Figure 3.2 The building deflection problem
Figure 3.3 Deflection of a uniformly loaded cantilever
### 3.1.2 Lumped element modelling
Often in engineering we can devise a model for a system by considering it to be composed of a number of basic elements. We consider the characteristics of the behaviour of the system and 'lump' all the similar behaviour characteristics together and represent them by a simple element. For some elements, the relationship between their input and output is a simple proportionality, in other cases it involves a rate of change with time or even the rate of change with time of a rate of change with time.
Key point
Lumped models are devised by considering each of the basic behaviour characteristics of a system and representing them by a single element.
#### Mechanical systems
Mechanical systems can be considered to be made up of three basic elements which represent the stiffness, damping and inertia of the system:
• **_Spring element_**
The 'springiness' or 'stiffness' of a system can be represented by a spring (Figure 3.4(a)). The force _F_ is proportional to the extension _x_ of the spring:
Figure 3.4 Mechanical system building blocks
[3]
• **_Damper element_**
The 'damping' of a mechanical system can be represented by a dashpot. This is a piston moving in a viscous medium in a cylinder (Figure 3.4(b)). The damping force _F_ is proportional to the velocity _v_ of the damping element:
where _c_ is a constant. Since the velocity is equal to the rate of change of displacement _x_ :
[4]
• **_Mass or inertia element_**
The 'inertia' of a system, i.e. how much it resists being accelerated can be represented by mass _m_. Since the force _F_ acting on a mass is related to its acceleration _a_ by _F_ = _ma_ and acceleration is the rate of change of velocity, with velocity being the rate of change of displacement _x_ :
[5]
To develop the equations relating inputs and outputs we use Newton's laws of motion.
Example
Develop a lumped-model for a car suspension system which can be used to predict how a car will react when driven over a hump in the road (the second problem listed earlier in this chapter).
Such a system can have its suspension represented by a spring, the shock absorbers by a damper and the mass of the car and its passengers by a mass. The model thus looks like Figure 3.5(a). We can then devise an equation showing how the output of the system, namely the displacement of the passengers with time depends on the input of the displacement of a car wheel as it rides over the road surface.
Figure 3.5 (a) Model for the car suspension system, (b) free-body diagram
This is done by considering a _free-body diagram_ , this being a diagram of the mass showing just the external forces acting on it (Figure 3.5(b)). We can then relate the net force acting on the mass to its acceleration by the use of Newton's law, hence obtaining an equation which relates the input to the output.
The relative extension of the spring is the difference between the displacement _x_ of the mass and the input displacement _y_. Thus, the force due to the spring _F_ s is _k_ ( _x_ − _y_ ). The dashpot force _F_ d is _c_ (d _x_ /d _t_ − d _y_ /d _t_ ). Hence, applying Newton's law:
and so we can write:
#### Rotational systems
For rotational systems, e.g. the drive shaft of a motor, the basic building blocks are a torsion spring, a rotary damper and the moment of inertia (Figure 3.6).
Figure 3.6 Rotational system elements: (a) torsional spring, (b) rotational dashpot, (c) moment of inertia
• **_Torsional spring_**
The 'springiness' or 'stiffness' of a rotational spring is represented by a torsional spring. The torque _T_ is proportional to the angle θ rotated:
[6]
where _k_ is a constant.
• **_Rotational dashpot_**
The damping inherent in rotational motion is represented by a rotational dashpot. The resistive torque _T_ is proportional to the angular velocity ω and thus, since ω is the rate of change of angle θ with time:
[7]
where _c_ is a constant.
• **_Inertia_**
The inertia of a rotational system is represented by the moment of inertia _I_ of a mass. The torque _T_ needed to produce an angular acceleration α is given by _T_ = _I_ α and thus, since α is the rate of change of angular velocity ω with time and angular velocity is the rate of change of angle θ with time:
[8]
Example
Represent as a lumped-model the system shown in Figure 3.7(a) of the rotation of a disk as a result of twisting a shaft.
Figure 3.7 Example
Figure 3.7(b) shows the lumped-model and Figure 3.7(c) the free-body diagram for the system.
The torques acting on the disk are the applied torque, the spring torque and the damping torque. The torque due to the spring is _kθ_ and the damping torque is _c_ (dθ/d _t_ ). Hence, since the net torque acting on the mass is:
we have:
#### Electrical systems
The basic elements of electrical systems are the resistor, inductor and capacitor (Figure 3.8).
Figure 3.8 Electrical system elements
• **_Resistor_**
The resistor represents the electrical resistance of the system. The potential difference _v_ across a resistor is proportional to the current _i_ through it:
[9]
• **_Inductor_**
The inductor represents the electrical inductance of the system. For an inductor, the potential difference _v_ across it depends on the rate of change of current _i_ through it and we can write:
[10]
• **_Capacitor_**
The capacitor represents the electrical capacitance of the system. For a capacitor, the charge _q_ on the capacitor plates is related to the voltage _v_ across the capacitor by _q_ = _Cv_ , where _C_ is the capacitance. Since current _i_ is the rate of movement of charge:
But _q_ = _Cv_ , with _C_ being constant, so:
[11]
To develop the models for systems which we describe by electrical circuits involving resistance, inductance and capacitance we use Kirchhoff's laws.
Example
Develop a lumped-system model for a d.c. motor relating the current through the armature to the applied voltage.
The motor consists basically of the armature coil, this being free to rotate, which is located in the magnetic field provided by either a permanent magnet or a current through field coils. When a current flows through the armature coil, forces act on it as a result of the current-carrying conductors being in a magnetic field. As a result, the armature coil rotates. Since the armature is a coil rotating in a magnetic field, a voltage is induced in it in such a direction as to oppose the change producing it, i.e. there is a back e.m.f. Thus the electrical circuit we can use to describe the motor has two sources of e.m.f., that applied to produce the armature current and the back e.m.f. If we consider a motor where there is either a permanent magnet or separately excited field coils, then the lumped electrical circuit model is as shown in Figure 3.9. We can consider there are just two elements, an inductor and a resistor, to represent the armature coil. The equation is thus:
This can be considered to be the first stage in addressing the problem posed earlier in the chapter: When a voltage is connected to a d.c. electrical motor, can we devise a model which will predict how the torque developed by the motor will depend on the voltage? The torque generated will be proportional to the current through the armature.
Figure 3.9 Lumped-model for a d.c. motor
#### Thermal systems
Thermal systems have two basic building blocks: resistance and capacitance.
• **_Thermal resistance_**
The thermal resistance _R_ is the resistance offered to the rate of flow of heat _q_ and is defined by:
[12]
where _T_ 1 − _T_ 2 is the temperature difference through which the heat flows.
For heat conduction through a solid we have the rate of flow of heat proportional to the cross-sectional area _A_ and the temperature gradient. Thus, for two points at temperatures _T_ 1 and _T_ 2 and a distance _L_ apart, we can write:
[13]
with _k_ being the thermal conductivity. With this mode of heat transfer, the thermal resistance _R_ is _L_ / _Ak_.
For heat transfer by convection between two points, Newton's law of cooling gives:
[14]
where ( _T_ 2 − _T_ 1) is the temperature difference, _h_ the coefficient of heat transfer and _A_ the surface area across which the temperature difference is. The thermal resistance with this mode of heat transfer is thus 1/ _Ah_.
• **_Thermal capacitance_**
The thermal capacitance is a measure of the store of internal energy in a system. If the rate of flow of heat into a system is _q_ 1 and the rate of flow out _q_ 2 then the rate of change of internal energy of the system is _q_ 1− _q_ 2. An increase in internal energy can result in a change in temperature:
where _m_ is the mass and _c_ the specific heat capacity. Thus the rate of change of internal energy is equal to _mc_ times the rate of change of temperature. Hence:
[15]
This equation can be written as:
[16]
where the capacitance _C_ = _mc_.
Example
Develop a lumped-model for the simple thermal system of a thermometer at temperature _T_ being used to measure the temperature of a liquid when it suddenly changes to the higher temperature of _T_ L (Figure 3.10).
Figure 3.10 Example
When the temperature changes there is heat flow _q_ from the liquid to the thermometer. If _R_ is the thermal resistance to heat flow from the liquid to the thermometer then _q_ = ( _T_ L − _T_ )/ _R_. Since there is only a net flow of heat from the liquid to the thermometer, if the thermal capacitance of the thermometer is _C_ , then _q_ = _C_ d _T_ /d _t_.
Thus, the equation for the model is:
#### Hydraulic systems
For a fluid system the three building blocks are resistance, capacitance and inertance. Hydraulic fluid systems are assumed to involve an incompressible liquid; pneumatic systems, however, involve compressible gases and consequently there will be density changes when the pressure changes. Here we will just consider the simpler case of hydraulic systems. Figure 3.11 shows the basic form of building blocks for hydraulic systems.
Figure 3.11 Hydraulic system elements
• **_Hydraulic resistance_**
Hydraulic resistance _R_ is the resistance to flow which occurs when a liquid flows from one diameter pipe to another (Figure 3.11(a)) and is defined as being given by the hydraulic equivalent of Ohm's law:
[17]
• **_Hydraulic capacitance_**
Hydraulic capacitance _C_ is the term used to describe energy storage where the hydraulic liquid is stored in the form of potential energy (Figure 3.11(b)). The rate of change of volume _V_ of liquid stored is equal to the difference between the volumetric rate at which liquid enters the container _q_ 1 and the rate at which it leaves _q_ 2, i.e.
But _V_ = _Ah_ and so:
The pressure difference between the input and output is:
Hence, substituting for _h_ gives:
[18]
The hydraulic capacitance _C_ is defined as:
[19]
and thus we can write:
[20]
• **_Hydraulic inertance_**
Hydraulic inertance is the equivalent of inductance in electrical systems. To accelerate a fluid a net force is required and this is provided by the pressure difference (Figure 3.11(c)). Thus:
[21]
where _a_ is the acceleration and so the rate of change of velocity _v_. The mass of fluid being accelerated is _m_ = _ALρ_ and the rate of flow _q_ = _Av_ and so:
[22]
where the inertance _I_ is given by _I_ = _Lρ_ / _A_.
Example
Develop a model for the hydraulic system (Figure 3.12) where there is a liquid entering a container at one rate _q_ 1 and leaving through a valve at another rate _q_ 2.
Figure 3.12 Example
We can neglect the inertance since flow rates can be assumed to change only very slowly. For the capacitance term we have:
For the resistance term for the valve we have _p_ 1 − _p_ 2 = _Rq_ 1. Thus, substituting for _q_ 2, and recognising that the pressure difference is _hρg_ , gives:
### Problems 3.1
1. Propose a mathematical model for the oscillations of a suspension bridge when subject to wind gusts.
2. Propose a mathematical model for a machine mounted on firm ground when the machine is subject to forces when considered in terms of lumped-parameters.
3. Derive an equation for a mathematical model relating the input and output for each of the lumped systems shown in Figure 3.13.
Figure 3.13 Problem 3
## 3.2 Relating models and data
In testing mathematical models against real data, we often have the situation of having to check whether data fits an equation. If the relationship is linear, i.e. of the form _y_ = _mx_ \+ _c_ , then it is comparatively easy to see whether the data fits the straight line and to ascertain the gradient _m_ and intercept _c_. However, if the relationship is non-linear this is not so easy. A technique which can be used is to turn the non-linear equation into a linear one by changing the variables. Thus, if we have a relationship of the form _y_ = _ax_ 2 \+ _b_ , instead of plotting _y_ against _x_ to give a non-linear graph we can plot _y_ against _x_ 2 to give a linear graph with gradient _a_ and intercept _b_. If we have a relationship of the form _y_ = _a_ / _x_ we can plot a graph of _y_ against 1/ _x_ to give a linear graph with a gradient of _a_.
Example
The following data was obtained from measurements of the load lifted by a machine and the effort expended. Determine if the relationship between the effort _E_ and the load _W_ is linear and if so the relationship.
Within the limits of experimental error the results appear to indicate a straight-line relationship (Figure 3.14). The gradient is 41/200 or about 0.21. The intercept with the _E_ axis is at 10. Thus the relationship is _E_ = 0.21 _W_ \+ 10.
Figure 3.14 Example
Example
It is believed that the relationship between _y_ and _x_ for the following data is of the form _y_ = _ax_ 2 \+ _b_. Determine the values of _a_ and _b_.
Figure 3.15 shows the graph of _y_ against _x_ 2. The graph has a gradient of AB/BC = 12.5/25 = 0.5 and an intercept with the _y_ -axis of 2. Thus the relationship is _y_ = 0.5 _x_ 2 \+ 2.
Figure 3.15 Example
### Problems 3.2
1. Determine, assuming linear, the relationships between the following variables:
(a) The load _L_ lifted by a machine for the effort _E_ applied.
(b) The resistance _R_ of a wire for different lengths _L_ of that wire.
2. Determine what form the variables in the following equations should take when plotted in order to give straight-line graphs and what the values of the gradient and intercept will have.
(a) The period of oscillation _T_ of a pendulum is related to the length _L_ of the pendulum by the equation:
where _g_ is a constant.
(b) The distance _s_ travelled by a uniformly accelerating object after a time _t_ is given by the equation:
where _u_ and _a_ are constants.
(c) The e.m.f. _e_ generated by a thermocouple at a temperature θ is given by the equation;
where _a_ and _b_ are constants.
(d) The resistance _R_ of a resistor at a temperature θ is given by the equation:
where _R_ 0 and α are constants.
(f) The pressure _p_ of a gas and its volume _V_ are related by the equation:
where _k_ is a constant.
(g) The deflection _y_ of the free end of a cantilever due to it own weight of _w_ per unit length is related to its length _L_ by the equation:
where _w_ , _E_ and _I_ are constants.
3. The resistance _R_ of a lamp is measured at a number of voltages _V_ and the following data obtained. Show that the law relating the resistance to the voltage is of the form _R_ = ( _a_ / _V_ ) + _b_ and determine the values of _a_ and _b_.
4. The resistance _R_ of wires of a particular material are measured for a range of wire diameters _d_ and the following results obtained. Show that the relationship is of the form _R_ = ( _a_ / _d_ 2) + _b_ and determine the values of _a_ and _b_.
5. The volume _V_ of a gas is measured at a number of pressures _p_ and the following results obtained. Show that the relationship is of the form _V_ = _ap b_ and determine the values of _a_ and _b_.
6. When a gas is compressed adiabatically the pressure _p_ and temperature _T_ are measured and the following results obtained. Show that the relationship is of the form _T_ = _ap b_ and determine the values of _a_ and _b_.
7. The cost _C_ per hour of operating a machine depends on the number of items _n_ produced per hour. The following data has been obtained and is anticipated to follow a relationship of the form _C_ = _an_ 3 \+ _b_. Show that this is the case and determine the values of _a_ and _b_.
8. The following are suggested braking distances _s_ for cars travelling at different speeds _v_. The relationship between _s_ and _v_ is thought to be of the form _s_ = _av_ 2 \+ _bv_. Show that this is so and determine the values of _a_ and _b_.
Hint: consider _s_ / _v_ as one of the variables.
9. The luminosity _I_ of a lamp depends on the voltage _V_ applied to it. The relationship between _I_ and _V_ is thought to be of the form _I_ = _aV_ _b_. Use the following results to show that this is the case and determine the values of _a_ and _b_.
10. From a lab test, it is believed that the law relating the voltage _v_ across an inductor and the time _t_ is given by the relationship _v_ = _A_ e _t/B_ , where _A_ and _B_ are constant and e is the exponential function. From the lab test the results observed were:
Show that the law relating the voltage to time is, in fact, true. Then determine the values of the constants _A_ and _B_.
4
# Calculus
Summary
Calculus is concerned with two basic operations, differentiation and integration, and is a tool used by engineers to determine such quantities as rates of change and areas; in fact, calculus is the mathematical 'backbone' for dealing with problems where variables change with time or some other reference variable and a basic understanding of calculus is essential for further study and the development of confidence in solving practical engineering problems. This will become evident in the next chapter where physical systems will be modelled and the use of 'rates of change' equations (called differential equations) will allow the physical system to be represented, an analysis made and a solution formed under defined conditions. This chapter is an introduction to the techniques of calculus and a consideration of some of their engineering applications. The topic continues in the next chapter with a discussion of the use of differential equations to represent physical systems and their solution for various inputs.
Objectives
By the end of this chapter, the reader should be able to:
• understand the concept of a limit and its significance in rate of change relationships;
• use calculus notation for describing a rate of change (differentiation) and understand the significance of the operation;
• solve engineering problems involving rates of change;
• understand what is involved in the calculus operation of integration;
• solve engineering problems involving integration.
## 4.1 Differentiation
Suppose we have an equation describing how the distance covered in a straight line by a moving object varies with time. We could plot a graph of displacement against time and determine the velocity at some instant as the gradient of the tangent to the curve at that instant. By taking a number of such gradient measurements we could then determine how the velocity varied with time. However, _differentiation_ is a mathematical technique which can be used to determine the rate at which functions change and hence the gradients, this thus enabling the velocity to be obtained from the equation without drawing the graph and tangents. We could also, for example, describe how the deflection of an initially horizontal beam in the _y_ -direction alters with distance along the beam in the _x_ -direction, or how the volume of a gas changes with temperature, or how electric charge at a point in a circuit changes with time (called current), etc. The list of uses is endless!
Key point
Differentiation is a mathematical technique which can be used to determine the rate at which functions change and hence the gradients of the tangents to graphs.
#### Limits
Consider the problem of determining the gradient of a tangent at a point on a graph. It might, for example, be a distance–time graph for a moving object to determine velocity as the rate at which distance is covered or a current–time graph for the current in an electrical circuit in order to determine the rate of change of current with time. Suppose we want to determine the gradient at point A on the curve shown in Figure 4.1. We can select another point B on the curve and join them together and then find the gradient of the line AB. The value of the gradient determined in this way will depend on where we locate the point B. If we let B slide along the curve towards A then the closer B is to A the more the line approximates to the tangent at point A. Thus the line AB1 is a better approximation to the tangent than the line AB. The method we can use to determine the gradient of the tangent at point A is:
Figure 4.1 Gradient at A
1. Take another point B on the same curve and determine the gradient of the line joining A and B.
2. Then move B closer and closer to A. In the limit, as the distance between A and B becomes infinitesimally small, i.e. as AB tends to zero (written as AB → 0), the gradient of the line becomes the gradient of the tangent at A.
Consider the gradient of the line AB in Figure 4.2:
Figure 4.2 Gradient
The gradient is the difference in the value of _y_ between points A and B divided by the difference in the value of _x_ between the points. We can write this difference in the value of _x_ as _δx_ and this difference in the value of _y_ as _δy_. The δ symbol in front of a quantity means 'a small bit of it' or 'an interval of'. Thus the equation can be written as
An alternative symbol which is often used is Δ _x_ , with the Δ symbol being used to indicate that we are referring to a small bit of the quantity _x_. These forms of notation do _not_ mean that we have δ or Δ multiplying _x_. The _δx_ or Δ _x_ should be considered as a single symbol representing a single quantity.
As we move B closer to A then the interval _δx_ is made smaller. The gradient of the line AB then becomes closer to the tangent to the curve at point A (Figure 4.3). Eventually when the difference in _x_ between A and B, i.e. _δx_ , tends to zero then we have the gradient of the tangent at point A. We can write this as:
[1]
This reads as: the limiting value of _δy_ / _δx_ as _δx_ tends to a zero value equals d _y_ /d _x_. A _limit_ is a value to which we get closer and closer as we carry out some operation. Thus d _y_ /d _x_ is the value of the gradient of the tangent to the curve at A. Since the tangent is the instantaneous rate of change of _y_ with _x_ at that point then d _y_ /d _x_ is the instantaneous rate of change of _y_ with respect to _x_. d _y_ /d _x_ is called the _derivative_ of _y_ with respect to _x_. The process of determining the derivative for a function is called _differentiation_. The notation d _y_ /d _x_ should not be considered as d multiplied by _y_ divided by d multiplied by _x_ , but as a single symbol representing the gradient of the tangent and so the rate of change of _y_ with _x_ ; if you like, it is a shorthand way of writing 'the rate of change of _y_ with respect to _x_ '.
Key point
For any graph of _y_ = _f_ ( _x_ ), if A is the point ( _x_ , _y_ ), i.e. ( _x_ , _f_ ( _x_ )), and B the point ( _x_ \+ _δx_ , _y_ \+ _δy_ ), i.e. ( _x_ \+ _δx_ , _f_ ( _x_ \+ _δx_ )), then the gradient of the tangent at point A is the limiting value of _δy_ / _δx_ as _δx_ tends to zero:
Figure 4.3 Gradient at A
Since we can interpret the derivative as representing the slope of the tangent to a graph of a function at a particular point, this means with a continuous function, i.e. a function which has values of _y_ which smoothly and continuously change as _x_ changes for all values of _x_ , that we have derivatives for all values of _x_. However, with a discontinuous graph there will be some values of _x_ for which we can have no derivative. For example, with the graph shown in Figure 4.4, there is no derivative for _x_ = 1.
Figure 4.4 Discontinuous function
Example
Determine the slope of the tangent to the curve _y_ = _f_ ( _x_ ) = _x_ 2 when we have _x_ = 1 and _y_ = 1.
As _δx_ tends to zero, the slope of the tangent tends to the value 2 _x_. With _x_ = 1 then the slope of the tangent is 2.
Maths in action
There are situations in engineering where, given an initial condition of some variable, we need to determine its rate of change with respect to some parameter. As an illustration, consider a square flat metal plate of side length _x_ (Figure 4.5(a)). The initial condition we are concerned with is the area _A_ of the plate:
Now suppose the plate is heated and the dimension _x_ changes by an amount _δx_ (which is small compared with the original dimension _x_ but which may have an effect on such things as tolerances in assembly). The plate, which expands equally in all directions, now has sides of length ( _x_ \+ _δx_ ) (Figure 4.5(b)). The new area is thus:
If we denote the changes in area as _δA_ , then:
Since _δx_ is very small, then (δ _x_ )2 → 0 and in this limiting condition we can write:
We now have an expression which describes how the rate of change of the area with side length depends on the side length. We have determined the derivative.
Figure 4.5 Expansion of a plate
### 4.1.1 Derivatives of common functions
The above examples illustrate how, given some initial condition, we can determine the derivative of a function. Rather than always work from first principles in this way, it is useful to work out some general rules we can use. The following illustrates how some commonly used functions can be differentiated.
#### Derivative of a constant
A graph of a constant, e.g. _y_ = 2, has a gradient of 0. Thus its derivative is zero. Thus for _y_ = _c_ , where _c_ is a constant:
Key point
The derivative of a constant is zero.
[2]
#### Derivative of _x n_
If we differentiate from first principles _y_ = _x_ we obtain d _y_ /d _x_ = 1. If we differentiate _y_ = _x_ 2, as in the above example and maths in action, we obtain d _y_ /d _x_ = 2 _x_. If we differentiate _y_ = _x_ 3 we obtain d _y_ /d _x_ = 3 _x_ 2. The pattern in these differentiations is that if we have _y_ = _x n_, then:
Key point
[3]
This relationship applies for positive, negative and fractional values of _n_.
Example
Determine the derivative of the functions (a) _y_ = _x_ 3/2, (b) _y_ = _x_ −4.
(a)
(b)
#### Derivatives of trigonometric functions
Consider the determination of how the gradients of the graph of _y_ = sin _x_ (Figure 4.6(a)) vary with _x_. Examination of the graphs shows that:
Figure 4.6 Gradients of y = sin _x_
1. As _x_ increases from 0 to π/2 then the gradient, which is positive, gradually decreases to become zero at _x_ = π/2.
2. As _x_ increases from π/2 to π then the gradient, which is now negative, becomes steeper and steeper to reach a maximum value at _x_ = π.
3. As _x_ increases from π to 3 π/2 then the gradient, which is negative, decreases to become zero at _x_ = 3 π/2.
4. As _x_ increases from 3 π/2 to 2 π the gradient, which is now positive again, increases to become a maximum at _x_ = 2 π.
Figure 4.6(b) shows the result that is obtained by plotting the gradients against _x_ ; it is a cosine curve. Thus, the derivative of _y_ = sin _x_ is:
[4]
We can prove that the above is the case as follows. For the function _f_ ( _x_ ) = sin _x_ , we have _f_ ( _x_ \+ _δx_ ) − _f_ ( _x_ ) = sin( _x_ \+ _δx_ ) − sin _x_. Using equation [28] from Chapter 1 for the sum of two angles, sin ( _x_ \+ _δx_ ) = sin _x_ cos _δx_ \+ cos _x_ sin _δx_. As _δx_ tends to 0 then cos _δx_ tends to 1 and sin _δx_ to _δx_. Thus, the derivative can be written as:
If we had considered the function sin _ax_ then we would have obtained:
Key points
The derivatives of tan _ax_ , cosec _ax_ , sec _ax_ and cot _ax_ can be derived using the quotient rule (see later in this chapter) as:
The derivatives of sin ( _ax_ \+ _b_ ), cos ( _ax_ \+ _b_ ), etc. can be derived by using the chain rule (see later in this chapter) as:
[5]
In a similar manner we can consider _y_ = cos _x_ (Figure 4.7(a)) and the gradients at various points along the graph.
Figure 4.7 _Gradients of y_ = cos _x_
1. Between _x_ = 0 and _x_ = π/2 the gradient, which is negative, becomes steeper and steeper and reaches a maximum value at _x_ = π/2.
2. Between _x_ = π/2 and _x_ = π the gradient, which is negative, decreases until it becomes zero at _x_ = π.
3. Between _x_ = π and _x_ = 3π/2 the gradient, which is positive, increases until it becomes a maximum at _x_ = 3π/2.
4. Between _x_ = 3π/2 and _x_ = 2π the gradient, which is positive, decreases to become zero at _x_ = 2π.
Figure 4.7(b) shows how the gradient varies with _x_. The result is an inverted sine graph. Thus, for _y_ = cos _x_ :
[6]
If we had considered _y_ = cos _ax_ , where _a_ is a constant, then we would have obtained for the derivative:
[7]
We can prove that the above is the case in a similar way to that used for the sine.
Example
Determine the derivatives of (a) sin 2 _x_ , (b) cos 3 _x_.
(a)
(b)
Maths in action
Consider a sinusoidal current _i_ = _I_ m sin _ωt_ passing through a _pure inductance_. A pure inductance is one which has only inductance and no resistance or capacitance. With an inductance a changing current produces a back e.m.f. of − _L_ × the rate of change of current, i.e. _L_ d _i_ /d _t_ , where _L_ is the inductance. The applied e.m.f. must overcome this back e.m.f. for a current to flow. Thus the voltage across the inductance is _L_ d _i_ /d _t_. Hence:
Since cos _ωt_ = sin ( _ωt_ \+ 90°), the current and the voltage are out of phase with the voltage leading the current by 90°.
Consider a circuit having just _pure capacitance_ with a sinusoidal voltage _v_ = _V m_ sin _ωt_ being applied across it. A pure capacitance is one which has only capacitance and no resistance or inductance. The charge _q_ on the plates of a capacitor is related to the voltage _v_ by _q_ = _Cv_. Thus, since current is the rate of movement of charge, _i_ = d _q_ /d _t_.
i.e. _i_ = C d _v_ /d _t_. Since current is the rate of change of charge _q_ :
Since cos _ωt_ = sin ( _ωt_ \+ 90°), the current and the voltage are out of phase, the current leading the voltage by 90°.
#### Derivatives of exponential functions
Consider the exponential equation _y_ = e _x_ and a small increase in _x_ of _δx_. The corresponding increase in the value of _y_ is _δy_ where:
Thus:
If we let _δx_ = 0.01 then (e0.01 − 1)/0.01 = 1.005. If we take yet smaller values of _δx_ then in the limit this has the value 1. Thus:
[8]
The derivative of e _x_ is e _x_. Thus the gradient of the graph of _y_ = e _x_ at a point is equal to the value of _y_ at that point (Figure 4.8). For example, at the point _x_ = 0 on the graph the gradient is _y_ = e0 = 1. At _x_ = 2 the gradient is _y_ = e2 = 7.39. At _x_ = −2 the gradient is _y_ = e−2 = 0.14.
Figure 4.8 y = ex
If we had _y_ = e _ax_ then:
[9]
Key point
Example
Determine the derivative of _y_ = e2 _x_.
Example
The variation of current _i_ with time _t_ in an electrical circuit is given by the equation _i_ = sin 314 _t_. Derive an equation for the rate of change of current with time.
Maths in action
The variation with time _t_ of the displacement _y_ of a system oscillating with simple harmonic motion is described by the equation:
where _A_ is the amplitude and ω the angular frequency. The linear velocity _v_ is the rate of change of displacement with time, i.e. d _y_ /d _t_ , and so:
The acceleration a is the rate of change of velocity, i.e. d _v_ /d _t_ , and so:
The acceleration is thus proportional to the displacement and the minus sign indicates that it is always in the opposite direction to that in which _y_ increases, i.e. it is always directed towards the central rest position. This is the definition used for harmonic motion or cyclic motion which is referred to as _simple harmonic motion_ or, for short, SHM.
### 4.1.2 Rules of differentiation
In this section the basic rules are developed for the differentiation of constant multiples, sums, products and quotients of functions and the chain rule for functions of functions.
#### Multiplication by a constant
Consider a multiple of some function, e.g. _cf_ ( _x_ ) where _c_ is a constant.
[10]
The derivative of some function multiplied by a constant is the same as the constant multiplying the derivative of the function.
Key point
The derivative of some function multiplied by a constant is the same as the constant multiplying the derivative of the function.
Example
Determine the derivatives of (a) 4 _x_ 2, (b) 2 sin 3 _x_ , (c)
(a)
(b)
(c)
#### Sums of functions
Consider a function which can be considered to be a sum of a number of other functions, e.g. _y_ = _f_ ( _x_ ) + _g_ ( _x_ ):
[11]
The derivative of the sum of two differentiable functions is the sum of their derivatives.
Key point
The derivative of the sum of two differentiable functions is the sum of their derivatives.
As an illustration, consider the differentiation of the hyperbolic function _y_ = sinh _x_. This function (see Section 1.8) can be written as ½(e _x_ − e− _x_ ). Thus:
[12]
In a similar way we can differentiate sinh _ax_ and cosh _ax_ , obtaining:
Key point
The hyperbolic function tanh _ax_ can be differentiated using the quotient rule (see later in this Section).
[13]
[14]
Example
Determine the derivatives of:
(a) _y_ = 2 _x_ 3 \+ _x_ 2,
(b) _y_ = sin _x_ \+ cos 2 _x_ ,
(c) _y_ = e4 _x_ \+ _x_
(a)
(b)
(c)
#### The product rule
Consider a function _y_ = _f_ ( _x_ ) _g_ ( _x_ ) which is the product of two other differentiable functions, e.g. _y_ = _x_ sin _x_ :
We can simplify this by adding and subtracting the same quantity to the numerator, namely _f_ ( _x_ \+ _δx_ ) _g_ ( _x_ ), to give:
[15]
This is often written in terms of _u_ and _v_ , where _u_ = _f_ ( _x_ ) and _v_ = _g_ ( _x_ ):
Key point
The derivative of the product of two differentiable functions is the sum of the first function multiplied by the derivative of the second function and the second function multiplied by the derivative of the first function.
[16]
Example
Determine the derivatives of the following functions:
(a) _y_ = _x_ sin _x_ ,
(b) _y_ = _x_ 2 e3 _x_ ,
(c) _y_ = (2 + _x_ )2,
(d) _y_ = _x_ e _x_ sin _x_
(a)
(b)
(c) This can be written as (2 + _x_ )(2 + _x_ ) and so:
(d) This product has three terms and so we have to carry out the differentiation in two stages. Thus, if we first consider _x_ e _x_ as one term and the sin _x_ as the other term:
We can then use the product rule to evaluate the derivative of _x_ e _x_.
Hence:
#### The quotient rule
Consider obtaining the derivative of a function which is the quotient of two other functions, e.g. _f_ ( _x_ )/ _g_ ( _x_ ):
Adding and subtracting _f_ ( _x_ ) _g_ ( _x_ ) to the numerator enables the above equation to be simplified:
[17]
This is often written in terms of _u_ and _v_ , where _u_ = _f_ ( _x_ ) and _v_ = _g_ ( _x_ ):
Key point
[18]
Note that if we have just the reciprocal of some function, i.e. 1/ _g_ ( _x_ ), then we have _f_ ( _x_ ) = 1 and so equation [18] gives:
[19]
Equation [18] can be used to determine the derivative of tan _x_ , since tan _x_ = sin _x_ /cos _x_. Thus _f_ ( _x_ ) = sin _x_ and _g_ ( _x_ ) = cos _x_. Hence:
[20]
Likewise, equation [18] can be used to determine the derivative of tanh _x_.
[21]
Example
Determine the derivative of _y_ = (2 _x_ 2 \+ 5 _x_ )/( _x_ \+ 3).
Using equation [18] with _f_ ( _x_ ) = 2 _x_ 2 \+ 5 _x_ and _g_ ( _x_ ) = _x_ \+ 3:
Example
Determine the derivative of _y_ = _x_ e _x_ /cos _x_.
This example requires the use of both the quotient and product rules for differentiation. Using equation [18] with _f_ ( _x_ ) = _x_ e _x_ and _g_ ( _x_ ) = cos _x_ :
Now using equation [16] to obtain the derivative for the product _x_ e _x_ :
#### The chain rule
Suppose we have _y_ = cos _x_ 4 and, in order to differentiate it, write it in the form _y_ = cos _u_ and _u_ = _x_ 4. We can then obtain d _y_ /d _u_ and d _u_ /d _x_ , but how from them do we obtain d _y_ /d _x_?
Consider the function _y_ = _f_ ( _u_ ) where _u_ = _g_ ( _x_ ) and the obtaining of the derivative of _y_ = _f_ ( _g_ ( _x_ )). For _u_ = _g_ ( _x_ ) a small increase of _δx_ in the value of _x_ causes a corresponding small increase of _δu_ in the value of _u_. But _y_ = _f_ ( _u_ ) and so the small increase _δu_ causes a correspondingly small increase of _δy_ in the value of _y_. We can write, since the _δu_ terms cancel:
Thus:
and so:
Key point
[22]
This is known as the _function of a function rule_ or the _chain rule_.
The chain rule can be used to determine the derivative of a function such as _y_ = sin _x_ _n_ , for _n_ being positive or negative or fractional. Let _u_ = _x n_ and so consequently _y_ = sin _u_. Then d _u_ /d _x_ = _nx_ _n_ −1 and d _y_ /d _u_ = cos _u_. Hence, using the chain rule (equation [22]) we have d _y_ /d _x_ = cos _u_ × _nx_ _n_ −1 = _nx_ _n_ −1 cos _x n_.
Another application of the chain rule is to determine the derivatives of functions of the form _y_ = ( _ax_ \+ _b_ ) _n_ , _y_ = e _ax_ \+ _b_ , _y_ = sin( _ax_ \+ _b_ ), etc. With such functions we let _u_ = _ax_ \+ _b_ and so then we have, for the three examples, _y_ = _u n_, _y_ = e _u_ , _y_ = sin _u_. Then we have d _u_ /d _x_ = _a_ and d _y_ /d _u_ = _nu_ _n_ −1, d _y_ /d _u_ = e _u_ , d _y_ /d _u_ = cos _u_. Using the chain rule we then obtain d _y_ /d _x_ = _an_ ( _ax_ \+ _b_ ) _n_ −1, d _y_ /d _x_ = _a_ e _ax_ \+ _b_ and d _y_ /d _x_ = _a_ cos ( _ax_ \+ _b_ ). Thus, for the three examples, we have:
Example
Determine the derivative of _y_ = (2 _x_ − 5)4.
Let _u_ = 2 _x_ − 5 and so _y_ = _u_ 4. Then d _u_ /d _x_ = 2 and d _y_ /d _u_ = 4 _u_ 3 and so, using equation [22]:
Example
Determine the derivative of _y_ = sin _x_ 3.
Let _u_ = _x_ 3 and so _y_ = sin _u_. Then d _u_ /d _x_ = 3 _x_ 2 and d _y_ /d _u_ = cos _u_ and so, using equation [22]:
Example
Determine the derivative of
Let _u_ = _x_ 2/( _x_ 2 \+ 1) and so _y_ = _u_ 1/2. Using the quotient rule:
Using the chain rule:
### 4.1.3 Higher-order derivatives
Consider a moving object for which we have a relationship between the displacement _s_ of the object and time _t_ of the form:
where _u_ and _a_ are constants. We can plot this equation to give a distance−time graph. If we differentiate this equation we obtain:
d _s_ /d _t_ is the gradient of the distance–time graph. It also happens to be the velocity. The gradient varies with time. We could thus plot a velocity–time graph, i.e. a d _s_ /d _t_ graph against _t_. Then differentiating for a second time, to obtain the gradients to this graph, we obtain the acceleration _a_.
The derivative of a derivative is called the _second derivative_ and can be written as:
The first derivative gives information about how the gradients of the tangents change. The second derivative gives information about the rate of change of the gradient of the tangents.
If the second derivative is then differentiated we obtain the third derivative.
This, in turn, may be differentiated to give a fourth derivative, and so on.
Example
Determine the second derivative of _y_ = _x_ 3.
The first derivative is:
The second derivative is given by differentiating this equation again:
Example
Determine the second derivative of _y_ = _x_ 4 \+ 3 _x_ 2.
The first derivative is
The second derivative is
Maths in action
This illustrates how differential calculus may be used in the analysis of a beam which is deflected in one plane as a result of loading. Consider a beam which is bent into a circular arc and the radius _R_ of the arc. For a segment of circular arc (Figure 4.9), the angle _δθ_ subtended at the centre is related to the arc length _δs_ by _δs_ = _Rδθ_. Because the deflections obtained with beams are small _δx_ is a reasonable approximation to _δs_ and so we can write _δx_ = _Rδθ_ and 1/ _R_ = _δθ_ / _δx_. The slope of the straight line joining the two end points of the arc is _δy_ / _δx_ and thus tan _δθ_ = _δy_ / _δx_. Since the angle will be small we can make the approximation that _δθ_ = _δy_ / _δx_. Hence:
In the limit we can thus write:
When a beam is bent as a result of the application of a bending moment _M_ it curves with a radius _R_ given by the general bending equation as:
where _E_ is the modulus of elasticity and _I_ the second moment of area (a property of the shape of the beam) and so we can write:
This differential equation provides the means by which the deflections of beams can be determined.
Figure 4.9 The deflection curve of radius R
In Section 4.1.3 the determination of maximum and minimum points is discussed. We can use the criteria for a maximum in order to determine the conditions necessary for the deflection of the beam to be a maximum. In Section 4.2 we then use the above differential equation, with the condition for maximum deflection, to determine the maximum deflection of a beam.
### 4.1.4 Maxima and minima
There are many situations in engineering where we need to establish maximum or minimum values. For example, with a projectile we might need to determine the maximum height reached. With an electrical circuit we might need to determine the condition for maximum power to be dissipated.
Consider a graph of _y_ against _x_ when the values of _y_ depend in some way on the values of _x_. Points on the graph at which d _y_ /d _x_ = 0 are called _turning points_ and can be:
• **_A local maximum_**
The term _local_ is used because the value of _y_ is only necessarily a maximum for points in the locality and there could be higher values of _y_ elsewhere on the graph. Figure 4.10 shows such a maximum. At the maximum point A we have zero gradient for the tangent, i.e. d _y_ /d _x_ = 0. Consider two points P and Q close to A, with P having a value of _x_ less than that at A and Q having a value greater than that at A. The gradient of the tangent at P is positive, the gradient of the tangent at Q is negative. Thus for a maximum we have the gradient changing from being positive prior to the turning point to negative after it.
Figure 4.10 A maximum
• **_A local minimum_**
The term _local_ is used because the value of _y_ is only necessarily a minimum for points in the locality and there could be lower values of _y_ elsewhere on the graph. Figure 4.11 shows such a minimum. At the minimum point C we have zero gradient for the tangent, i.e. d _y_ /d _x_ = 0. Consider two points R and S close to C, with R having a value of _x_ less than that at C and S having a value greater than that at C. The gradient of the tangent at R is negative, the gradient of the tangent at S is positive. Thus for a minimum we have the gradient changing from being negative prior to the turning point to positive after it.
Figure 4.11 A minimum
• **_A point of inflexion_**
Consider points of inflexion, as illustrated in Figure 4.12. At such points d _y_ /d _x_ = 0. However, in neither of the graphs is there a local maximum or minimum. In Figure 4.12(a), the gradient at a point T prior to the point is negative and the gradient at a point U after the point is also negative. In Figure 4.12(b), the gradient at a point V prior to the point is positive and the gradient at a point W after the point is also positive. For a point of inflexion the sign of the gradient prior to the point is the same as that after the point.
Figure 4.12 Points of inflexion
The gradient at a point on a graph is given by d _y_ /d _x_. We can thus determine whether a turning point is a maximum, a minimum or a point of inflexion by considering how the value of d _y_ /d _x_ changes for a value of _x_ smaller than the turning point value compared to that for a value of _x_ greater than the turning point value.
There is an alternative method we can use to distinguish between maximum and minimum points. We need to establish how the gradient changes in going from points before to after turning points. Consider, for a maximum, a graph of the gradients plotted against _x_ (Figure 4.13(a)). The gradients prior to the maximum are positive and decrease in value to become zero at the maximum. They then become negative and as _x_ increases become more and more negative. The second derivative d2 _y_ /d _x_ 2 measures the rate of change of d _y_ /d _x_ with _x_ , i.e. the gradient of the d _y_ /d _x_ graph. The gradient of the gradient graph is negative before, at and after the maximum point. Hence at a maximum d2 _y_ /d _x_ 2 is negative.
Figure 4.13 (a) A maximum, (b) a minimum
Key point
For the gradients in the vicinity of maxima, minima and points of inflexion, in moving from points before to after the turning point:
At a _maximum_ the gradient changes from being positive to negative; the second derivative is negative.
At a _minimum_ the gradient changes from being negative to positive; the second derivative is positive.
At a _point of inflexion_ the sign of the gradient does not change.
Consider a minimum (Figure 4.13(b)). The gradients prior to the minimum are negative and become less negative until they become zero at the minimum. As _x_ increases beyond the minimum the gradients become positive, increasing in value as _x_ increases. The second derivative d2 _y_ /d _x_ 2 measures the rate of change of d _y_ /d _x_ with _x_ , i.e. the gradient of the d _y_ /d _x_ graph. The gradient of the gradient graph is positive before, at and after the minimum point. Hence at a minimum d2 _y_ /d _x_ 2 is positive.
Example
Determine, and identify the form of, the turning points on a graph of the equation _y_ = 2 _x_ 3 − 3 _x_ 2 − 12 _x_.
Differentiating the equation gives
Thus the gradient of the graph is zero when 6 _x_ 2 − 6 _x_ − 12 = 0. We can rewrite this as:
The gradient is zero, and hence there are turning points, at _x_ = −1 and _x_ = 2.
To establish the form of these turning points consider the gradients just prior to and just after them.
Prior to the _x_ = −1 turning point at _x_ = −2, the gradient is 6 _x_ 2 − 6 _x_ − 12 = 6 × (−2)2 − 6 × (−2) − 12 = 12. After the point at _x_ = 0 we have a gradient of −12. Thus the gradient prior to the _x_ = −1 point is positive and after the point it is negative. The point is thus a maximum.
Consider the _x_ = 2 turning point. Prior to the turning point at _x_ = 0 the gradient is −12. After the turning point at _x_ = 3 the gradient is 6 × 32 − 6 × 3 − 12 = 24. Thus the gradient prior to the _x_ = 2 point is negative and after the point it is positive. The point is thus a minimum.
Alternatively we could determine the form of the turning points by considering the sign of the second derivative at the points. The second derivative is obtained by differentiating the d _y_ /d _x_ equation. Thus
At _x_ = −1 then the second derivative is 12 × (−1) − 6 = −18. The negative value indicates that the point is a maximum. At _x_ = 2 the second derivative is 12 × 2 − 6 = 18. The positive value indicates that the point is a minimum.
Figure 4.14 shows a graph of the equation _y_ = 2 _x_ 3 − 3 _x_ 2 − 12 _x_ , showing the maximum at _x_ = −1 and the minimum at _x_ = 2.
Figure 4.14 Example
Example
The displacement _y_ in metres of an object is related to the time _t_ in seconds by the equation _y_ = 5 + 4 _t_ − _t_ 2. Determine the maximum displacement.
Differentiating the equation gives:
d _y_ /d _t_ is 0 when _t_ = 2 s. There is thus a turning point at the displacement _y_ = 5 + 4 × 2 − 22 = 9 m. We need to check that this is a maximum displacement. The gradient prior to the turning point at _t_ = 1 has the value 4 − 2 = 2. After the turning point at _t_ = 3 it has the value 4 − 6 = −2. The gradient changes from a positive value prior to the turning point to a negative value afterwards. It is thus a maximum.
Alternatively we could have established this by determining the second derivative. Differentiating 4 − 2 _t_ gives d2 _y_ /d _t_ 2 = −2. Thus, the turning point is a maximum.
Example
If the sum of two numbers is 40, determine the values which will give the minimum value for the sum of their squares.
Let the two numbers be _x_ and _y_. Then we must have _x_ \+ _y_ = 40. We have to find the minimum value of _S_ when we have _S_ = _x_ 2 \+ _y_ 2. We need an equation which expresses the sum in terms of just one variable. Thus substituting from the previous equation gives
Differentiating this equation, then
The value of _x_ to give a zero value for d _S_ /d _x_ is when 4 _x_ − 80 = 0 and so when _x_ = 20.
We can check that this is the value giving a minimum by considering the values of d _S_ /d _x_ prior to and after the point. Thus prior to the point at _x_ = 19 we have d _S_ /d _x_ = 4 × 19 − 80 = −4. After the point at _x_ = 21 we have d _S_ /d _x_ = 4 × 21 − 80 = 4. Thus d _S_ /d _x_ changes from being negative to positive. The turning point is thus a minimum. Alternatively we could check that this is a minimum by obtaining the second derivative. Differentiating 4 _x_ − 80 gives d2 _S_ /d _x_ 2 = 4. Since this is positive then we have a minimum. Thus the two numbers which will give the required minimum are 20 and 20.
Example
Determine the maximum area of a rectangle with a perimeter of 32 cm.
If the width of the rectangle is w and its length _L_ then the area _A_ = _wL_. But the perimeter has a length of 32 cm. Thus 2 _w_ \+ 2 _L_ = 32. If we eliminate w from the two equations:
Hence:
d _A_ /d _L_ is zero when 16 − 2 _L_ = 0 and so when _L_ = 8.
We can check that this gives a maximum area by considering values of the gradient at values of _L_ below and above 8. At _L_ = 7 the gradient is 2 and at _L_ = 9 it is −2. It is thus a maximum. Alternatively we could have considered the second derivative. Since d2 _A_ /d _L_ 2 = −2 and so is negative, we have a maximum.
For a maximum area we must therefore have _L_ = 8 cm, and, after substituting this value in 2 _w_ \+ 2 _L_ = 32, _w_ = 8 cm.
Maths in action
Consider the circuit shown in Figure 4.15 where a d.c. source of e.m.f. _E_ and internal resistance _r_ supplies a load of resistance _R_. The power _P_ supplied to the load is _I_ 2 _R_ with the current _I_ being _E_ /( _R_ \+ _r_ ). Thus:
Differentiating with respect to _R_ by using the quotient rule gives:
_dP_ / _dR_ = 0 when:
and so _R_ = _r_. We can check that this is the condition for maximum power transfer by considering the second derivative. We have the sum of two terms and so for the _E_ 2/( _R_ \+ _r_ )2 term, let _u_ = _R_ \+ _r_ and _y_ = _E_ 2/ _u_ 2. Then, d _u_ /d _R_ = 1 and d _y_ /d _u_ = −2 _E_ 2/ _u_ 3 and d _y_ /d _R_ = −2 _E_ 2/( _R_ \+ _r_ )3. For _y_ = 2 _E_ 2 _R_ /( _R_ \+ _r_ )3 we can use the quotient rule to give d _y_ /d _R_ = [( _R_ \+ _r_ )32 _E_ 2 − 2 _E_ 2 _R_ 3( _R_ \+ _r_ )2]/( _R_ \+ _r_ )6 which can be simplified to 2 _E_ 2/( _R_ \+ _r_ )3 − 6 _E_ 2 _R_ /( _R_ \+ _r_ )4. Hence:
With _R_ = _r_ the second derivative is negative and so we have maximum power transfer.
Figure 4.15 Circuit
Figure 4.15 _dy/dx_ = 2x. All the above graphs have gradients which are 2x.
### 4.1.5 Inverse functions
If we have a function _y_ which is a continuous function of _x_ then the derivative, i.e. the slope of the tangent to a graph of _y_ plotted against _x_ , is d _y_ /d _x_. However, if we have _x_ as a continuous function of _y_ then the derivative, i.e. the slope of the tangent to a graph of _x_ plotted against _y_ , is d _x_ /d _y_. How are these derivatives related? We might, for example, have _y_ = _x_ 2 and so d _y_ /d _x_ = 2 _x_. For the inverse function _x_ = √y and d _x_ /d _y_ = ½ _y_ −1/2.
If we have a function _y_ = _f_ ( _x_ ) then we can write for the inverse _x_ = _g_ ( _y_ ). Thus _x_ = _g_ { _f_ ( _x_ )}. Differentiating both sides of this equation with respect to _x_ , using the chain rule for the right-hand side, gives:
Hence, with _y_ = _f_ ( _x_ ), the derivatives of the inverse function can be derived by using:
[23]
For example, for _y_ = _x_ 2 we have d _y_ /d _x_ = 2 _x_ ; the inverse function is _x_ = √ _y_ and d _x_ /d _y_ = ½ _y_ −1/2. Then
Example
Determine d _y_ /d _x_ for the function described by the equation _x_ = _y_ 2 \+ 2 _y_.
It is easier to obtain d _x_ /d _y_ from the equation and thus the problem is tackled by doing that operation first. Thus d _x_ /d _y_ = 2 _y_ \+ 2. Then, using equation [23]:
#### Logarithmic functions
Consider the function _y_ = ln _x_. We can write this as _x_ = e _y_. Differentiating _x_ with respect to _y_ gives:
Hence, using equation [23]:
[24]
Note that since _x_ must be positive for ln _x_ to have any meaning, equation [24] only applies for positive values of _x_.
Example
Determine the derivative of _y_ = e− _x_ ln _x_.
Using the product rule then:
## Problems 4.1
1. Determine the derivatives of the following functions:
(a) _y_ = _x_ 5,
(b) _y_ = 2 _x_ −4,
(c) _y_ = −3 _x_ 2,
(d) _y_ = ½ _x_ ,
(e)
(f)
(g)
(h)
(i)
(j)
(k)
(l)
(m)
(n)
(o)
(p)
(q)
(r)
(s)
(t)
(u)
(v)
(w)
(x)
(y)
(z)
2. Determine the second derivatives of the following functions:
(a)
(b)
(c)
(d)
(e)
(f)
3. Determine the velocity and acceleration after a time of 2 s for an object which has a displacement _x_ which is a function of time _t_ and given by _x_ = 12 + 15 _t_ − 2 _t_ 2, with _t_ being in seconds.
4. Determine the velocity and acceleration at a time _t_ for an object which has a displacement _x_ in metres given by _x_ = 3 sin 2 _t_ \+ 3 cos 3 _t_ , _t_ being in seconds.
5. The voltage _v_ , in volts, across a capacitor of capacitance 2 μF varies with time _t_ , in seconds, according to the equation _v_ = 3 sin 5 _t_. Determine how the current varies with time.
6. The current _i_ , in amps, through an inductor of inductance 0.05 H varies with time _t_ , in seconds, according to the equation _i_ = 10(1 − e−100 _t_ ). Determine how the potential difference across the inductor varies with time.
7. The volume of a cone is one-third the product of the base area and the height. For a cone with a height equal to the base radius, determine the rate of change of cone volume with respect to the base radius.
8. The volume of a sphere of radius _r_ is Determine the rate of change of the volume with respect to the radius.
9. With the Doppler effect, the frequency _f o_ heard by an observer when a sound source of frequency _f s_ is moving away from the observer with a velocity v is given by _f_ o = _f_ s/(1 + _v/c_ ), where _c_ is the velocity of sound. Determine the rate of change of the observed frequency with respect to the velocity.
10. The length _L_ of a metal rod is a function of temperature _T_ and is given by the equation _L_ = _L_ o(1 + a _T_ \+ _bT_ 2). Determine an equation for the rate of change of length with temperature.
11. Determine and identify the form of the turning points on graphs of the following functions:
(a) _y_ = _x_ 2 − 4 _x_ \+ 3,
(b) _y_ = _x_ 3 − 6 _x_ 2 \+ 9 _x_ \+ 3,
(c) _y_ = _x_ 5 − 5 _x_ ,
(d) _y_ = sin _x_ for _x_ between 0 and 2π,
(e) _y_ = 2 _x_ 3 \+ 3 _x_ 2 − 12 _x_ \+ 3
12. A cylindrical container, open at one end, has a height of _h_ m and a base radius of _r_ m. The total surface area of the container is to be 3π m2. Determine the values of _h_ and _r_ which will make the volume a maximum.
13. A cylindrical metal container, open at one end, has a height of _h_ cm and a base radius of _r_ cm. It is to have an internal volume of 64π cm3. Determine the dimensions of the container which will require the minimum area of metal sheet in its construction.
14. The bending moment _M_ of a uniform beam of length _L_ at a distance _x_ from one end is given by _M_ = ½ _wLx_ − ½ _wx_ 2, where _w_ is the weight per unit length of beam. Determine the value of _x_ at which the bending moment is a maximum.
15. The deflection _y_ of a beam of length _L_ at a distance _x_ from one end is found to be given by _y_ = 2 _x_ 4 − 5 _Lx_ 3 \+ 2 _L_ 2 _x_ 2. Determine the values of _x_ at which the deflection is a maximum.
16. Determine the maximum value of the alternating voltage described by the equation _v_ = 40 cos 1000 _t_ \+ 15 sin 1000 _t V_.
17. The intensity of illumination from a point light source of intensity _I_ at a distance d from it is _I_ / _d_ 2. Determine the point along the line between two sources 10 m apart at which the intensity of illumination is a minimum if one of the sources has eight times the intensity of the other.
18. Determine the maximum rate of change with time of the alternating current _i_ = 10 sin 1000 _t_ mA, the time _t_ being in seconds.
19. The deflection _y_ of a propped cantilever of length _L_ at a distance _x_ from the fixed end is given by:
where _w_ is the weight per unit length and _E_ and _Is_ are constants. Determine the value of _x_ at which the deflection is a maximum.
20. The e.m.f. _E_ produced by a thermocouple depends on the temperature _T_ and is given by _E_ = _aT_ \+ _bT_ 2. Determine the temperature at which the e.m.f. is a maximum.
21. The horizontal range _R_ of a projectile projected with a velocity _v_ at an angle θ to the horizontal is given by _R_ = ( _v_ 2/ _g_ ) sin 2θ. Determine the angle at which the range is a maximum for a particular velocity.
22. A 100 cm length of wire is to be bent to form two squares, one with side _x_ and the other with side _y_. Determine the values of _x_ and _y_ which give the minimum area enclosed by the squares.
23. The rate _r_ at which a chemical reaction proceeds depends on the quantity _x_ of a chemical and is given by _r_ = _k_ ( _a_ − _x_ )( _b_ \+ _x_ ). Determine the maximum rate.
24. A cylinder has a radius _r_ and height _h_ with the sum of the radius and height being 2 m. Determine the radius giving the maximum volume.
25. A rectangle is to have an area of 36 cm2. Determine the lengths of the sides which will give a minimum value for the perimeter.
26. An open tank is to be constructed with a square base and vertical sides and to be able to hold, when full to the brim, 32 m3 of water. Determine the dimensions of the tank if the area of sheet metal used is to be a minimum.
## 4.2 Integration
_Integration_ can be considered to be the mathematical process which is the reverse of the process of differentiation. It also turns out to be a process for finding areas under graphs.
As an illustration of the application of integration in engineering as the reverse of differentiation, consider the situation where the velocity _v_ of an object varies with time _t_ , say _v_ = 2 _t_ (Figure 4.14). Since velocity is the rate of change of distance _x_ with time we can write this as:
Thus we know how the gradient of the distance–time graph varies with time. Integration is the method we can use to determine from this how the distance varies with time. We thus start out with the gradients and find the distance–time graph responsible for them, the reverse of the process used with differentiation.
Figure 4.14 Velocity–time graph
### 4.2.1 Integration as the reverse of differentiation
Suppose we have an equation _y_ = _x_ 2. When this equation is differentiated we obtain the derivative of d _y_ /d _x_ = 2 _x_. Thus, in this case, when given the gradient as 2 _x_ we need to find the equation which on being differentiated gave 2 _x_. Thus, integrating 2 _x_ should give us _x_ 2. However, the derivative of _x_ 2 \+ 1 is also 2 _x_ , likewise the derivative of _x_ 2 \+ 2, the derivative of _x_ 2 \+ 3, and so on. Figure 4.15a shows part of the family of graphs which all have the gradients given by 2 _x_.
Key point
Differentiation is the determination of the relationship for the gradient of a graph. We can define integration as the mathematical process which reverses differentiation, i.e. given the gradient relationship then finding the equation which was responsible for it.
Thus, for each of the graphs, at a particular value of _x_ , such as _x_ = 1, they all give the same gradient of 2. Thus in the integration of 2 _x_ we are not sure whether there is a constant term or not, or what value it might have. Hence a constant _C_ has to be added to the result. Thus the outcome of the integration of 2 _x_ has to be written as being _x_ 2 \+ _C_. The integral, which has to have a constant added to it, is referred to as an _indefinite integral_.
To indicate the process of integration a special symbol:
[25]
is used. This sign indicates that integration is to be carried out and the d _x_ that _x_ is the variable we are integrating with respect to. Thus the integration referred to above can be written as
#### Integrals of common functions
The integrals of functions can be determined by considering what equation will give the function when differentiated. For example, consider:
Considering integration as the inverse of differentiation, the question becomes as to what function gives _x n_ when differentiated. The derivative of _x n_+1 is ( _n_ \+ 1) _x n_. Thus, we have the derivative of _x n_+1/( _n_ \+ 1) as _x n_. Hence:
[26]
This is true for positive, negative and fractional values of _n_ other than _n_ = −1, i.e. the integral of _x_ −1. For the integral of _x_ −1 i.e.
then since the derivative of ln _x_ is 1/ _x_ :
[27]
This only applies if _x_ is positive, i.e. _x_ > 0. If _x_ is negative, i.e. _x_ < 0, then the integral of 1/ _x_ in such a situation is _not_ ln _x_. This is because we cannot have the logarithm of a negative number as a real quantity. To show that only positive values of a quantity are to be considered, we write it as | _x_ |.
Consider the integral of the exponential function e _x_ , i.e.
The derivative of e _x_ is e _x_. Thus:
[28]
The key points shows some functions and their integrals.
Key point
Example
Evaluate the integrals:
(a) ∫ _x_ 4 d _x_ ,
(b) ∫ _x_ 1/2 d _x_ ,
(c) ∫ _x_ −4 d _x_ ,
(d) ∫ _x_ −1 d _x_ ,
(e) ∫ cos 4 _x_ d _x_ ,
(f) ∫ e2 _x_ d _x_ ,
(g) ∫ 5 d _x_.
(a) Using
(b) Using
(c) Using
(d) Using the relationship giving in the table:
(e) Using
(f) Using
(g)
The above is just a particular version of the standard integral:
with _n_ = 0 since _x_ 0 = 1.
#### Integral of a sum
The derivative of, for example, _x_ 2 \+ _x_ is the derivative of _x_ 2 plus the derivative of _x_ , i.e. it is 2 _x_ \+ 1. The integral of 2 _x_ \+ 1 is thus _x_ 2 \+ _x_ \+ C. Thus, the integral of the sum of a number of functions is the sum of their separate integrals.
Key point
The integral of a sum of two functions equals the sum of their individual integrals.
Example
Determine the integral ∫( _x_ 3 \+ 2 _x_ +4)d _x_.
We can write this as:
Hence the integral is:
where _P_ , _Q_ and _R_ are constants. We can combine these constants into a single constant _C_. Hence the integral is:
#### Finding the constant of integration
The solution given by the above integration is a general solution and includes a constant. As was indicated earlier in Figure 4.15 the integration of 2 _x_ gives _y_ = _x_ 2 \+ _C_. This solution indicates a family of possible equations which could give d _y_ /d _x_ = 2 _x_. We can, however, find a _particular solution_ if we are supplied with information giving specific coordinate values which have to fall on the graph curve. Thus, in this case, we might be given the condition that when _y_ = 1 we have _x_ = 1. This must fit the equation _y_ = _x_ 2 \+ _C_ and can only be the case when _C_ = 0. Hence the solution is _y_ = _x_ 2.
Key point
Note that when we integrate a relationship for d _y_ /d _x_ with respect to _x_ we obtain a relationship for _y_ ; when we integrate a relationship for d2 _y_ /d _x_ 2 with respect to _x_ we obtain a relationship for d _y_ /d _x_.
Example
Determine the equation of a graph if it has to have _y_ = 0 when _x_ = 2 and has a gradient given by:
To obtain the general solution, i.e. the family of curves which fit the above gradient equation, we integrate. Thus:
The particular curve we require must have _y_ = 0 when _x_ = 2. Putting this data into the equation gives 2 = 0 + 0 + _C_. Hence _C_ = 2 and so the particular solution is:
Example
A curve is such that its gradient is described by the equation d _y_ /dθ = cos θ and _y_ = 1 when θ = π/2 radians. Find the equation of the curve.
Here we have the relationship d _y_ /dθ = cos θ, and so integration gives:
This is the general solution. To find the specific equation we need to evaluate _C_ by substituting the known conditions, namely that _y_ = 1 when θ = π/2 radians. Thus:
Therefore _C_ = 0 and the required equation is:
Example
At any point on a curve we have a gradient of d _y_ /d _t_ = 3 sin _t_. Find the equation of the curve given that _y_ = 2 when _t_ has the value of 25°.
Given the relationship d _y_ /d _t_ = 3 sin _t_ , integration gives:
The general equation is thus _y_ = −3 cos _t_ \+ _C_. But _y_ = 2 when _t_ = 25° and so:
Thus _C_ = 4.73 and the specific equation, which is the required equation, is:
Maths in action
The bending of beams
See the Maths in action in Section 4.1.3 for a preliminary discussion of the bending of beams.
The deflection _y_ of a beam can be obtained by integrating the differential equation:
with respect to _x_ to give:
with _A_ being the constant of integration and then carrying out a further integration with respect to _x_ to give:
with _B_ being a constant of integration.
At the point where maximum deflection occurs, the slope of the deflection curve will be zero and thus the point of maximum deflection can be determined by equating d _y_ /d _x_ to zero.
As an illustration, consider a horizontal cantilever supporting a load at its free end (Figure 4.16). The bending moment a distance _x_ from the fixed end is given by _M_ = − _F_ ( _L_ − _x_ ) and so the differential equation becomes:
Integrating with respect to _x_ gives:
Since the slope of the beam d _y_ /d _x_ = 0 at the fixed end where _x_ = 0, then we must have _A_ = 0 and so:
Integrating again gives:
Since, at the fixed end we have zero deflection, i.e. we have _y_ = 0 at _x_ = 0, then we must have _B_ = 0 and so:
When _x_ = _L_ :
For beams with a number of concentrated loads, there will be discontinuities in the bending moment diagram and so we cannot write a single bending moment equation to cover the entire beam but have to write separate equations for each part of the beam between adjacent loads. Integration of each expression then gives the deflections relationship for each part of the beam. There is an alternative and that involves writing a single equation using, what are termed, Macaulay's brackets. For a discussion and examples of this method, see the companion book: _Mechanical Engineering Systems_ by R. Gentle, P. Edwards and W. Bolton.
Figure 4.16 Example
### 4.2.2 Integration as the area under a graph
Consider a moving object and its graph of velocity _v_ against time _t_ (Figure 4.17). The distance travelled between times of _t_ 1 and _t_ 2 is the area under the graph between those times. If we divide the area into a number of equal width strips then we can represent this area under the velocity–time graph as being the sum of the areas of these equal width strip areas, as illustrated in Figure 4.17. If _t_ is the value of the time at the centre of a strip of width δ _t_ and _v_ the velocity at this time, then a strip has an area of _v δt_. Thus the area under the graph between the times _t_ 1 and _t_ 2 is equal to the sum of the areas of all such strips between the times _t_ 1 and _t_ 2,
Figure 4.17 Velocity-time graph
We can write this summation as:
The Σ sign is used to indicate that we are carrying out a summation of a number of terms. The limits between which this summation is to be carried out are indicated by the information given below and above the sign. If we make _δt_ very small, i.e. let _δt_ tend to 0, then we denote it by d _t_. The sum is then the sum of a series of very narrow strips and is written as:
Key point
The sigma notation used for a summation has the numbers above it and below indicating where the summation is to start and finish.
[29]
The integral sign is an "S" for summation and the _t_ 1 and _t_ 2 are said to be the limits of the range of the variable _t_. Here _x_ is the _integral_ of the _v_ with time _t_ between the limits _t_ 1 and _t_ 2. The process of obtaining _x_ in this way is termed _integration_. Because the integration is between specific limits it is referred to as a _definite integral_.
#### Integration as reverse of differentiation and area under a graph
The definitions of integration in terms of the reverse of differentiation and as the area under a graph describe the same concept. Suppose we increase the area _A_ under a graph of _y_ plotted against _x_ by one strip (Figure 4.18). Then the increase in the area _δA_ is the area of this strip. Thus:
Key point
The notation
with no limits specified stands for any antiderivative of _f_ ( _x_ ). When we evaluate such an integral there will be a constant in the answer.
is termed a definite integral because it takes a definite value, representing an area under a curve. When we evaluate such an integral there is no constant in the answer. The integral gives the area under the curve of the function f(x) between x = a and x = b and:
The term inside the square brackets is the result of the integration before the limits have been applied.
Figure 4.18 Area increased by one strip
So we can write:
In the limit as _δx_ tends to 0 then we can write d _A_ /d _x_ and so
With integration defined as the inverse of differentiation then the integration of the above equation gives the area _A_ , i.e.
[30]
This is an indefinite integral, which is the same as that given by the definition for integration as the area under a graph when limits are imposed. An _indefinite integral_ has no limits and the result has a constant of integration. Integration between specific limits gives a _definite integral_.
#### Areas under graphs
Consider the integration of _y_ with respect to _x_ when we have _y_ = 2 _x_. This has no specified limits and so is an indefinite integral, with the solution as the function which differentiated would give 2 _x_ :
Now consider the area under the graph of _y_ = 2 _x_ between the limits of _x_ = 1 and _x_ = 3 (Figure 4.19). We can write this as the definite integral:
The square brackets round the _x_ 2 \+ _C_ are used to indicate that we have to impose the limits of 3 and 1 on it. Thus the integral is the value of _x_ 2 \+ _C_ when _x_ = 3 minus the value of _x_ 2 \+ _C_ when _x_ = 1.
The constant term _C_ vanishes when we have a definite integral. Note that an area below the _x_ -axis is negative. If the area is required when part of it is below the _x_ -axis then the parts below and above the _x_ -axis must be found separately and then added, disregarding the sign of the area.
Figure 4.19 _y_ = 2 _x_
Example
Determine the area between a graph of _y_ = _x_ \+ 1 and the _x_ -axis between _x_ = −2 and _x_ = 4.
Figure 4.20 shows the graph. The area required is that between the values of _x_ of −2 and 4. We can break this area down into a number of elements. The area under the graph between _x_ = 0 and _x_ = 4 is that of a rectangle 4 × 1 plus a triangle ½(4 × 4) and so is +12 square units. The area between _x_ = −1 and 0 is that of a triangle ½(1 × 1) = 0.5 square units and the area between _x_ = −1 and _x_ = −2 is a triangular area below the axis and so is negative and given by ½(1 × 1) = −0.5 square units. Hence the total area under the graph is + 12 + 0.5 − 0.5 = 12 square units.
Figure 4.20 Example
Alternatively we can consider this area as the integral:
and so:
Example
Determine the value of the integral
We can consider that this integral represents the area under the graph between _x_ = −2 and _x_ = 4 of e2 _x_ plotted against _x_.
The value of the integral is thus 1490.470.
Example
Determine the value of the integral
We can consider that this integral represents the area under the graph between _x_ = 0 and _x_ = π/3 of cos 2 _x_ plotted against _x_.
The value of the integral is thus 0.433.
Example
Find the areas under the curve _y_ = _x_ 3 between (a) _x_ = 0 and _x_ = 1, (b) _x_ = −1 and _x_ = 1.
(a) Figure 4.21 shows the graph. The area is:
Figure 4.21 Example
(b) The area taking into account the sign of _y_ , is:
The area is zero because the area between _x_ = −1 and _x_ = 0 is negative. What we have is the sum of two areas:
The sum of the two areas is thus zero. If we want the total area, regardless of sign, between the axis and the curve for any function then we have to determine the positive and negative elements separately and then, ignoring the sign, add them. For this curve this gives ½.
Maths in action
Work
With a constant force _F_ acting on a body and a displacement _x_ in the direction of the force, then the work done _W_ on a body, i.e. the energy transferred to it, is given by _W_ = _Fx_.
Consider a variable force described by the graph shown in Figure 4.22 for the force applied to an object and how it varies with the displacement of that object. For a small displacement _δx_ we can consider the force to be effectively constant at _F_. Thus the work done for that displacement is _F δx_. This is the area of the strip under the force–distance graph. If we want the work done in changing the displacement from _x_ 1 to _x_ 2 then we need to determine the sum of all such strips between these displacements, i.e. the total area under the graph between the ordinates for _x_ 1 and _x_ 2. Thus:
If we make the strips tend towards zero thickness then the above summation becomes the integral, i.e.
Consider the work done in stretching a spring when a force _F_ is applied and causes a displacement change in its point of application, i.e. an extension, from 0 to _x_ if _F_ = _kx_ , where _k_ is a constant. Figure 4.23 shows the force–distance graph.
Figure 4.22 Force-displacement graph
Figure 4.23 Stretching a spring
The work done is the area under the graph between 0 and _x_. This is the area of a triangle and so the work done is ½ _Fx_. Since _F_ = _kx_ we can write this as ½ _kx_ 2.
We could have solved this problem by integration. Thus:
As a further illustration, consider the work done as a result of a piston in Figure 4.24 being moved to reduce the volume of a gas. The work done in moving the piston through a small distance _δx_ when the force is _F_ is _F δx_. Since pressure is force per unit area, then if the force acts over an area _A_ the pressure _p_ = _F_ / _A_. Thus:
But _A δx_ is the change in volume _δV_ of the gas. Hence, the work done = _p δV_. The total work done in changing the volume of a gas from _V_ 1 to _V_ 2 is thus:
If we consider _δV_ tending to zero then we can write
For a gas that obeys Boyle's law, i.e. _pV_ = a constant _k_ , the work done in compressing a gas from a volume _V_ 1 to _V_ 2 is thus:
Hence the work done is _k_ (ln _V_ 2 − ln _V_ 1).
Figure 4.24 Compressing a gas
Maths in action
Centre of gravity and centroid
The weight of a body is made up of the weights of each constituent particle, each such particle having its weight acting at a different point. However, it is possible to replace all the weight forces of an object by a single weight force acting at a particular point, this point being termed the _centre of gravity_.
If we consider a sheet to be made of a large number of small strip elements of mass at different distances from an axis (Figure 4.25) then the weight of each element will give rise to a moment about that axis. Thus the total moment due to all the weight elements is _δw_ 1 _x_ 1 \+ _δw_ 2 _x_ 2 \+ _δw_ 3 _x_ 3 \+.... If a single weight _W_ at a distance x̄ is to give the same moment, then:
Thus the distance of the centre of gravity from the chosen axis is:
For a thin flat plate of uniform density, the weight of an element is proportional to its area. We then refer to the _centroid_ since it is purely geometric. The distance of the centroid from the chosen axis is thus:
where _δa_ represents the area of an elemental strip. The product of an area and its distance from an axis is known as the _first moment of area_ of that area about the axis. Thus the centroid distance from an axis is the sum of the first moments of all the area elements divided by the sum of all the areas of the elements.
Figure 4.25 Moments of elements
If we consider infinitesimally small elements, i.e. δ _a_ → 0, then we can write:
Consider the determination of the centroid of a triangular area (Figure 4.26). Consider a small strip of area _δA_ = _x δy_. By similar triangles _x_ /( _h_ − _y_ ) = _b_ / _h_ and so _x δy_ = [ _b_ ( _h_ − _y_ )/ _h_ ] _δy_. The total area _A_ = ½ _bh_. Hence, the _y_ coordinate of the centroid is:
The centroid is located at one-third the altitude of the triangle. The same result is obtained if we consider the location with respect to the other sides. The centroid is at one-third the altitude along of the lines drawn from each apex to the opposite side.
Figure 4.26 Triangular area
Maths in action
Moment of inertia
Consider a rigid body rotating with a constant angular acceleration α about some axis (Figure 4.27). We can consider the body to be made up of small elements of mass _δm_. For such an element a distance _r_ from the axis of rotation we have a linear acceleration of _a_ = _rα_. Thus the force acting on the element is _δm_ × _rα_. The moment of this force is thus _Fr_ = _r_ 2 _α δm_. The total moment, i.e. torque _T_ , due to all the elements of mass in the body is thus:
Thus if we have elements of mass at radial distance from 0 to _R_ , in the limit as _δm_ → 0:
Since α is a constant we can write the above equation as:
where _I is the moment of inertia_
Figure 4.27 Rotation of a rigid body
As an illustration consider the determination of the moment of inertia of a uniform disc about an axis through its centre and at right angles to its plane. Figure 4.28 shows the disc with an element of mass being chosen as a disc with a radius _x_ and width _δx_. The element is a strip of length 2 _πx_ and so an area of 2 _πx δx_. If the mass of the disc is _m_ per unit area, then the mass of the element is _δm_ = 2 _πmx δx_. The moment of inertia of the element is _x_ 2 _δm_ = 2 _πmx_ 3 _δx_. Thus the moment of inertia of the disc is:
As another illustration, consider a sphere of radius _r_ and mass per unit volume _m_. If we take a thin slice of thickness _δx_ of the sphere perpendicular to the diameter about which the moment of inertia is to be determined and a distance _x_ from the sphere centre (Figure 4.29), then with the slice radius _y_ we have an element of volume _πy_ 2 _δx_ and hence mass _πmy_ 2 _δx_. The moment of inertia of a disc is ½ mass × radius2 (see the previous example) and thus the moment of inertia of the slice is ½( _πmy_ 2 _δx_ ) _y_ 2 and the moment of inertia of the sphere as the sum of all the slices as _δx_ → 0 is:
Since _r_ 2 = _y_ 2 \+ _x_ 2:
The total mass _M_ of the sphere is _M_ = (4/3)π _r_ 3 _m_ , so
Figure 4.28 Disc
Figure 4.29 Sphere
Maths in action
Second moment of area
Consider a beam that has been bent into the arc of a circle so that the uppermost surface is in tension and the lower surface in compression (Figure 4.30). The upper surface has increased in length and the lower surface decreased in length; between the two there is a plane which is unchanged in length; this is called the neutral plane and the line where the plane cuts the cross-section of the beam is the neutral axis.
Figure 4.30 Bending stretches the upper surface and contracts the lower surface, in-between there is an unchanged in length surface
An initially horizontal plane through the beam which is a distance _y_ from the neutral axis changes in length as a consequence of the beam being bent and the strain it experiences is the change in length Δ _L_ divided by its initial unstrained length _L_. For circular arcs, the arc length is the radius of the arc multiplied by the angle it subtends, and thus, _L_ \+ Δ _L_ = ( _R_ \+ _y_ )θ. The neutral axis NA will, by definition, be unstrained and so for it we have _L_ = _Rθ_. Hence, the strain on aa is:
Provided we can use Hooke's law, the stress due to bending which is acting on this plane is:
Looking at a cross-sectional slice of the beam cut by PP we have Figure 4.31. The moment _M_ of the elemental force _F_ about the neutral axis is _Fy_ and the stress σ acting on the elemental area is _F_ / _δA_. Therefore the moment is (σ _δA_ ) _y_. Hence, using the equation we derived above for the stress, the moment of this element about the neutral axis is:
The total moment _M_ produced over the entire cross-section is the sum of all the moments produced by all the elements of area in the cross-section. Thus, if we consider each such element of area to be infinitesimally small, we can write:
The integral is termed the _second moment of area I_ of the section:
Thus we can write:
For a rectangular cross-section of breadth _b_ and depth _d_ (Figure 4.32) with a segment of thickness _δy_ a distance _y_ from the neutral axis, the second moment of area for the segment is:
The total second moment of area for the section about the neutral axis is thus:
Figure 4.31 Elemental area in a section PP
Figure 4.32 Second moment of area
### 4.2.3 Techniques for integration
There are a number of techniques which can aid in the integration of functions. In this section we look at integration by substitution, integration by parts and partial fractions.
#### Integration by substitution
This involves simplifying integrals by making a _substitution_. The term _integration by change of variable_ is often used since the variable has to be changed as a result of the substitution. The aim of making a substitution is to put the integral into a simpler form for integration. As an illustration, consider the integral:
The substitution _u_ = 5 _x_ reduces e5 _x_ to e _u_. However, we also need to change d _x_ in the variable to d _u_ for the integration. Since d _u_ /d _x_ = 5, we can write the integral as:
Key points
Commonly used substitutions:
∫ cos _m_ _ax_ sin _n_ _ax_ d _x_ , when _n_ is odd
Let _u_ = cos _ax_. Use sin2 _ax_ \+ cos2 _ax_ = 1 in the simplification.
∫ cos _m_ _ax_ sin _n_ _ax_ d _x_ , when _m_ is odd
Let _u_ = sin _ax_. Use sin2 _ax_ \+ cos2 _ax_ = 1 in the simplification.
∫ cos _m_ _ax_ sin _n_ _ax_ d _x_ , when _m_ and _n_ are both even or both odd Rewrite the integral using:
In the above case the substitution of _u_ for 5 _x_ seemed a sensible way to simplify the integral. However, there are no general rules for finding suitable substitutions and the key points show some of the more commonly used substitutions.
Example
Determine the indefinite integral ∫(4 _x_ \+ 1)3 d _x_.
If we let _u_ = 4 _x_ \+ 1 then d _u_ /d _x_ = 4 and d _x_ = ¼ d _u_ :
Example
Determine the indefinite integral
If we let _u_ = 3 _x_ 2 \+ 4, then d _u_ /d _x_ = 6 _x_ and so _x_ d _x_ = (1/6) d _u_. Hence:
The modulus sign is used with the integration of 1/ _u_ because no assumption is made at that stage as to whether _u_ is positive or negative. The sign is dropped when the substitution is made because 3 _x_ 2 \+ 4 is always positive.
Example
Determine the indefinite integral ∫ cos2 _x_ sin3 _x_ d _x_.
If we let _u_ = cos _x_ , then d _u_ /d _x_ = −sin _x_ and so sin _x_ d _x_ = −d _u_. The integral then can be written as:
Example
Determine the indefinite integral ∫ cos _x_ sin2 _x_ d _x_.
Let _u_ = sin _x_. Then d _u_ /d _x_ = cos _x_ and so cos _x_ d _x_ = d _u_. The integral can then be written as:
#### Trigonometric substitutions
A useful group of substitutions is to use trigonometric functions. For example, for integrals involving √( _a_ 2 − _x_ 2) terms, we can use the substitution _x_ = _a_ sin θ. Then √( _a_ 2 − _x_ 2) = √( _a_ 2 − _a_ 2 sin2 θ) = _a_ cos θ, since we have 1 − sin2 θ = cos2 θ. Since d _x_ /dθ = _a_ cos θ then d _x_ = _a_ cos θ dθ. The key points give other such substitutions.
Key points
Useful trigonometric substitutions:
Example
Determine the indefinite integral
Let _x_ = sin θ. Then √(1 − _x_ 2) = √(1 − sin2 θ) = cos θ. Since d _x_ /dθ = cos θ then d _x_ = cos θ dθ. Thus the integral becomes:
Since cos 2θ = 2 cos2 θ − 1, we have:
Back substitution using θ = sin−1 _x_ gives:
However, a simpler expression is obtained if we first replace the sin 2θ using sin 2θ = 2 sin θ cos θ = 2sinθ √(1 − sin2 θ).
Example
Determine the indefinite integral
Let _x_ = 2 tan θ. Then d _x_ /dθ = 2 sec2 θ and so:
Another form of useful substitution, when we have integrals involving sin _x_ , cos _x_ , tan _x_ terms, is to let _u_ = tan ½ _x_. Then d _u_ /d _x_ = ½ sec2 ½ _x_. But sec2 ½ _x_ = 1 + tan2 ½ _x_ , thus d _u_ /d _x_ = ½(1 + tan2 ½ _x_ ) = ½(1 + _u_ 2). Thus d _x_ = 2 d _u_ /(1 + _u_ 2). The trigonometric functions can all be expressed in terms of _u_. Thus:
Figure 4.33 shows the right-angled triangle with such an angle. Hence:
Figure 4.33 Angle x
Note that integration of the squares of trigonometric functions can be obtained by using trigonometric identities to put the functions in non-squared form. Thus:
Example
Determine the indefinite integral
Let _u_ = tan ½ _x_ , then d _u_ /d _x_ = ½ sec2 ½ _x_ = ½(1 + tan2 ½ _x_ ) = ½(1 + _u_ 2) and replacing sin _x_ by 2 _u_ /(1 + _u_ 2):
#### Substitution with definite integrals
The above has discussed the substitution procedure with indefinite integrals where the variable was changed from _x_ to _u_. When we have definite integrals we can do the same procedure and take account of the limits of integration at the end _after_ reversing the substitution. The limits are in terms of values of _x_. However, it is often simpler to express the limits in terms of _u_ and take account of the limits _before_ reversing the substitution. To illustrate this, consider the integration of cos3 _x_ between the limits 0 and ½π. If we let _u_ = sin _x_ then d _u_ /d _x_ = cos _x_ and so cos _x_ d _x_ = d _u_. When _x_ = 0 then _u_ = 0 and when _x_ = _½π_ then _u_ = 1. Thus the integral can be written as:
#### Integration by parts
The product rule for differentiation gives:
Integrating both sides of this equation with respect to _x_ gives:
Hence:
[31]
This is the formula for _integration by parts_. This is often written in terms of _u_ = _f_ ( _x_ ) and _v_ = _g_ ( _x_ ) as:
[32]
With a definite integral the equation becomes:
Key point
Integration by parts:
[33]
Example
Determine the indefinite integral ∫ _x_ e _x_ d _x_.
The integral consists of the product of two factors. If we let _u_ = _x_ and d _v_ /d _x_ = e _x_ , then _v_ = ∫e _x_ d _x_ and equation [32] gives:
Example
Determine the indefinite integral ∫e _x_ sin _x_ d _x_.
Let _u_ = e _x_ and d _v_ /d _x_ = sin _x_. Then:
Hence, using equation [32] gives:
Applying integration by parts again, with _u_ = ex and d _v_ /d _x_ = cos _x_. Then _v_ = ∫ cos _x_ d _x_ = sin _x_. Hence, using equation [32] gives:
Thus:
Example
Determine the definite integral
Let _u_ = _x_ 2 and d _v_ /d _x_ = e _x_. Then _v_ = ∫e _x_ d _x_ = e _x_. Thus, using equation [33]:
Applying integration by parts again, with _u_ = _x_ and d _v_ /d _x_ = e _x_. Then _v_ = ∫e _x_ d _x_ = e _x_. Thus, using equation [33]:
#### Integration by partial fractions
Integrals involving fractions can often be simplified by expressing the integral as the sum or difference of two or more partial fractions which then lend themselves to easier integration. For example:
Key points
The procedure for obtaining partial fractions can be summarised as:
1. If the degree of the denominator is equal to, or less than, that of the numerator, divide the denominator into the numerator to obtain the sum of a polynomial plus a fraction which has the degree of the denominator greater than that of the numerator.
2. Write the denominator in the form of linear factors, i.e. of the form ( _ax_ \+ _b_ ), or irreducible quadratic factors, i.e. of the form ( _ax_ 2 \+ _bx_ \+ _c_ ).
3. Write the fraction as a sum of partial fractions involving constants _A, B_ , etc.
4. Determine the unknown constants which occur with the partial fractions by equating the fraction with the partial fractions and either solving the equation for specific values of _x_ or equating the coefficients of equal powers of _x_.
5. Replace the constants in the partial fractions with their values.
can be expressed as the partial fractions:
When the degree of the denominator is greater than that of the numerator then an expression can be directly resolved into partial fractions. The form taken by the partial fractions depends on the type of denominator concerned.
• If the denominator contains a _linear factor_ , i.e. a factor of the form ( _x_ \+ _a_ ), then for each such factor there will be a partial fraction of the form:
where _A_ is some constant.
• If the denominator contains _repeated linear factors_ , i.e. a factor of the form ( _x_ \+ _a_ ) _n_ , then there will be partial fractions:
with one partial fraction for each power of ( _x_ \+ _a_ ).
• If the denominator contains an _irreducible quadratic factor_ , i.e. a factor of the form _ax_ 2 \+ _bx_ \+ _c_ , then there will be a partial fraction of the form:
for each such factor.
• If the denominator contains _repeated quadratic factors_ , i.e. a factor of the form ( _ax_ 2 \+ _bx_ \+ _c_ ) _n_ , there will be partial fractions of the form:
with one for each power of the quadratic.
The values of the constants _A_ , _B_ , _C_ , etc. can be found by either making use of the fact that the equality between the fraction and its partial fractions must be true for all values of the variable _x_ or that the coefficients of _x n_ in the fraction must equal those of _x n_ when the partial fractions are multiplied out.
When the degree of the denominator, i.e. the power of its highest term, is equal to or less than that of the numerator, the denominator must be divided into the numerator until the result is the sum of terms with the remainder fraction term having a denominator which is of higher degree than its numerator. Consider, for example, the fraction:
The numerator has a degree of 3 and the denominator a degree of 2. Thus, dividing has to be used. Thus
The fractional term can then be simplified using partial fractions.
to give:
Example
Simplify into its partial fraction form:
This has two linear factors in the denominator and so the partial fractions are of the form:
with one partial fraction for each linear term. Thus for the expressions to be equal we must have:
Thus
Consider the requirement that this relationship is true for all values of _x_. Then, when _x_ = −1 we must have:
Hence _A_ = 1. When _x_ = −2 we must have:
Hence _B_ = 2.
Alternatively, we could have determined these constants by multiplying out the expression and considering the coefficients, i.e.
Thus, for the coefficients of _x_ to be equal we must have 3 = _A_ \+ _B_ and for the constants to be equal 4 = 2 _A_ \+ _B_. These two simultaneous equations can be solved to give _A_ and _B_. The partial fractions are thus:
Example
Determine the indefinite integral
The fraction 1/( _x_ 2 − 1) can be written as:
Hence, equating coefficients of _x_ gives _A_ \+ _B_ = 0 and equating integers gives _A_ − _B_ = 1. Thus _A_ = ½ and _B_ = −½. Hence the integral can be expressed as:
We can determine these integrals by substitution. Thus if we let _u_ = _x_ − 1 then d _u_ /d _x_ = 1 and so:
Likewise the integral of 1/( _x_ \+ 1) is ln | _x_ \+ 1| + _B_. Hence:
Example
Determine the indefinite integral
This fraction has a numerator of higher degree than the denominator and so the numerator must be divided by the denominator until the remainder is of lower degree than the denominator. Thus:
Hence the integral becomes:
Example
Determine the indefinite integral
Expressed as partial fractions:
Equating the constant terms gives _A_ = 1. Equating the coefficients of _x_ gives _C_ = 0. Equating the coefficients of _x_ 2 gives _A_ \+ _B_ = 0, and so _B_ = −1. Thus the integral becomes:
The integration of 1/( _x_ 2 \+ 1) can be carried out by using a substitution. Let _u_ = _x_ 2 \+ 1 and so d _u_ /d _x_ = 2 _x_. Thus:
and so:
Maths in action
The technique of using partial fractions to simplify expressions has many uses. In chapter 6, we shall see how partial fractions can help in the solution of differential equations using the Laplace transform. As an illustration, consider a differential equation relating rotational displacement θ to time _t_ for a rotating power transmission shaft:
Given that when _t_ = 0 we have θ = 4 and dθ/d _t_ = 25/2, the Laplace transform enables the differential equation to be written in the form:
We can use the method of partial fractions to simplify the expression. Let the fraction be replaced by:
Then we must have:
If we let _s_ = 2, then 32 − 78 + 84 − 40 = _B_ (2)(4 − 12 + 10) and so _B_ = −1/2. If we let s = 0, then −40 = _A_ (−2)(10) and so _A_ = 2. Comparing coefficients of _s_ gives 42 = 22 _A_ \+ 10 _B_ − 2 _D_ and so _D_ = −3/2. Comparing coefficients of _s_ 3 gives 4 = _A_ \+ _B_ \+ _C_ and so _C_ = 5/2. Putting these values into the partial fraction equation gives:
This is a lot easier to handle than the original equation.
### 4.2.4 Means
The _mean_ of a set of numbers is their sum divided by the number of numbers summed. The _mean value of a function_ between _x_ = _a_ and _x_ = _b_ is the mean value of all the ordinates between these limits. Suppose we divide the area into _n_ equal width strips (Figure 4.34), then if the values of the mid-ordinates of the strips are _y_ 1, _y_ 2,... _y_ n the mean value is:
Figure 4.34 Mean value
If _δx_ is the width of the strips, then _n δx_ = _b_ − _a_. Thus:
Hence, as _δx_ → 0:
[34]
Since the sum of all the _y δx_ terms is the area under the graph between _x_ = _a_ and _x_ = _b_ :
But the product of the mean value and ( _b_ − _a_ ) is the area of a rectangle of height equal to the mean value and width ( _b_ − _a_ ). Figure 4.35 shows this mean value rectangle.
Example
Determine the mean value of the function _y_ = sin _x_ between _x_ = 0 and _x_ = π.
Figure 4.35 Mean value rectangle
The mean value of function is:
#### Root-mean-square values
The power dissipated by an alternating current _i_ when passing through a resistance _R_ is _i_ 2 _R_. The mean power dissipated over a time interval from _t_ = 0 to _t_ = _T_ will thus be:
If we had a direct current _I_ generating the same power then we would have:
and:
[35]
This current _I_ is known as the _root-mean-square_ current. There are other situations in engineering and science where we are concerned with determining root-mean-square quantities. The procedure is thus to determine the mean value of the squared function over the required interval and then take the square root.
Example
Determine the root-mean-square current value of the alternating current _i_ = _I_ sin _ωt_ over the time interval _t_ = 0 to _t_ = 2π/ω.
The root-mean-square value is:
Example
Determine the root-mean-square value of the waveform shown in Figure 4.36 over a period of 0 to 2 s.
Figure 4.36 Example
From _t_ = 0 to _t_ = 1 s the waveform is described by _y_ = 2. From _t_ = 1 s to _t_ = 2 s the waveform is described by _y_ = 0. Thus the root-mean-square value is given by:
Example
An alternating current is defined by the equation:
Determine its mean value over half-a-cycle and the root-mean-square value over a cycle.
We have 100π = 2 _πf_ = 2π/ _T_ , where _f_ is the frequency and _T_ the periodic time. Hence the periodic time is 0.02 s and the time for half-a-cycle is 0.01 s.
Using equation [34], the mean value over half-a-cycle is:
We can use the standard form for the integral of sin a _x_ to give the mean value as:
Thus the mean value over half-a-cycle is 15.92 mA. Note that for a sinusoidal signal the mean value over a full cycle is zero.
The root-mean-square value over a cycle is given by equation [35] as:
Since sin²θ = ½(1 − cos 2θ), we can write:
and so the r.m.s. current 25/√2 = 17.68 mA. Note, that in general, the root-mean-square value of a sinusoidal signal is always the maximum value divided by √2.
## Problems 4.2
1. Determine the integrals of the following:
(a) 4,
(b) 2 _x_ 3,
(c) 2 _x_ 3 \+ 5 _x_ ,
(d) _x_ 2/3 − 3 _x_ 1/2,
(e) 4 + cos 5 _x_ ,
(f) 2 e−3 _x_ ,
(g) 4 e _x_ /2 \+ _x_ 2 \+ 2,
(h) 4/ _x_
2. Determine the areas under the following curves between the specified limits and the _x_ -axis:
(a) _y_ = 4 _x_ 3 between _x_ = 1 and _x_ = 2,
(b) _y_ = _x_ between _x_ = 0 and _x_ = 4,
(c) _y_ = 1/ _x_ between _x_ = 1 and _x_ = 3,
(d) _y_ = _x_ 3 − 3 _x_ 2 − 2 _x_ \+ 2 between _x_ = −1 and _x_ = 2,
(e) _y_ = _x_ 2 − _x_ − 2 between _x_ = −1 and _x_ = 2,
(f) _y_ = _x_ 2 − 1 between _x_ = −1 and _x_ = 2,
(g) the area between _x_ = 0 and _x_ = 2 for the curve defined by _y_ = _x_ 2 between _x_ = 0 and _x_ = 1 and by _y_ = 2 − _x_ between _x_ = 1 and _x_ = 2.
3. Determine the areas bounded by graphs of the following functions and between the specified ordinates:
(a) _y_ = 9 − _x_ 2, _y_ = − 2, _x_ = −2 and _x_ = 2,
(b) _y_ = 4, _y_ = _x_ 2, _x_ = 0 and _x_ = 1
4. Determine the geometrical area enclosed between the graph of the function _y_ = _x_ ( _x_ − 1)( _x_ − 2) and the _x_ -axis.
5. Determine the area bounded by graphs of _y_ = _x_ 3 and _y_ = _x_ 2.
6. Determine the area bounded by the graph of _y_ = sin _x_ , the _x_ -axis and the line _x_ = π/2.
7. Determine the area bounded by graphs of _y_ = _x_ 2 − 2 _x_ \+ 2 and _y_ = 4 − _x_.
8. Determine the values, if they exist, of the following definite integrals:
(a)
(b)
(c)
(d)
9. Determine the following indefinite integral by using the given substitutions:
(a)
(b)
(c)
(d)
(e)
(f)
(g)
(h)
(i)
10. Determine the following indefinite integrals by making appropriate substitutions:
(a)
(b)
(c)
(d)
(e)
(f)
(g)
(h)
(i)
11. By making appropriate substitutions, evaluate the following definite integrals:
(a)
(b)
(c)
(d)
(e)
12. Using the method of integration by parts, determine the following indefinite integrals:
(a)
(b)
(c)
(d)
(e)
(f)
13. Using the method of integration by parts, evaluate the following definite integrals:
(a)
(b)
(c)
14. Determine the following indefinite integrals:
(a)
(b)
(c)
(d)
(e)
(f)
(g)
(h)
(i)
(j)
(k)
15. Determine the moment of inertia for a uniform triangular sheet of mass _M_ , base _b_ and height _h_ about (a) an axis through the centroid and parallel to the base and (b) about the base. The centroid is at one-third the height.
16. Determine the moment of inertia of a flat circular ring with an inner radius _r_ , outer radius 2 _r_ and mass _M_ about an axis through its centre and at right angles to its plane.
17. Determine the moment of inertia of a uniform square sheet of mass _M_ and side _L_ about (a) an axis through its centre and in its plane, (b) an axis in its plane a distance _d_ from its centre.
18. Determine the mean values of the following functions between the specified limits:
(a) _y_ = 2 _x_ between _x_ = 0 and _x_ = 1,
(b) _y_ = _x_ 2 between _x_ = 1 and _x_ = 4,
(c) _y_ = 3 _x_ 2 − 2 _x_ between _x_ = 1 and _x_ = 4,
(d) _y_ = cos2 _x_ between _x_ = 0 and _x_ = 2π.
19. With simple harmonic motion, the displacement _x_ of an object is related to the time _t_ by _x_ = _A_ cos _ωt_. Determine the mean value of the displacement during one-quarter of an oscillation, i.e. between when _ωt_ = 0 and _ωt_ = π/2.
20. The number _N_ of radioactive atoms in a sample is a function of time _t_ , being given by _N_ = _N_ 0 e− _λt_. Determine the mean number of radioactive atoms in the sample between _t_ = 0 and _t_ = 1/λ.
21. Determine the root-mean-square values of the following functions between the specified limits:
(a) _y_ = _x_ 2 from _x_ = 1 to _x_ = 3,
(b) _y_ = _x_ from _x_ = 0 to _x_ = 2,
(c) _y_ = sin _x_ \+ 1 from _x_ = 0 to _x_ = 2π,
(d) _y_ = sin 2 _x_ from _x_ = 0 to _x_ = π,
(e) _y_ = e _x_ from _x_ = −1 to _x_ = +1
22. Determine the root-mean-square value of a half-wave rectified sinusoidal voltage. Between the times _t_ = 0 and _t_ = π/ω the equation is _v_ = _V_ sin _ωt_ and between _t_ = π/ω and _t_ = 2π/ω we have _v_ = 0.
5
# Differential equations
Summary
This chapter introduces ordinary differential equations, shows how they can be used to model the behaviour of systems in engineering and looks at their solution for different inputs to the systems. Differential equations arise from such situations as the lumped models designed to represent systems (see chapter 3), the motion of projectiles, the cooling of a solid or liquid, transient currents and voltages in electrical circuits, oscillations with mechanical or electrical systems and the rate of decay of radioactive substances.
Objectives
By the end of this chapter, the reader should be able to:
• represent engineering systems by differential equations;
• solve first- and second-order differential equations;
• solve the differential equations representing models of engineering systems for step and ramp inputs.
## 5.1 Differential equations
A _differential equation_ is an equation involving derivatives of a function. Thus examples of differential equations are:
The term _ordinary differential equation_ is used when there is only one independent variable, the above examples having only _y_ as a function of _x_ and so being ordinary differential equations.
Chapter 3 showed how differential equations can be evolved for the mathematical models of lumped engineering systems. The following extends that analysis to illustrate how ordinary differential equations can be evolved for some simple systems.
#### Mechanical systems
Consider a freely falling body of mass _m_ in air (Figure 5.1). The gravitational force acting on the body is _mg_ , where _g_ is the acceleration due to gravity. Opposing the movement of the body through the air is air resistance. Assuming that the air resistance force is proportional to the velocity _v_ , the net force _F_ acting on the body is _mg_ − _kv_ , where _k_ is a constant. But Newton's second law gives the net force _F_ acting on a body as the product of its mass _m_ and acceleration _a_ , i.e. _F_ = _ma_. But acceleration is the rate of change of velocity _v_ with time _t_. Thus we can write:
Key point
The order of a differential equation is equal to the order of the highest derivative that appears in the equation.
Figure 5.1 Body falling in air
and so the differential equation describing this system is:
[1]
This is a _first-order differential equation_ because the highest derivative is d _v_ /d _t_. It describes how the velocity varies with time.
Consider another mechanical system, an object of mass _m_ suspended from a support by a spring (Figure 5.2). When the mass is placed on the spring it stretches by _d_ , called the static displacement (Figure 5.2(a)). Assuming Hooke's law, and so the displacement proportional to the force exerted by the spring, we can write _F_ = _kd_. At equilibrium, considering the vertical forces and applying _F_ = _ma_ , we have _mg_ − _kd_ = _ma_ = 0 as there is zero acceleration. Now if we pull the body down a distance _x_ from this equilibrium position (Figure 5.2(b)) and again apply _F_ = _ma_ to the system when released, the net restoring force acting on the body is _mg_ − _k_ ( _d_ \+ _x_ ) = _ma_ and so − _kx_ = _ma_. Since acceleration is the rate of change of velocity with time, with velocity being the rate of change of displacement with time:
and so:
[2]
This is a _second-order differential equation_ because the highest derivative is d2 _x_ /d _t_ 2. It describes the resulting oscillations of the body after it has been released.
Figure 5.2 Mass on spring
#### Electrical systems
For an electrical circuit with a resistor in series with a capacitor (Figure 5.3), the supply voltage _V_ equals the sum of the voltages across the resistor and capacitor:
When a pure capacitor has a potential difference _v_ applied across it, the charge _q_ on the plates is given by _q_ = _Cv_ , where _C_ is the capacitance. Current _i_ is the rate of movement of charge and so:
[3]
and thus we have:
[4]
This first-order differential equation describes how the capacitor voltage changes with time from when the switch is closed.
Figure 5.3 Series RC circuit
When a charged capacitor discharges through a resistance (Figure 5.4) then _v_ R \+ _v_ C = 0 and so:
[5]
This first-order differential equation describes how the capacitor voltage changes with time from when the switch is closed.
Figure 5.4 RC discharge circuit
When a pure inductor has a current _i_ flowing through it, then the induced e.m.f. produced in the component is proportional to the rate of change of current, the induced e.m.f. being − _L_ d _i_ /d _t_ where _L_ is the inductance. If the component has only inductance and no resistance, then there can be no potential drop across the component due to the current through the resistance and thus to maintain the current through the inductor the voltage source must supply a potential difference _v_ which just cancels out the induced e.m.f. Thus the potential difference across an inductor is:
[6]
If we have an electrical circuit containing an inductor in series with a resistor (Figure 5.5) then, when the supply voltage _V_ is applied, we have _V_ = _v_ L \+ _v_ R. Thus, using equation [6]:
Figure 5.5 Series RL circuit
The steady state current _I_ will be attained when the current ceases to change with time. We then have _RI_ = _V_ and so the first-order differential equation can be written as:
[7]
Consider now a circuit including a resistor, a capacitor and an inductor in series (Figure 5.6). When the switch is closed the supply voltage _v_ is applied across the three components and _V_ = _v_ R \+ _v_ L \+ _v_ c. Thus, using equation [6]:
Figure 5.6 Series RLC circuit
Since _i_ = _C_ d _v_ c/d _t_ (equation [3]), then:
[8]
This second-order differential equation describes how the voltage across the capacitor varies with time.
#### Hydraulic systems
Consider an open tank into which liquid can enter at the top through one pipe and leave at the base through another (Figure 5.7). If the liquid enters at the rate of a volume of _q_ 1 per second and leaves at the rate of _q_ 2 per second, then the rate at which the volume _V_ of liquid in the tank changes with time is:
But _V_ = _Ah_ , where _A_ is the cross-sectional area of the tank and _h_ the height of the liquid in the tank. Thus:
The rate at which liquid leaves the tank, when flowing from the base of the tank into the atmosphere, is given by Torricelli's theorem as _q_ 2 = √(2 _gh_ ). Thus we have:
[9]
#### Solving differential equations
The differential equation d _y_ /d _x_ = 2 describes a straight line with a constant gradient of 2 (Figure 5.8). There are, however, many possible graphs which fit this specification, the family of such lines having equations of the form _y_ = 2 _x_ \+ _A_ , where _A_ is a constant. These are all solutions for the differential equation.
Figure 5.7 Liquid level in a tank
Figure 5.8 General solution
Thus the differential equation d _y_ /d _x_ = 2 has many solutions given by _y_ = 2 _x_ \+ _A_ , this being termed the _general solution_. Only if constraints are specified which enable constants like _A_ to be evaluated will there be just one solution, this being then termed a _particular solution_. The term _initial conditions_ is used for the constraints if specified at _x_ = 0 and _boundary conditions_ if specified at some other value of _x_. Thus if, for a general solution _y_ = 2 _x_ \+ _A_ , we have the initial condition that _y_ = 0 when _x_ = 0 then _A_ is 0 and so the particular solution is _y_ = 2 _x_.
Key point
The term solution is used with a differential equation for the relationship between the dependent and independent variables such that the differential equation is satisfied for all values of the independent variable. The general solution consists of a family of equations which satisfy the differential equation; with a first-order differential equation the general solution involves just one arbitrary constant. Given initial or boundary conditions, it is possible to find a value for the arbitrary constant and so obtain a particular solution.
Example
Verify that _y_ = e _x_ is a particular solution of the differential equation d _y_ /d _x_ = _y_. If _y_ = e _x_ then d _y_ /d _x_ = e _x_. Thus for all values of _x_ we have d _y_ /d _x_ = _y_ and so _y_ = e _x_ is a solution.
Example
_y_ = _A_ e _x_ \+ _B_ e2 _x_ is a general solution of the differential equation:
Determine the particular solution for the boundary conditions _y_ = 3 when _x_ = 0 and d _y_ /d _x_ = 5 when _x_ = 0.
For _y_ = _A_ e _x_ \+ _B_ e2 _x_ with _y_ = 3 when _x_ = 0 we have 3 = _A_ \+ _B_. With _y_ = _A_ e _x_ \+ _B_ e2 _x_ we have d _y_ /d _x_ = _A_ e _x_ \+ 2 _B_ e2 _x_ and thus with d _y_ /d _x_ = 5 when _x_ = 0 we have 5 = _A_ \+ 2 _B_. This pair of simultaneous equations gives _A_ = 1 and _B_ = 2. Thus the particular solution is:
We can check that this is a valid solution by substituting it in the differential equation:
### Problems 5.1
1. Derive differential equations to represent the following situations:
(a) The velocity _v_ of an object of mass _m_ in terms of time _t_ when thrown vertically upwards against air resistance proportional to the square of its velocity.
(b) The displacement _x_ of a mass _m_ on a spring when the mass is pulled down from its equilibrium position and released when there is a damping force proportional to the velocity.
(c) The velocity _v_ of a boat of mass _m_ on still water in terms of time _t_ after the engines are switched off if the drag forces acting on the boat are proportional to the velocity.
(d) The velocity _v_ of an object falling from rest in air if the drag forces are proportional to the square of the velocity.
(e) The intensity _I_ of a beam of light emerging from a block of glass in terms of the thickness _x_ of the glass if the intensity decreases at a rate proportional to the block thickness.
(f) The rate at which the pressure _p_ at the base of a tank changes with time if liquid of density ρ enters the tank at the volume rate of _q_ 1 and leaves at the rate of _q_ 2.
(g) The height _h_ of a liquid in a tank open to the atmosphere as a function of time _t_ after a leak from the base of the tank occurs.
2. Verify that the following are solutions of the given differential equations:
(a)
(b)
(c)
(d)
3. For the following general solutions of differential equations, verify that they are solutions and determine the particular solution for the given boundary conditions:
(a) _y_ = _A_ e _x_ \+ _Bx_ e _x_ for:
(b) _y_ = _A_ sin _ωt_ \+ _B_ cos _ωt_ for:
(c) _y_ = ( _A_ \+ _x_ 2) e− _x_ for:
4. The differential equation relating the deflection _y_ with distance _x_ from the fixed end of a cantilever with a uniformly distributed load is:
The general solution is given as:
Verify that this is the general solution and determine the particular solution for _y_ = 0 and d _y_ /d _x_ = 0 at _x_ = 0.
## 5.2 First-order differential equations
First-order differential equations are often used to model the behaviour of engineering systems. For example, the exponential growth system where the rate of change d _N_ /d _t_ of some quantity is proportional to the quantity _N_ present can be represented by:
or exponential decay, e.g. radioactivity, where the rate at which a quantity decreases is proportional to the quantity present:
Such differential equations are of the form:
[10]
Another form of differential equation is illustrated by the growth of the voltage _v_ C across a capacitor in an electrical circuit having a capacitor _C_ in series with a resistor _R_ and connected to a step voltage input of _V_ :
Such equations are of the form:
[11]
where _P_ and _Q_ are constants or functions of _x_.
This section looks at probably the most common method that is used for the solution of such differential equations, the separation of variables, and how it can be used to determine the output of systems which are modelled by first-order differential equations.
#### Separation of variables
A first-order equation is said to be _separable_ if the variables _x_ and _y_ can be separated. To solve such equations we simply separate the variables and then integrate both sides of the equation with respect to _x_. The following shows solutions of the various forms taken by separable equations:
• **Equations of the form**
If we integrate both sides of the equation with respect to _x_ :
This is equivalent to separating the variables and writing:
[12]
Example
Solve the differential equation d _y_ /d _x_ = 2 _x_.
Separating the variables gives:
and thus _y_ = _x_ 2 \+ _A_.
Example
If d _p_ /d _t =_ (3 − _t_ )2, find _p_ in terms of _t_ given the condition that _p_ = 3 when _t_ = 2.
Separating the variables gives:
and so:
This is the general solution. Using the given conditions that _p_ = 3 when _t_ = 2 gives:
Hence _C_ = −5.67 and so the specific solution is:
• **Equations of the form**
This can be rearranged to give:
Integrating both sides with respect to _x_ :
This is equivalent to separating the variables:
[13]
Example
Solve the differential equation d _y_ /d _x_ = 2 _y_.
Separating the variables gives:
Thus ln _y_ = 2 _x_ \+ _A_. We can write this as _y_ = e2 _x_ \+ _A_ = e2 _x_ e _A_ = _B_ e2 _x_ , where _B_ is a constant.
• **Equations of the form**
Integrating both sides of the equation with respect to _x_ gives:
This is equivalent to:
[14]
Example
Solve the differential equation d _y_ /d _x_ = 2 _x_ / _y_.
Separating the variables give:
Thus ½ _y_ 2 = _x_ 2 \+ _A_.
Key point
To solve first-order differential equations by separation of variables:
1. Write the differential equation in the form _f_ ( _y_ ) d _y_ = _g_ ( _x_ ) d _x_.
2. Solve by integrating both sides of the equation.
• **Equations of the form**
This can be rearranged and integrated with respect to _x_ to give:
This is equivalent to:
[15]
Example
Solve the differential equation d _y_ /d _x_ = 2 _yx_.
Separating the variables gives:
Thus ln _y_ = _x_ 2 \+ _A_.
Equations which are not of any of the above forms may often be put into one of the forms by a _change of variable_. As an illustration, consider the differential equation d _y_ /d _x_ = _y_ /( _y_ \+ _x_ ). This can be written as:
If we let _v_ = _y_ / _x_ then _y_ = _vx_ and d _y_ /d _x_ = _v_ \+ _x_ d _v_ /d _x_. Thus the above equation can be written as:
Integrating with respect to _x_ :
This is equivalent to:
Hence ln _v_ − (1/ _v_ ) = −ln _x_ \+ _A_ and so ln( _y_ / _x_ ) − ( _x_ / _y_ ) = −ln _x_ \+ _A_.
Example
Solve the differential equation d _y_ /d _x_ = cos2 _y_ if _y_ = π/4 when _x_ = 0.
We can write the equation as:
Hence, separating the variables gives:
and so we have tan _y_ = _x_ \+ _A_. Since _y_ = π/4 when _x_ = 0 then tan π/4 = _A_ and so _A_ = 1. Thus tan _y_ = _x_ \+ 1 or _y_ = tan−1( _x_ \+ 1).
### 5.2.1 The responses of first-order systems
This section looks at how, when differential equations are involved in modelling a system, the dynamic responses of systems can be predicted. For example, if the input signal to a measurement system suddenly changes, the output will not instantaneously change to the new value but some time will elapse before it reaches a steady-state value. If the voltage applied to an electrical circuit suddenly changes to a new value, the current in the circuit will not change instantly to the new value but some time will elapse before it reaches the steady new value. If a continually changing signal is applied to a system, the response of the system may lag behind the input. The way in which a system reacts to input changes is termed its _dynamic characteristic_.
#### First-order systems and step inputs
Consider a thermometer (Figure 5.9) at temperature _T_ 0 inserted into a liquid at a temperature _T_ 1. We can thus think of the thermometer being subject to a step input, i.e. the input abruptly changes from _T_ 0 to _T_ 1. The thermometer will then, over a period of time, change its temperature until it becomes _T_ 1. Thus we have a measurement system, the thermometer, which has a step input and an output which changes from _T_ 0 to _T_ 1 over some time. How does the output, i.e. the reading of the thermometer _T_ , vary with time?
Figure 5.9 Thermometer inserted in liquid
The rate at which energy enters the thermometer from the liquid is proportional to the difference in temperature between the liquid and the thermometer. Thus, at some instant of time when the temperature of the thermometer is _T_ , we can write:
where _h_ is a constant called the _heat transfer coefficient_. For a thermometer with a specific heat capacity _c_ and a mass _m_ , the relationship between heat input _Q_ and the consequential temperature change is:
When the rate at which heat enters the thermometer is d _Q_ /d _t_ , we can write for the rate at which the temperature changes:
Thus:
We can rewrite this with all the output terms on one side of the equals sign and the input on the other, thus:
[16]
We no longer have a simple relationship between the input and output but a relationship which involves time. The form of this equation is typical of first-order systems.
We can solve this equation by separation of the variables:
where _A_ is a constant. This can be rewritten as:
where τ = _mc_ / _h_ and is termed the _time constant_. The time constant can be defined as the value of the time which makes the exponential term become e−1. _T_ = _T_ 0 at _t_ = 0 and so _C_ = _T_ 1 − _T_ 0. Thus:
[17]
The first term is the _steady-state value_ , i.e. the value that will occur after sufficient time has elapsed for all transients to die away, and the second term a transient one which changes with time, eventually becoming zero. Figure 5.10 shows graphically how the temperature _T_ indicated by the thermometer changes with time.
Figure 5.10 Response of a first-order system to a step input
After a time equal to one time constant the output has reached about 63% of the way to the steady-state temperature, after a time equal to two time constants the output has reached about 86% of the way, after three time constants about 95% and after about four time constants it is virtually equal to the steady-state value. The error at any instant is the difference between what the thermometer is indicating and what the temperature actually is. Thus:
[18]
This error changes with time and eventually will become zero. Thus it is a transient error.
If a thermometer is required to be fast reacting and quickly attain the temperature being measured, it needs to have a small time constant. Since τ = _mc_ / _h_ , this means a thermometer with a small mass, a small thermal capacity and a large heat transfer coefficient. If we compare a mercury-in-glass thermometer with a thermocouple, then the smaller mass and specific heat capacity of the thermocouple will give it a smaller time constant and hence a faster response to temperature changes.
Example
A thermometer indicates a temperature of 20°C when it is suddenly immersed in a liquid at a temperature of 60°C. If the thermometer behaves as a first-order system and has a time constant of 5 s what will its readings be after (a) 5 s, (b) 10 s, (c) 15 s.
The temperature _T_ of the thermometer varies with time according to equation [17]:
After 5 s the thermometer reading will have reached about 63% of the way to the steady-state value, after 10 s about 86%, after 15 s about 95% and after 20 s it is virtually at the steady-state value. Thus after 5 s the reading is 45.3°C, after 10 s it is 54.6°C, after 15 s it is 58.0°C.
Example
A thermometer which behaves as a first-order element has a time constant of 15 s. Initially it reads 20°C. What will be the time taken for the temperature to rise to 90% of the steady-state value when it is immersed in a liquid of temperature 100°C, i.e. a temperature of 92°C?
Equation [17], _T_ = _T_ 1 \+ ( _T_ 0 − _T_ 1) e− _t/τ_ , can be arranged as:
With _T_ − _T_ 0 as 90% of _T_ 1 − _T_ 0, then we have _T_ − _T_ 1 as 10% of _T_ 1 − _T_ 0 and thus:
Taking logarithms gives −2.30 = − _t_ /15 and so _t_ = 34.5 s.
Maths in action
Transients in electrical circuits
Consider the growth of current in a circuit possessing inductance and resistance (Figure 5.11). At some time _t_ after the switch is closed and the current is _i_ , we have:
Since _v_ R = _Ri_ and _v_ L = _L_ d _i_ /d _t_ (equation [6]):
This is a first-order differential equation. It can be solved by separating the variables:
If the switch is closed at time _t_ = 0 then _i_ = 0 when _t_ = 0. For the other limit of integration we look for the current to be _i_ at time _t_. Thus:
The maximum circuit current _I_ is _V_ / _R_ and so:
When _t_ = _L_ / _R_ then _i_ = _I_ (1 − e−1) = 0.63 _I_. This is the same as in Figure 5.10 and so _L_ / _R_ is the time constant of the circuit.
Figure 5.11 Circuit with series inductance and resistance
#### First-order systems in general
In general, a first-order system has a differential equation which can be written in the form:
[19]
where _x_ is the output, _t_ the time and _y_ the input; _a_ 0, _a_ 1 and _b_ are constants for the system represented by the equation. The left-hand side of the equals sign contains the output related terms and the right-hand side the input related terms. This equation can be rearranged as:
and, if we let τ = _a_ 1/ _a_ 0 and _k_ = _b_ / _a_ 0, then we have:
[20]
τ defines the _time constant_ of the system and _k_ the _static system sensitivity_.
The steady-state value of the output occurs when d _x_ /d _t_ = 0 and so _x_ = _ky_ and thus:
[21]
The solution of the differential equation for a step input from some initial value to final value at time _t_ = 0 is of the form:
[22]
Table 5.1 shows the percentage of the response, i.e. ( _x_ − initial value)/(steady-state value − initial value) × 100%, that will have been achieved after various multiples of the time constant. The percentage dynamic error is (steady-state value − _x_ )/(steady-state value − initial value) × 100%. With a step input, the time constant can be defined as the time taken for the output to reach 63.2% of the steady-state value (see Figure 5.10).
Table 5.1
First-order system response
Time | % response | % dynamic error
---|---|---
0 | 0.0 | 100.0
1τ | 63.2 | 36.8
2τ | 86.5 | 13.5
3τ | 95.0 | 5.0
4τ | 98.2 | 1.8
5τ | 99.3 | 0.7
∞ | 100.0 | 0.0
There is an alternative way of defining the time constant. At the instant the input starts and we have _t_ = 0, then _x_ = 0 and so equation [20] gives τ d _x_ 0/d _t_ = _ky_ , where d _x_ 0/d _t_ is the initial gradient of the graph of output with time. Thus, since _ky_ is the steady-state value:
[23]
Thus, on a graph of output plotted against time for a step input (Figure 5.12), if we draw the tangent to the curve at time _t_ = 0, equation [23] gives the initial gradient and so the time constant can be considered to be the time taken for the output to reach the steady-state value if the initial rate of change of output with time were maintained.
Figure 5.12 Step response of a first-order system
Key point
All first-order systems have the input-output relationship defined by a differential equation of the form:
and all give a response to a step input of the form shown in Figure 5.10.
The time constant can be defined as the time taken, when there is a step input, as:
• the output to reach 63.2% of the steady-state value;
• the output to reach the steady-state value if the initial rate of change of output with time were maintained.
Key point
A way of looking at differential equations which you might come across is in terms of the D-operator. The term _operator_ is used for a function which transforms one function into another function. The _D-operator_ is such a function which is sometimes used with differential equations. With such an operator we regard d _y_ /d _x_ as the result of an operator applied to the function _y_ and write this as _Dy_.
The differential equation:
thus becomes written as:
_D_ behaves like an ordinary algebraic quantity and so we can write:
Thus _k_ /( _τD_ \+ 1) is the quantity we operate on the input by in order to give the output and is called a _transfer function_.
Likewise, we have:
and can write second-order differential equations in terms of the _D_ -operator and obtain a transfer function.
Example
An electrical circuit consisting of resistance _R_ in series with an initially uncharged capacitor of capacitance _C_ has an input of a step voltage _V_ at time _t_ = 0. Determine (a) how the potential difference across the capacitor will change with time and (b) with _R_ = 1 MΩ, _C_ = 4 μF and a step voltage of 12 V, the potential difference across the capacitor after 2 s.
(a) The differential equation, equation [5], is:
This equation is the same form as equation [19] so we can recognise that the solution must be of the form given by equation [22] with the time constant being _RC_ :
(b) The time constant is _RC_ = 1 × 106 × 4 × 10−6 = 4 s. Thus after 2 s, _v C_ = 12(1 − e−2/4) = 4.72 V.
Example
A thermocouple in a protective sheath has an output voltage θ0 in volts related to the input temperature θi in °C by the equation:
Determine the time constant τ and the static system sensitivity _k_.
To put the equation in the standard form of equation [20] we divide by 3:
The time constant is thus 10 s and the static system sensitivity 0.5 × 10−5 V/°C. The thermocouple thus takes 10 s to reach 63.2% of its steady-state output and we need to wait at least three times this time for the output to be close to the voltage corresponding to the temperature being measured.
Example
Determine the time constant and the static system sensitivity for the hydraulic system shown in Figure 5.13 in which a liquid flows into a container at a constant rate and liquid also flows out of the container through a valve at a constant rate.
Figure 5.13 Example
The differential equation for this system was developed in chapter 3. The rate of change of liquid volume in the container with time is _A_ d _h_ /d _t_ and so:
For the resistance term for the valve we have _p_ 1 − _p_ 2 = _Rq_ 2 and so, since the pressure difference is _hρg_ :
Thus, substituting for _q_ 2 gives:
and so we can write:
We can put this equation in the standard form by dividing by _ρg_ / _R_ :
Comparison with equation [20] thus gives:
and:
Note, that in terms of the _D_ -operator, we can write the differential equation as:
and so a transfer function of:
### Problems 5.2
1. Solve, by separation of the variables, the following differential equations:
(a)
(b)
(c)
(d)
(e)
(f)
(g)
(h)
(i)
(j)
(k)
(l)
2. Determine the solution of the differential equation d _y_ /d _x_ = 2 _xy_ 2 if _y_ = ½ when _x_ = 0.
3. A capacitor of capacitance _C_ which has been charged to a voltage _V_ 0 is discharged through a resistance _R_. Determine how the voltage _v C_ across the capacitor changes with time _t_ if d _v C_ /d _t_ = − _v C_ / _RC_.
4. The rate at which radioactivity decays with time _t_ is given by the differential equation d _N_ /d _t_ = − _kN_ , where _N_ is the number of radioactive atoms present at time _t_. If at time _t_ = 0 the number of radioactive atoms is _N_ 0, solve the differential equation and show how the number of radioactive atoms varies with time.
5. When a steady voltage _V_ is applied to a circuit consisting of a resistance _R_ in series with inductance _L_ , determine how the current _i_ changes with time _t_ if _L_ d _i_ /d _t_ \+ _Ri_ = _V_ and _i_ = 0 when _t_ = 0.
6. Determine the solution of the differential equation d _y_ /d _x_ = 2 − _y_ if _y_ = 1 when _x_ = 0.
7. A stone freely falls from rest and is subject to air resistance which is proportional to its velocity. Derive the differential equation describing its motion and hence determine how its velocity _v_ varies with time _t_ if _v_ = 0 at _t_ = 0. Take the acceleration due to gravity as 10 m/s2.
8. For a belt drive, the difference in tension _T_ between the slack and tight sides of the belt over a pulley is related to the angle of lap θ on the pulley by d _T_ /dθ = _μT_ , where μ is the coefficient of friction. Solve the differential equation if _T_ = _T_ 0 when θ = 0°.
9. A rectangular tank is initially full of water. The water, however, leaks out through a small hole in the base at a rate proportional to the square root of the depth of the water. If the tank is half empty after one hour, how long must elapse before it is completely empty?
10. For a circuit containing resistance _R_ in series with capacitance _C_ , the potential difference _v c_ across the capacitor varies with time, being given by _v C_ = _V_ − _V_ e− _t_ / _RC_. What is the time constant for the circuit?
11. A hot object cools at a rate proportional to the difference between its temperature and that of its surroundings. If it initially is at 75°C and cooling at a rate of 2° per minute, what will be its temperature after 15 minutes if the surroundings are at a temperature of 15°C?
12. A sphere of ice melts so that its volume _V_ changes at the rate given by d _V_ /d _t_ = −4 _πkr_ 2, where _k_ is a constant and _r_ is the radius at time _t_ after it began to melt. Show that, if _R_ is the initial radius, _r_ = _R_ e− _kt_.
13. A 1000 μF capacitor has been charged to a potential difference of 12 V. At time _t_ = 0 it is discharged through a 20 kΩ resistor. What will be the potential difference across the capacitor after 2 s?
14. Determine how the circuit current varies with time when there is a step voltage _V_ input to a circuit having an inductance _L_ in series with resistance _R_.
15. A sensor behaves as a capacitance of 2 μF in series with a 1 MΩ resistance. As such the relationship between its input _y_ and output _x_ is given by 2(d _x_ /d _t_ ) + _x_ = _y_. How will the output vary with time when the input is a unit step input at time _t_ = 0?
16. A system is specified as being first order with a differential equation relating output _x_ to input _y_ by:
If it has a time constant of 10 s and a steady-state value of 5, how will the output of the system vary with time when subject to a step input?
17. A sensor is first order with a differential equation relating its output _x_ for input _y_ by:
If it has a time constant of 1 s, what will be the percentage dynamic error after (a) 1 s, (b) 2 s, from a unit step input signal to the sensor?
18. How long must elapse for the dynamic error of a sensor with a differential equation of the form:
and subject to a step input to drop below 5% if the sensor is first order with a time constant of 4 s?
19. A thermometer originally indicates a temperature of 20°C and is then suddenly inserted into a liquid at 45°C. The thermometer has a time constant of 2 s. (a) Derive a differential equation showing how the thermometer reading is related to the temperature input and (b) give its solution showing how the thermometer reading varies with time.
## 5.3 Second-order differential equations
As an example of a second-order ordinary differential equation, consider the displacement _y_ of a freely falling object in a vacuum as a function of time _t_. It falls with the acceleration due to gravity _g_ and is described by the second-order differential equation:
[24]
Key point
In general, a second-order differential equation has the form:
where a2, a1, a0 and b are functions of x, b often being termed the forcing function.
Another example is the displacement _y_ of an object when freely oscillating with simple harmonic motion when there is damping, this being described by the second-order differential equation:
[25]
If the oscillating object is not left freely to oscillate when some external force is applied, say _F_ sin _ωt_ , then we have:
[26]
With a series electrical circuit containing resistance _R_ , capacitance _C_ and inductance _L_ , the potential difference _v_ C across the capacitor when it is allowed to discharge is described by the second-order differential equation:
[27]
If such a circuit has a voltage _V_ applied to it we have:
[28]
#### Arbitrary constants
Consider an object falling freely with the acceleration due to gravity _g_. If we take _g_ to be 10 m/s2 then equation [24] becomes:
If we integrate both sides of the equation with respect to _t_ we have:
where _A_ is the constant of integration. If we now integrate this equation with respect to _t_ :
where _B_ is the constant arising from this integration. Thus the above general solution for the second-order differential equation has two arbitrary constants. With all second-order differential equations there will be two arbitrary constants because two integrations are needed to obtain the solution.
Key point
The general solution for a second-order differential equation will have two arbitrary constants.
Because there are two arbitrary constants with a second-order differential equation, two sets of values are needed to determine them. This is generally done by specifying two initial conditions: the value of the solution and the value of the derivative at a single point. Thus we might have the initial conditions that _y_ = 20 at _t_ = 0 and d _y_ /d _t_ = 0 at _t_ = 0.
Example
If the general solution to the differential equation:
is _y_ = _A_ e _x_ \+ _Bx_ e _x_ , determine the solution if _y_ = 1 at _x_ = 0 and d _y_ /d _x_ = −1 at _x_ = 0.
From the initial condition _y_ = 1 at _x_ = 0 we have, when substituting these values in the general solution, 1 = _A_ \+ 0. Thus _A_ = 1. If we differentiate the general solution to give d _y_ /d _x_ = _A_ e _x_ \+ _Bx_ e _x_ \+ _B_ e _x_ and substitute the initial condition d _y_ /d _x_ = −1 at _x_ = 0, then −1 = _A_ \+ 0 + _B_ and so _B_ = −2. Thus the solution is _y_ = e _x_ − 2 _x_ e _x_.
### 5.3.1 Second-order homogeneous differential equations
Consider a second-order differential equation of the basic form:
[29]
where _a_ 2, _a_ 1 and _a_ 0 are constants. Such a differential equation is said to be _homogeneous_ since, when all the dependent variables are moved to the left of the equal sign, there is just a zero on the right. In the case of a homogeneous linear first-order differential equation with constant coefficients:
we have d _y_ /d _x_ = −( _a_ 0 _/a_ 1) _y_ and thus, by separation of the variables, the solution is ln _y_ = −( _a_ 0 _/a_ 1) _x_ \+ _A_ or _y_ = _C_ e _kx_ , where _k_ = −( _a_ 0 / _a_ 1). To solve the constant coefficient second-order differential equation it seems reasonable to consider that it might have a solution of the form _y_ = _A_ e _sx_ , where _A_ and _s_ are constants. Thus, trying this as a solution, the second-order differential equation [29] becomes:
Since the exponential function is never zero we must have, if _y_ = _A_ e _sx_ is to be a solution:
[30]
Equation [30] is called the _auxiliary equation_ or _characteristic equation_ associated with the differential equation [29]. This quadratic equation has the roots:
[31]
The roots of the auxiliary equation, as given by equation [31], can be:
• **Two distinct real roots if**
The general solution to the differential equation is then:
[32]
Example
Determine the general solution of the differential equation:
Trying _y_ = _A_ e _sx_ as a solution gives the auxiliary equation:
which factors as ( _s_ − 3)( _s_ \+ 2) = 0 and so _s_ 1 = 3 and _s_ 2 = −2. Thus the general solution is:
• **Two equal real roots if**
This gives _s_ 1 = _s_ 2 = − _a_ 1/2 _a_ 2. In order to have a solution with two arbitrary constants we _cannot_ have a general solution of:
since this can be reorganised to imply only one constant. Thus we try a second solution of the form _y_ = _Bx_ e _sx_. Then, since d _y_ /d _x_ = _B_ e _sx_ \+ _Bsx_ e _sx_ and d2 _y_ /d _x_ 2 = 2 _Bs_ e _sx_ \+ _Bs_ 2 _x_ e _sx_ , substituting into equation [29] gives:
But _a_ 2 _s_ 2 \+ _a_ 1 _s_ \+ _a_ 0 = 0 is the auxiliary equation and so the first term is zero. Also _s_ = − _a_ 1/2 _a_ 2 and so the second term is zero. Thus _y_ = _Bx_ e _sx_ is a solution. The general solution is thus:
[33]
Example
Determine the general solution of the differential equation:
Trying _y_ = _A_ e _sx_ as a solution gives the auxiliary equation:
This factors as ( _s_ \+ 4)( _s_ \+ 4) = 0 and so we have two roots of _s_ = −4. The solution is thus of the form given in equation [33]:
• **Two distinct complex roots if**
With this condition, equation [31] can be written as:
where α = −( _a_ 1/2 _a_ 2) and β = √(4 _a_ 2 _a_ 0 − _a_ 12)/2 _a_ 2. Thus the general solution is:
This can be written as:
Key point
_Euler's equation_
A complex number _z_ can be expressed as:
Hence:
But this means that the derivative is just j times _z_. A function with this property that the derivative is proportional to itself is the exponential. Thus we can write:
This is known as Euler's equation.
An alternative way of arriving at this equation is to consider sines and cosines expressed as series, and so write:
Since j2 = −1, j3 = −j, j4 = 1, j5 = j, etc. we can write the equation as:
But this is the form of the series for e _jθ_. Thus we can write:
There is a relationship called Euler's formula which enables the above equation to be written as:
[34]
Example
Determine the general solution of the differential equation:
and the particular solution if _y_ = 1 and d _y_ /d _x_ = 2 at _x_ = 0.
Trying _y_ = _A_ e _sx_ as a solution gives, since d _y_ /d _x_ = _sA_ e _sx_ and d2 _y_ /d _x_ 2 = _s_ 2 _A_ e _sx_ , the auxiliary equation:
This has roots:
The general solution will thus be of the form given for equation [34]:
With _y_ = 1 when _x_ = 0 we have:
and thus _C_ = 1. Differentiating the solution gives:
With d _y_ /d _x_ = 2 at _x_ = 0 we have:
and so _D_ = −½. Thus the particular solution is:
Key point
If the roots of the auxiliary equation are both real, i.e. then:
If the roots are real and equal, i.e.
If the roots are imaginary, i.e. then:
Maths in action
System of a mass on a spring
Consider engineering systems which can be represented by a mass on a spring (Figure 5.14); we will assume there is no damping. If the mass is pulled downwards and then released, it oscillates on the spring. The force acting on the mass is just the restoring force and so:
This is a homogeneous second-order differential equation. If we try the solution _x_ = _A_ e _st_ , then we obtain:
and so _s_ 2 = − _k/m_ and we can write:
with _s_ an imaginary quantity. If we let ω = √( _k/m_ ), then the solution to the differential equation:
which we can write as:
If _x_ = 0 when _t_ = 0 then _C_ = 0 and thus:
The oscillations can be described by a sine function with angular frequency ω and amplitude _D_.
Figure 5.14 Mass on a spring
Maths in action
System of a damped mass and a spring
In a similar manner to the previous Maths in action, we may consider the oscillations of the damped system shown in Figure 5.15. The mass is constrained to move in purely a horizontal motion so we need only consider horizontal forces. There are many engineering systems which can be modelled by such a system. Later in this section we look at this system when there is an external force acting on the mass.
Figure 5.15 Mass, spring, damper system. Note that the mass is considered to be on rollers so that we can neglect friction
The force resulting from compressing the spring is proportional to the change in length _x_ of the spring, i.e. _kx_ with _k_ being a constant termed the spring stiffness. The force arising from the damping is proportional to the rate at which the displacement of the piston is changing, i.e. _c_ d _x_ /d _t_ with _c_ being a constant. Thus:
This net force will cause the mass to accelerate. Thus:
We can write this as:
In the absence of damping we have _m_ d2 _x_ /d _t_ 2 \+ _kx_ = 0 and the spring _naturally_ oscillating (see Earlier Maths in action in this chapter) with an angular frequency, which we can call the natural angular frequency of ωn given by:
If we define a constant ζ, termed the _damping ratio_ , by:
then we can write the second-order differential equation as:
Now, in order to solve this differential equation, we use a technique similar to that detailed in the previous Maths in action. We will try a solution of the form _x_ = _A_ e _st_. This produces the auxiliary equation:
and so:
This is a quadratic and we can use the usual equation for the roots of a quadratic to obtain:
The general solution is thus:
The resulting oscillation of the system depends on the term inside the square root sign.
_Damping ratio with a value between 0 and 1_
This gives two complex roots:
If we let ω = _ω_ n√(1 − _ζ_ 2) then, with _s = −ω_ n _ζ ± jω_ , we obtain:
By using Euler's equation (see Key point earlier in this section) we can write this as:
The exponential term means we have a damped oscillation. The equation can be expressed in an alternative form, since for the sine of a sum we can write sin( _ωt_ \+ ϕ) = sin _ωt_ cos ϕ + cos _ωt_ sin ϕ. If we let _P_ and _Q_ represent the opposite sides of a right-angled triangle of angle ϕ (Figure 5.16), then sin ϕ = _P_ /√( _P_ 2 \+ _Q_ 2) and cos ϕ = _Q_ /√( _P_ 2 \+ _Q_ 2) and so:
where _C_ is a constant and ϕ a phase difference. This describes a sinusoidal oscillation which is damped, the exponential term being the damping factor which gradually reduces the amplitude of the oscillation (Figure 5.17). Such a motion is said to be _under-damped_.
Figure 5.16 Angle φ
Figure 5.17 Under-damped oscillation
_Damping ratio with the value 1_
This gives two equal roots _s_ 1 = _s_ 2 = −ω _n_ and thus:
where _A_ and _B_ are constants. This describes a situation where no oscillations occur but _x_ exponentially changes with time. Such a motion is said to be _critically damped_.
_Damping ratio greater than 1_
This gives two real roots and thus:
This describes a situation where no oscillations occur but _x_ exponentially changes with time, taking longer to reach the steady-state zero displacement value than the critically damped motion. Such a motion is said to be _over-damped_.
Example
This example illustrates the discussion in the above Maths in action. For the system shown in Figure 5.18, the 8 kg mass is moved 0.2 m to the right of the equilibrium position and released from rest at time _t_ = 0. Determine its displacement at time _t_ = 2s.
Figure 5.18 Example
First we consider whether the system is underdamped, critically damped or overdamped.
Since the damping factor is less than 1 the system is underdamped. The natural frequency is:
and so the undamped frequency is:
The motion of the underdamped mass is described by:
At _t_ = 0, we have _x_ = 0.2 and so 0.2 = _C_ sin ϕ. Its velocity _v_ is d _x_ /d _t_ :
At _t_ = 0 the velocity is 0 and so 0 = −1.25C sin ϕ + 1.561C cos ϕ. We can solve these two simultaneous equations to give _C_ = 0.256 m and ϕ = 0.896 rad.
The displacement _x_ at time _t_ is thus given by:
Thus, at time _t_ = 2 s we have:
The minus sign indicates that the displacement is to the left of the equilibrium position.
### 5.3.2 Second-order non-homogeneous differential equations
Consider a non-homogeneous linear second-order differential equation with constant coefficients _a_ 2, _a_ 1 and _a_ 0 _with f_ ( _x_ ) being some function of _x_ , often being referred to as the _forcing function_ , applied to the system:
[35]
Key point
To determine the solution of a non-homogeneous second-order differential equation:
1. Find the general solution of the corresponding homogeneous differential equation. This is called the complementary function.
2. Then add to it any solution which fits the non-homogeneous differential equation. This is called the particular integral.
With such a non-homogeneous differential equation there is a general solution which is equal to the sum of, what are called, the _complementary function y c_ and the _particular integral y_ p.
The complementary function is obtained by solving the equivalent homogeneous differential equation, i.e. with _f_ ( _x_ ) = 0, and the particular integral by considering the form of the _f_ ( _x_ ) function and trying a particular solution of a similar form but which contains undetermined coefficients.
Right-hand side of non-homogeneous equation | Trial function, with _A, B, C,_ etc. being undetermined coefficients
---|---
Constant | _A_
Polynomial | _A_ \+ _Bx_ \+ _Cx_ 2 \+...
Exponential | _A_ e _kx_
Sine or cosine | _A_ sin _kx_ \+ _B_ cos _kx_
Note: if the right-hand side is a sum of more than one term then the trial solution is the sum of the trial functions for these terms.
Example
Determine the general solution of the differential equation:
To obtain the complementary function we consider the equivalent homogeneous differential equation, i.e.
Trying _y_ = _A_ e _sx_ as a solution gives the auxiliary equation:
This can be factored as ( _s_ − 3)( _s_ − 2) = 0 and so _s_ 1 = 3 and _s_ 2 = 2. The complementary function is thus:
To find the particular integral with _x_ 2 we try a solution of the form _y_ = _C_ \+ _Dx_ \+ _Ex_ 2. This gives d _y_ /d _x_ = _D_ \+ 2 _Ex_ and d2 _y_ /d _x_ 2 = 2 _E_. Substituting into the non-homogeneous differential equation gives:
Equating coefficients of x2 gives 6 _E_ = 1 and so _E_ = 1/6. Equating coefficients of _x_ gives −10 _E_ \+ 6 _D_ = 0 and so _D_ = 10/36 = 5/18. Equating constants gives 2 _E_ − 5 _D_ \+ 6 _C_ = 0 and so _C_ = 19/108. Thus the particular integral is:
and so the general solution is:
Example
Determine the general solution of the differential equation:
The corresponding homogeneous differential equation is:
Trying _y_ = _A_ e _sx_ as a solution gives the auxiliary equation:
This can be factored as ( _s_ \+ 2)( _s_ − 1) = 0 and so the roots are _s_ 1 = −2 and _s_ 2 = 1 and the complementary function is:
For the particular integral with an exponential forcing function we try a solution of the form _y_ = _C_ e _kx_. Substituting this into the non-homogeneous differential equation gives:
Thus we must have _k_ = 2 for equality of the exponentials and for the coefficients ( _k_ 2 \+ _k_ − 2) _C_ = 3 and hence _C_ = ¾. Hence the particular integral is _y_ p = ¾ e2 _x_ and the general solution is:
Example
Determine the general solution of the differential equation:
The corresponding homogeneous differential equation is:
Trying _y_ = _A_ e _sx_ as a solution gives the auxiliary equation:
This can be factored as (3 _s_ − 2)( _s_ \+ 1) = 0 and so the roots are _s_ 1 = 2/3 and _s_ 2 = −1 and the complementary function is:
For the particular integral we try a solution of the form _y_ = _C_ cos _kx_ \+ _D_ sin _kx_. Substituting this into the non-homogeneous differential equation gives:
For equality of the cosines we must have _k_ = 1 and −3 _C_ \+ _D_ −2 _C_ = 2. Equating coefficients of the sines gives −3 _D_ − _C_ − 2 _D_ = 0. Thus we have _C_ = −5/13 and _D_ = 1/13. The particular integral is thus:
The general solution is thus:
#### Exceptional cases of particular integrals
There are situations when the obvious form of function to be tried to obtain the particular integral yields no result because when it is substituted in the differential equation we obtain 0 = 0. This occurs when the right-hand side of the non-homogeneous differential equation consists of a function that is also a term in the complementary function. To illustrate this, consider the differential equation:
The complementary function is _y_ = _A_ e−2 _x_ \+ _B_ e _x_. For the particular integral, if we try the solution _y_ = _A_ e _kx_ we obtain:
and so no solution for _A_. In such cases we have to try something different.
Thus we try _y_ = _Ax_ e _kx_. This gives, for the above differential equation:
Thus _k_ = −2 and −2 _A_ \+ 4 _Ax_ − 2 _A_ \+ _A_ − 2 _Ax_ − 2 _Ax_ = 1. Equating constants gives 3 _A_ = 1, equating the _x_ coefficients gives 0 = 0, and so the particular integral is:
Example
Determine the general solution of the differential equation:
The corresponding homogeneous differential equation is:
Trying _y_ = _A_ e _sx_ as a solution gives the auxiliary equation:
This can be factored as ( _s_ − 5)( _s_ \+ 2) = 0 and so the complementary function is _y_ c = _A_ e5 _x_ \+ _B_ e−2 _x_. The right-hand side of the non-homogeneous differential equation is the sum of two terms for which the trial functions would be _C_ and _Dx_ e _kx_. We thus try the sum of these. Thus:
Equating exponential terms gives _k_ = −2, 4 _Dx_ − 2 _D_ − 2 _D_ \+ 6 _Dx_ − 3 _D_ − 10 _Dx_ = −1 and so _D_ = 1/7. Equating constants gives −10 _C_ = 4 and so _C_ = −4/10. Thus the particular integral is −(4/10) + (1/7) _x_ e−2 _x_. The general solution is therefore:
#### Forced oscillations of elastic systems
Oscillations of elastic systems in which the system is free to adopt its own frequency of oscillation are said to be natural or free oscillations. When a system is forced to oscillate by some external force _F_ at the frequency of this force, then the oscillations are said to be forced. In general, a simple model of such oscillations is given by a second-order differential equation of the form:
[36]
where _F_ is the externally applied force, _k_ a constant, _x_ the system output, ω _n_ the natural angular frequency and ζ the damping ratio. Steady-state conditions occur when d _x_ /d _t_ and d2 _x_ /d _t_ 2 are zero and so we then have ωn2 _x_ s = _kF_.
Maths in action
System of a damped mass on a spring
There are many engineering systems which can be modelled by the lumped system of damped mass on a spring and then subject to some externally applied force. An example of a measurement system which can be modelled in this way is a diaphragm pressure gauge. Figure 5.19 illustrates the basic features of such systems.
Figure 5.19 Mass, spring, damper system
The net force applied to the mass is the applied force _F_ minus the force resulting from the compressing, or stretching, of the spring and the force from the damper:
This net force will cause the mass to accelerate. Thus:
We can write this as:
In the absence of damping and a force _F_ , we have _m_ d2 _x_ /d _t_ 2 \+ _kx_ = 0 and the spring _naturally_ oscillating (see Earlier Maths in action in this chapter) with an angular frequency, which we can call the natural angular frequency of ωn given by:
If we define a constant ζ, termed the _damping ratio_ , by:
then we can write the differential equation as:
Consider a step input such that the applied force jumps from zero to _F_ at time _t_ = 0. We can solve the differential equation by determining the complementary function and the particular integral. For the homogeneous form of the differential equation we try a solution of the form _x_ = _A_ e _st_ (see earlier Maths in action in this chapter). We thus have the homogeneous equation solutions: _Damping ratio less than 1, i.e. underdamped_
_Damping ratio with the value 1, i.e. critically damped_
_Damping ratio greater than 1, i.e. over-damped_
When we have a step input then we can try for the particular integral _x_ = _A_. Substituting this into the differential equation gives 0 + 0 + _A_ = _F/m_. Thus the particular integral is _x_ = _F/m_ and the solutions for the different degrees of damping are:
As _t_ tends to an infinite value, in all cases the response tends to a steady-state value of _F_ / _m_. Figure 5.20 shows the form the solution of the second-order differential equation takes for different values of the damping ratio.
Figure 5.20 Response of second-order system to step input for different damping factors. The output is plotted as a multiple of the steady-state value F/m. Instead of just giving the output variation with time t, the axis used is _ω nt_. This is because _t_ and _ω n_ always appear as the product _ω nt_ and using this product makes the graph applicable for any value of _ω n_.
Example
The dynamic performance of a piezoelectric accelerometer is described by the following second-order differential equation:
where θ0 is the output charge in pC and θi is the input acceleration in m/s2. Determine the natural angular frequency, the damping factor and the static system sensitivity.
We can compare the differential equation with the standard form of equation [36] for the oscillations of an elastic system. We thus have:
and so ωn = 150 × 103 rad/s. Since ωn = 2 _πf_ n then the natural frequency _f_ n = 150 × 103/2π = 23.87 kHz. We also have:
and so ζ = 3 × 103/(2 × 150 × 103) = 0.01. The oscillation is thus underdamped.
Steady state occurs when ωn2 _x_ s = _kF_ and so the static system sensitivity is _x_ s _/F_ = _k/ω n_2 = 110 × 109/(22.5 × 109) = 4.89 pC/(m/s2).
### Problems 5.3
1. Determine the unique solutions for the following differential equations given the general solutions and initial conditions:
(a)
(b)
(c)
(d)
(e)
(f)
2. Determine the general solutions of:
(a)
(b)
(c)
(d)
(e)
(f)
(g)
(h)
3. Determine the particular solutions of:
(a)
(b)
(c)
(d)
(e)
(f)
4. Determine the general solutions of:
(a)
(b)
(c)
(d)
(e)
(f)
(g)
5. Determine the particular solution of the following differential equation if _y_ = −2 and d _y_ /d _x_ = −3 when _x_ = 0:
6. Determine the particular solution of the following differential equation if _y_ = 1 and d _y_ /d _x_ = 0 when _x_ = 0:
7. An object of mass 1 kg is suspended from a rigid support by a vertical spring of stiffness 4 N/m. Determine how the displacement of the object varies with time when the object is pulled down from its initial position and released to freely move if the object is subject to a damping force of five times its velocity?
8. An object of mass 1 kg is suspended from a rigid support by a vertical spring of stiffness 9 N/m. The object is pulled down for an initial displacement of 0.2 m and then released with zero initial velocity. Determine how the displacement of the object varies with time when (a) there is no damping, (b) the damping is twice the velocity of the object.
9. A second-order system has a natural angular frequency of 2.0 rad/s and a damped angular frequency of 1.8 rad/s. What is the damping factor?
10. Determine the natural angular frequency and damping factor for a second-order system with input _y_ and output _x_ described by the following differential equation:
11. A sensor can be considered to be a mass-damper-spring system with a mass of 10 g and a spring of stiffness 1.0 N/mm. Determine the natural angular frequency and the damping constant required for the damping element if the system is to be critically damped.
12. Determine whether the system described by the following differential equation is under-damped, critically damped or over-damped when subject to a step input _y_ :
13. An object of mass 1 kg is suspended from a rigid support by a vertical spring of stiffness 9 N/m. What is the damping force per unit velocity which would be needed to give critical damping?
14. Determine the natural angular frequency and damping force per unit velocity for a system having its displacement _x_ with time _t_ described by the following second-order differential equation:
15. An object of mass 1 kg is suspended from a rigid support by a vertical spring of stiffness 9 N/m. If there is a damping force of 1 _v_ opposing the motion of the object, where _v_ is the velocity, determine how the displacement varies with time when the object is given an initial displacement of 0.2 m and an initial velocity of −0.3 m/s.
16. The angular displacement θ of a door controlled by a hydraulic damping mechanism is described by the differential equation:
Determine how the angular displacement varies with time _t_ when there is an initial displacement of π/3 and zero initial angular velocity.
17. An electrical circuit having resistance _R_ , inductance _L_ and capacitance C in series with a step voltage source _V_ at _t_ = 0 has the potential difference across the capacitor vC described by the differential equation:
Show that the three possible solutions are:
6
# Laplace transform
Summary
In order to consider the response of engineering systems, e.g. electrical or control systems, to inputs such as step, or perhaps an impulse, we need to be able to solve the differential equation for that system with that particular form of input. As the previous chapter indicates, this can be rather laborious. A simpler method of tackling the solution is to transform a differential equation into a simple algebraic equation which we can easily solve. This is achieved by the use of the Laplace transform, the subject of this chapter.
Objectives
By the end of this chapter, the reader should be able to:
• understand what using the Laplace transform involves;
• use Laplace transform tables to convert first- and second-order differential equations into algebraic equations;
• use Laplace transform tables, and where appropriate partial fractions, to convert Laplace transform equations into real world equations;
• determine the outputs of systems to standard input signals such as step, impulse and ramp.
## 6.1 The Laplace transform
In this chapter a method of solving such differential equations is introduced which transforms a differential equation into an algebraic equation. This is termed the _Laplace transform_. It is widely used in engineering, in particular in control engineering and in electrical circuit analysis where it is commonplace not even to write differential equations to describe conditions but to write directly in terms of the Laplace transform.
We can think of the Laplace transform as being rather like a function machine (Figure 6.1). As input to the machine we have some function of time _f_ ( _t_ ) and as output a function we represent as _F_ ( _s_ ). The input is referred to as being the _time domain_ while the output is said to be in the _s-domain_. Thus we take information about a system in the time domain and use our 'machine' to transform it into information in the _s_ -domain. Differential equations which describe the behaviour of a system in the time domain are converted into algebraic equations in the _s_ -domain, so considerably simplifying their solution. We can thus transform a differential equation into an _s_ -domain equation, solve the equation and then use the 'machine' in inverse operation to transform the _s_ -domain equation back into a time-domain solution (Figure 6.2).
Figure 6.1 The Laplace transform
Figure 6.2 Using the Laplace transform. As an illustration, Ohm's Law gives the time-domain equation v(t) = Ri(t), both v and i being functions of time and R assumed to remain constant. In the s-domain this becomes V(s) = Rl(s). After working with this equation in the s-domain we can then transform back to the time domain
Key point
To obtain the Laplace transform of a function of time _f_ ( _t_ ), multiply it by _e –at_ and integrate the product between zero and infinity.
Key point
When electrical circuits are discussed in terms of currents or voltages varying with time we use differential equations and are said to be working in the _time domain_. When we use phasors we can be said to be working in the _frequency domain_ ; we are no longer working with time-varying quantities. As we shall see later in this chapter, we can transform currents or voltages which vary with time into the _s-domain_ by using the Laplace transform; like the transformation using phasors, we are no longer working with time-varying quantities.
Now this definition may look rather daunting, but do not fear, it is very likely that you will not need to use it but rather will make use of tables which other people have worked out. However, you should appreciate the basis of the transform. The _Laplace transform_ of some function of time is defined by:
_Multiply a given function of time f(t) by e −st and integrate the product between zero and infinity. The result, if it exists, is called the Laplace transform of f(t) and is denoted by_ .
[1]
Note that the integration is between 0 and +∞ and so is _one-sided_ and not over the full range of time from –∞ to +∞.
Example
Determine the Laplace transform of _f_ ( _t_ ) = 1.
Using equation [1]:
This is provided that _s_ > 0 so that e– _st_ → 0 as _t_ → ∞
Example
Determine the Laplace transform of _f_ ( _t_ ) = e _at_.
Using equation [1]:
That is provided we have ( _s_ – _a_ ) > 0.
Example
Determine the Laplace transform of _f_ ( _t_ ) = _t_.
Using equation [1]:
Using integration by parts:
That is provided we have _s_ > 0.
#### Laplace transforms for step and impulse function
Consider the _unit step function u_ ( _t_ ) shown in Figure 6.3. The Laplace transform is given by equation [1] as:
Figure 6.3 Unit step at time t = 0
[2]
Thus a unit size step input signal to an engineering system occurring at time _t_ = 0 will have a Laplace transform of 1/ _s_.
Key points
The Laplace transforms of signals commonly used as inputs to systems are:
Unit impulse: 1
Unit step: 1/ _s_
Unit ramp: 1/ _s_ 2
Now consider obtaining the _unit impulse function_ (represented as δ( _t_ )). Such an impulse can be considered to be a unit area rectangular pulse which has its width _k_ decreased to give the unit impulse in the limit when _k_ → 0. For the unit area rectangular pulse shown in Figure 6.4, the Laplace transform is:
We can replace the exponential by a series, thus obtaining:
Thus in the limit as _k_ → 0, the Laplace transform tends to the value 1 and so:
[3]
Thus a unit size impulse input signal occurring at time _t_ = 0 to an engineering system will have a Laplace transform of 1.
Figure 6.4 Unit area rectangular pulse
#### Standard Laplace transforms
The transforms derived above, together with others, are tabulated as a set of standard transforms so that it becomes unnecessary to derive them by the use of equation [1]. Table 6.1 gives some of the more common standard transforms. As indicated in the following section, these standard transforms can be used to derive the transforms for a wide range of functions.
Table 6.1
Laplace transforms
#### Properties of Laplace transforms
The following are basic properties of Laplace transforms and can be used with the above table of standard transforms to obtain a wide range of other transforms.
Key point
If two separate time functions have Laplace transforms then the transform of the sum of the time functions is the sum of the Laplace transforms of the two functions considered separately.
• Sum of two functions
If two separate time functions _f_ ( _t_ ) and _g(t_ ) have Laplace transforms then the transform of the sum of the time functions, i.e. _f_ ( _t_ ) + _g_ ( _t_ ), is the sum of the Laplace transforms of the two functions considered separately:
[4]
This property is derived by using equation [1]:
Since 2 _f_ ( _t_ ) equals _f_ ( _t_ ) + _f_ ( _t_ ), then the Laplace transform of 2 _f_ ( _t_ ) will be twice the Laplace transform of _f_ ( _t_ ). Thus, in general:
[5]
The Laplace transform of a constant multiplying a function is the same as the constant multiplying the Laplace transform of the function.
Key point
The Laplace transform of a constant multiplying a function is the same as the constant multiplying the Laplace transform of the function.
Example
Determine the Laplace transform of 1 + 2 _t_.
Using equations [4] and [5] and Table 6.1:
Example
Determine the Laplace transform of 3 sin 2 _t_ \+ cos 2 _t_.
Using equations [4] and [5] and Table 6.1:
Example
Determine the Laplace transform of 3 _t_ 2 \+ 2 e– _t_.
Using equations [4] and [5] and Table 6.1:
Example
Determine the Laplace transform of sin( _ωt_ \+ θ).
We can write sin( _ωt_ \+ θ) as sin _ωt_ cos θ + cos _ωt_ sin θ. Thus, using equations [4] and [5] and Table 6.1:
Example
What is the Laplace transform of an alternating voltage which is described by 240 sin 314.16 _t_?
We have an equation of the form constant multiplied by a sine function. Equation [12] in Table 6.1 gives for sin _ωt_ the transform ω/( _s_ 2 \+ ω2). Hence:
Key point
The first shift theorem: if then:
• _The first shift theorem, factor e –at_
This theorem states that if then:
[6]
Thus the substitution of _s_ \+ _a_ for _s_ corresponds to multiplying a time function by e– _at_. This can be demonstrated if we consider equation [1] with such a function:
Example
Determine the Laplace transform of e−2t cosh 3 _t_.
Using the first shift theorem and the transform for cosh 3 _t_ given by Table 6.1, the transform is that of cosh 3 _t_ with the _s_ replaced by _s_ \+ 2:
Example
Determine the Laplace transform of 2 e−2t sin2 _t_.
Since cos 2 _t_ = 1 − 2 sin2 _t_ we have:
Hence, using the first shift theorem and replacing the _s_ by _s_ \+ 2:
Key point
The second shift theorem: if a signal is delayed by a time _T_ then its Laplace transform is multiplied by e– _sT_.
• _The second shift theorem, time shifting_
The second shift theorem states that if a signal is delayed by a time _T_ then its Laplace transform is multiplied by e– _s T_. A function _u_ ( _t_ ) which is delayed is represented by _u_ ( _t_ – _T_ ), where _T_ is the delay. Thus if _F_ ( _s_ ) is the Laplace transform of _f_ ( _t_ ) then:
[7]
This can be demonstrated by considering a unit step function which is delayed by a time _T_ (Figure 6.5). Equation [1] gives for such a function:
Example
Determine the Laplace transform for a unit impulse which occurs at a time of _t_ = 2 s.
Figure 6.5 Delayed unit step
The Laplace transform for a unit impulse at _t_ = 0 is 1. Thus the transform for the delayed impulse is 1 e−2 _s_.
Example
Determine the Laplace transform of a single pulse consisting of just the first half of a sine wave (Figure 6.6).
Figure 6.6 Example
We can think of such a function as being the sum of a sine function extending over an infinite number of cycles and a sine function that has had its start delayed by ½ _T_. In this way all but the first half period waveform are cancelled out. Thus the Laplace transform is:
Key point
A periodic function of period _T_ has a Laplace transform of:
where _F_ 1( _s_ ) is the Laplace transform of the function for the first period.
• _Periodic functions_
A periodic function of period _T_ has a Laplace transform of:
[8]
where _F_ 1( _s_ ) is the Laplace transform of the function for the first period. This can be proved by considering the periodic function to be the sum of the function _f_ 1( _t_ ) describing the first period, the first period function delayed by 1 period, the first period function delayed by 2 periods, etc. The Laplace transform of the sum is thus:
The term in the brackets is a geometric series with the sum to infinity of 1/(1 – e– _s T_). Thus we obtain the equation given above.
Example
Determine the Laplace transform of a full-wave rectified sine wave (Figure 6.7).
Figure 6.7 Example
Such a wave consists of a sequence of the pulses shown in Figure 6.6. Thus the first period function has the transform:
Therefore the periodic wave has the Laplace transform:
• _The Laplace transforms of derivatives_
Consider the determination of the Laplace transform of the derivative of a function, i.e. Using equation [1]:
Using integration by parts:
[9]
where _f_ (0) is the value of _f_ ( _t_ ) when _t_ = 0 and _F_ ( _s_ ) is the Laplace transform of _f_ ( _t_ ).
For a second derivative we can similarly obtain:
[10]
where d _f_ (0)/d _t_ is the value of the first derivative when _t_ = 0.
Likewise for a third derivative we can obtain:
[11]
where d2 _f_ (0)/d _t_ 2 is the value of the second derivative at _t_ = 0.
Key points
where _f_ (0) is the value of _t_ ( _t_ ) when _t_ = 0 and _F_ ( _s_ ) is the Laplace transform of _f_ ( _t_ ).
where d _f_ (0)/d _t_ is the value of the first derivative when _t_ = 0.
Example
Given the initial condition that _x_ = 2 when _t_ = 0, determine the Laplace transform of
Using equation [9]:
where _X_ ( _s_ ) is the Laplace transform of _x(t_ ).
Example
Given the initial conditions that _x_ = 0 and d _x_ /d _t_ = 0 when _t_ = 0, determine the Laplace transform of
Using equations [9] and [10]:
Key point
• _Laplace transform of an integral_
Consider the determination of the Laplace transform of the integral of a function, i.e.
If we let then Then, using equation [9]:
Since _g_ (0) = 0 and :
Thus:
[12]
Example
Determine the Laplace transform of
Using equation [12]:
Since:
### 6.1.1 The inverse transform
The inverse Laplace transform is the transformation of a Laplace transform into a function of time. If _then f_ ( _t_ ) is the _inverse Laplace transform_ of _F_ ( _s_ ), the inverse being written as:
[13]
The inverse can generally be obtained by using standard transforms, e.g. those in Table 6.1. The basic properties of the inverse, see the following notes, can be used with the standard transforms to obtain a wider range of transforms than just those in the table. Often _F_ ( _s_ ) is the ratio of two polynomials and cannot be readily identified with a standard transform. However, the use of partial fractions (see Section 4.2.3) can often convert such an expression into simple fraction terms which can then be identified with standard transforms. This is illustrated in the examples given in the next section.
Example
Determine the inverse Laplace transform of 1/ _s_ 2.
Table 6.1 indicates that the function which has the Laplace transform of 1/ _s_ 2 is _t_. Thus the inverse is _t_.
Key points
A Laplace transform which is the sum of two separate terms has an inverse of the sum of the inverse transforms of each term considered separately.
A Laplace transform which is a constant multiplied by a function has an inverse of the constant multiplied by the inverse of the function.
First shift theorem:
where _f_ ( _t_ ) is the inverse transform of _F_ ( _s_ ).
Second shift theorem: if the inverse transform numerator contains an e– _s T_ term, we remove this term from the expression, determine the inverse transform of what remains and then substitute ( _t_ – _T_ ) for _t_ in the result.
#### Basic properties of the inverse transform
The following are basic properties which aid in the obtaining of inverse transforms.
• _Additive property_
If we have a Laplace transform as the sum of two separate terms then we can take the inverse of each separately and the sum of the two inverse transforms is the inverse of the sum:
[14]
Also:
[15]
where α is a constant.
• _First shift theorem_
The _first shift theorem_ (see Section 6.1) can be written in inverse form as:
[16]
where _f_ ( _t_ ) is the inverse transform of _F_ ( _s_ ).
• _Second shift theorem_
The _second shift theorem_ (see Section 6.1) can be written in inverse form as:
[17]
Thus if the inverse transform numerator contains an e– _sT_ term, then we remove this term from the expression, determine the inverse transform of what remains and then substitute ( _t_ – _T_ ) for _t_ in the result.
Example
Determine the inverse Laplace transform of
Table 6.1 shows the Laplace transform of cos _ωt_ as being _s_ /( _s_ 2 \+ ω2). Thus:
Thus, using equation [15]:
Example
Determine the inverse Laplace transform of
We can write the fraction in a simpler form by the use of partial fractions. Thus:
and so we must have 3 _s_ − 1 = _A_ ( _s_ − 1) + _Bs_. Equating coefficients of s gives 3 = _A_ \+ _B_ and equating numerical terms gives −1 = − _A_. Hence:
The inverse transform of 1/ _s_ is 1 and of 1/( _s_ − 1) is e _t_. Thus:
Example
Determine the inverse Laplace transform of
This fraction can be rearranged as:
The fraction term is now in the form _ω/_ ( _s_ 2 \+ ω2), i.e. the transform of sin _ωt_ when _s_ has been replaced by _s_ − 3. This corresponds to a multiplication by e3 _t_. Thus, using equation [16]:
Example
Determine the inverse Laplace transform of 6e−3 _t_ /( _s_ \+ 2).
Using equation [17], extracting e−3 _s_ from the expression gives 6/( _s_ \+ 2). This has the inverse Laplace transform of 6 e−2 _t_. Thus the required inverse is 6 e−2( _t_ −3) _u_ ( _t_ − 3).
#### Initial and final values
The _initial value_ of a function of time is its value at zero time, the _final value_ being the value at infinite time. Often there is a need to determine the initial value and final values of systems, e.g. for an electrical circuit when there is, say, a step input. The final value in such a situation is often referred to as the _steady-state value_. The initial and final value theorems enable the initial and final values to be determined from a Laplace transform without the need to find the inverse transform.
• _The initial value theorem_
The Laplace transform of _f_ ( _t_ ) is given by equation [1] as:
and so:
[18]
Integration by parts then gives:
[19]
As _s_ tends to infinity then e– _st_ tends to 0. Thus we must have, as a result of equation [18], tending to 0 as _s_ tends to infinity. Hence equation [19] gives:
Key point
Initial value theorem:
But _f_ (0) is the initial value of the function at _t_ = 0. Thus, provided a limit exists:
[20]
This is known as the _initial value theorem_.
Example
Determine the initial value of the function _f_ ( _t_ ) giving the Laplace transform 4/( _s_ \+ 2).
Applying equation [20]:
Key point
Final value theorem:
• _The final value theorem_
As with the initial value theorem, for a function _f_ ( _t_ ) having a Laplace transform _F_ ( _s_ ) we can write (equations [18] and [19]):
[21]
As _s_ tends to zero then e– _st_ tends to 1 and so:
We can write this integral as:
Hence, with equation [21] we obtain:
and so, provided a limit exists:
[22]
This is termed the _final value theorem_.
Example
Determine the final value of the function which has the Laplace transform:
Using equation [22]:
### Problems 6.1
1. Determine, working from first principles and the definition of the transform, the Laplace transforms of:
(a) _f_ ( _t_ ) = _t_ 2
(b) _f_ ( _t_ ) = _t_ 3,
(c) _f_ ( _t_ ) = sinh _at_.
(Hint: sinh _at_ = ½(e _at_ – e– _at_ ))
2. Determine, by the use of the transforms given in Table 6.1 and the properties of Laplace transforms, the Laplace transforms of the following functions:
(a) 4,
(b) 3 _t_ − 1,
(c) e3 _t_ ,
(d) 2 _t_ \+ 3 e _t_ ,
(e) _t_ 2 \+ 4 e−2 _t_ ,
(f) _t_ 2 \+ 2 _t_ \+ 1,
(g) 2 sin 3 _t_ ,
(h) 5 sinh 3 _t_ ,
(i) sin 3 _t_ cos 3 _t_ ,
(j) _t_ e−3 _t_ ,
(k) 4 − 2 sin 3 _t_ \+ e2 _t_ ,
(l) _t_ 3 e−2 _t_ ,
(m) (1 + e _t_ )(1 – e– _t_ ),
(n) e3 _t_ cos _t_ ,
(o) (1 + _t_ )2 e– _t_ ,
(p) e– _t_ sin2 _t_ ,
(q) _t_ cosh 3 _t_ ,
(r) _t_ 2 cosh 3 _t_ ,
(s) _t_ 3 e−3 _t_
3. Use the additive property to determine the Laplace transforms of the following functions:
(a) _t_ 2 \+ 3 _t_ \+ 2,
(b) 2 + 4 sin 3 _t_ ,
(c) e4 _t_ \+ cosh 2 _t_ ,
(d) 2 + 5 e3 _t_ ,
(e) cos 2 _t_ \+ cos 3 _t_ ,
(f) _t_ 3 \+ 4 e– _t_
4. Use the first shift theorem to determine the Laplace transforms of the following functions:
(a) e−3 _t_ sin 2 _t_ ,
(b) e4 _t_ _t_ 2,
(c) e2 _t_ cos _t_
5. Use the second shift theorem to determine the Laplace transform of the following functions:
(a) a unit step function which starts at _t_ = 5 s,
(b) a unit impulse which occurs at _t_ = 4 s,
(c) the function described by 3( _t_ − 10) _u_ ( _t_ − 10)
6. Determine the Laplace transform of the periodic function shown in Figure 6.8.
Figure 6.8 Problem 6
7. Determine the Laplace transform for the periodic signal shown in Figure 6.9.
Figure 6.9 Problem 7
8. Determine the Laplace transform for the following periodic signals:
(a) _f_ ( _t_ ) = 1 for 0 ≤ _t_ < 1 and 0 for 1 ≤ _t_ < 2, _f_ ( _t_ \+ 2) = _f_ ( _t_ ),
(b) _f_ ( _t_ ) = _t_ for 0 ≤ _t_ < 1 and 0 for 1 ≤ _t_ < 2, _f_ ( _t_ \+ 2) = _f_ ( _t_ ),
(c) _f_ ( _t_ ) = _t_ for 0≤ _t_ < 1 and 2 – _t_ for 1 ≤ _t_ < 2, _f_ ( _t_ \+ 2) = _f_ ( _t_ )
9. Determine the inverse Laplace transforms of:
(a)
(b)
(c)
(d)
(e)
(f)
(g)
(h)
10. Determine, by the use of partial fractions, the inverse Laplace transforms of the following:
(a)
(b)
(c)
(d)
11. Determine the initial values of the functions giving the following Laplace transforms:
(a)
(b)
12. Determine the final values of the functions having the following Laplace transforms:
(a)
(b)
## 6.2 Solving differential equations
Laplace transforms offer a method of solving differential equations. The procedure adopted is:
1. Replace each term in the differential equation by its Laplace transform, inserting the given initial conditions.
2. Algebraically rearrange the equation to give the transform of the solution.
3. Invert the resulting Laplace transform to obtain the answer as a function of time.
Example
Given that _x_ = 0 at _t_ = 0, solve the first-order differential equation 3 (d _x_ /d _t_ ) + 2 _x_ = 4.
Taking the Laplace transform gives:
Substituting the initial condition gives:
Hence:
Simplifying by the use of partial fractions:
Hence _A_ (3 _s_ \+ 2) + _Bs_ = 4 and so _A_ = 2 and _B_ = −2/3. Thus:
and so _x_ ( _t_ ) = 2 − 2 e−2 _t_ /3.
Example
Given that _x_ = 0 and d _x_ /d _t_ = 1 at _t_ = 0, solve the second-order differential equation:
Taking the Laplace transform gives:
Substituting the initial conditions:
We can simplify the above expression by the use of partial fractions. Thus:
Hence _A_ ( _s_ − 2)( _s_ − 3) + _B_ ( _s_ \+ 1)( _s_ − 3) + _C_ ( _s_ \+ 1)( _s_ − 2) = 2 and so _A_ = 1/6, _B_ = −2/3 and _C_ = ½
Hence _D_ ( _s_ − 3) + _E_ ( _s_ − 2) = 1 and so _D_ = −1 and _E_ = 1. Thus:
The inverse transform is
Example
Solve the following second-order differential equation:
given the conditions (a) d _x_ /d _t_ = 0 and _x_ = 2 when _t_ = 0, (b) d _x_ /d _t_ = 2 and _x_ = 0 when _t_ = 0.
Taking the Laplace transform gives:
(a) We have d _x_ /d _t_ = 0 and _x_ = 2 when _t_ = 0 and so:
As Table 6.1 indicates, the bracketed term has the inverse of a cosine. Thus the solution is _x_ = 2 cos 8 _t_.
(b) We have d _x_ /d _t_ = 2 and _x_ = 0 when _t_ = 0, and so:
To put this in the form _ω/_ ( _s_ 2 \+ ω2) we multiply by 4/4:
The solution is thus sin 8 _t_.
### Problems 6.2
1. Solve the following differential equations:
(a)
(b)
(c)
(d)
(e)
(f)
(g)
(h)
(i)
(j)
(k)
## 6.3 Transfer function
In general, when we consider inputs and outputs of systems as functions of time then the relationship between the output and input is given by a differential equation. If we have a system composed of two elements in series with each having its input–output relationships described by a differential equation, it is not easy to see how the output of the system as a whole is related to its input. We can overcome this problem by transforming the differential equations into a more convenient form by using the Laplace transform. This form is a much more convenient way of describing the relationship than a differential equation since it can be easily manipulated by the basic rules of algebra.
Key point
The _transfer function G_ ( _s_ ) of a system is defined as [output _Y_ ( _s_ )]/[input _X_ ( _s_ )] when all initial conditions before we apply the input are zero.
For a simple system we might use the term gain to relate the input and output of a system with gain _G_ = output/input. This tells us how much bigger the output is than the input. When we are working with inputs and outputs described as functions of _s_ we define the _transfer function G_ ( _s_ ) as [output _Y_ ( _s_ )]/[input _X_ ( _s_ )] when all initial conditions before we apply the input are zero, i.e.
[23]
A transfer function can be represented as a block diagram (Figure 6.10) with _X_ ( _s_ ) the input, _Y_ ( _s_ ) the output and the transfer function _G_ ( _s_ ) as the operator in the box that converts the input to the output. The block represents a multiplication for the input. Thus, by using the Laplace transform of inputs and outputs, we can use the transfer function as a simple multiplication factor, like the gain.
Figure 6.10 Transfer function as the factor that multiplies the input to give the output
Example
Determine the transfer function for an electrical system for which we have the relationship:
The transfer function _G_ ( _s_ ) is thus:
To get the output _V c_( _s_ ) we multiply the input _V_ ( _s_ ) by 1/( _RCs_ \+ 1).
Example
Determine the transfer function for the mechanical system having mass, stiffness and damping, and input _F_ and output _x_ and described by the differential equation:
Figure 6.11 shows the type of system that would give such a differential equation.
Figure 6.11 Mass, spring, damper system
If we now write and and with initial conditions zero:
When _t_ = 0 we have _x_ = 0 and d _x_ /d _t_ = 0 and so:
Thus, we have for the differential equation in the _s_ -domain:
Hence the transfer function _G_ ( _s_ ) of the system is:
Key point
The overall transfer function for a system composed of elements in series is the product of the transfer functions of the individual series elements.
#### Systems in series
Consider a system of two subsystems in series (Figure 6.12). The first subsystem has an input of _X_ ( _s_ ) and an output of _Y_ 1( _s_ ); thus, _G_ 1( _s_ ) = _Y_ 1( _s_ )/ _X_ ( _s_ ). The second subsystem has an input of _Y_ 1( _s_ ) and an output of _Y_ ( _s_ ); thus, _G_ 2( _s_ ) = _Y_ ( _s_ )/ _Y_ 1( _s_ ). We thus have:
Figure 6.12 Systems in series
The overall transfer function _G_ ( _s_ ) of the system is _Y_ ( _s_ )/ _X_ ( _s_ ) and so:
[24]
Key point
A simple feedback control system to, say, control the temperature of a room will have a negative feedback loop. This feeds back a measure of the output of the system which is then subtracted from the input. The input is the required temperature and the output the actual temperature. The difference between these signals, i.e. the error, is used to actuate some heating system which will continue as long as there is an error.
Example
Determine the overall transfer function for a system which consists of two elements in series, one having a transfer function of 1/( _s_ \+ 1) and the other 1/( _s_ \+ 2).
_The overall transfer function is thus:_
#### Systems with negative feedback
For a control system with a negative feedback loop we can have the situation shown in Figure 6.13 where the output is fed back via a system with a transfer function _H_ ( _s_ ). This fed back signal subtracts from the input to the system _G_ ( _s_ ) to give the error signal. The feedback system has an input of _Y_ ( _s_ ) and thus an output of _H_ ( _s_ ) _Y_ ( _s_ ). Thus the feedback signal is _H_ ( _s_ ) _Y_ ( _s_ ). The error is the difference between the input signal _X_ ( _s_ ) and the feedback signal:
Figure 6.13 System with negative feedback
This error signal is the input to the _G_ ( _s_ ) system and gives an output of _Y_ ( _s_ ). Thus:
and so:
which can be rearranged to give:
[25]
Key point
For a system with a negative feedback, the overall transfer function is the forward path transfer function divided by one plus the product of the forward path and feedback path transfer functions.
Example
Determine the overall transfer function for a control system (Figure 6.14) which has a negative feedback loop with a transfer function 4 and a forward path transfer function of 2/( _s_ \+ 2).
Figure 6.14 Example
The overall transfer function of the system is:
### 6.3.1 Determining outputs of systems
The procedure we can use to determine how the output of a system will change with time when there is some input to the system is:
1. _Determine the output as an s function_
In terms of the transfer function _G_ ( _s_ ) we have:
[26]
We can thus obtain the output of a system as an _s_ function by multiplying its transfer function by the input _s_ function.
2. Determine the time function corresponding to the output s function
To obtain the output as a function of time we need to find the time function that will give the particular output _s_ function that we have obtained. Tables of _s_ functions and their corresponding time functions can be used (Table 6.1). Often, however, the _s_ function output has to be rearranged to put it into a form given in the table.
Example
A system has a transfer function of 1/( _s_ \+ 2). What will be its output as a function of time when it is subject to a step input of 1 V?
The step input has a Laplace transform of (1/ _s_ ). Thus:
The nearest form we have in Table 6.1 to the output is item 6 as ½ × 2/[ _s_ ( _s_ \+ 2)]. Thus the output, as a function of time, is ½(1 – e−2 _t_ ) V.
#### First-order systems
A first-order system has a differential equation of the form:
As a function of _s_ this can be written as:
and so a transfer function of the form:
[27]
where _k_ is the _gain_ of the system when there are steady-state conditions and τ is the _time constant_ of the system.
• _Unit impulse input_
When a first-order system is subject to a unit impulse input then _X_ ( _s_ ) = 1 and the output transform _Y_ ( _s_ ) is:
Hence, since we have the transform in the form 1/( _s_ \+ _a_ ), using item 6 in Table 6.1 gives:
[28]
Figure 6.15 shows how the output _x_ varies with time.
Figure 6.15 Output with a unit impulse input to a first-order system
• _Unit step input_
When a first-order system is subject to a unit step input then _X_ ( _s_ ) = 1/ _s_ and the output transform _Y_ ( _s_ ) is:
Hence, since we have the transform in the form _a_ / _s_ ( _s_ \+ _a_ ), using item 6 in Table 6.1 gives:
[29]
Figure 6.16 shows how the output _x_ varies with time.
Figure 6.16 Behaviour of a first-order system when subject to a unit step input
Example
A circuit has a resistance _R_ in series with a capacitance _C_. The differential equation relating the input _v_ and output _v_ C, i.e. the voltage across the capacitor, is:
Determine the output of the system when there is a 2 V impulse input.
As a function of s the differential equation becomes:
Hence the transfer function is
The output when there is 2 V impulse input is:
Hence, since we have the transform in the form 1/( _s_ \+ _a_ ), using item 6 in Table 6.1 gives:
Example
A thermocouple which has a transfer function linking its voltage output _V_ and temperature input of:
Determine the response of the system when it is suddenly immersed in a water bath at 100°C.
The output as an s function is:
The sudden immersion of the thermometer gives a step input of size 100°C and so the input as an s function is 100/ _s_. Thus:
The fraction element is of the form _a_ / _s_ ( _s_ \+ _a_ ), item 6 in Table 6.1, and so the output as a function of time is:
#### Second-order systems
The differential equation for a second-order system can be written as:
where _x_ is the input and _y_ the output (see chapter 5, equation [36]). Since the steady-state output occurs when _ω n_2 _y_ = _kx_ , a more usual way of writing the standard form of the equation, so that the steady-state value occurs when _y_ = _kx_ , is as:
[30]
where ωn is the natural angular frequency with which the system oscillates and ζ is the damping ratio. Hence we have:
and so a transfer function of:
[31]
When a second-order system is subject to a unit step input, i.e. _X_ ( _s_ ) = 1/ _s_ , then the output transform is:
Key point
In general, we can write a transfer function as:
with the values of s that make _G_ ( _s_ ) zero being termed zeros and so correspond to _s_ = _Z_ 1, _Z_ 2, _... Z m_. The values of s that make _G_ ( _s_ ) infinite are known as poles and so correspond to _s_ = _p_ 1, _p_ 2,... _p n_. As will be apparent from the discussion on this page and the next, poles and zeros can be real or complex. We can thus plot them on an Argand diagram with the vertical axis being the imaginary element and the horizontal axis the real part. Such a plot is said to have the poles and zeros plotted on an _s_ -plane diagram.
There are three different forms of answer to this equation for the way the output varies with time; these depending on the value of the damping constant and whether it gives an overdamped, critically damped or underdamped system. We can determine the condition for these three forms of output by putting the equation in the form:
[32]
where _p_ 1 and _p_ 2 are the roots of the quadratic term:
[33]
Hence, if we use the equation to determine the roots of a quadratic equation, we obtain:
and so the two roots are given by:
[34]
The important issue in determining the form of the roots is the value of the square root term and this is determined by the value of the damping factor (Figure 6.17).
Figure 6.17 Behaviour of a second-order system when subject to a unit step input signal
• _Damping factor ζ > 1_
With the damping factor ζ greater than 1 the square root term is real and will factorise. To find the inverse transform we can either use partial fractions to break the expression down into a number of simple fractions or use item 10 in Table 6.1. The output is thus:
[35]
This describes an output which does not oscillate but dies away with time and thus the system is _overdamped_. As the time _t_ tends to infinity then the exponential terms tend to zero and the output becomes the steady value of _kω_ n2/( _p_ 1 _p_ 2). Since _p_ 1 _p_ 2 = ωn2, the steady value is _k_.
• Damping factor ζ = 1
With ζ = 1 the square root term is zero and so _p_ 1 = _p_ 2 = − _ω n_; both roots are real and both the same. The output equation then becomes:
This equation can be expanded by means of partial fractions to give:
Hence:
[36]
This is the critically damped condition and describes an output which does not oscillate but dies away with time. As the time _t_ tends to infinity then the exponential terms tend to zero and the output tends to the steady state value of _k_.
• _Damping factor ζ < 1_
With ζ < 1 the square root term does not have a real value. Using item 19 in Table 6.1 then gives:
[37]
where cos ϕ = ζ. This is an under-damped oscillation. The angular frequency ω of the damped oscillation is:
[38]
Only when the damping is very small does the angular frequency of the oscillation become nearly the natural angular frequency ωn. As the time _t_ tends to infinity then the exponential term tends to zero and so the output tends to the value _k_.
Example
What will be the state of damping of a system having the following transfer function and subject to a unit step input?
The output _Y_ ( _s_ ) from such a system is given by:
For a unit step input _X_ ( _s_ ) = 1/ _s_ and so the output is given by:
The roots of _s_ 2 \+ 8 _s_ \+ 16 are _p_ 1 = _p_ 2 = − 4. Both the roots are real and the same, hence we have critical damping.
Example
A system has an output _y_ related to the input _x_ by the differential equation:
What will be the output from the system when it is subject to a unit step input? Initially both the output and input are zero.
We can write the Laplace transform of the equation as:
The transfer function is thus:
For a unit step input the output is given by:
Because the quadratic term has two real roots, the system is overdamped. We can directly use one of the standard forms given in Table 6.1 or partial fractions to first simplify the expression before using Table 6.1. Using partial fractions:
Thus, we have 1 = _A_ ( _s_ \+ 3)( _s_ \+ 2) + _Bs_ ( _s_ \+ 2) + _Cs_ ( _s_ \+ 3). When _s_ = 0 then 1 = 6 _A_ and so _A_ = 1/6. When _s_ = −3 then 1 = 3 _B_ and so _B_ = 1/3. When _s_ = −2 then 1 = −2 _C_ and so _C_ = −1/2. Hence we can write the output in the form:
Hence, using Table 6.1 gives:
Example
A system has the transfer function:
Determine its natural frequency, the damping ratio and the frequency of the damped oscillation.
If we compare the transfer function with that given in equation [31], i.e.
we are led to conclude that ωn2 = 9 and so ωn = 3 rad/s and _f n_ = ωn/2π = 3/2π = 0.48 Hz. The damping ratio is given by 2ζ _ω n_ = 3.6 and so ζ = 3.6/(2 × 3) = 0.6; the system is underdamped. Using equation [38], the angular frequency of the damped oscillation is given by:
Example
For the system shown in Figure 6.18, determine its transfer function if _M_ = 50 kg, _k_ 1 = _k_ 2 = 400 N/m and _c_ = 180 Ns/m. What will be the damped frequency of its oscillation when subject to a unit step input?
Figure 6.18 Example
Considering the free-body diagram of the mass (Figure 6.19), and applying Newton's second law, we have:
The Laplace transform of this equation, with zero initial conditions, is:
For comparison with the standard form of the transfer function equation, we write the above equation as:
Hence, with the given data:
Comparing this with the standard form of transfer function for a second-order system [31]:
then the natural angular frequency _ω n_2 = 16 and _ω n_ = 4 rad/s. The damping ratio ζ is given by 2ζ _ω n_ = 3.6 and so ζ = 3.6/(2 × 4) = 0.45. The oscillation is underdamped.
Figure 6.19 Example
Using equation [38], the angular frequency of the damped oscillation is given by:
### 6.3.2 Electrical circuit analysis
While we could write differential equations to represent electrical circuits and then solve them by the use of the Laplace transform, a simpler method is to replace time-domain components by their equivalents in the _s_ -domain.
Resistance _R_ in the time domain is defined as _v_ ( _t_ )/ _i_ ( _t_ ). Taking the Laplace transform of this equation gives a definition of resistance in the _s_ -domain (Figure 6.20) as:
Figure 6.20 Resistance: (a) time, (b) s-domain
[39]
Inductance _L_ in the time domain (Figure 6.21(a)) is defined by:
Figure 6.21 Inductance: (a) time, (b), (c), (d) s-domain
The Laplace transform of this equation is _V_ ( _s_ ) = _L_ [ _sI_ ( _s_ ) – _i_ (0)]. With zero initial current then _V_ ( _s_ ) = _sLI_ ( _s_ ). Impedance in the _s_ -domain _Z_ ( _s_ ) is defined as _V_ ( _s_ )/ _I_ ( _s_ ), thus for inductance (Figure 6.21(b)):
[40]
If the current was not initially zero but _i_ (0) = _i_ 0, then _V_ ( _s_ ) = _sLI_ ( _s_ ) – _Li_ 0. This equation can be considered to describe two series elements (Figure 6.21(c)). The first term then represents the potential difference across the inductance _L_ , being _Z_ ( _s_ ) _I_ ( _s_ ), and the second term a voltage generator of (– _Li_ 0). Alternatively we can rearrange equation _V_ ( _s_ ) = _sLI_ ( _s_ ) – _Li_ 0 in a form to represent two parallel elements (Figure 6.21(d)):
[41]
_I_ ( _s_ ) is the current into the system, _V_ ( _s_ )/ _sL_ = _V_ ( _s_ )/ _Z_ ( _s_ ) can be considered to be the current through the inductance and _i_ 0/ _s_ a parallel current source.
Capacitance _C_ in the time domain (Figure 6.22(a)) is defined by:
The Laplace transform of this equation is _I_ ( _s_ ) = _C_ [ _sV_ ( _s_ ) – _v_ (0)]. If we have _v_ (0) = 0 then (Figure 6.22(b)):
[42]
If _v_ (0) = _v_ 0 then _I_ ( _s_ ) = _CsV_ ( _s_ ) – _Cv_ (0) = _CsV_ ( _s_ ) – _Cv_ 0. We can think of this representing _I_ ( _s_ ) entering a parallel arrangement (Figure 6.22(c)) of a capacitor, and giving a current through it is _V_ ( _s_ )/ _Z_ ( _s_ ) = _CsV_ ( _s_ ), and a current source (– _Cv_ 0). Alternatively we can rearrange the equation as:
[43]
This equation now represents a capacitor in series with a voltage source of _v_ 0/ _s_ (Figure 6.22(d)).
Figure 6.22 Capacitance: (a) time domain, (b), (c), (d) s-domain
Key points
In the _s_ -domain:
Resistance = _R_
Impedance of an inductance = _sL_
Impedance of a capacitance = 1/ _sC_
Example
Determine the impedance and equivalent series circuit in the _s_ -domain of an inductance of 50 mH if there is a current of 0.1 A at time _t_ = 0.
The impedance in the _s_ -domain is given by equation [40] as 0.050s Ω. Its equivalent series circuit with the initial condition _i_ (0) = 0.1 A is of a voltage source of −0.050 × 0.1 = −0.005 V in series with the impedance of 0.050s Ω.
Example
Determine the impedance in the _s_ -domain of a capacitance of 0.1 μF and its equivalent series circuit when the capacitor has been charged to 5 V at time _t_ = 0.
The impedance in the _s_ -domain is given by equation [42] as 1/ _sC_ = 1/(0.1 × 10–6 _s_ ) Ω, and its equivalent series circuit with the initial condition _v_ (0) = 5 V is of a voltage source of −5/ _s_ in series with the impedance of 107/ _s_ Ω.
Key point
All the techniques developed for use in the analysis of circuits in the time domain can be used in the _s_ -domain.
#### Using Kirchhoff's laws
Because of the additive property of the Laplace transform, the transform of a number of time-domain functions is the sum of the transforms of each separate function. Thus with _Kirchhoff's current law_ , the algebraic sum of the time-domain currents at a junction is zero and so the sum of the transformed currents is also zero. With _Kirchhoff's voltage law_ , the sum of the time-domain voltages around a closed loop is zero and thus the sum of the transformed voltages is also zero. A consequence of this is that:
_All the techniques developed for use in the analysis of circuits in the time domain can be used in the s-domain._
The following examples illustrate this.
Example
Determine the impedance in the _s_ -domain of a 10 Ω resistor in (a) series and (b) parallel with a 1 mH inductor.
(a) For impedances in series _Z_ ( _s_ ) = _Z_ 1( _s_ ) + _Z_ 2( _s_ ) = 10 + 0.001 _s_ Ω.
(b) For impedances in parallel we have:
Hence _Z_ ( _s_ ) = 0.01 _s_ /(0.001 _s_ \+ 10) Ω.
Example
Determine how the circuit current varies with time for a circuit having a resistance _R_ in series with an initially uncharged capacitance _C_ when the input to the circuit is a step voltage _V_ at time _t_ = 0.
Figure 6.23(a) shows the circuit in the time domain and Figure 6.23(b) the equivalent circuit in the _s_ -domain. A unit step at _t_ = 0 has the Laplace transform 1/ _s_ and thus a voltage step of _V_ has a transform of _V/s_. The impedance of the capacitance is 1/ _sC_. Thus, applying Kirchhoff's voltage law to the circuit:
This is a constant multiplied by 1/( _s_ \+ _a_ ), thus:
Figure 6.23 Example
Example
A ramp voltage of _v_ = _kt_ is applied at time _t_ = 0 to a circuit consisting of an inductance _L_ in series with a resistance _R_. If initially at _t_ = 0 there is no current in the circuit, determine how the circuit current varies with time.
The Laplace transform of _kt_ is _k/s_ 2. The inductance has an impedance in the _s_ -domain of _sL_. Thus the circuit in the _s_ -domain is as shown in Figure 6.24. Applying Kirchhoff's voltage law to the circuit gives:
and so:
This can be simplified by partial fractions, writing a for _R/L_ :
Hence _A_ = 1, _B_ = −1/ _a_ and _C_ = 1/ _a_. Thus:
Hence:
Figure 6.24 Example
Example
Determine the transfer function of the circuit shown in Figure 6.25 and the output _v_ 0( _t_ ) resulting from a unit step input, given that _R_ 1 = 10 kΩ, _R_ 2 = 22 kΩ and _C_ = 1 μF.
Figure 6.25 Example
The Laplace equivalent circuit is shown in Figure 6.26. The impedance _Z_ p( _s_ ) for the parallel arrangement of _R_ 2 and the capacitor is given by:
We have a potential divider circuit and so:
The transfer function _G_ ( _s_ ) of the system is thus:
Using the given values:
For a unit step input we have _V i_( _s_ ) = 1/ _s_ and so:
We can use equation [6] in Table 6.1 or partial fractions to obtain the inverse. Partial fractions give:
and so:
Figure 6.26 Example
### Problems 6.3
1. A system has an input of a voltage of 3 V which is suddenly applied by a switch being closed. What is the input as an _s_ function?
2. A system has an input of a voltage impulse of 2 V. What is the input as an s function?
3. A system has an input of a voltage of a ramp voltage which increases at 5 V per second. What is the input as an s function?
4. A system gives an output of 1/(s + 5) _V_ ( _s_ ). What is the output as a function of time?
5. A system has a transfer function of 5/( _s_ \+ 3). What will be its output as a function of time when subject to (a) a unit step input of 1 V, (b) a unit impulse input of 1 V?
6. A system has a transfer function of 2/( _s_ \+ 1). What will be its output as a function of time when subject to (a) a step input of 3 V, (b) an impulse input of 3 V?
7. A system has a transfer function of 1/( _s_ \+ 2). What will be its output as a function of time when subject to (a) a step input of 4 V, (b) a ramp input unit impulse of 1 V/s?
8. Use partial fractions to simplify the following expressions:
(a)
(b)
(c)
9. A system has a transfer function of:
What will be the output as a time function when it is subject to a unit step input? Hint: use partial fractions.
10. A system has a transfer function of:
What will be the output from the system when it is subject to a unit impulse input? Hint: use partial fractions.
11. What will be the state of damping of systems having the following transfer functions and subject to a unit step input?
(a)
(b)
(c)
12. The input _x_ and output _y_ of a system are described by the differential equation:
Determine how the output will vary with time when there is an input which starts at zero time and then increases at the constant rate of 6 units/s. The initial output is zero.
13. The input _x_ and output _y_ of a system are described by the differential equation:
If initially the input and output are zero, what will be the output when there is a unit step input?
14. The input _x_ and output _y_ of a system are described by the differential equation:
If initially the input and output are zero, what will be the output when there is a unit impulse input?
15. A control system has a forward path transfer function of 2/( _s_ \+ 2) and a negative feedback loop with transfer function 4. What will be the response of the system to a unit step input?
16. A system has a transfer function of 100/( _s_ 2 \+ _s_ \+ 100). What will be its natural frequency ωn and its damping ratio ζ?
17. A system has a transfer function of 10/( _s_ 2 \+ 4 _s_ \+ 9). Is the system under-damped, critically damped or over-damped?
18. A system has a transfer function of 3/( _s_ 2 \+ 6 _s_ \+ 9). Is the system under-damped, critically damped or over-damped?
19. A system has a forward path transfer function of 10/( _s_ \+ 3) and a negative feedback loop with transfer function 5. What is the time constant of the resulting first-order system?
20. Determine the series and parallel models in the _s_ -domain for (a) an inductance of 10 mH when _i_ (0) = 0.2 A, (b) a capacitance of 2 μF when _v_ (0) = 5 V.
21. Determine the impedance in the _s_ -domain of a resistance of 10 Ω in (a) series, (b) parallel with a 2 mH inductance.
22. Determine how the current varies with time when a charged capacitor, with a potential difference of _v_ 0, is allowed to discharge through a resistance _R_.
23. Determine how the current varies with time when a step voltage _Vu_ ( _t_ ) is applied to a circuit consisting of a resistance _R_ in series with an inductance _L_ , there being no initial current in the circuit.
24. Determine how the current varies with time when a 1 V impulse is applied at time _t_ = 0 to a circuit consisting of a resistance _R_ in series with a capacitance _C_ , there being no initial potential difference across the capacitor.
7
# Sequences and series
Summary
This chapter introduces the idea of sequences, such concepts proving particularly relevant in considerations of digital signals which can be thought of as sequences of pulses. The main aspect of the chapter is, however, series and the use of the Fourier series to represent non-sinusoidal signals.
Objectives
By the end of this chapter, the reader should be able to:
• understand what is meant by a sequence and use the idea to describe digital signals;
• recognise arithmetic and geometric series;
• recognise that some series can converge to a limit, determining the sums of such series;
• recognise the binomial series and use it in engineering problems;
• represent waveforms by Fourier series and apply the series in the analysis of electrical circuit problems involving non-sinusoidal signals.
## 7.1 Sequences and series
This section considers what is meant by sets and sequences, considering some commonly encountered forms and their relevance to engineering.
### 7.1.1 Sequences
Consider the numbers, 1, 3, 5, 7, 9. Such a set of numbers is termed a _sequence_ because the numbers are stated in a definite order, 1 followed by 3 followed by 5, etc. Another sequence might be . These sequences have a finite number of terms but often we can meet ones involving an infinite number of terms, e.g. 2, 4, 6, 8, 10, 12,..., etc.
_The term sequence is used for a set of quantities stated in a definite order._
Key point
A sequence is a set of quantities stated in a definite order.
In general we can write a sequence as:
first value of variable, second value of variable, third value of variable,..., etc.
or, if _x_ is the variable:
This is usually more compactly written as _x_ [ _k_ ], where _k_ = 1, 2, 3,..., etc. Such a form of notation is commonly encountered in signal processing when perhaps an analogue signal is sampled at a number of sequential points and the resulting sequence of digital signal values processed. For example, if an analogue unit step signal is sampled the sampled data output might be expressed as _x_ [ _k_ ] = 0 for _k_ < 0, _x_ [ _k_ ] = 1 for _k_ ≥ 0 with _k_ = 0, 1, 2, 3, 4, etc. Figure 7.1 shows graphs of the unit step input and the sampled output.
Figure 7.1 (a) Unit step, (b) unit step sequence
Sometimes it is possible to describe a sequence by giving a rule for the _k_ th term, common forms being the arithmetic and geometric sequences.
Key points
An arithmetic sequence has each term formed from the previous term by simply adding on a constant value.
A geometric sequence has each term formed from the previous term by multiplying it by a constant factor, e.g. 3, 6, 12, 24,...
• **_Arithmetic sequence_**
An arithmetic sequence has each term formed from the previous term by simply adding on a constant value. If _a_ is the first term and _d_ the common difference between successive terms, the terms are:
[1]
The _k_ th term is _a_ \+ ( _k_ − 1) _d_ , with _k_ = 1, 2, 3, 4,..., etc. (note that if _k_ has the values 0, 1, 2, etc. the _kth_ term is _a_ \+ _kd_ ). Thus for such a sequence we can write:
[2]
• **_Geometric sequence_**
A geometric sequence has each term formed from the previous term by multiplying it by a constant factor, e.g. 3, 6, 12, 24,... If _a_ is the first term and _r_ the common ratio between successive terms, the terms are:
[3]
The _k_ th term is _ar_ _k_ –1, with _k_ = 1, 2, 3, 4,..., etc. Thus for such a sequence we can write:
[4]
• **_Harmonic sequence_**
The sequence is termed the _harmonic sequence_ and defined for _k_ = 1, 2, 3, etc. by:
[5]
Sequences can be generated by other rules. For example, the sequence 1, 2, 5, 10, 17,... is generated by _x_ [ _k_ ] = 1 + ( _k_ − l)2, where _k_ = 1, 2, 3,.... This sequence is neither an arithmetic nor a geometric sequence.
Example
Write down the first five terms of the sequence _x_ [ _k_ ] defined by _x_ [ _k_ ] = ½ _k_ 2 \+ _k_ when _k_ ≥ 0.
When _k_ = 0 we have 0 + 0, when _k_ = 1 we have 0.5 + 1, when _k_ = 2 we have 2 + 2, and so on. The sequence is thus 0, 1.5, 4, 7.5, 12.
Key points
A series is the sum of the terms of a sequence.
### 7.1.2 Series
A _series_ is formed by adding the terms of a sequence. Thus 1 + 3 + 5 + 7 + 9 +..., etc. is a series.
_A series is the sum of the terms of a sequence._
The sum of _n_ terms of a series is written using _sigma notation_ as:
[6]
The first and the last values of _k_ are shown below and above the sigma. For example, the series 1 + 3 + 5 + 7 + 9 would have the sum, over the five terms, written as:
Common series are:
• **_Arithmetic series_**
An arithmetic series has each term formed from the previous term by simply adding on a constant value. Such a series can be written in the general form as:
[7]
The sum to _k_ terms is:
If we write this back to front then:
Adding these two equations gives first term plus first term, second term plus second term, etc. and we obtain:
for _k_ terms. Thus 2 _S k_ = _k_ {2 _a_ \+ ( _k_ − 1) _d_ } and so:
[8]
• **_Geometric series_**
A geometric series has each term formed from the previous term by multiplying it by a constant factor. Such a series can be written in the general form as:
[9]
The sum to the _k_ th terms is:
Multiplying by _r_ gives:
Hence _S k_ − _rS k_ = _a_ − _ar_ _k_ , and so, provided _r_ ≠ 1:
[10]
Example
Determine the sum of the arithmetic series 1 + 5 + 9 +... if it contains 10 terms.
Such a series has a first term _a_ of 1 and a common difference _d_ of 4. Thus, using equation [8]:
Example
Determine the sum of the geometric series 4 + 6 + 9 +... if it contains 10 terms.
Such a series has a first term of 4 and a common ratio of 3/2. Thus, using equation [10]:
#### Convergent and divergent series
So far we have considered the sums of series with a finite number of terms. What about the sum when we have a series with an infinite number of terms?
_A series in which the sum of the series tends to a definite value as the number of terms tends to infinity is called a convergent series._
Key points
A series in which the sum of the series tends to a definite value as the number of terms tends to infinity is called a convergent series.
Consider an _arithmetic series a_ \+ ( _a_ \+ _d_ ) + ( _a_ \+ 2 _d_ ) +... for an infinite number of terms. For _k_ terms we have the sum (equation [8]) of:
As the number of terms _k_ tends to infinity the sum tends to infinity. The sum of an infinite arithmetic series is infinite. The series is said to be _divergent._
Consider a _geometric series a_ \+ _ar_ \+ _ar_ 2 \+... for an infinite number of terms. For _k_ terms we have the sum (equation [10]) of:
[11]
Suppose we have −1 < _r_ < 1, as _n_ tends to infinity then _r n_ tends to 0. Thus the second term converges to zero and we are left with just the first term. Thus such a series converges to the sum:
[12]
Thus the geometric series with _a_ = 3 and _r_ = ½ converges to the sum 6. However, if we had the geometric series with _a_ = 3 and _r_ = 2 then the sum is given by equation [11] as −3 + 3 × 2 _n_ and thus as _n_ tends to infinity the sum tends to infinity. For | _r_ | ≥ 1 the geometric series does not converge.
There are a number of ways that are used to determine whether a series will converge:
• _**Comparison test**_
A series of positive terms is convergent if its terms are less than the corresponding terms of a positive series which is known to converge. Similarly, the series is divergent if its terms are greater than the corresponding terms of a series which is known to be divergent. As an example, consider:
Suppose we know that the series:
converges (it is a geometric series with _r_ = ½), then if, after the first two terms, we compare terms we find that every term in our convergent series is greater than the one we are considering. Thus the series must also converge.
• **_D'Alembert's ratio test_**
An infinite series is convergent if, as _k_ tends to infinity, the ratio of each term _u_ _n_ +1 to the preceding term _u n_ is numerically less than 1 and divergent if greater than 1, i.e.
Consider the series:
The _n_ th term _u n_ is and the ( _n_ \+ 1)th term _u_ _n_ +1 is . Therefore:
As _n_ tends to infinity then:
and so the series converges.
Key points
The size of a real number _x_ is called its modulus and denoted by | _x_ |.
Key points
The symbol ! appearing after a number means that it is multiplied by all the integers between it and 1, e.g. 5! = 5 × 4 × 3 × 2 × 1.
Example
Find the sum to infinity of the series 4 + 2 + 1 + ½ +....
This is a geometric series with a = 4 and _r_ = ½. Using equation [12]:
Example
Determine, using the comparison test, whether the series _x_ [ _k_ ] = 1/ _n n_, i.e. 1 + 1/22 \+ 1/33 \+ 1/44 \+..., is convergent.
If we exclude the first two terms we can compare it with the geometric series 1/23 \+ 1/24 \+ 1/25 \+... which is known to be convergent. Each term in this series being tested is smaller than the comparable term in the comparison series. Thus it must be convergent.
Example
Determine, using d'Alembert's ratio test, whether the series 1 + _x_ \+ _x_ 2/2! + _x_ 3/3! +... is convergent.
Using d'Alembert's ratio test, since _u n_ = _x_ _n_ −1/( _n_ − 1)! and _u_ _n_ +1 = _x n_/ _n_!:
In the limit as _n_ tends to infinity then the ratio tends to 0. Thus the series is convergent.
#### Power series
A series of the type:
is known as a _power series_. If we apply d'Alembert's ratio test then the series will be convergent when:
This can be written as:
or:
[13]
Thus there are conditions attached to the value of _x_ if the series is to converge. Examples are given later in this chapter.
Example
For what values of _x_ is the series _x_ [ _k_ ] = _x n_/ _n_ convergent?
Here _a n_ = 1/ _n_ and _a_ _n+1_ = 1/( _n_ \+ 1). Thus | _a_ _n_ +1/ _a n_| = ( _n_ \+ 1)/ _n_ = 1 + 1/ _n_ and so in the limit we have the value of 1 for the limit. Thus the condition for convergence is that | _x_ | < 1 or − 1 < _x_ < +1.
#### Binomial series
For (1 + _x_ )2 we can readily show that it can be written as 1 + 2 _x_ \+ _x_ 2. If we multiply this by (1 + _x_ ) we obtain 1 + 3 _x_ \+ 3 _x_ 2 \+ _x_ 3. Multiplying by repeated factors of (1 + _x_ ) enables expansions of higher powers of (1 + _x_ ) to be generated. This is, however, rather cumbersome if, say, we wanted the expansion of (1 + _x_ )10. There is, however, a pattern in the results:
If we just write the coefficients the pattern is more readily discerned:
Every coefficient is obtained by adding the two either side of it in the row above. Thus, for example, we have:
The above pattern is known as _Pascal's triangle._
However, we can show that the above pattern can be given by:
[14]
This is known as the _Binomial theorem_. The theorem can be used for both positive and negative values of _n_ and fractional values. With _n_ a positive number the series will eventually terminate. With _n_ a negative number, the series does not terminate. The series converges if we have − 1 < _x_ < 1.
Example
Expand by the binomial theorem (1 + _x_ )6.
Example
Write the first four terms in the expansion of (1 + _x_ )1/2.
Maths in action
Making approximations
A common use of the Binomial theorem in engineering is for making approximations. For example, we might want to determine the change in the second moment of area of a rectangle which was given by _bL_ 3/12 if _b_ is increased by 3% and _L_ reduced by 2%. The new second moment of area is:
Using the Binomial theorem for the cubed term and neglecting, since they will be very small, all terms involving powers of 0.02:
Hence, _I_ = 0.97 _bL_ 3/12 and so the percentage change is a reduction by approximately 3%.
#### Useful power series
Table 7.1 gives some commonly met functions and their series expansions.
Table 7.1
Power series
Example
Using series given in Table 7.1, determine the series expansion of the function e _x_ sin _x._
Table 7.1 gives:
We can multiply these two series to give
Example
23. Using Table 7.1, determine the series, as far as the _x_ 3 term, for the function _y_ = e4 _x_.
Table 7.1 gives:
If we substitute 4 _x_ for _x_ then we obtain:
### Problems 7.1
1. A sinusoidal signal _f_ ( _t_ ) = sin _t_ is sampled every quarter period starting when _t_ = 0. State the sequence of sampled values.
2. Write down the first five terms of the sequence _x_ [ _k_ ] defined, for _k_ ≥ 0, by (a) _x_ [ _k_ ] = _k_ , (b) _x_ [ _k_ ] = e− _k_.
3. State the fifth term of (a) the arithmetic sequence given by 4, 7, 10,..., (b) the geometric sequence given by 12, 6, 3,....
4. Write an equation for the _k_ th term, where _k_ = 1, 2, 3,..., for the following sequences (a) 1, −1, 1, −1,..., (b) 5, 10, 15, 20,..., (c) 2, 1, 5, 1, 0.5,...,
5. Write down the first five terms of the sequence _x_ [ _k_ ] defined, for _k_ ≥ 0, by (a) _x_ [ _k_ ] = _k_ 2, (b) _x_ [ _k_ ] = e _k_ , (c) _x_ [ _k_ ] = ½ _k_ 2 \+ 2 _k_.
6. State the fifth term of the arithmetic progression given by 5, 7, 9,....
7. State the fifth term of the geometric progression given by 8, 4, 2,....
8. Write an equation for the _k_ th term for the following sequences: (a) (b) −2, +2, −2,..., (c) 3.1, 3.01, 3.001,...
9. State the first three terms of the sequences given by:
(a) (0.1) _k_ ,
(b) 5 + (0.1) _k_ ,
(c) (−1) _k_
10. Determine the sums of the following series if each contains 12 terms:
(a) 2 + 5 + 8 +...,
(b)
(c) 4 + 3.6 + 3.24 +...
11. Determine the sums of the following arithmetic or geometric series if each contains 10 terms:
(a) 3 + 2.5 + 2.0 +...,
(b) 12 + 6 + 3 +...,
(c) 1 + 2 + 4 + 8 +...
12. Find the sum to infinity of the series:
(a) 6 + 3 + 1.5 +...,
(b) 4 + 3 + 2.25 +...,
(c) 12 + 3 + 0.75 +...
13. Using the comparison test, determine whether the following series are convergent or divergent:
(a) _x_ [ _k_ ] = 1/3 _n_ (compare with 1/2 _n_ ),
(b) _x_ [ _k_ ] = 1.5 _n_ (compare with 1 _n_ )
14. Using d'Alembert's ratio test, determine whether the following series are convergent or divergent:
(a)
(b)
15. Determine which of the following series is convergent and which divergent:
(a) −1 + 1 − 1 +... (−1) _n_ \+...,
(b) 1 e−1 \+ 2 e−2 \+ 3 e−3 \+... _n_ e− _n_ \+...,
(c)
(d)
(e)
(f)
16. Expand by the binomial theorem:
(a) (1 + _x_ )4
(b) (1 + _x_ )3/2,
(c) (1 − _x_ )−5/2,
(d) (1 + 0.25)−1 for four terms,
(e) (4 + _x_ )1/2 for four terms.
17. Use the binomial theorem to write the first four terms of:
(a) (1 + _x_ )12,
(b) (1 − 2 _x_ )−2,
(c) (3 − 2 _x_ )2/5,
(d)
(e) (1 + 3 _x_ )−1/2,
(f)
18. By using the binomial theorem, determine the cube root of 1.04 to four decimal places. Hint: write 1.04 as 1 + 0.04.
19. The transverse deflection δ of a column of length L when subject to a vertical load _F_ and a horizontal load _H_ at the top is given by:
where _a_ 2 = _F/EI_. Show that as _F_ tends to zero, δ tends to _HL_ 3/3 _EI_.
20. Determine the series expansion for cosh _x_ using the relationship cosh _x_ = ½(e _x_ \+ e− _x_ ).
21. Determine the series expansion for tan _x_ using tan _x_ = sin _x_ /cos _x_.
22. Using Table 7.1, determine the series for the following functions:
(a) _y_ = e2 _x_ ,
(b) _y_ = e _x_ cos _x_ ,
(c) _y_ = (1 + _x_ )−1/2,
(d) _y_ = e _x_ ln(1 + _x_ ),
(e) _y_ = sec _x_ ,
(f) _y_ = cos2 _x_
23. Show that, if _x_ is small:
24. For a continuous belt passing round two wheels, diameters _d_ and _D_ , with centres a distance _x_ apart, the length _L_ of belt required, if there is no sag, is:
where sin _a_ = ( _D_ − _d_ )/2 _x_. Show that:
25. The displacement _x_ of the slider of a reciprocating mechanism depends on the crankshaft angle θ, being related by
where _r_ is the radius of the crankshaft and _L_ the length of the connecting rod. Show, when _r_ / _L_ is considerably smaller than 1, that:
26. Determine the approximate percentage change in the volume of a cylinder if its radius is reduced by 4% and its height increased by 2%.
27. The resonant frequency of an electrical circuit containing capacitance _C_ and inductance _L_ is given by 1/[2π√( _LC_ )]. Determine the approximate percentage change in the frequency if the capacitance is increased by 2% and the inductance decreased by 1%.
## 7.2 Fourier series
Alternating waveforms in, say, electrical circuits are not always sinusoidal. For example, many voltages which might initially have been sinusoidal have their waveforms 'distorted' by being applied to some non-linear device and thus we need to be able to consider the behaviour of such a waveform within an electrical circuit. In other cases we might have a rectangular waveform rather than a sinusoidal one. This section is a consideration of how we can use a series to describe such waveforms.
Key point
_Fourier series:_
Any periodic waveform can be represented by a constant d.c. signal term plus terms involving sinusoidal waveforms of multiples of a basic frequency.
The Fourier series is concisely expressed as:
### 7.2.1 Fourier series
In 1822 Jean Baptiste Fourier proposed that any periodic waveform could be made up of a combination of sinusoidal waveforms, i.e.
[15]
This is termed the _Fourier_ series, where _A_ 0 is a non-alternating component, e.g. a d.c. component. The waveform element with the 1 _ωt_ frequency is called the _fundamental frequency_ or the _first harmonic_ , the element with the 2ω _t_ frequency the _second harmonic_ , the element with the 3ω _t_ the third harmonic, and so on. _A_ 1, _A_ 2, _A_ 3 are the amplitudes of the components and ϕ1, ϕ2, ϕ3, etc. their phases.
As an illustration, consider the waveform produced by having just sine terms with the fundamental and the third harmonic and _A_ 3 = _A_ 1/3, i.e.
Figure 7.2 shows graphs of the two terms and the waveform obtained by adding the two, ordinate by ordinate.
Figure 7.2 Adding two waveforms
The result of adding the two waveforms is something that begins to look a bit like a rectangular waveform. The addition of a d.c. term shifts the waveform up or down. If we add a d.c. term of 0.79 _A_ 1 then Figure 7.2 becomes transformed to Figure 7.3:
A better approximation to a rectangular waveform is given by adding more terms:
We then obtain a rectangular waveform which approximates to a periodic sequence of pulses (Figure 7.4).
Figure 7.3 Adding a d.c. term
Figure 7.4 Rectangular waveform
#### Alternative way of writing the Fourier series
There is an alternative, simpler, way of writing equation [15]. Since sin ( _A_ \+ _B_ ) = sin _A_ cos _B_ \+ cos _A_ sin _B_ we can write:
If we represent the non-time varying terms _A_ 1 sin ϕ1 by a constant _a_ 1 and _A_ 1 cos ϕ1 by _b_ 1, then:
Likewise we can write:
and so on. If, for convenience, we choose to write ½ _a_ 0 for _A_ 0, equation [15] can be written as:
[16]
Hence we can write the Fourier series equation as:
[17]
The _a_ and _b_ terms are called the _Fourier coefficients._
Key points
Note: in Figure 7.3 the addition of a d.c. term of 0.79 to the waveform results in an average value of this waveform over one cycle of 0.79. The term ½ _a_ 0 in the Fourier series thus represents the average value of the waveform over a cycle.
Since we have _a n_ = _A n_ sin _φ n_ and _b n_ = _A n_ cos _φ n_ then:
[18]
and, since:
we have:
[19]
### 7.2.2 Fourier coefficients
Now consider how we can establish the Fourier coefficients for a waveform. Suppose we have the Fourier series in the form of equation [16]:
If we integrate both sides of the equation over one period _T_ of the fundamental, the integral for each cosine and sine term will be the area under the graph of that expression for one cycle and thus zero. A consequence of this is that the only term which is not zero when we integrate the equation is the integral of the _a_ 0 term. Thus, integrating over one period _T_ gives:
and so:
[20]
We can obtain the _a_ 1 term by multiplying the equation by cos ω _t_ and then integrating over one period. Thus the equation becomes:
The integration over a period _T_ of all the terms involving sin _ωt_ and cos _ωt_ will be zero. Thus we are only left with the cos2 _ωt_ term and so, using equation [13]:
and so we have:
[21]
In general, multiplying the equation by cos _nωt_ gives:
[22]
This equation gives for _n_ = 0 the equation given earlier for _a_ 0. This would not have been the case if the first term in the Fourier series had been written as _a_ 0 instead of _a_ 0/2.
In a similar way, multiplying the equation by sin _ωt_ and integrating over a period enables us to obtain the _b_ coefficients. Thus:
The integration over a period _T_ of all the terms involving sin _ωt_ and cos _ωt_ will be zero and so:
Hence:
[23]
In general, multiplying the equation by sin _nωt_ and integrating gives:
[24]
The following illustrates how the Fourier series can be established for a number of common waveforms.
#### Rectangular waveform
Consider the _rectangular waveform_ shown in Figure 7.4. It can be described as:
Now consider the determination of the coefficients. Equation [20] for _a_ 0:
has an integral which is the area under the graph of _y_ against _t_ for the period _T_. Since this area is _AT_ /2, we have _a_ 0 = _A_. To obtain _a n_ we use equation [22]:
Since _y_ has the value _A_ up to _T_ /2 and is zero thereafter, we can write the above equation in two parts as:
The value of the second integral is 0 and so:
Since ω = 2π/ _T_ then the sine term is sin 2 _nπt_ / _T_. Thus with _t_ = _T_ /2 we have sin _nπ_ which is zero and since sin 0 = 0, we have _a n_ = 0.
For the _b n_ terms we use equation [24]:
Since we have _y_ = _A_ from 0 to _T_ /2 and then _y_ = 0 for the remainder of the period, this equation can be written in two parts as:
The value of the second integral is 0 and so:
Hence:
Thus the Fourier series for the rectangular waveform can be written as:
[25]
Note that only odd harmonics are present.
#### Sawtooth waveform
Consider the _sawtooth waveform_ shown in Figure 7.5. It can be described by:
To determine _a_ 0 we use equation [20]:
The integral is the area under the graph of _y_ against _t_ between 0 and time _T_. This is _AT_ /2 and so _a_ 0 = _A_. To obtain _a n_ we use equation [22]:
Since ω = 2π/ _T_ and _y_ = _At_ / _T_ then:
Using integration by parts gives:
The values of _a n_ are zero for all values other than _a_ 0. The values of _b n_ can be found by using equation [24]:
Integration by parts gives:
The Fourier series for the sawtooth waveform is thus:
[26]
We can write this as:
#### Half-wave rectified sinusoid
Consider a half-rectified sinusoidal waveform of period _T_ (Figure 7.6). This can be described by:
Figure 7.5 Sawtooth waveform
Figure 7.6 Half-wave rectified sinusoid
We can determine _a_ 0 by using equation [20]:
Equation [22] can be used to determine _a n_:
Since 2 sin _A_ cos _B_ = sin( _A_ \+ _B_ ) + sin( _A_ − _B_ ):
For _n_ = 1 we have:
For _n_ > 1 we have:
For even values of _n_ we have cos(1 + _n_ )π = −1 and cos(1 − _n_ )π = −1 and so:
For odd values, other than 1, of _n_ we have cos(1 + _n_ )π = 1 and cos(1 − _n_ )π = 1. This gives:
The values of _b n_ can be found using equation [24]:
Since 2 sin _A_ sin _B_ = cos( _A_ − _B_ ) − cos( _A_ \+ _B_ ):
For _n_ = 1 we have:
For _n_ > 1 we have:
Since sin(1 − _n_ )π = 0 and sin(1 + _n_ )π = 0, we have _b n_ = 0 for all values of _n_ other than 1.
The Fourier series for the half-wave rectified sinusoid is thus:
[27]
#### Shift of origin
The Fourier series for the rectangular waveform shown in Figure 7.7(a) is:
Figure 7.7 Origin shifts
[28]
Now consider the waveform in Figure 7.7(b). This is the waveform in (a) with the time origin shifted to the right by π/2. If we work out the Fourier series for this waveform we find that it is equation [28] with _t_ replaced by ( _t_ \+ π/2).
and so:
[29]
Thus we have the rule:
_Shifting the time origin of a waveform to the right by θ means replacing _t_ by ( _t_ \+ θ) in the Fourier series. Shifting the time origin to the left by θ means replacing _t_ by ( _t_ − θ)._
Now consider the waveform in Figure 7.7(c). This is that in (a) shifted vertically by _A_ , i.e. the waveform in (a) plus _A_. The Fourier series is then that of (a) plus _A_ :
[30]
This gives the rule:
_Shifting the time axis vertically downwards adds to the Fourier series the amount of the shift, shifting upwards subtracts._
Key points
Shifting the time origin of a waveform to the right by θ means replacing _t_ by ( _t_ \+ θ) in the Fourier series. Shifting the time origin to the left by θ means replacing t by ( _t_ − θ).
Shifting the time axis vertically downwards adds to the Fourier series the amount of the shift, shifting upwards subtracts.
### 7.2.3 Odd and even symmetry
As will be apparent from the above examples, certain terms are not always present in a Fourier series. Consideration of whether functions have odd or even symmetry about the origin enables us to determine the presence or otherwise of terms.
• **_Odd symmetry_**
A function with odd symmetry is defined as having _f_ (− _t_ ) = − _f_ ( _t_ ). This means that the function value for a particular positive value of time is equal in magnitude but of opposite sign to that for the corresponding negative value of that time. Thus _y_ = _f_ ( _x_ ) = _x_ 3 is an odd function since _f_ (−2) = −8 = − _f_ (2). For every point on the waveform for positive times there is a corresponding point on the waveform on a straight line drawn through the origin and equidistant from it (Figure 7.8).
Figure 7.8 Odd symmetry
• **_Even symmetry_**
A function with even symmetry is defined as having _f_ (− _t_ ) = _f_ ( _t_ ). This means that the function value for a particular positive value of time is identical to that for the corresponding negative value of that time. Thus _y_ = _f_ ( _x_ ) = _x_ 2 is an even function since _f_ (−2) = 4 = _f_ (2). If the y-axis was a plane mirror then the reflection of the positive time values for the waveform would give the negative time values (Figure 7.9).
Figure 7.9 Even symmetry
In determining Fourier coefficients it is necessary to consider the odd or even nature of products of two odd or even functions.
• **__Product of two even functions__**
Consider _f_ ( _x_ ) and _g_ ( _x_ ) and the product _F_ ( _x_ ) = _f_ ( _x_ ) _g_ ( _x_ ). We can write _F_ (− _x_ ) = _f_ (− _x_ ) _g_ (− _x_ ). Thus if _f_ ( _x_ ) and _g_ ( _x_ ) are both even we must have _F_ (− _x_ ) = _f_ ( _x_ ) _g_ ( _x_ ) and so _F_ (− _x_ ) = _F_ ( _x_ ). The product of two even functions is an even function.
• **__Product of two odd functions__**
Consider _f_ ( _x_ ) and _g_ ( _x_ ) and the product _F_ ( _x_ ) = _f_ ( _x_ ) _g_ ( _x_ ). We can write _F_ (− _x_ ) = _f_ (− _x_ ) _g_ (− _x_ ). Thus if _f_ ( _x_ ) and _g_ ( _x_ ) are both odd we must have _F_ (− _x_ ) = {− _f_ ( _x_ )}{− _g_ ( _x_ )} and so _F_ (− _x_ ) = _F_ ( _x_ ). The product of two odd functions is an even function.
• **__Product of an odd and an even function__**
Consider _f_ ( _x_ ) and _g(x)_ and the product _F_ ( _x_ ) = _f_ ( _x_ ) _g_ ( _x_ ). We can write _F_ (− _x_ ) = _f_ (− _x_ ) _g_ (− _x_ ). Thus if _f_ ( _x_ ) is even and _g_ ( _x_ ) is odd we must have _F_ (− _x_ ) = _f_ ( _x_ ){− _g_ ( _x_ )}= − _f_ ( _x_ ) _g_ ( _x_ ) and so _F_ (− _x_ ) = − _F_ ( _x_ ). The product of an even and an odd function is an odd function.
Example
Determine whether (a) _x_ 2, (b) cos 2 _x_ and ( _c_ ) _x_ 2 cos 2 _x_ are even or odd functions.
(a) _y_ = _f_ ( _x_ ) = _x_ 2 is an even function since if we consider some particular value of _x_ , say −2, we have _f_ (−2) = 4 = _f_ (2).
(b) _y_ = _f_ ( _x_ ) = cos 2 _x_ is an even function since if we consider some particular value of _x_ , say −π/2 we have _f_ (−π/2) = 0 = _f_ (π/2).
(c) Since the product of two even functions is even, _x_ 2 cos 2 _x_ is an even function.
#### Fourier coefficients for odd/even symmetry
Consider the coefficients for a Fourier series for functions showing odd or even symmetry.
• **__a_ 0 _coefficients__**
_a_ 0 is given by equation [20] as:
For a function with even symmetry we have the areas under the waveform on each side of the _y_ -axis equal in both size and sign. Figure 7.10(a) illustrates this. A consequence of this is:
[31]
But for an odd function (Figure 7.10(b)) the areas under the waveform on each side of the _y_ -axis are equal in size but opposite in sign. A consequence of this is that there can be no _a_ 0 term:
[32]
We can look at this issue in another way. The mean value over one cycle of a waveform is _a_ 0/2. Thus for an odd function _a_ 0 is 0 because the mean value of the waveform over a cycle is 0.
Figure 7.10 (a) Even, (b) odd
• _**a n coefficients**_
For the _a n_ coefficients equation [22] gives:
Since cos _nωt_ is an even function, if _f_ ( _t_ ) is even then the product is even. Hence we have, on the basis of the discussion used for _a_ 0:
[33]
If _f_ ( _t_ ) is odd then the product is odd. Thus on the basis of the discussion used for _a_ 0:
[34]
• _**b n coefficients**_
For the _b n_ coefficients equation [24] gives:
Since sin _nωt_ is an odd function, if _f_ ( _t_ ) is even then the product is odd. Thus, on the basis of the discussion used for _a_ 0:
[35]
If _f_ ( _t_ ) is odd then the product is even. Thus, on the basis of the discussion used for _a_ 0:
[36]
Key points
If f(t) is an even function then the Fourier series contains an a0 term and only cosine terms. If f(t) is an odd function then the Fourier series contains no a0 term and only sine terms.
To summarise:
_If f(t) is an even function then the Fourier series contains an a0 term and only cosine terms. If f(t) is an odd function then the Fourier series contains no a0 term and only sine terms._
Example
Determine the Fourier series for the function shown in Figure 7.11.
Figure 7.11 Example
The function is an even function and so the _b_ coefficients are all zero. The period is 2 and so ω = π. Thus, using equation [31]:
Using equation [33]:
Thus the Fourier series is:
#### Half-wave symmetry
It is often possible by considering the symmetry of successive half-cycle waves within a waveform to recognise whether it will contain odd or even harmonics.
• **Half-wave inversion**
Any complex waveform which has a negative half-cycle which is just the positive cycle inverted will contain only odd harmonics, such a form of symmetry being termed _half-wave inversion_. Thus Figure 7.12(a) shows a waveform which has negative half-cycles which are just the positive half-cycles inverted and so does not contain any even harmonics.
Figure 7.12 Waveform with (a) identical positive and negative half-cycles, (b) repetition every half-cycle
• **Half-wave repetition**
Waveforms which repeat themselves after each half-cycle of the fundamental frequency will have just even harmonics, such a form of symmetry being termed _half-wave repetition_. Figure 7.12(b) shows a waveform which repeats itself after each half-cycle and so has just even harmonics.
We can see why the above statements occur by considering the conditions that are necessary for a Fourier series to give the required symmetry. Thus if we have the series describing the waveform at time _t_ :
[37]
To obtain the value of the waveform after half a cycle, i.e. at time _t_ \+ π, we put this value of time into equation [37]:
[38]
Key points
Any complex waveform which has a negative half-cycle which is just the positive cycle inverted will contain only odd harmonics, such a form of symmetry being termed _half-wave inversion._
Waveforms which repeat themselves after each half-cycle of the fundamental frequency will have just even harmonics, such a form of symmetry being termed _half-wave repetition._
If the waveform is to have negative half-cycles which are just the positive half-cycles inverted we must have the waveform after half a cycle, i.e. at time _t_ \+ π, which is − _v_ at _t_. Thus we must have:
[39]
This can only occur if _a_ 0 = 0, _a_ 2 = 0, and all even harmonics are 0.
If the waveform is to have waveforms which repeat themselves after half a cycle then we must have the waveform at time _t_ \+ π equal to v at time _t_. Thus we must have:
[40]
This can only occur if _a_ 1 = 0, _a_ 3 = 0 and all odd harmonics are 0.
Key points
Equations [41] and [42] are for the Fourier series in the form:
Note that if we want to refer to a series in the form:
then, in order to take account of sin _ωt_ = cos ( _ωt_ − π/2), i.e. the phase difference of −90° between the cosine and sine, equation [42] becomes:
### 7.2.4 Frequency spectrum
The _frequency spectrum_ comprises an _amplitude spectrum_ , which is a graph of the amplitudes of each of the constituent sinusoidal components in the Fourier series plotted against frequency, and a _phase spectrum_ which is their phases. The amplitudes are given from the Fourier coefficients by equation [19]:
[41]
and the phases of sinusoidal components by equation [18] as:
[42]
Example
Determine the frequency spectrum of the rectangular waveform with _a_ 0 = 1, _a n_ = 0 and _b n_ = (1 − cos _nπ_ )/ _nπ._
We have _b_ 1 = 2/π = 0.64, _b_ 2 = 0, _b_ 3 = 2/3π = 0.21, _b_ 4 = 0, _b_ 5 = 2/5π = 0.13, etc. The _A_ 0 term is 1. Using equation [41], the _A_ 1 term is 0.64, the _A_ 2 term 0, the _A_ 3 term 0.21, the _A_ 4 term 0, the _A_ 5 term 0.13, etc. The phases, when referred to a sine wave, are 0° for all components. When referred to a cosine wave they are −90°. Figure 7.13(a) shows the resulting amplitude spectrum and Figure 7.13(b) the phase spectrum when referring to sinusoidal components.
Figure 7.13 Frequency spectrum
Example
Determine the frequency spectrum for a half-wave rectified sinusoidal waveform if it has the Fourier series:
The _A_ 0 term is 1/π = 0.32. Using equation [41], the _A_ 1 term is 0.5, the _A_ 2 term 2/3π = 0.21, the _A_ 3 term 0 and the _A_ 4 term 2/15π = 0.04. The phases, when referred to a sine wave are φ1 = 0 and since −cos _ωt_ = sin( _ωt_ − 90°), φ2 = −90° and φ4 = −90°. Figure 7.14(a) shows the amplitude spectrum and Figure 7.14(b) the phase spectrum when referring to sinusoidal components.
Figure 7.14 Frequency spectrum
Maths in action
Electric circuit analysis
Often in considering electrical systems the input is not a simple d.c. or sinusoidal a.c. signal but perhaps a square wave periodic signal or a distorted sinusoidal signal or a half-wave rectified sinusoid. Such problems can be tackled by representing the waveform as a Fourier series and using the _principle of superposition_ ; we find the overall effect of the waveform by summing the effects due to each term in the Fourier series considered alone. Thus if we have a voltage waveform:
then we can consider the effects of each element taken alone. Thus we can calculate the current due to the voltage _V_ 0, that due to _V_ 1 sin _ωt_ , that due to _V_ 2 sin 2 _ωt_ , that due to _V_ 3 sin 3 _ωt_ , and so on for all the terms in the series. We then add these currents to obtain the overall current due to the waveform.
Consider the application to a pure resistance _R_. Since _i_ = _v_ / _R_ and resistance _R_ is independent of frequency, then the current due to the _V_ 0 term is _V_ 0/ _R_ , that due to the first harmonic term is ( _V_ 1 sin _ωt_ )/ _R_ , that due to the second harmonic term is ( _V_ 2 sin 2 _ωt_ )/ _R_ and so on. Thus the resulting current waveform is:
Because the resistance is the same for each harmonic, the amplitude of each voltage harmonic is reduced by the same factor, i.e. the resistance. The phases of each harmonic are not changed. The current waveform is thus the same shape as the voltage waveform.
Consider the application to a pure inductance _L_. The impedance of a pure inductance depends on the frequency, i.e. its reactance _X_ L = _ωL_. Also the current lags the voltage by 90°. The impedance is 0 when the frequency is 0 and thus the current due to the _V_ 0 term will be 0. The current due to the first harmonic will be the voltage of that harmonic divided by the impedance at that frequency and so _V_ 1 sin ( _ωt_ − 90°)/ _ωL_. The current due to the second harmonic will be the voltage of that harmonic divided by the impedance at that frequency and so _V_ 2 sin (2 _ωt_ − 90°)/2 _ωL_. Thus the current waveform will be:
Each of the voltage terms has its amplitude altered by a different amount; the phase, however, is changed by the same amount. The result is that the shape of the current waveform is different to that of the voltage waveform.
Consider a pure capacitor capacitance _C_. The impedance of a pure capacitor depends on the frequency, i.e. its reactance _X_ c = 1/ _ωC_ , and the current leads the voltage by 90°. The impedance is 0 when the frequency is 0 and thus the current due to the _V_ 0 term will be 0. The current due to the first harmonic will be the voltage of that harmonic divided by the impedance at that frequency and so _V_ 1 sin ( _ωt_ \+ 90°)/(1/ _ωC_ ). For the second harmonic the current will be the voltage of that harmonic divided by the impedance at that frequency and so _V_ 2 sin (2 _ωt_ \+ 90°)/(1/2 _ωC_ ). Thus the current waveform will be:
Each of the voltage terms has had their amplitude altered by a different amount but the phase changed by the same amount. The result of this is that the shape of the current waveform is different to that of the voltage waveform.
Example
A voltage of 2.5 + 3.2 sin 100 _t_ \+ 1.6 sin 200 _t_ V is applied across a resistor having a resistance of 100 Ω. Determine the current through the resistor.
The complex current will be the sum of the currents due to each of the voltage terms in the complex voltage. Since the resistance is the same at all frequencies, the complex current will be:
Thus, each of the elements has the same phase as the corresponding voltage element.
Example
A complex voltage of 2.5 + 3.2 sin 100 _t_ \+ 1.6 sin 200 _t_ V is applied across a pure inductor having an inductance of 100 mH. Determine the current through the inductor.
The impedance is 0 when the frequency is 0 and thus the current due to the 2.5 V term will be 0. For the second term, the reactance is 100 × 0.100 = 10 Ω and the current lags the voltage by 90° and so the current due to this harmonic is 0.32 sin (100 _t_ − 90°) A. For the third term, the reactance is 200 × 0.100 = 20 Ω and the current lags the voltage by 90° and so the current due to this harmonic is 0.08 sin (200 _t_ − 90°) A. Thus the current waveform is:
Example
Determine the waveform of the current occurring when a 2μF capacitor has connected across it the half-wave rectified sinusoidal voltage _v_ = 0.32 + 0.5 cos 100 _t_ \+ 0.21 cos 200 _t_ V.
There will be no current arising from the d.c. term. For the first harmonic the reactance is 1/(2 × 10−6 × 100) Ω and so we have a current of 0.5 × 2 × 10−6 × 100 cos (100 _t_ \+ 90°) A. For the second harmonic the reactance is 1/(2 × 10−6 × 200) Ω and so the current is 0.21 × 2 × 10−6 × 200 cos (200 _t_ \+ 90°). Thus the resulting current is:
Example
A voltage of _v_ = 100 cos 314 _t_ \+ 50 sin(5 × 314 _t_ − π/6) V is applied to a series circuit consisting of a 10 Ω resistor, a 0.02 H inductor and a 50 μF capacitor. Determine the circuit current.
For the first harmonic, the resistance is 10 Ω, the inductive reactance is _ωL_ = 314 × 0.02 = 6.28 Ω and the capacitive reactance is 1/ _ωC_ = 1/(314 × 50 × 10−6) = 63.8 Ω. Thus the total impedance is:
Thus the current due to the first harmonic is:
For the fifth harmonic, the resistance is 10 Ω, the inductive reactance is 5 _ωL_ = 5 × 314 × 0.02 = 31.4 Ω and the capacitive reactance is 1/5 _ωC_ = 1/(5 × 314 × 50 × 10−6) = 12.76 Ω. Thus the total impedance is:
Thus the current due to the fifth harmonic is:
Thus the current waveform is:
Example
A half-wave rectified sinusoidal voltage:
is applied to a circuit consisting of a 1 Ω resistor in series with a 1 F capacitor. Determine the waveform of the voltage output across the capacitor.
Figure 7.15 shows the circuit. The output is the fraction of the input voltage that is across the capacitor. Thus, using phasors and the component values given:
where ω = π is the fundamental frequency. For the d.c. component, with ω = 0, we have **V** out 0 = **V** in 0 = 0.32 **V**. For the first harmonic we have **V** in 1 = −j0.5 **V** and thus the output due to this term is:
For the second harmonic we have **V** in 2 = −j0.21 **V** and thus the output due to this term is:
For the fourth harmonic we have **V** in 4 = −j0.04 **V** and thus the output due to this term is:
Thus the output is:
Figure 7.15 Example
Maths in action
Rectifier filter circuit
A full-wave rectifier produces a far-from-smooth output and relies on the use of a _LC_ filter in order to give an output which reasonably approximates to a smooth d.c. voltage. Figure 7.16 shows the circuit. The output from the full-wave rectifier can be described by the Fourier series:
The first term is a constant and so represents a d.c. component. The second, and succeeding terms, represent alternating voltages which can be considered to be superimposed on the d.c. voltage.
Figure 7.16 Full-wave recifier with filter
The output voltage from filter circuit is across the resistive load. Assuming ideal components, we have a d.c. voltage of 2 _V_ m/π across the load resistor. For the capacitor to provide effective smoothing of the output, its reactance must be low compared with the load resistance so as to divert most of the a.c. element away from the load resistor. For the a.c. elements, the circuit is effectively just a pure inductance in series with a pure capacitor. It is a voltage-divider circuit, thus for the _n_ th harmonic:
and so:
For the 2nd harmonic we have _X_ c = 1/2 _ωC_ and _X_ L = 2 _ωL_. Thus:
Since 4ω2 _LC_ will be much greater than 1, the equation approximates to:
For the 2nd harmonic _v_ = −(2 _V_ m/π)(2/3) cos 2 _ωt_ and so:
This will give a ripple on the output d.c. voltage. The size of the ripple is the peak-to-peak value of the alternating component and so is 2 times the maximum amplitude of the ripple:
A measure of the smoothness of the d.c. output is provided by the _ripple factor r_. This can be defined as:
Thus, since we have a d.c. voltage component of 2 _V_ m/π:
As an illustration, consider the inductance needed with such a filter circuit to give a 1% ripple factor for a frequency of 50 Hz and a smoothing capacitor of 10 μF. Using these values in the above equation:
### Problems 7.2
1. What harmonics are present in the waveform given by _v_ = 1.0 − 0.67 cos 2 _ωt_ − 0.13 cos 4 _ωt_?
2. Determine the Fourier series for the waveform shown in Figure 7.17.
Figure 7.17 Problem 2
3. Determine the Fourier series for the waveform in Figure 7.18.
Figure 7.18 Problem 3
4. Determine the Fourier series for the full-wave rectified sinusoid (Figure 7.19).
Figure 7.19 Problem 4
5. Determine the nature of the terms within the Fourier series for the waveforms shown in Figure 7.20. _T_ is the periodic time for a cycle.
Figure 7.20 Problem 5
6. Given that the Fourier series for the waveform in Figure 7.21(a) is:
Determine, by considering the shift of origin, the Fourier series for the waveform shown in Figure 7.21(b).
Figure 7.21 Problem 6
7. What terms will be present in the Fourier series for the waveforms shown in Figure 7.22?
Figure 7.22 Problem 7
8. From considerations of the mean values of the waveforms in Figure 7.23, what will be the values of a0?
Figure 7.23 Problem 8
9. Determine whether the following are even or odd functions:
(a) sin _x_ ,
(b) _x_ ,
(c) _x_ sin _x_ ,
(d) _x_ cos 2 _x_ ,
(e) _x_ 3 cos 2 _x_
10. Determine what terms the following waveforms will contain in their Fourier series:
(a) _f_ ( _t_ ) = 3 _t_ for −π ≤ _t_ < π, period 2π,
(b) _f_ ( _t_ ) = cos _t_ for −π ≤ _t_ < π, period 2π,
(c) _f_ ( _t_ ) = _t_ 2 cos _t_ for −π ≤ _t_ < π, period 2π
11. Determine the Fourier series for the waveform described by _f_ ( _t_ ) = _t_ for −π ≤ _t_ < 0 with a period of 2π.
12. Determine the amplitude and phase (referred to a sine) elements for the frequency spectrum of the waveforms giving the following Fourier series:
(a)
(b) _a_ 0 = π/2, _a n_ = 0 for _n_ even and −2/ _n_ 2π for _n_ odd, _b n_ = −(−1)n/ _n_
13. Determine the waveform of the current occurring when a resistor of resistance 1 kΩ has connected across it the half-wave rectified sinusoidal voltage _v_ = 0.32 + 0.5 cos 100 _t_ \+ 0.21 cos 200 _t_ V.
14. Determine the waveform of the current when a pure inductor of inductance 10 mH has connected across it the half-wave rectified sinusoidal voltage _**v**_ = 0.32 + 0.5 cos 100 _t_ \+ 0.21 cos 200 _t_ V.
15. A voltage of 2.5 + 3.2 sin 100 _t_ \+ 1.6 sin 200 _t_ V is applied across a 10 μF capacitor. Determine the current.
8
# Logic gates
Summary
In digital circuits extensive use is made of switching circuits. A switch is either on or off with these states being denoted by the digits 1 or 0. A logic circuit can be considered as a collection of switching circuits. In this chapter the basic mathematics necessary to analyse and synthesise such circuits is introduced. The mathematics involved is named after George Boole (1815–64) who first developed the modern ideas of the mathematics concerned with the manipulation of logic statements. In this chapter, Boolean algebra is approached by means of the analysis of switching circuits.
Objectives
By the end of this chapter, the reader should be able to:
• represent switching systems by logic gates;
• represent the action of such gates by truth tables;
• describe switching logic by Boolean statements;
• manipulate Boolean statements by the use of the rules of Boolean algebra.
## 8.1 Logic gates
Digital electronic logic gates are relatively cheap and readily available as integrated circuits. Such gates find a wide range of applications. For example, they might be used to determine when an input signal control system is to be allowed to give an output, as in an alarm system. Such logic gates are essentially just switching devices and this section considers the basics of such devices.
### 8.1.1 Switching circuits
Consider a simple on-off switch (Figure 8.1). If we denote a closed contact by a 1 and an open contact by a 0 then the switch has just two possible states: 1 or 0.
Figure 8.1 The two states of a switch
Suppose we have two switches _a_ and _b_ in series. Each switch has two possible states, 0 and 1. Figure 8.2 shows the various possibilities for switches. In (a) both switches are open, in (b) _a_ is open and _b_ is closed, in (c) _a_ is closed and _b_ is open and in (d) _a_ and _b_ are both closed. With (a) the effect of both switches being open is the same as would be obtained by a single open switch; (b) and (c) likewise are equivalent to a single open switch but (d) is equivalent to a single closed switch. Thus we can say that the two elements are equivalent to 0 for (a), (b) and (c) but 1 for (d). In tabular form we can represent the state of the circuit by Table 8.1:Such a table is known as a _truth table_. If _a_ AND _b_ are 1 then the result is 1. Such an arrangement is known as an AND gate since both _a_ and _b_ have to be 1 for the output to be 1.
Table 8.1
_Truth table for a_ AND _b_
_a_ | _b_ | Output
---|---|---
0 | 0 | 0
0 | 1 | 0
1 | 0 | 0
1 | 1 | 1
Figure 8.2 Switches in series
Key point
A truth table lists the outputs for each combination of inputs.
Consider two switches in parallel. Figure 8.3 shows the various possibilities for switches. In (a) both switches are open, in (b) _a_ is open and _b_ is closed, in (c) _a_ is closed and _b_ is open and in (d) _a_ and _b_ are both closed. With (a) the effect of both switches being open is the same as would be obtained by a single open switch; (b), (c) and (d) are equivalent to a single closed switch. Thus we can say that the two elements are equivalent to 0 for (a), and 1 for (b), (c) and (d). In tabular form we can represent the state of the circuit by the truth table (Table 8.2):Such an arrangement is known as an OR gate since if _a_ or _b_ is 1 then the result is 1.
Table 8.2
_Truth table for a_ OR _b_
_a_ | _b_ | Output
---|---|---
0 | 0 | 0
0 | 1 | 1
1 | 0 | 1
1 | 1 | 1
Figure 8.3 Parallel switches
Another possible form of switch circuit is where two switches are connected together so that the closing of one switch results in the opening of the other. Figure 8.4(a) illustrates the switch action with (b) showing the upper switch open when the lower switch is closed and (c) the upper switch closed when the lower switch is open. The lower switch is said to give the _complement_ of the upper switch. Table 8.3 is the truth table:Such an arrangement constitutes a NOT switching circuit, since if one switch is 1 then the other switch is not 1.
Table 8.3
_Truth table for_ NOT
Upper switch | Lower switch
---|---
0 | 1
1 | 0
Figure 8.4 Complement
#### Logic gates
With a mechanical switch we can represent the two logical states of 0 or 1 as the switch being open and closed. With electronic switches, 0 is taken to be a low voltage level and 1 a high voltage level for what is called _positive logic_ , although the opposite convention ( _negative logic_ ) can be used with 0 being represented by a high voltage level and 1 by a low voltage level. The 0 and the 1 do not represent actual numbers but the state of the voltage or current. The term _logic level_ is often used with the voltage being said to be at logic level 0 or logic level 1.
The basic building blocks of digital electronic circuits are called _logic gates_. A logic gate is an electronic block which has one or more inputs and an output. The output can be either high or low depending on the digital levels at the input terminals. The following sections take a look at the logic gates: AND, OR, INVERT/NOT, NAND, NOR and XOR. Different sets of standard circuit symbols have been developed in Britain, Europe and the United States; an international standard (IEEE/ANSI) has, however, been developed based on squares. In this text, both the IEEE/ANSI form and the older United States form are shown.
• **AND _gate_**
The AND gate gives an output 1 when both input _A_ and input _B_ are 1. Figure 8.5 shows the symbol, the associated truth table being given in Table 8.4
Table 8.4
AND _gate_
_A_ | _B_ | Output
---|---|---
0 | 0 | 0
0 | 1 | 0
1 | 0 | 0
1 | 1 | 1
Figure 8.5 AND
• **OR _gate_**
The OR gate gives an output 1 when either input _A_ or input _B_ is 1. Figure 8.6 shows the symbol and Table 8.5 the truth table.
Table 8.5
OR _gate_
_A_ | _B_ | Output
---|---|---
0 | 0 | 0
0 | 1 | 1
1 | 0 | 1
1 | 1 | 1
Figure 8.6 OR
• **INVERT/NOT _gate_**
The INVERT or NOT gate has a single input and gives a 1 output when the input is 0. The gate inverts the input, giving a 1 when the input is 0 and a 0 when the input is 1. Figure 8.7 shows the gate symbol and Table 8.6 gives the truth table.
Table 8.6
NOT _gate_
_A_ | Output
---|---
0 | 1
1 | 0
Figure 8.7 NOT
• **NAND _gate_**
This gate (Figure 8.8) is logically equivalent to a NOT gate in series with an AND gate, NAND standing for NotAND. The symbol for the gate is the AND symbol followed by a small circle, the small circle being used to indicate negation. The gate has the truth table shown in Table 8.7. There is a 1 output whenever _A_ and _B_ are _not both_ 1; the output is 0 only when both inputs are 1.
Table 8.7
NAND _gate_
_A_ | _B_ | Output
---|---|---
0 | 0 | 1
0 | 1 | 1
1 | 0 | 1
1 | 1 | 0
Figure 8.8 NAND
• **NOR _gate_**
This gate (Figure 8.9) is logically equivalent to a NOT gate in series with an OR gate. It is represented by the OR gate symbol followed by a small circle to indicate negation. Table 8.8 gives the truth table, there being a 1 output when neither _A_ nor _B_ is 1.
Table 8.8
NOR _gate_
_A_ | _B_ | Output
---|---|---
0 | 0 | 1
0 | 1 | 0
1 | 0 | 0
1 | 1 | 0
Figure 8.9 NOR
• **EXCLUSIVE OR (XOR) _gate_**
The OR gate gives an output 1 when either input _A_ or input _B_ is 1 or both _A_ and _B_ are 1. The EXCLUSIVE OR gate gives an output 1 when either input _A_ or input _B_ is 1 but not when both are. Figure 8.10 shows the gate symbol and Table 8.9 the truth table.
Table 8.9
XOR _gate_
_A_ | _B_ | Output
---|---|---
0 | 0 | 0
0 | 1 | 1
1 | 0 | 1
1 | 1 | 0
Figure 8.10 XOR
By combining gates it is possible to generate other switching operations. This is illustrated in the following example and discussed later in this chapter.
Example
Suppose we wanted to design a switching circuit in order to operate a relay from a combination of four switches so that the relay is energised when switch 1 and switch 2 are both closed, or when switch 3 and switch 4 are both closed, or when switch 1 and switch 3 are both closed. Design a system of logic gates which would give this.
The output required is when we have (S1 and S2) or (S3 and S4) or (S1 and S3). Figure 8.11 shows how this may be realised with gates.
Figure 8.11 Example
Maths in action
Ladder programming with PLCs
Programmable logic controllers (PLCs) use a simple form of programming in order to exercise control functions. This program involves drawing each step in a program as the rung on a ladder, each rung then being taken in turn from top to bottom. Each rung can execute logic switching functions such as AND and OR. Figure 8.12(a) shows the symbols used to represent normally open switches, normally closed switches and output devices. Figure 8.12(b) shows three rungs in a ladder program. With rung 1 we have an AND gate situation in that both _A_ and _B_ have to be on for there to be an output. With rung 2 either _A_ or _B_ have to be on for there to be an output and so we have an OR gate. Rung 3 shows a NOT gate in that when _A_ has an input it switches the output off.
Figure 8.12 Ladder programming
## 8.2 Boolean algebra
In this section we look at how we can develop algebraic notation and rules to describe and manipulate logic gate arrangements.
#### Notation
For the AND operation, i.e. the series connections of switches _a_ and _b, a_ is considered to be _multiplied_ by _b_. Generally • is used for the multiplication symbol. From truth Table 8.4 we thus have the rules:
[1]
For the OR operation, i.e. the parallel connection of switches _a_ and _b, a_ is considered to be _added_ to _b_. From truth Table 8.5 we have the rules:
[2]
For the NOT operation, i.e. the complement with the switches, we use a bar over a symbol to indicate NOT. Thus truth Table 8.6 gives the rules:
[3]
#### Boolean algebra
The binary digits 1 and 0 are the _Boolean variables_ and, together with the operations ⋅, + and the complement, form what is known as _Boolean algebra_. By constructing the appropriate truth tables the following laws can be derived:
• **_Anything_ OR _ed with itself is equal to itself_**
See Table 8.10.
Table 8.10
OR
_a_ | _a_ | _a_ + _a_
---|---|---
0 | 0 | 0
1 | 1 | 1
[4]
• **_Anything_ AND _ed with itself is equal to itself_**
See Table 8.11.
Table 8.11
AND
_a_ | _a_ | _a_ · _a_
---|---|---
0 | 0 | 0
1 | 1 | 1
[5]
• **_Input sequence for_ OR _and_ AND**
It does not matter in which order we take the inputs for OR and AND gates, the output is the same. This is illustrated by Table 8.12 for OR.
Table 8.12
OR
_a_ | _b_ | _a_ + _b_
---|---|---
0 | 0 | 0
0 | 1 | 1
1 | 0 | 1
1 | 1 | 1
[6]
[7]
• **_Handling bracketed terms_**
As Table 8.13(a) indicates:
Table 8.13(a)
[8]
As Table 8.13(b) indicates.
[9]
• **_Complementary law_**
Table 8.13(b)
Anything ORed with its own negative is 1. See Table 8.14.
Table 8.14
[10]
Anything ANDed with its own negative is 0. See Table 8.15.
[11]
• **OR _ing with 0 or 1_**
Table 8.15
Anything ORed with a 0 is equal to itself, anything ORed with a 1 is equal to 1. See Table 8.16.
Table 8.16
_a_ | _a_ +0 | _a_ +1
---|---|---
0 | 0 | 1
1 | 1 | 1
[12]
• **AND _ing with 0 or 1_**
Anything ANDed with a 0 is equal to 0, anything ANDed with a 1 is equal to itself. See Table 8.17.
Table 8.17
_a_ | _a_ ·0 | _a_ ·1
---|---|---
0 | 0 | 0
1 | 0 | 1
[13]
Key points
De Morgan laws:
#### De Morgan laws:
• The complement of the outcome of switches _a_ and _b_ in parallel, i.e. an OR situation, is the same as when the complements of _a_ and _b_ are separately combined in series, i.e. the AND situation. Table 8.18 shows the validity of this.
Table 8.18
[14]
• The complement of the outcome of switches _a_ and _b_ in series, i.e. the AND situation, is the same as when the complements of _a_ and _b_ are separately considered in parallel, i.e. the OR situation. Table 8.19 shows the validity of this.
Table 8.19
[15]
Using the rules given above, complicated switching circuits can be reduced to simpler equivalent circuits.
Example
Simplify the following Boolean function:
Using equation [9]: gives _a_ · ( _b_ \+ _c_ ) = _a_ · _b_ \+ _a_ · _c_. Since:
we can write:
Using equation [9] for the first two terms gives:
Then using equations [7] and [10] gives:
Example
Simplify the function:
Using equation [13] we can replace _a_ by _a_ · 1. The function can then be written as:
Then using equation [9]:
Using the second of the equations in [12] gives 1 + ( _b_ ⋅ _c_ ) = 1 and so:
Since _a_ · 1 = _a_ (equation [13]), and applying equation [8]:
But: and so, using equation [13]:
Example
The operation of an output relay controlled by a PLC program is given by the Boolean expression:
(a) Represent this expression as rungs in a PLC ladder program, with a rung for each part of the expression.
(b) Simplify the ladder program and hence write another Boolean expression which describes the simplified program.
(a) Each bracketed term can be represented by a rung in a ladder program and so give the program shown in Figure 8.13(a).
Figure 8.13 Example
(b) Figure 8.13(b) shows how we can simplify the ladder program and still give the same outcome. Such a program can then be described by the Boolean expression:
### Problems 8.2
1. Complete the following:
(a) 1 + 0 = ?, (b) 1 ⋅ 1 = ?, (c)
2. Simplify the following Boolean functions:
(a) (b) (c)( _a_ \+ _b_ ) · ( _a_ \+ _b_ ),
(d) (e)
## 8.3 Logic gate systems
The operations ·, + and the complement can be used to write the Boolean functions for complex switching circuits, the states of such circuits being determined by developing the truth table to indicate all the various switching possibilities. Boolean algebra might then be used to simplify the switching circuits.
Example
Write, for the circuit shown in Figure 8.14, (a) the truth table and (b) the Boolean function to describe that truth table.
Figure 8.14 Example
(a) _a_ and _b_ are in series, and in parallel with the series arrangement of _c_ and _d_. The result of using the switches is that only when either _a_ and _b_ are closed or _c_ and _d_ are closed will there be an output. Table 8.20 shows the truth table.
Table 8.20
Example
(b) The Boolean function for the two switches _a_ and _b_ in series is _a_ · _b_ , the AND function, and thus, since the function for two items in parallel is OR, the function for the circuit as a whole is:
Example
Derive the Boolean function for the switching circuit shown in Figure 8.15.
Figure 8.15 Example
In the upper parallel arm of the circuit, the switches _a_ and _b_ are in series and so have a Boolean expression of _a_ · _b_. In the lower arm the complements of _a_ and _b_ are in series. Thus the Boolean expression for that part of the circuit is
Because the two arms are in parallel the expression for the parallel part of the circuit is
In series with this is switch _c_. Thus the Boolean function for the circuit is:
#### Combining gates
By combining logic gates it is possible to represent other Boolean functions and use of Boolean algebra can often be used to simplify the arrangement.
Example
Determine the Boolean function describing the relation between the output from the logic circuit shown in Figure 8.16. Hence, consider how the circuit could be simplified.
Figure 8.16 Example
This might be a circuit used with a car warning buzzer so that it sounds when the key is in the ignition ( _A_ ) and a car door is opened ( _B_ ) or the headlights are on ( _C_ ) and the key is in the ignition ( _A_ ). We have two AND gates and an OR gate. The output from the top AND gate is _A_ · _B_ , and from the lower AND gate _C_ · _A_. These outputs are the inputs to the OR gate and thus the output is
The circuit can be simplified by considering the Boolean algebra. Using equation [9] the Boolean function can be written as:
We now have _A_ and _B_ or _C_. This function now describes a logic circuit with just two gates, an OR gate and an AND gate. Figure 8.17 shows the circuit.
Figure 8.17 Example
Example
Devise a logic gate system to generate the Boolean function
_A_ · _B_ requires an AND gate, but as the _B_ input has to be inverted we precede the input from _B_ to the AND gate by a NOT gate. We then require an OR gate for the output from the AND gate and _C_. Figure 8.18 shows the gate system.
Figure 8.18 Example
#### Boolean function generation from truth tables
Often the requirements for a system are specified in terms of a truth table and the problem then becomes one of determining how a logic gate system can be devised, using the minimum number of gates, to give that truth table. The forms to which most are minimised are an AND gate driving a single OR gate or vice versa.
• **_Sum of products_**
Two AND gates driving a single OR gate (Figure 8.19(a)) give, what is termed, the sum of products form:
Figure 8.19 (a) Sum of products, (b) product of sums
• **_Product of sums_**
Two OR gates driving a single AND gate (Figure 8.19(b)) give, what is termed the product of sums form:
The usual procedure to find the minimum logic gate system is thus to find the sum of products or the product of sums form that fits the data. Generally the sum of products form is used. The procedure used is:
1. Consider each row of the truth table in turn that generates a 1 output and find the product that would fit a row. Only a row of a truth table that has an output of 1 need be considered, since the rows with 0 output do not contribute to the final expression. For example, suppose we have a row in a truth table of: _A_ = 1, _B_ = 0 and output = 1. When _A_ is 1 and _B_ is not 1 then the output is 1, thus the product which fits this is:
2. The overall result is the sum of all the products for the rows giving 1 output.
Example
Determine a logic gate system to give the following truth table.
we only need consider the third row, thus the result is:
The logic gate system that will give this truth table is thus that shown in Figure 8.20.
Figure 8.20
Example
Determine a logic gate system which will give the following truth table.
There are two rows for which we need to find a product. Thus the sum of products which fits this table is:
This can be simplified to give:
The truth table can thus be generated by just a NAND gate.
Maths in action
**A PLC and a central heating system**
Consider a domestic central heating system (Figure 8.21) and its control by a PLC (see earlier Maths in Action in this chapter). The central heating boiler is to be thermostatically controlled and supply hot water to the radiator system in the house and also to the hot water tank to provide hot water from the taps in the house. Pump motors have to be switched on to direct the hot water from the boiler to either, or both, of the radiator and hot water systems according to whether the temperature sensors for the room temperature and the hot water tank indicate that the radiators or tank need heating. The entire system is to be controlled by a clock so that it only operates for certain hours of the day. Figure 8.22 shows how a PLC might be used and its ladder program.
Figure 8.21 Central heating system
Figure 8.22 Central heating system
The boiler, output Y430, is switched on if X400 and X401 and either X402 or X403 are switched on. This means if the clock switched is on, the boiler temperature sensor gives an on input, and either the room temperature sensor or the water temperature sensors give on inputs. The motorised valve M1, output Y431, is switched on if the boiler, Y430, is on and if the room temperature sensor X402 gives an on input. The motorised valve M2, output Y432, is switched on if the boiler, Y430, is on and if the water temperature sensor gives an on input.
Example
Design a PLC ladder program that will control a simple red–amber–green traffic light sequence for two inputs X0 and X1 to give the outputs Y0, Y1 and Y2 (Figure 8.23) shown in the following table:
Note: logic 0 defines an open switch or a light turned OFF, logic 1 defines a closed switch and a light turned ON.
Figure 8.23 Example
Figure 8.24(a) shows how we can represent the above truth table by a logic gate system and Figure 8.24(b) by rungs in a ladder program.
Figure 8.24 Example
When there is no input to X0 then the red light is ON. Thus, when the input to X0 is 0 then, as the switch is normally closed, the output Y1 is ON; when the input is 1, to open the switch, then the output is OFF.
#### Karnaugh maps
The _Karnaugh map_ is a graphical way of representing a truth table and a method by which simplified Boolean expressions can be obtained from sums of products. The Karnaugh map is drawn as a rectangular array of cells with each cell corresponding to the output for a particular combination of inputs, i.e. a particular product value. Thus, Figure 8.25(a) shows the four-cell box corresponding to two input variables A and B, this giving four product terms. We then insert the function for each input combination, Figure 8.25(b) showing this for an AND gate and Figure 8.25(c) for an OR gate. Figure 8.26 shows how we can represent such maps with input labels A and B for 1 entries and not A and B for 0 entries.
Figure 8.25 Karnaugh map with a four-cell box
Figure 8.26 Karnaugh map with a four-cell box
Karnaugh maps not only pictorially represent truth tables but also can be used for minimisation. Suppose we have the following truth table:
Figure 8.27 shows the Karnaugh map for this truth table with just the 1 output shown. On the map this entry is just the cell with the coordinates _A_ = 1, _B_ = 0 and so gives the indicated product. The Karnaugh map enables the minimisation to be spotted visually.
Figure 8.27 Karnaugh map
As a further example, consider the following truth table:
Figure 8.28 shows the Karnaugh map for this truth table with just the 1 output shown. This has an output given by:
This can be simplified to:
Thus we have a rule for the map that: _when two cells containing a 1 have a common edge then we can simplify them to just the common variable, the variable that appears in the complemented and uncomplemented form is eliminated_. To help with such simplifications, we draw loops round 1s in adjacent cells. Note that in looping, adjacent cells can be considered to be those in the left- and right-hand columns. Think of the map as though it is wrapped round a vertical cylinder and the left- and right-hand edges of the map are joined together. There are other rules we can develop, namely: _looping a quad of adjacent 1s eliminates the two variables that appear in complemented and uncomplemented form_ and _looping an octet of adjacent 1s eliminates the three variables that appear in both complemented and uncomplemented form._
Figure 8.28 Karnaugh map
Figure 8.29(a) shows how we can draw a Karnaugh map for three inputs and Figure 8.29(b) for four inputs. Note that the cells are labelled so horizontally adjacent cells differ by just one variable, likewise adjacent vertical cells.
Figure 8.29 Karnaugh maps: (a) three-input, (b) four-input
Key point
_Simplification using Karnaugh maps_
1. Construct the Karnaugh map and place 1s in those squares which correspond to the 1s in the truth table.
2. Examine the map for adjacent 1s and loop them.
3. Form the OR sum of all those terms generated by each loop.
Example
Determine the simplified Boolean expression for the Karnaugh map shown in Figure 8.30.
Figure 8.30 Karnaugh map
We have three loops and so the outcome is:
Example
Determine the simplified Boolean expression for the Karnaugh map shown in Figure 8.31.
Figure 8.31 Karnaugh map
We have a doublet loop and a quad loop and so the outcome is:
### Problems 8.3
1. State a Boolean function that can be used to represent each of the switching circuits shown in Figure 8.32.
Figure 8.32 Problem 1
2. Give the truth tables for the switching circuits represented by the Boolean functions:
(a) (b)
3. Determine the Boolean functions that could generate the outputs in Figure 8.33.
Figure 8.33 Problem 3
4. Give the truth table for the switching circuit corresponding to the Boolean function:
5. Draw switching circuits to represent the following Boolean functions:
(a) (b) (c)
6. Determine the Boolean equations describing the logic circuits in Figure 8.34, then simplify the equations and hence obtain simplified logic circuits.
Figure 8.34 Problem 6
7. Draw switching circuits to represent the Boolean functions:
(a) (b) (c) (d)
8. Derive the Boolean functions for the truth tables in Table 8.21(a) and (b).
Table 8.21(a)
Table 8.21(b)
9. Determine the Boolean equations describing the logic circuits in Figure 8.35, then simplify the equations and hence obtain simplified logic circuits.
Figure 8.35 Problem 9
10. For the Karnaugh maps in Figure 8.36, produce the simplified Boolean expression.
Figure 8.36 Problem 10
9
# Probability and statistics
Summary
In any discussion of system reliability or quality control, the concept of probability plays a vital part. It is also a vital issue in the consideration of statistics when errors have to be considered in experimental measurements, all measurements being subject to some degree of uncertainty. For example, in the control of manufactured items (statistical process control SPC) control is exercised of measured variables against go/no-go criteria, the attribute, to avoid incurring scrap or reworking costs. This chapter is an introductory consideration of the principles of probability and statistics allied to such engineering issues.
Objectives
By the end of this chapter, the reader should be able to:
• understand the concept of probability;
• use probability principles in the consideration of quality control and system reliability;
• plot experimentally obtained data to show its distribution;
• use the idea of probability distributions and be able to interpret them;
• determine measures of the location and spread of distributions;
• use measures obtained from the Binomial, Poisson and Normal distributions;
• determine the errors in results obtained from experimental measurements.
## 9.1 Probability
What is the chance an engineering system will fail? What is the chance that a product emerging from a production line is manufactured to the required engineering tolerances, thus avoiding reworking or scrap? What is the chance that if you make a measurement in some experiment that it will be the true value of that quantity? Within what range of experimental error might you expect a measurement to be the true value? These, and many other questions in engineering and science, involve a consideration of chances of events occurring. The term _probability_ is more often used in mathematics than chance and has the same meaning in the above questions. This section is about probability, its definition and determination in a number of situations.
### 9.1.1 Basic definitions
If you flip a coin into the air, what is the chance that it will land heads uppermost? We can try such an experiment and determine the outcomes. The result of a large number of trials leads to the result that about half the time it lands heads uppermost and half the time tails uppermost. If _n_ is the number of trials then we can define probability _P_ as:
[1]
This view of probability is _the relative frequency in the long run with which an event occurs_. In the case of the coin this leads to a probability of ½ = 0.5. If an event occurs all the time then the probability is 1. If it never occurs the probability is 0.
The result of flipping the coin might seem obvious since there are just two ways a coin can land and just one of the ways leads to heads uppermost. If there is no reason to expect one way is more likely than the other then we can define probability _P_ as _the degree of uncertainty about the way an event can occur_ and as:
[2]
In the case of the coin, this also gives a probability of 0.5. If every possible way events can occur is the required way, then the probability is 1. If none of the possible ways are the event required, then the probability is 0.
Key points
Probability can be defined as the relative frequency in the long run with which an event occurs or as the fraction of the total number of ways with which an event can occur.
Consider a die-tossing experiment. A die can land in six equally likely ways, with uppermost 1, 2, 3, 4, 5, or 6. Of the six possible ways the die could land, only one way is with 6 uppermost. Thus using definition [2], the probability of obtaining a 6 is 1/6. The probability of _not_ obtaining a 6 is 5/6 since there are 5 ways out of the 6 possible ways we can obtain an outcome which is not a 6.
Another way the term probability is used is as _degree of belief_. Thus we might consider the probability of a particular horse winning a race as being 1 in 5 or 0.2. The probability in this case is highly subjective.
Example
In the testing of products on a production line, for every 100 tested 5 were found to contain faults. What is the probability that in selecting one item from 100 on the production line that it will be faulty?
There are 100 ways the item can be selected and 5 of the ways give faulty items. Thus, using equation [2], the probability is 5/100 = 0.05.
Key points
An event which has a certainty has a probability of 1. Thus such an event will always occur every time. An event which has a probability of 0 will never occur.
Key points
Mutually exclusive events are ones for which each outcome is such that one outcome excludes the occurrence of the other.
Addition rule: If an event can happen in a number of different and mutually exclusive ways, the probability of its happening is the sum of the separate probabilities that each event happens.
Maths in action
The term _reliability_ in relation to engineering systems is defined as the probability that the system will operate to an agreed level of performance for a specified period, subject to specified environmental conditions. Thus, for example, the reliability of an instrument might be specified as being 0.8 over a 1000 hour period with the ambient temperature at 20°C ± 10°C and no vibration.
#### Probability of events
If an event can occur in two possible ways, e.g. a piece of equipment can be either operating satisfactorily or have failed, then if the probability of one way is _P_₁ and the probability of the other way is _P_₂, we must have:
[3]
The probability of either event 1 or event 2 occurring equals 1, i.e. a certainty, and is the sum of the probability of event 1 occurring, i.e. _P_₁, added to the probability of event 2 occurring, i.e. _P_₂.
_A probability of 1 for an event means that the probability of it occurring is a certainty._
Suppose with the die-tossing experiment we were looking for the probability that the outcome would be an even number. Of the six possible outcomes of the experiment, three ways give the required outcome. Thus, using definition [2], the probability of obtaining an even number is 3/6 = 0.5. This is the sum of the probabilities of 2 occurring, 4 occurring and 6 occurring, i.e. 1/6 + 1/6 + 1/6. The 2, the 4 and the 6 are mutually exclusive events in that if the 2 occurs then 4 or 6 cannot also be occurring. Thus:
_If A and B are mutually exclusive, the probability of A or B occurring is the sum of the probabilities of A occurring and of B occurring._
Example
The probability that a circuit will malfunction is 0.01. What is the probability that it will function?
The probability that it will function and the probability that it will not function must together be 1. Hence the probability that it will function is 0.99.
Key points
Multiplication rule: If one experiment has n1 possible outcomes and a second experiment n2 possible outcomes then the compound experiment of the first experiment followed by the second has n1 × n2 possible outcomes.
Example
A company manufactures two products _A_ and _B_. Market research over a month showed 30% of enquiries by potential customers resulting in product _A_ alone being bought, 50% buying product _B_ alone, 10% buying both _A_ and _B_ and 10% buying neither. Determine the probability that an enquiry will result in (a) product _A_ alone being bought, (b) product _A_ being bought, (c) both product _A_ and product _B_ being bought, (d) product _B_ not being bought.
(a) 30% buy product _A_ alone so the probability is 0.30.
(b) 30% buy product _A_ alone and 10% buy _A_ in conjunction with _B_. Thus the probability of _A_ being bought is 0.30 + 0.10 = 0.40.
(c) 10% buy products _A_ and _B_ so the probability is 0.10.
(d) 50% buy product _B_ alone and 10% buy _B_ in conjunction with _A_. Thus the probability of buying _B_ is 0.50 + 0.10 = 0.60. The probability of not buying _B_ is thus 1 − 0.60 = 0.40.
### 9.1.2 Ways events can occur
Suppose we flip two coins. What is the probability that we will end up with both showing heads uppermost? The ways in which the coins can land are:
There are four possible results with just one of the ways giving HH. Thus the probability of obtaining HH is ¼ = 0.25.
There were two possible outcomes from the experiment of tossing the first coin and two possible outcomes from the experiment of tossing the second coin. For each of the outcomes from the first experiment there were two outcomes from the second experiment. Thus for the two experiments the number of possible outcomes is 2 × 2 = 4. This is an example of, what is termed, the _multiplication rule_.
Tree diagrams can be used to visualise the outcomes in such situations, Figure 9.1 showing this for the two experiments of tossing coins.
Example
A company is deciding to build two new factories, one of them to be in the north and one in the south. There are four potential sites in the north and two potential sites in the south. Determine the number of possible outcomes.
Figure 9.1 Tree diagram
For the first experiment there are 4 possible outcomes _A, B, C_ and _D_ and for the second 2 possible outcomes _E_ and _F_. Thus the total number of possible outcomes is given by the multiplication rule as 8. Figure 9.2 shows the tree diagram.
Figure 9.2 Example
#### Permutations
Suppose we had to select two items from a possible three different items _A_ , _B_ , _C_. The first item can be selected in three ways. Then, since the removal of the first item leaves just two remaining, the second item can be selected in two ways. Thus the selections we can have are:
Each of the ordered arrangements is known as a _permutation_ , each representing the way distinct objects can be arranged.
If there are _n_ ways of selecting the first object, there will be ( _n_ − 1) ways of selecting the second object, ( _n_ − 2) ways of selecting the third object and ( _n_ − _r_ \+ 1) ways of selecting the _r_ th object. Thus, by the multiplication rule, the total number of different permutations of selecting _r_ objects from _n_ distinct objects is thus:
The number _n_ ( _n_ − 1)( _n_ − 2)... (3)(2)(1) is represented by _n_!. The number of permutations of _r_ objects chosen from _n_ distinct objects is represented by _n Pr_ and is thus:
[4]
_r_ taking values from 0 to _n_. Note that 0! is taken as having the value 1. The number of permutations of _n_ objects chosen from _n_ distinct objects is represented by _n Pn_ or and is thus:
[5]
Example
In the wiring up of an electronic component there are four assemblies that can be wired up in any order. In how many different ways can the component be wired?
This involves determining the number of permutations of four objects from four. Thus, using equation [5]:
Example
How many four-digit numbers can be formed from the digits 0 to 9 if no digit is to be repeated within any one number?
This involves determining the number of permutations of 4 objects from 10. Thus, using equation [4]:
#### Combinations
There are often situations where we want to know the number of ways _r_ items can be selected from _n_ objects without being concerned with the order in which the objects are selected. Suppose we had to select two items from a possible three different items _A_ , _B_ , _C_. The selections, i.e. permutations, we can have are:
But if we are not concerned with the sequence of the letters then we only have the three ways _AB_ , _AC_ and _BC_. Such an unordered set is termed a _combination_.
Consider the selection of a combination of _r_ items from _n_ distinct objects. In the selected _r_ items there will be _r_! permutations (equation [5]) of distinct objects so that the permutation of _r_ items from _n_ contains each group of _r_ items _r_! times. Since there are _n_!/( _n_ − _r_ )! different permutations of _r_ items from _n_ we must have:
where _n Cr_, _n Cr_ or is used to represent the combination of _r_ items from _n_. Thus:
[6]
_n Cr_ is often termed a _binomial coefficient_. This is because numbers of this form appear in the expansion of ( _x_ \+ _y_ ) _n_ by the binomial theorem (see Section 7.1.2).
When _r_ items are selected from _n_ distinct objects, _n_ − _r_ items are left. The number of ways of selecting _r_ items from _n_ is given by equation [6] as _n_!/ _r_!( _n_ − _r_ )!. The number of ways of selecting _n_ − _r_ items from _n_ is given by equation [6] as:
Thus we can say that there are as many ways of selecting _r_ items from _n_ as selecting _n_ − _r_ objects from _n_ :
[7]
There is just one combination of _n_ items from _n_ objects. Thus _n Cn_ = 1. If we select 0 items from _n_ , then because equation [6] gives _n C_0 = _n_!/(0! _n_!) and we take 1/0! = 1, we have _n C_0 = 1. Evidently there are as many ways of selecting none of the items in a set of _n_ as there are of choosing the _n_ objects that are left.
Example
In how many ways can three objects be chosen from a sample of 20?
Using equation [6]:
Example
If a batch of 20 objects contains 3 with faults and a sample of 5 is chosen, what is the probability of obtaining a sample with (a) 0, (b) 1, (c) 2 faulty items?
The number of ways we choose the sample of 5 items out of 20 is, using equation [6]:
(a) The number of ways we can choose a sample with 0 defective items is the number of ways we choose 5 items from 17 good items and is thus:
Thus the probability of choosing a sample with 0 faulty items is:
(b) The number of ways we can choose a sample with 1 faulty item and 4 good items, i.e. selecting 1 faulty item from 3 faulty items and 4 good items from 17 good items, is given by the multiplication rule as 3 _C_ 1 × 17 _C_ 4. Thus the probability of choosing a sample with 1 faulty item is:
(c) The number of ways we can choose a sample with 2 faulty items and 3 good items, i.e. selecting 2 faulty items from 3 faulty items and 3 good items from 17 good items, is given by the multiplication rule as 3 _C_ 2 × 17 _C_ 3. Thus the probability of choosing a sample with 2 faulty items is:
#### Conditional probability
The multiplication rule is only valid when the occurrence of one event has no effect upon the probability of the second event occurring. While this can be used in many situations, there are situations where a successful occurrence of the first event affects the probability of occurrence of the second event. Suppose we have 50 objects of which 15 are faulty. What is the probability that the second object selected is faulty given that the first object selected was fault-free? This is a probability problem where the answer depends on the additional knowledge given that the first selection was fault-free. This means that there are fewer fault-free objects among those remaining for the second selection. Such a problem is said to involve _conditional probability_.
Example
Suppose we have 50 objects of which 15 are faulty. What is the probability that the second object selected is faulty given that the first object selected was fault-free?
Selecting the first object from 50 as fault-free has a probability of 35/50. Because the first object was fault-free we now have 34 fault-free and 15 faulty objects remaining. Now selecting a faulty object from 49 has a probability of 15/49. Using the multiplication rule gives the probability of the first object being fault-free followed by the second object faulty as (35/50)(15/49) = 0.21.
### Problems 9.1
1. In a testing period of 1 year, 4 out of 50 of the items tested failed. What is the probability of finding one of the items failing?
2. In a pack of 52 cards there are 4 aces. What is the probability of selecting, at random, an ace from a pack?
3. Testing of a particular item bought for incorporation in a product shows that of 100 items tested, 4 were found to be faulty. What is the probability that one item taken at random will be (a) faulty, (b) free from faults?
4. Resistors manufactured as 10 Ω by a company are tested and 5% are found to have values below 9.5 Ω and 10% above 10.5 Ω. What is the probability that one resistor selected at random will have a resistance between 9.5 Ω and 10.5 Ω?
5. 100 integrated circuits are tested and 3 are found to be faulty. What is the probability that one, taken at random, will result in a working circuit?
6. Tests of an electronic product show that 1% have defective integrated circuits alone, 2% have defective connectors alone and 1% have both defective integrated circuits and connectors. What is the probability of one of the products being found to have a (a) defective integrated circuit alone, (b) defective integrated circuit, (c) defective connector, (d) no defects?
7. Cars coming to a junction can turn to the left, to the right or go straight on. If observations indicate that all the possible outcomes are equally likely, determine the probability that a car will (a) go straight on, (b) turn from the straight-on direction.
8. In how many ways can (a) 8 items be selected from 8 distinct objects, (b) 4 items be selected from 7 distinct items, (c) 2 items be selected from 6 distinct items?
9. In how many ways can (a) 2 items be selected from 7 objects, (b) 5 items be selected from 7 objects, (c) 7 items be selected from 7 objects?
10. How many samples of 4 can be taken from 25 items?
11. A batch of 24 components includes 2 that are faulty. If a sample of 2 is taken, what is the probability that it will contain (a) no faulty components, (b) 1 faulty component, (c) 2 faulty components?
12. A batch of 10 components includes 3 that are faulty. If a sample of 2 is taken from the batch, what is the probability that it will contain (a) no faulty components, (b) 2 faulty components?
13. Of 10 items manufactured, 2 are faulty. If a sample of 3 is taken at random, what is the probability it will contain (a) both the faulty items, (b) at least 1 faulty item?
14. A security alarm system is activated and deactivated by keying-in a three-digit number in the proper sequence. What is the total number of possible code combinations if digits may be used more than once?
15. When checking on the computers used in a company it was found that the probability of one having the latest microprocessor was 0.8, the probability of having the latest software 0.6 and the probability of having the latest processor and latest software 0.3. Determine the probability that a computer selected as having the latest software will also have the latest microprocessor.
## 9.2 Distributions
All measurements are affected by random uncertainties and thus repeated measurements will give readings which fluctuate in a random manner from each other. This section considers the statistical approach to such variability of data, dealing with the measures of location and dispersion, i.e. mean, standard deviation and standard error, and the binomial, Poisson and Normal distributions. This statistical approach to variability is especially important in the production environment and the consideration of the variability of measurements made on products.
Key points
Quantities whose variation contains an element of chance are called _random variables_.
Variables which can only assume a number of particular values are called _discrete variables_. Variables which can assume any value in some range are called _continuous variables_.
For example, if we count the number of times per hour that cars pass a particular point then the result will be series of numbers such as 12, 30, 17, etc. The variable is thus discrete. However, if we repeatedly measure the time taken for 100 oscillations of a pendulum then the results will vary as a result of experimental errors and will be a series of values within a range of, say, 20.0 to 21.0 s. The variable can assume any value within that range and so is said to be a continuous variable.
_Terms used in industry_ In manufacturing, two types of 'variables' are encountered, namely variables and attributes. Generally speaking, variables are quantities which may be quantitatively measured against a calibrated standard, e.g. voltage in volts, mass in kg, temperature in K, etc. Variables which cannot be measured against a set standard, e.g. taste, smell, colour, etc. are termed attributes and are more difficult to control. There are, of course, grey areas; for example, taste is subjective unless broken down into clearly defined chemical constituents and measured chemically against quantitative standards.
### 9.2.1 Probability distributions
Consider the collection of data on the number of cars per hour passing some point and suppose we have the following results:
When the discrete variable is sampled 10 times the value 10 appears once. Thus the probability of 10 appearing is 1/10. The value 11 appears three times and so its probability is 3/10, 12 has a probability of 4/10, 13 has the probability 1/10, 14 has the probability 1/10. Figure 9.3 shows how we can represent these probability values as a _probability distribution_.
Figure 9.3 Probability distribution with a discrete variable
Consider some experiment in which repeated measurements are made of the time taken for 100 oscillations of a simple pendulum and suppose we have the following results:
With a continuous variable there are an infinite number of values that can occur within a particular range so the probability of one particular value occurring is effectively zero. However, it is meaningful to consider the probability of the variable falling within a particular subinterval. The term _frequency_ is used for the number of times a measurement occurs within an interval and the term _relative frequency_ or _probability P_ ( _x_ ) for the fraction of the total number of readings in a segment. Thus if we divide the range of the above results into 0.2 intervals, we have:
values >20.0 and ≤20.2 come up twice, thus _P_ ( _x_ ) = 2/20
values >20.2 and ≤20.4 come up five times, thus _P_ ( _x_ ) = 5/20
values >20.4 and ≤20.6 come up seven times, thus _P_ ( _x_ ) = 7/20
values >20.6 and ≤20.8 come up five times, thus _P_ ( _x_ ) = 5/20
values >20.8 and ≤21.0 come up once, thus _P_ ( _x_ ) = 1/20
The probability always has a value less than 1 and the sum of all the probabilities is 1. Figure 9.4 shows how we can represent this graphically. The probability that _x_ lies within a particular interval is thus the height of the rectangle for that strip divided by the sum of the heights of all the rectangles. Since each strip has the same width _w_ :
[8]
The histogram shown in Figure 9.4 has a jagged appearance. This is because it represents only a few values. If we had taken a very large number of readings then we could have divided the range into smaller segments and still had an appreciable number of values in each segment. The result of plotting the histogram would now be to give one with a much smoother appearance. When the probability distribution graph is a smooth curve, with the area under the curve scaled to have the value 1, then it is referred to as the _probability density function f_ ( _x_ ) (Figure 9.5). Then equation [8] gives:
Figure 9.4 Probability distribution with a continuous variable
Figure 9.5 Probability distribution function
[9]
Key point
The _probability density function_ is a function that allocates probabilities to all of the range of values that the random variable can take. The probability that the variable will be in any particular interval is obtained by integrating the probability density function over that interval.
Consider the probability, with a very large number of readings, of obtaining a value between 20.8 and 21.0 with the probability distribution function shown in Figure 9.6. If we take a segment 20.8 to 21.0 then the area of that segment is the probability. If, say, the area is 0.30, the probability of taking a single measurement and finding it in that interval is 0.30, i.e. the measurement occurs on average 30 times in every 100 values taken.
Figure 9.6 Probability for interval 20.8 to 21.0
Example
The following readings, in metres, were made for a measurement of the distance travelled by an object in 10 s. Plot the results as a distribution with segments of width 0.01 m.
With segments of width 0.01 m we have:
Segment 13.45 to 13.46, frequency 1, so probability 1/16
Segment 13.46 to 13.47, frequency 0, so probability 0
Segment 13.47 to 13.48, frequency 5, so probability 5/16
Segment 13.48 to 13.49, frequency 1, so probability 1/16
Segment 13.49 to 13.50, frequency 4, so probability 4/16
Segment 13.50 to 13.51, frequency 4, so probability 4/16
Segment 13.51 to 13.52, frequency 1, so probability 1/16
Figure 9.7 shows the resulting distribution.
Figure 9.7 Example
Key point
The mean is used as a measure of the location of a distribution and the standard deviation its spread.
### 9.2.2 Measures of location and spread of a distribution
Parameters which can be specified for distributions to give an indication of location and a measure of the dispersion or spread of the distribution about that value are the _mean_ for the location and the _standard deviation_ for the measure of dispersion.
#### Mean
The mean value of a set of readings can be obtained in a number of ways, depending on the form with which the data is presented:
• For a list of discrete readings, sum all the readings and divide by the number _N_ of readings, i.e.:
[10]
• For a distribution of discrete readings, if we have _n_ 1 readings with value _x_ 1, _n_ 2 readings with value _x_ 2, _n_ 3 readings with value _x_ 3, etc., then the above equation for the mean becomes:
[11]
But _n_ 1/ _N_ is the relative frequency or probability of value _x_ 1, _n_ 2/ _N_ is the relative frequency or probability of value _x_ 2, etc. Thus, to obtain the mean, multiply each reading by its relative frequency or probability _P_ and sum over all the values:
[12]
• For readings presented as a continuous distribution curve, we can consider that we have a discrete-value distribution with very large numbers of very thin segments. Thus if _f_ ( _x_ ) represents the probability distribution and _x_ the measurement values, the probability that _x_ will lie in a small segment of width _δx_ is _f_ ( _x_ ) _δx_. Thus the rule given above for discrete-value distributions translates into:
[13]
With a very large number of readings, the mean value is taken as being the _true value_ about which the random fluctuations occur. The mean value of a probability distribution function is often termed the _expected value_.
#### Standard deviation
Any single reading _x_ in a distribution (Figure 9.8) will deviate from the mean of that distribution by:
Figure 9.8 Deviation
[14]
With one distribution we might have a series of values which is widely scattered around the mean while another has readings closely grouped round the mean. Figure 9.9 shows the type of curves that might occur.
Key point
_Accuracy and precision_
Figure 9.9 Distributions with different spreads but the same mean
Consider two marksmen firing 5 shots, from the same gun under the same conditions, at a target with the aim of hitting the central bulls-eye. If the results obtained are as shown in Figure 9.10, then marksman A is accurate in that his shots have a mean which coincides with the bulls-eye. Marksman B has a smaller scatter of results but his mean is not centred on the bulls-eye. His shots have greater precision but are less accurate. Dispersion measurement is thus extremely important when designing manufacturing processes and machining to a set target mean value, i.e. the bulls-eye, with attainable tolerances.
Figure 9.10 Accuracy and precision
A measure of the spread of a distribution _cannot_ be obtained by taking the mean deviation from the mean, since for every positive value of a deviation there will be a corresponding negative deviation and so the sum will be zero. The measure used is the _standard deviation_.
_The standard deviation σ is the root-mean-square value of the deviations for all the measurements in the distribution. The quantity σ 2 is known as the variance of the distribution._
Thus, for a number of discrete values, _x_ 1, _x_ 2, _x_ 3,..., etc., we can write for the mean value of the sum of the squares of their deviations from the mean of the set of results:
Hence the mean of the square root of this sum of the squares of the deviations, i.e. the standard deviation, is:
[15]
Key point
The standard deviation σ is the root-mean-square value of the deviations for all the measurements in the distribution.
However, we need to distinguish between the standard deviation _s_ of a sample and the standard deviation σ of the entire population of readings that are possible and from which we have only considered a sample (many statistics textbooks adopt the convention of using Greek letters when referring to the entire population and Roman for samples). When we are dealing with a sample we need to write:
[16]
with x̄ being the mean of the sample. The reason for using _N_ − 1 rather than _N_ is that the root-mean-square of the deviations of the readings in a sample around the sample mean is less than around any other figure. Hence, if the true mean of the entire population were known, the estimate of the standard deviation of the sample data about it would be greater than that about the sample mean. Therefore, by using the sample mean, an underestimate of the population standard deviation is given. This bias can be corrected by using one less than the number of observations in the sample, to allow for the fact that the sample mean itself was estimated from the same data.
For a continuous probability density distribution, since ( _n j_/ _N_ ) _δx_ is the probability for that interval _δx_ , i.e. _f_ ( _x_ ) _δx_ where _f_ ( _x_ ) is the probability distribution function, the standard deviation becomes:
[17]
We can write this equation in a more useful form for calculation:
Since the total area under the probability density function curve is 1, the third integral has the value 1 and so the third term is the square of the mean. The second integral is the mean. The first integral is the mean value of _x_ 2. Thus:
[18]
i.e. the mean value of _x_ 2 minus the square of the mean value.
Example
Determine the mean value and the standard deviation of the sample of 10 readings 8, 6, 8, 4, 7, 5, 7, 6, 6, 4.
The mean value is (8 + 6 + 8 + 4 + 7 + 5 + 7 + 6 + 6 + 4)/10 = 6.1.
The standard deviation of the sample can be calculated by considering the deviations of each reading from the mean, these being:
The squares of these deviations are:
The sum of these squares is 21.7. If we consider we do not have the entire population but just a sample, then the standard deviation is √(21.7/9) = 1.6.
Example
In an experiment involving the counting of the number of events that occurred in equal size time intervals the following data was obtained:
0 events 13 times, 1 event 12 times, 2 events 9 times, 3 events 5 times, 4 events once
Determine the mean number of events occurring in the time interval and the standard deviation.
The total number of measurements is 13 + 12 + 9 + 5+1 = 40 and so the mean value is (0 × 13 + 1 × 12 + 2 × 9 + 3 × 5 + 4 × 1)/40 = 1.25.
We have 13 measurements with deviation −1.25, 12 with deviation −0.25, 9 with deviation +0.75, 5 with deviation +1.75 and 1 event with deviation 2.75. We can take the squares of these deviations, sum them and divide by (40 − 1). Hence the standard deviation is 1.1.
Example
A probability density function has _f_ ( _x_ ) = 1 for 0 ≤ _x_ ≤ 1 and elsewhere 0. Determine its mean and standard deviation.
The mean value is given by equation [13] as:
The standard deviation is given by equation [17]:
Alternatively, using equation [18]:
since we have:
then:
#### Standard error of the mean
With a sample set of readings taken from a large population we can determine its mean, but what is generally required is an estimate of the error of that mean from the true value, i.e. the mean of an infinitely large number of readings. We can consider any set of readings as being just a sample taken from the very large set.
Consider one sample of readings with _n_ values being taken: _x_ 1, _x_ 2, _x_ 3,... _x n_. The mean of this sample is:
This mean will have a deviation or error _E_ from the true mean value _X_ of:
Hence we can write:
If we write e1 for the error of the first reading from the true mean, _e_ 2 for the error of the second, etc. we obtain:
Thus:
_E_ is the error from the mean for a single sample of readings. Now, consider a large number of such samples with each set having the same number _n_ of readings. We can write such an equation as above for each sample. If we add together the equations for all the samples and divide by the number of samples considered, we obtain an average value over all the samples of _E_ 2. Thus _E_ is the standard deviation of the means and is known as the _standard error of the means e_ m (more usually given the symbol σ m). Adding together all the error product terms will give a total value of zero, since as many of the error values will be negative as positive. The average of all the squared-error terms is _e_ s2/ _n_ , where _e_ s is what can be termed the _standard error of the sample_. Thus:
But how can we obtain a measure of the standard error of the sample? The standard error is measured from the true value _X_ , which is not known. What we can measure is the standard deviation of the sample from its mean value. The best estimate of the standard error for a sample turns out to be the standard deviation s of a sample when we define it as:
Key point
The standard error of the mean obtained from a sample is the standard deviation of the sample divided by the square root of the sample size.
i.e. with a denominator of _N_ − 1, rather than just _N_. Thus the best estimate of the standard error of the mean can be written as:
[19]
Example
Measurements are to be made of the percentage of an element in a chemical by making measurements on a number of samples. The standard deviation of any one sample is found to be 2%. How many measurements must be made to give a standard error of 0.5% in the estimated percentage of the element?
If _n_ measurements are made, then the standard error of the sample mean is given by equation [19] and so:
### 9.2.3 Common distributions
There are three basic forms of distribution which are found to represent many forms of distributions commonly encountered in engineering and science. These are the binomial distribution, the Poisson distribution and the normal distribution (sometimes called the Gaussian distribution). Binomial distributions are often approximated by the Poisson distribution. The normal distribution is a model widely used for experimental measurements when there are random errors.
#### Binomial distribution
In the tossing of a single coin the result is either heads or tails uppermost. We can consider this as an example of an experiment where the results might be termed as either success or failure, one result being the complement of the other. If the probability of succeeding is _p_ then the probability of failing is 1 − _p_. Such a form of experiment is termed a _Bernoulli trial_.
Suppose the trial is the throwing of a die with a 6 uppermost being success. The probability of obtaining a 6 as the result of one toss of the die is 1/6 and the probability of not obtaining a 6 is 5/6. Suppose we toss the die _n_ times. The probability of obtaining no 6s in any of the trials is given by the product rule as (5/6) _n_. The probability of obtaining one 6 in, say, just the first trial out of the _n_ is (5/6) _n_ −1(1/6). But we could have obtained the one 6 in any one of the _n_ trials. Thus the probability of one 6 is _n_ (5/6) _n_ −1(1/6). The probability of obtaining two 6s in, say, just the first two trials is (5/6) _n_ −2(1/6)2. But these two 6s may occur in the _n_ trials in a number of combinations _n_!/2!( _n_ − 2)! (see Section 9.1.2). Thus the probability of two 6s in _n_ trials is [ _n_!/2!( _n_ − 2)!](5/6) _n_ −2(1/6)2. We can continue this for three 6s, four 6s, etc.
In general, if we have _n_ independent Bernoulli trials, each with a success probability _p_ , and of those _n_ trials _k_ give successes, and ( _n_ − _k_ ) failures, the probability of this occurring is given by the product rule as:
[20]
This is termed the _binomial distribution_. This term is used because, for _k_ = 0, 1, 2, 3,... _n_ , the values of the probabilities are the successive terms of the binomial expansion of [(1 − _p_ ) + _p_ ] _n_.
For a single Bernoulli trial of a random variable _x_ with probability of success _p_ , the mean value is _p_. The standard deviation is given by equation [18] as:
Key point
The characteristics of a variable that gives a binomial distribution are that the experiment consists of _n_ identical trials, there are two possible complementary outcomes, success or failure, for each trial and the probability of a success is the same for each trial, the trials are independent and the distribution variable is the number of successes in _n_ trials.
For _n_ such trials:
[21]
[22]
Example
The probability that an enquiry from a potential customer will lead to a sale is 0.30. Determine the probabilities that among six enquiries there will be 0,1, 2, 3, 4, 5, 6 sales.
Using equation [20]:
The probability of 0 is:
The probability of 1 is:
The probability of 2 is:
The probability of 3 is:
The probability of 4 is:
The probability of 5 is:
The probability of 6 is:
Figure 9.11 shows the distribution.
Figure 9.11 Example
Key point
In the example, if the probability of a sale is _p_ and the probability of no sale is _q_ , then _q_ \+ _p_ = 1. We are concerned with 6 enquiries and so if we consider the binomial expansion of ( _q_ \+ _p_ )6 we have:
Each successive term in the expansion gives the probability of 0, 1, 2, 3, 4, 5 or 6 sales. Thus, with _p_ = 0.3 we have _q_ = 0.7 and so the probability of 0 sales is 0.7^6 = 0.118.
#### Poisson distribution
The Poisson distribution for a variable λ is:
[23]
for _k_ = 0, 1, 2, 3, etc. The mean of this distribution is λ and the standard deviation is √λ. When the number _n_ of trials is very large and the probability _p_ small, e.g. _n_ > 25 and _p_ < 0.1, binomial probabilities are often approximated by the _Poisson distribution_. Thus, since the mean of the binomial distribution is _np_ (equation [21]) and the standard deviation (equation [22]) approximates to √ _np_ when _p_ is small, we can consider λ to represent _np_. Thus λ can be considered to represent the average number of successes per unit time or unit length or some other parameter.
Key point
_Approximating the binomial distribution to the Poisson distribution_
If _p_ is the possibility of an event occurring and _q_ the possibility that it does not occur, then _q_ \+ _p_ = 1 and so if we consider _n_ samples, we have ( _q_ \+ _p_ ) _n_ = 1 and the binomial expression gives:
If _p_ is small then _q_ = 1 − _p_ ≈ 1 and with _n_ large the first few terms have _n_ − 1 approximating to _n_ , _n_ − 2 to _n_ , etc. The binomial expression can thus be approximated to:
If we let _np_ = λ then:
But this is the series for e^λ (see Table 7.1) and so 1 _n_ = e^−λ × e^λ = 1. We can thus write the binomial expression as:
and so the terms in the expression are given by equation [23].
Example
2% of the output per month of a mass produced product have faults. What is the probability that, of a sample of 400 taken, 5 will have faults?
Assuming the Poisson distribution, we have λ = _np_ = 400 × 0.02 = 8 and so equation [23] gives for _k_ = 5:
Example
The output from a CNC machine is inspected by taking samples of 60 items. If the probability of a defective item is 0.0015, determine the probability of the sample having (a) two defective items, (b) more than two defective items.
(a) We have _n_ = 60 and _p_ = 0.0015. Thus, assuming a Poisson distribution, we have λ = _np_ = 60 × 0.0015 = 0.09 and so equation [23] gives the probability of two defective items as (0.09^2 × e^−0.09)/2! = 3.7 × 10^−3 or 0.37%.
(b) The probability of there being more than two defective items is 1 − { _P_ (0) + _P_ (1) + _P_ (2)} = 1 − e^−λ{1 + λ + λ^2/2!} = 1 − e^−0.09{1 + 0.09 + 4.05 × 10^−3} = 1.1 × 10^−4 or 0.01%.
Example
There is a 1.5% probability that a machine will produce a faulty component. What is the probability that there will be at least 2 faulty items in a batch of 100?
Assuming the Poisson distribution can be used, we have λ = _np_ = 100 × 0.015 = 1.5 and so the probability of at least 2 faulty items will be:
#### Normal distribution
A particular form of distribution, known as the _normal distribution_ or _Gaussian distribution_ , is very widely used and works well as a model for experimental measurements when there are random errors. This form of distribution has a characteristic bell shape (Figure 9.12). It is symmetric about its mean value, having its maximum value at that point, and tends rapidly to zero as _x_ increases or decreases from the mean. It can be completely described in terms of its mean and its standard deviation. The following equation describes how the values are distributed about the mean:
Figure 9.12 Normal distribution
[24]
The fraction of the total number of values that lies between − _x_ and + _x_ from the mean is the fraction of the total area under the curve that lies between those ordinates (Figure 9.13). We can obtain areas under the curve by integration.
Figure 9.13 Values within + or − x of the mean
Key point
The normal distribution is standardised so that we can compare measurements from samples where their units of measure differ, their means differ and their standard deviations may differ, e.g. when comparing similar machining processes for geometrically similar components running off different tolerances.
To save the labour of carrying out the integration, the results have been calculated and are available in tables. As the form of the graph depends on the value of the standard deviation, as illustrated in Figure 9.12, the area depends on the value of the standard deviation σ. In order not to give tables of the areas for different values of _x_ for each value of σ, the distribution is considered in terms of the value of:
this commonly being designated by the symbol _z_ , and areas tabulated against this quantity. _z_ is known as the _standard normal random variable_ and the distributions obtained with this as the variable are termed the _standard normal distribution_ (Figure 9.14). Any other normal random variable can be obtained from the standard normal random variable by multiplying by the required standard deviation and adding the mean, i.e.
Table 9.1 shows examples of the type of data given in tables for _z_.
Table 9.1
Areas under normal curve
_z_ | Area from mean
---|---
0 | 0.000 0
0.2 | 0.079 3
0.4 | 0.155 5
0.6 | 0.225 7
0.8 | 0.288 1
1.0 | 0.341 3
1.2 | 0.384 9
1.4 | 0.419 2
1.6 | 0.445 2
1.8 | 0.464 1
2.0 | 0.477 2
2.2 | 0.486 1
2.4 | 0.491 8
2.6 | 0.495 3
2.8 | 0.497 4
3.0 | 0.498 7
Figure 9.14 Standard normal distribution
When we have:
then _z_ = 1.0 and the area between the ordinate at the mean and the ordinate at 1σ as a fraction of the total area is 0.341 3. The area within ±1σ of the mean is thus the fraction 0.682 6 of the total area under the curve, i.e. 68.26%. This means that the chance of a value being within ±1σ of the mean is 68.26%, i.e. roughly two-thirds of the values.
When we have:
then _z_ = 2.0 and the area between the ordinate at the mean and the ordinate at 2σ as a fraction of the total area is 0.477 2. The area within ±2σ of the mean is thus the fraction 0.954 4 of the total area under the curve, i.e. 95.44%. This means that the chance of a value being within ±2σ of the mean is 95.44%.
Key point
Table 9.1 is only a rough version of the more detailed tables that are needed in statistical process control. Detailed tables are readily available.
When we have:
then _z_ = 3.0 and the area between the ordinate at the mean and the ordinate at 3σ as a fraction of the total area is 0.498 7. The area within ±3σ of the mean is thus the fraction 0.997 4 of the total area, i.e. 99.74%. This means that the chance of a reading being within ±3σ of the mean is 99.74%. Thus, virtually all the readings will lie within ±3σ of the mean. Figure 9.15 illustrates the above.
Example
Measurements are made of the tensile strengths of samples taken from a batch of steel sheet. The mean value of the strength is 800 MPa and it is observed that 8% of the samples give values that are below an acceptable level of 760 MPa. What is the standard deviation of the distribution if it is assumed to be normal?
Figure 9.15 Normal distribution
This means that the area under the curve between the mean and the ordinate at 760 MPa is 0.50 − 0.08 = 0.42. To the accuracy given in Table 9.1, this occurs when _z_ = 1.4 (Figure 9.16). Thus,
and so the standard deviation is 29 MPa. In the above analysis, it was assumed that the mean given was the true value or a good enough approximation.
Figure 9.16 Example
Example
A pharmaceutical manufacturer produces tablets having a mean mass of 4.0 g and a standard deviation of 0.2 g. Assuming that the masses are normally distributed and that a tablet is chosen at random, determine the probability that it will (a) have a mass between 3.55 and 3.85 g, (b) will differ from the mean by less than 0.35 g, and (c) determine the number that might be expected to have a mass less than 3.7 g in a carton of 400.
(a) The probability of tablets having masses between 3.55 g and 3.85 g is the area between the normal distribution with ordinates of these masses (Figure 9.17). We have _z_ 1 = (3.55 − 4.0)/0.2 = −2.25 and _z_ 2 = (3.85 − 4.0)/0.2 = −0.75. Table 9.1, or better tables, gives, approximately, (area between mean and 2.25) − (area between mean and 0.75) = 0.4878 − 0.2734 = 0.2144 or about 21.4%.
Figure 9.17 Example
(b) To determine the probability of tablets differing from the mean of 4.0 g by less than 0.35 g we consider the area between the ordinates for masses between 3.65 g and 4.35 g. These give _z_ values of −1.75 and +1.75 and the area is thus as indicated in Figure 9.18. This is 2 × 0.4599 = 0.9198 or about 92%.
Figure 9.18 Example
(c) The probability of a mass less than 3.7 g is for a _z_ value less than (3.7 − 4.0)/0.2 = −1.5 (Figure 9.19). The area within +1.5 and −1.5 of the mean is 2 × 0.433 = 0.866. The total area under the curve is 1 and so the area outside these limits is 1 − 0.866 = 0.134. Just half of this area will be for less than 3.7 g and so the area is 0.067. For 400 tablets this means 0.067 × 400 or about 27 tablets.
Figure 9.19 Example
### Problems 9.2
1. Determine the mean value of a variable which can have the discrete values of 2, 3, 4 and 5 and for which 2 occurs twice, 3 occurs three times, 4 occurs three times and 5 occurs once.
2. The probability density function of a random variable _x_ is given by ½ _x_ for 0 ≤ _x <_ 2 and 0 for all other values. Determine the mean value of the variable.
3. Determine the mean and the standard deviation for the following data: 10, 20, 30, 40, 50.
4. Determine the standard deviation of the resistance values for a sample of 12 resistors taken from a batch if the values are: 98, 95, 109, 99, 102, 99, 106, 96, 101, 108, 94, 102 Ω.
5. Determine the standard deviation of the six values: 1.3, 1.4, 0.8, 0.9, 1.2, 1.0.
6. Determine the standard deviation of the probability distribution function _f_ ( _x_ ) = 2 _x_ for 0 ≤ _x_ < 1 and 0 elsewhere.
7. The following are the results of 100 measurements of the times for 50 oscillations of a simple pendulum:
Between 58.5 and 61.5 s, 2 measurements
Between 61.5 and 64.5 s, 6 measurements
Between 64.5 and 67.5 s, 22 measurements
Between 67.5 and 70.5 s, 32 measurements
Between 70.5 and 73.5 s, 28 measurements
Between 73.5 and 76.5 s, 8 measurements
Between 76.5 and 79.5 s, 2 measurements
(a) Determine the relative frequencies of each segment.
(b) Determine the mean and the standard deviation.
8. A random sample of 25 items is taken and found to have a standard deviation of 2.0. (a) What is the standard error of the sample? (b) What sample size would have been required if a standard error of 0.5 was acceptable?
9. It has been found that 10% of the screws produced are defective. Determine the probabilities that a random sample of 20 will contain 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 defectives.
10. The probability that any one item from a production line will be accepted is 0.70. What is the probability that when 5 items are randomly selected that there will be 2 unacceptable items?
11. Packets are filled automatically on a production line and, from past experience, 2% of them are expected to be underweight. If an inspector takes a random sample of 10, what will be the probability that (a) 0, (b) 1 of the packets will be underweight?
12. 1% of the resistors produced by a factory are faulty. If a sample of 100 is randomly taken, what is the probability of the sample containing no faulty resistors?
13. The probability of a mass-produced item being faulty has been determined to be 0.10. What are the probabilities that a random sample of 50 will contain 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 faulty items?
14. A product is guaranteed not to contain more than 2% that are outside the specified tolerances. In a random sample of 10, what is the probability of getting 2 or more outside the specified tolerances?
15. A large consignment of resistors is known to have 1% outside the specified tolerances. What would be the expected number of resistors outside the specified tolerances in a batch of 10 000 and the standard deviation?
16. The number of cars that enter a car park follows a Poisson distribution with a mean of 4. If the car park can accommodate 12 cars, determine the probability that the car park is filled up by the end of the first hour it is open.
17. On average six of the cars coming per day off a production line have faults. What is the probability that four faulty cars will come off the line in one day?
18. The number of breakdowns per month for a machine averages 1.8. Determine the probability that the machine will function for a month with only one breakdown.
19. Measurements of the resistances of resistors in a batch gave a mean of 12 Ω with a standard deviation of 2 Ω. If the resistances can be assumed to have a normal distribution about this mean, how many from a batch of 300 resistors are likely to have resistances more than 15 Ω?
20. Measurements made of the lengths of components as they come off the production line have a mean value of 12 mm with a standard deviation of 2 mm. If a normal distribution can be assumed, in a sample of 100 how many might be expected to have (a) lengths of 15 mm or more, (b) lengths between 13.7 and 16.1 mm?
21. Measurements of the times taken for workers to complete a particular job have a mean of 29 minutes and a standard deviation of 2.5. Assuming a normal distribution, what percentage of the times will be (a) between 31 and 32 minutes, (b) less than 26 minutes?
22. Inspection of the lengths of components yields a normal distribution with a mean of 102 mm and a standard deviation of 1.5 mm. Determine the probability that if a component is selected at random it will have a length (a) less than 100 mm, (b) more than 104 mm, (c) between 100 and 104 mm.
23. A machine makes resistors with a mean value of 50 Ω and a standard deviation of 2 Ω. Assuming a normal distribution, what limits should be used on the values of the resistance if there are to be not more than 1 reject in 1000.
24. A series of measurements was made of the periodic time of a simple pendulum and gave a mean of 1.23 s with a standard deviation of 0.01 s. What is the chance that, when a measurement is made, it will lie between 1.23 and 1.24 s?
25. The measured resistance per metre of samples of a wire have a mean resistance of 0.13 Ω and a standard deviation of 0.005 Ω. Determine the probability that a randomly selected wire will have a resistance per metre of between 0.12 and 0.14 Ω.
26. A set of measurements has a mean of 10 and a standard deviation of 5. Determine the probability that a measurement will lie between 12 and 15.
27. A set of measurements has a normal distribution with a mean of 10 and a standard deviation of 2.1. Determine the probability of a reading having a value (a) greater than 11 and (b) between 7.6 and 12.2.
## 9.3 Experimental errors
Experimental _error_ is defined as the difference between the result of a measurement and the true value:
[25]
Key points
_Random errors_ are those errors which vary in a random manner between successive readings of the same quantity. Random errors can be determined and minimised by the use of statistical analysis.
_Systematic errors_ are those errors which do not vary from one reading to another, e.g. those arising from a wrongly set zero. Systematic errors require the use of a different instrument or measurement technique to establish them.
Errors can arise from such causes as instrument imperfections, human imprecision in making measurements and random fluctuations in the quantity being measured. This section is a brief consideration of the estimation of errors and their determination in a quantity which is a function of more than one measured variable.
With measurements made with an instrument, errors can arise from fluctuations in readings of the instrument scale due to perhaps the settings not being exactly reproducible and operating errors because of human imprecision in making the observations. The term _random error_ is used for those errors which vary in a random manner between successive readings of the same quantity. The term _systematic error_ is used for errors which do not vary from one reading to another, e.g. those arising from a wrongly set zero. Random errors can be determined and minimised by the use of statistical analysis, systematic errors require the use of a different instrument or measurement technique to establish them.
With random errors, repeated measurements give a distribution of values. This can be generally assumed to be a normal distribution. The standard error of the mean of the experimental values can be estimated from the spread of the values and it is this which is generally quoted as the error, there being a 68% probability that the mean will lie within plus or minus one standard error of the true mean. Note that the standard error does not represent the maximum possible error. Indeed there is a 32% probability of the mean being outside the plus and minus standard error interval.
With just a single measurement, say the measurement of a temperature by means of a thermometer, the error is generally quoted as being plus or minus one-half of the smallest scale division. This, termed the _reading error_ , is then taken as an estimate of the standard deviation that would occur for that measurement if it had been repeated many times.
Example
A rule used for the measurement of a length has scale readings every 1 mm. Estimate the error to be quoted when the rule is used to make a single measurement of a length.
The error is quoted as ±0.5 mm.
Example
Measurements of the tensile strengths of test pieces taken from a batch of incoming material gave the following results: 40, 42, 39, 41, 45, 40, 41, 43, 45, 46 MPa. Determine the mean tensile strength and its error.
The mean is given by equation [10] as (40 + 42 + 39 + 41 + 45 + 40 + 41 + 43 + 45 + 46)/10 = 42.2 ≈ 42. The standard deviation can be calculated by the use of equation [2]. The deviations from the mean of 42 are −2, 0, −3, −1, 3, −2, −1, 1, 3, 4 and their squares are 4, 0, 9, 1, 9, 4, 1, 1, 9, 16, giving a sum of 54. The standard deviation is thus √[54/(10 − 1)] ≈ 2.4 MPa. The standard error is thus 2.4/√10 ≈ 0.8 MPa. Thus the result can be quoted as 42 ± 0.8 MPa.
#### Statistical errors
In addition to measurement errors arising from the use of instruments, there are what might be termed _statistical errors_. These are not due to any errors arising from an instrument but from statistical fluctuations in the quantity being measured, e.g. the count rate of radioactive materials. The observed values here are distributed about their mean in a Poisson distribution and so the standard deviation is the square root of the mean value.
Example
In an experiment, the number of alpha particles emitted over a fixed period of time is measured as 4206. Determine the standard deviation of the count.
Assuming the count follows a Poisson distribution, the standard deviation will be the square root of 4206 and so 65. Thus the count can be recorded as 4206 ± 65.
### 9.3.1 Combining errors
An experiment might require several quantities to be measured and then the values inserted into an equation so that the required variable can be calculated. For example, a determination of the density of a material might involve a determination of its mass and volume, the density then being calculated from mass/volume. If the mass and volume each have errors, how do we combine these errors in order to determine the error in the density?
Consider a variable _Z_ which is to be determined from sets of measurements of _A_ and _B_ and for which we have the relationship _Z_ = _A_ \+ _B_. If we have _A_ with an error Δ _A_ and _B_ with an error Δ _B_ then we might consider that _Z_ \+ Δ _Z_ = _A_ \+ Δ _A_ \+ _B_ \+ Δ _B_ and so we should have
[26]
i.e. the error in _Z_ is the sum of the errors in _A_ and _B_. However, this ignores the fact that the error is the standard error and so is just the value at which there is a probability of 68% that the mean value for _A_ or _B_ will be within that amount of the true mean. If we consider the set of measurements that were used to obtain the mean value of _A_ and its standard error and the set of measurements to obtain the mean value of _B_ and its standard error and consider the adding together of individual measurements of _A_ and _B_ then we can write Δ _Z_ = Δ _A_ \+ Δ _B_ for each pair of measurements. Squaring this gives:
We can write such an equation for each of the possible combinations of measurements of _A_ and _B_. If we add together all the possible equations and divide by the number of such equations, we would expect the 2 _ΔA ΔB_ terms to cancel out since there will be as many situations with it having a negative value as a positive value. Thus the equation we should use to find the error in _Z_ is:
[27]
The same equation is obtained for _Z_ = _A − B_.
Now consider the error in _Z_ when _Z_ = _AB_. As before, we might argue that _Z_ \+ Δ _Z_ = ( _A_ \+ Δ _A_ )( _B_ \+ Δ _B_ ) and so Δ _Z_ = _B_ Δ _A_ \+ _A_ Δ _B_ , if we ignore as insignificant the Δ _A_ Δ _B_ term. Hence:
[28]
i.e. the fractional error in _Z_ is the sum of the fractional errors in _A_ and _B_ or the percentage error in _Z_ is the sum of the percentage errors in _A_ and _B_. However, this ignores the fact that the error is the standard error and so is just the value at which there is a probability of 68% that the mean value for _A_ or _B_ will be within that amount of the true mean. If we consider the set of measurements that were used to obtain the mean value of _A_ and its standard error and the set of measurements to obtain the mean value of _B_ and its standard error and use equation [28] for each such combination, then we can write:
We can write such an equation for each of the possible combinations of measurements of _A_ and _B_. If we add together all the possible equations and divide by the number of such equations, the 2(Δ _A/A_ )(Δ _B/B_ ) terms cancel out since there will be as many situations with it having a negative value as a positive value. Thus the equation we should use to find the error in _Z_ is:
[29]
The same equation is obtained for _Z_ = _A_ / _B_. If _Z_ = _A_ 2 then this is just the product of _A_ and _A_ and so equation [29] gives (Δ _Z_ / _Z_ )2 = 2(Δ _A/A_ )2. Thus for _Z_ = _A n_ we have:
[30]
In all the above discussion it was assumed that the mean value of _Z_ was given when the mean values of _A_ and _B_ were used in the defining equation.
Example
The resistance of a resistor is determined from measurements of the potential difference across it and the current through it. If the potential difference has been measured as 2.1 ± 0.2 V and the current as 0.25 ± 0.01 A, what is the resistance and its error?
The mean resistance is 2.1/0.25 = 8.4 Ω. The fractional error in the potential difference is 0.2/2.1 = 0.095 and the fractional error in the current is 0.01/0.25 = 0.04. Hence the fractional error in the resistance is √(0.095² + 0.04²) = 0.10. Thus the resistance is 8.4 ± 0.9 Ω.
Example
If _g_ = 4π² _L_ / _T_ ² and _L_ has been measured as 1.000 ± 0.005 m and _T_ as 2.0 ± 0.1 s, determine _g_ and its error.
The mean value of _g_ is 4π²(1.000)/2.0² = 9.87 m/s². The fractional error in _L_ is 0.005/1.000 = 0.005 and that in _T_ is 0.1/2.0 = 0.05. Thus:
(fractional error in _g_ )² = 0.005² \+ 2 × 0.05²
Thus the fractional error in _g_ is 0.071 and so _g_ = 9.87 ± 0.7 m/s².
### Problems 9.3
1. Determine the mean value and standard error for the measured diameter of a wire if it is measured at a number of points and gave the following results: 2.11, 2.05, 2.15, 2.12, 2.16, 2.14, 2.16, 2.17, 2.13, 2.15 mm.
2. An ammeter has a scale with graduations at intervals of 0.1 A. Give an estimate of the standard deviation.
3. Determine the mean and the standard error for the resistance of a resistor if repeated measurements gave 51.1, 51.2, 51.0, 51.4, 50.9 Ω.
4. In an experiment the number of gamma rays emitted over a fixed period of time is measured as 5210. Determine the standard deviation of the count.
5. How big a count should be made of the gamma radiation emitted from a radioactive material if the percentage error should be less than 1%?
6. Repeated measurements of the voltage necessary to cause the breakdown of a dielectric gave the results 38.9, 39.3, 38.6, 38.8, 38.8, 39.0, 38.7, 39.4, 39.7, 38.4, 39.0, 39.1, 39.1, 39.2 kV. Determine the mean value and the standard error of the mean.
7. Determine the mean value and error for _Z_ when (a) _Z_ = _A − B_ , (b) _Z_ = 2 _AB_ , (c) _Z_ = _A_ 3, (d) _Z_ = _B/A_ if _A_ = 100 ± 3 and _B_ = 50 ± 2.
8. The resistivity of a wire is determined from measurements of the resistance _R_ , diameter _d_ and length _L_. If the resistivity is _RA/L_ , where _A_ is the cross-sectional area, which measurement requires determining to the greatest accuracy if it is not to contribute the most to the overall error in the resistivity?
9. The cross-sectional area of a wire is determined from a measurement of the diameter. If the diameter measurement gives 2.5 ± 0.1 mm, determine the area of the wire and its error.
10. Determine the mean value and error for _Z_ when (a) _Z_ = _A_ \+ _B_ , (b) _Z_ = _AB_ , (c) _Z_ = _A/B_ if _A_ = 100 ± 3 and _B_ = 50 ± 2.
Solutions to problems
## Chapter 1
1.1
1.
(a) 3,
(b) 5
2.
(a) 0,
(b) 6
3.
(a)
4.
(a) 2,
(b) 1
5.
(a) 0,
(b) 2
6. _v_ = 0 for 0 ≤ _t_ ≤ 2, _v_ = 10 V for 2 ≤ _t_
7. See Figure S.1
Figure S.1
8.
(a) 2.01 s,
(b) 6.34 s
9.
(a) 2 m/s,
(b) 7 m/s
10.
(a) 3 _x_ \+ 1,
(b) 2 _x_ \+ 2,
(c) 2 _x_ \+ 1
11.
(a) _x_ 2 \+ 3 _x_ \+ 1,
(b) 9 _x_ 2 \+ 3,
(c) 3 _x_ 2 \+ 3,
(d) _x_ 2 − 3 _x_ − 1,
(e) 9 _x_ 2 \+ 12 _x_ \+ 5
### 1.2
1.
(a) Straight line through origin,
(b) straight line not through origin,
(c) not straight line,
(d) no straight line
2.
(a) _i_ = 0.5 _t_ \+ 2 amps,
(b) _e_ = 1.2 × 10−3 _L_ m
### 1.3
1.
(a) −3.2, 1.2,
(b) −2.6, −0.38,
(c) 3.8, −0.41,
(d) no real roots
2. 38.2°C, 261.8°C
3. 0.93 m, −0.11m
4.
5. 18.8 m or 1.6 m
6. 5.41 cm and 8.41 cm
### 1.4
1.
(a)
(b) _x_ −4,
(c)
(d)
2. No, but if domain restricted to then yes
3.
(a)
(b)
### 1.5
1. Amplitude = 5, phase angle = +30° leading
2. Amplitude = 4, angular frequency = 3 rad/s
3.
(a) 2, 1.27 s, 1 rad,
(b) 6, 2.09 s, 0,
(c) 5, 9.4 s, 0.33 rad,
(d) 2, 6.28 s, −0.6 rad lagging
4.
(a) 6, π, 1,
(b) 2, 2π/9, 0,
(c) 6, 5π, −0.2,
(d) 2, 2π, −0.2,
(e) 6, π/2, π/8,
(f) 0.5, 2π, −π/6
5. 40, 20 Hz
6.
(a) 0.87 V,
(b) −0.87 V
7.
(a) 100 mA,
(b) 100 Hz,
(c) 0.25 rad or 14.3° lagging
8.
(a) 12 V,
(b) 50 Hz,
(c) 0.5 rad or 28.6° lagging
9. 16.2 V
10. As given in the problem
11. 101.3°, 355.4°
12.
(a) √41 sin (θ − 5.61),
(b) √41 cos (θ + 5.39)
13. _R_ = _W_ (1 + μ2)1/2, tan β = 1/μ
14.
(a) 5 sin ( _ωt_ \+ 0.927),
(b) 8.63 sin ( _ωt_ − 1.01),
(c) 4.91 sin ( _ωt_ \+ 4.13) or 4.91 ( _ωt_ − 2.15) for −π ≤ _a_ ≤ π
15. 5.83 sin (θ + 59.03)
16.
(a) 15 sin ( _ωt_ − 0.64),
(b) 8.06 sin ( _ωt_ \+ 2.62),
(c) 6.71 sin ( _ωt_ − 2.03)
17. 22.4 sin ( _ωt_ \+ 1.11) mA
18. 6.81 sin ( _ωt_ \+ 0.147) V
19. 126.2√2 sin ( _ωt_ − 0.071) V
20. 8.72√2 sin ( _ωt_ − 9.639) A
21.
(a) 0.83,
(b) 1.47,
(c) 0.67,
(d) −0.41
### 1.6
1.
(a) The negative sign,
(b) _N_ 0,
(c) λ small
2.
(a) The power is positive,
(b) _L_ 0,
(c) α high means a high expansion
3.
(a) 0,
(b) 3A
4.
(a) 2, infinite,
(b) 10, 0,
(c) 0, 2,
(d) 2,0,
(e) −4, 0,
(f) 0, 0.5,
(g) 0, 4,
(h) 10, 0,
(i) 0, 0.2
5.
(a) 18.10 V,
(b) 7.36 V
6. 9.96 × 104 Pa
7.
(a) 0,
(b) 0.86 _E_ / _R_ A
8.
(a) 0,
(b) 0.86 _E_
9. 0.95 μC
10.
(a) 0,
(b) 1.57 A,
(c) 2.53 A,
(d) 3.11 A,
(e) 4 A
11. 5.13 V
12. 5637 Ω
13.
(a) 1.26 A,
(b) 1.72 A,
(c) 1.90 A
14. 0.03 g
15.
(a) 8.61 × 104 Pa,
(b) 7.41 × 104 Pa
16.
(a) 198.0 V,
(b) 190.2 V
17.
(a) − _E_ ,
(b) −0.61 _E_
18.
(a) 0,
(b) 0.63 _E_
19.
(a) 200°C,
(b) 134.1°C,
(c) 0°C
20. \+ gives growth, − decay.
21.
(a) e8 _t_ ,
(b) e−2 _t_ ,
(c) e−12 _t_ ,
(d) 1 + 2 e2 _t_ \+ e4 _t_ ,
(e) e−3 _t_ ,
(f) e−2 _t_ ,
(g) e−8 _t_ ,
(h) 5 e3 _t_ ,
(i) 0.4 e4 _t_
### 1.7
1.
(a) 4 1g _x_ ,
(b) 5 1g __x__ − 1g 2
2.
(a) 1g _b_ \+ 0.5 1g 2 − 1g _a_ − 1g _c_ ,
(b) 3 1g _a_ \+ 3 lg _b_ − 1.5 1g _c_
3.
(a) 5.19,
(b) −0.593,
(c) 0.419
4. _v_ = 10.2e−0.1 _t_
5. θ = 800 e−0.2 _t_
6. _Q_ = 2.6 _h_ 2.5
7. _A_ = 400 e−0.02 _t_
8. _T_ = 50 e0.3θ
9. _I_ = 2430 mA, _T_ = −51.3
10. 60.62 × 103
### 1.8
1.
(a) 3.627,
(b) 74.210,
(c) 0.964,
(d) −3.627,
(e) 0.525,
(f) 0.748
2. 1.622m
3.
(a)
note when surface tension neglected
(b)
## Chapter 2
2.1
1.
(a) 7.43 m/s, N 73° W,
(b) 3.58 m/s, N 54° E,
(c) 8.16 m/s, N 75° E
2.
(a) 13 m, N 67° E,
(b) 13 m, N 67° W,
(c) 13 m, S 67° E,
(d) 24.5 m, N 78° E
3.
(a)
(b)
(c)
4.
(a) **b − a** ,
(b) **a + b** ,
(c) **a** − 3 **b** ,
(d) 2 **b**
5. 7.8 N at 54° to AB
6.
(a)
(b)
(c) 0,
(d)
7. 1.36 N, 8.82 N
8.
(a) 3.6, 56.3°,
(b) 5.4, 21.8°,
(c) 4.2, 45°
9.
(a) 4 **i** \+ 6 **j** ,
(b) −8 **i** ,
(c) 10 **i** \+ 9 **j**
10.
(a) 7 **i** \+ 5 **j** ,
(b) 3 **i** − 1 **j** ,
(c) 1 **i** − 4 **j**
11.
(a) 9 **i** \+ 2 **j** ,
(b) 3 **i** \+ 4 **j** ,
(c) −13 **i** − 3 **j**
12.
(a)
(b)
(c)
13.
14.
15. 112.6°
16. 48.2°, 131.8°,70.5°
### 2.3
1.
(a) −j,
(b) 1,
(c) −j,
(d) −1
2.
(a) ±j4,
(b) −2 ± j2,
(c) 0.5 ± j1.1
3.
(a) 4.12∠166o,
(b) 5∠233°,
(c) 3∠0°,
(d) 6∠270o,
(e) 1.4∠45o,
(f) 3.61∠326o
4.
(a) −2.5 + j4.3,
(b) 7.07 +j7.07,
(c) −6,
(d) 0.68 + j2.72,
(e) 1.73 + j1,
(f) 1.5 − j2.6
5.
(a) 1 +j6,
(b) 5 −j2,
(c) −14 + j8,
(d) 0.23 −j0.15,
(e) 0.1 − j0.8
6.
(a) 5 − j2,
(b) −2 − j1,
(c) −1 + j7,
(d) 1,
(e) 12 +j8,
(f) −10 + j6,
(g) 11 − j2,
(h) 12,
(i) 10 + j5,
(j) 0.9 + j1.2,
(k) 0.23 − j0.15,
(l) j1,
(m) −0.3 + j1.1
7.
(a) 20∠60o,
(b) 50∠80o,
(c) 0.1∠(−20°),
(d) 0.5∠(−40°),
(e) 5∠(−20°),
(f) 0.4∠(−20°)
8.
(a) 10∠(−30°), 8.66 − j5,
(b) 10∠150o, −8.66 + j5,
(c) 22∠45°, 15.6 + j15.6
9.
(a) 5.5 + j2.6, 6.1∠25.3o,
(b) −2 + j7, 7.3∠105.9o,
(c) 3.7 + j4.5, 5.8∠50.6o
10.
(a) 25∠90o,
(b) 20∠75o,
(c) 44.5∠83.3o,
(d) 4∠(−30°),
(e) 1.25∠15o,
(f) 0.164∠9.2°
11.
(a) 20 + j17.32 = 26.46∠40.9o V,
(b) 26.46 sin( _ωt_ \+ 40.9°) V
12. 25∠(−30°)Ω
13. 2∠(−36.8°)A
14.
(a) 12 − j5 Ω,
(b) 136.6 +j136.6 Ω,
(c) 32.1 +j7.4 Ω,
(d) 1.88 − j6.34 Ω,
(e) 0.384 − j1.922 Ω,
(f) j13.3 Ω
15.
(a) 5 + j2 Ω,
(b) 50 − j10 Ω,
(c) 2 + j1 Ω,
(d) 1.92 − j0.38 Ω,
(e) −j125Ω
## Chapter 3
3.1
1. You might like to consider it to be like a swinging chain which, in itself, is rather like a form of simple pendulum.
2. See Figure S.2,
Figure S.2
3.
(a)
(b)
(c)
(d)
### 3.2
1.
(a) _E_ = 0.45 _L_ \+ 5,
(b) _R_ = 4.2L
2.
(a) _T_ against √ _L_ , gradient 2π/√g, intercept 0,
(b) _s_ / _t_ against _t_ , gradient _a_ /2, intercept _u,_
(c) _e_ /θ against θ, gradient _b_ , intercept _a,_
(d) _R_ against θ, gradient _R_ 0α, intercept _R_ 0,
3.
4.
5. _V_ = 16 _p_ −1
6. _T_ = 500 _p_ 0.28
7. _C_ = 0.001 _n_ 3 \+ 30
8. _s_ = 0.1 _v_ 2 \+ 0.5 _v_
9. _I_ = 0.001 _V_ 2
10. _v_ = 2090 e− _t_ /12
## Chapter 4
4.1
1.
(a) 5 _x_ 4,
(b) −8 _x_ −5,
(c) −6 _x_ ,
(d) ½,
(e) 4 _πx_ ,
(f) 3 sec2 3 _x_ ,
(g) −10 sin 2 _x_ ,
(h) 8 e _x_ /2,
(i) −4 e−2 _x_ ,
(j) 9 e3 _x_ ,
(k) −(5/6) _x_ −3/2,
(l) −(12/3) _x_ −3,
(m)
(n)
(o)
(p) −24 _x_ 2 \+ 4 _x_ \+ 15,
(q) 5 _x_ cos _x_ \+ 5 sin _x_ ,
(r) e _x_ /2 \+ ½ _x_ e _x_ /2,
(s) ( _x_ 2 \+ 1) cos _x_ \+ 2 _x_ sin _x_ ,
(t)
(u)
(v)
(w)
(x)
(y)
(z)
2.
(a) 2,
(b) −4 cos 2 _x_ ,
(c) 6/ _x_ 4,
(d) 36 _x_ 2 − 2 − 2/ _x_ 3,
(e) 12 _x_ 2 \+ 12 _x_ ,
(f)
3. 7 m/s, −4 m/s2
4. 6 cos 2 _t_ − 9 sin 3 _t_ m/s, −12 sin 2 _t_ − 27 cos 3 _t_ m/s2
5. 0.03 cos 5 _t_ A/s
6. 50 e−100 _t_ V/s
7. _πr_ 2
8. 4 _πr_ 2
9.
10. _L_ 0( _a_ \+ 2 _bT_ )
11.
(a) (2, −1) min.,
(b) (1, 7) max., (3, 3) min.,
(c) (1, −4) min., (−1, 4) max.,
(d) (π/2, 1) max., (3π/2, −1) min.,
(e) (−2, 23) max., (1, −4) min.
12. _r_ = _h_ = 1 m
13. _h_ = _r_ = 4 cm
14. ½ _L_
15. 0,0.32 _L_ ,
16. 47.7 V
17. 3.33 m from smaller source
18. 10 mA/s
19. 0.58 _L_
20. _−a_ /2 _b_
21. 45°
22. _x_ = _y_
23. ½( _a − b_ )
24. _r_ = 4/3m
25. 6 × 6 cm
26. 4 × 4 × 2 m
### 4.2
1.
(a) 4 _x_ + _C_ ,
(b)
(c)
(d)
(e)
(f)
(g)
(h)
2.
(a) 15,
(b) 8,
(c) 1.10,
(d) −2.25,
(e) −4.5,
(f) 2.67,
(g) 0.83
3.
(a) 116/3,
(b) 11/3
4. ½
5. 1/12
6. 1
7. 4½
8.
(a) Diverges,
(b) 1/18,
(c) 1/3,
(d) diverges
9.
(a)
(b)
(c)
(d)
(e)
(f)
(g)
(h)
(i)
10.
(a)
(b)
(c)
(d)
(e)
(f)
(g)
(h)
(i)
11.
(a) In 2,
(b) 1/90,
(c) π/4,
(d) π/4,
(e) π/8
12.
(a)
(b)
(c)
(d)
(e)
(f)
13.
(a)
(b)
(c) 2
14.
(a)
(b)
(c)
(d)
(e)
(f)
(g)
(h)
(i)
(j)
(k)
15.
(a) _Mh_ 2/18,
(b) _Mh_ 2/6
16. 5 _Mr_ 2/2
17.
(a)
(b)
18.
(a) 1,
(b) 7,
(c) 16,
(d) 0.5
19. 2 _A/π_
20. 0.623 _N_ 0
(21)
(a) 4.92,
(b) 1.15,
(c) 1.23,
(d) 0.707,
(e) 1.35
22. _V/_ 2
## Chapter 5
5.1
1.
(a)
(b)
(c)
(d)
(e)
(f)
(g)
2. As given in the problem
3.
(a)
(b)
(c)
4.
### 5.2
1.
(a) y = ln _x_ \+ _A_ ,
(b) _y_ = 2 sin ½ _x_ \+ _A_ ,
(c) −1/y = _x_ \+ _A_ ,
(d)
(e)
(f)
(g)
(h)
(i)
(j)
(k)
(l)
2.
3.
4.
5.
6.
7.
8.
9. 3.41 hours
10. _RC_
11. 51.4°C
12. As given in the problem
13. 10.9 V
14.
15.
(a) _x_ = 1 − e− _t_ /2,
(b) _x_ = 8 e− _t_ /2 \+ 4 _t_ − 8
16. _x_ = 5(1 − e− _t_ / _r_ )
17.
(a) 36.8%,
(b) 13.5%
18. 12 s
19.
(a)
(b)
### 5.3
1.
(a)
(b)
(c)
(d)
(e)
(f)
2.
(a)
(b)
(c)
(d)
(e)
(f)
(g)
(h)
3.
(a)
(b)
(c)
(d)
(e)
(f)
4.
(a)
(b)
(c)
(d)
(e)
(f)
(g)
5. _y_ = e3 _x_ − 3 e2 _x_
6.
7. _x_ = _A_ e-4 _t_ \+ _B_ e− _t_
8.
(a) _x_ = 0.2 cos 3 _t_ ,
(b) _x_ = e− _t_ (0.2 cos 2.83 _t_ \+ 0.070 sin 2.83 _t_ )
9. 0.44
10.
(a) 5 rad/s,
(b) 1.25
11. 316 rad/s, 6.3 N s/m
12. Over damped
13. 6 N s/m
14. 2.6 rad/s, 0.76
15. e− _t_ (0.2 cos 2.24 _t_ \+ 0.22 sin 2.24 _t_ )
16.
17. As given in the problem
## Chapter 6
6.1
1.
(a)
(b)
(c)
2.
(a)
(b)
(c)
(d)
(e)
(f)
(g)
(h)
(i)
(j)
(k)
(l)
(m)
(n)
(o)
(p)
(q)
(r)
(s)
3.
(a)
(b)
(c)
(d)
(e)
(f)
4.
(a)
(b)
(c)
5.
(a)
(b)
(c)
6.
7.
8.
(a)
(b)
(c)
9.
(a) e2 _t_ ,
(b) 5,
(c) cos 4 _t_ ,
(d) sinh 3 _t_
(e) e2 _t_ sin 5 _t_ ,
(f)
(g) ( _t_ −2) _u_ ( _t_ −2),
(h)
10.
(a)
(b)
(c)
(d)
11.
(a) 0,
(b) 1
12.
(a) 2,
(b) 0
### 6.2
1.
(a)
(b)
(c)
(d)
(e)
(f)
(g)
(h)
(i)
(j)
(k)
### 6.3
1. 3/ _s_
2. 2
3. 5/ _s_ 2
4. e−5 _t_ V
5.
(a) (5/3)(1 − e−3 _t_ ) V,
(b) 5 _e_ −3 _t_ V
6.
(a) 6(1 − e− _t_ ) V,
(b) 6e− _t_ V
7.
(a) 2(1 − e-2 _t_ ) V,
(b) ½[ _t_ − ½(1 − e−2 _t_ )] V
8.
(a) 5/( _s_ − 1) − 4/( _s_ − 2),
(b) 4/( _s_ \+ 1) − 3/( _s_ \+ 2),
(c) 2/( _s_ \+ 1) − 3/( _s_ \+ 1)2
9. 24 − 12e−2 _t_ − 4e−4 _t_
10.
11.
(a) Critical,
(b) overdamped,
(c) underdamped
12. −1.5 + 3.0 _t_ \+ 1.5e−2 _t_
13. 0.5-e− _t_ \+ 0.5e−2 _t_
14. 0.5(e− _t_ − e−3 _t_ )
15. 0.5(1 − e−10 _t_ )
16. 10,0.05
17. Underdamped
18. Critically damped
19. 1/53 s
20.
(a) 0.01s Ω in series with −0.002 V, in parallel with 0.2/s A,
(b) 0.5/s MΩ in series with 5 _/s_ V, in parallel with 10 μA.
21.
(a)
(b)
22.
23.
24.
## Chapter 7
7.1
1. 0, 1, 0,−1, 0, 1,...
2.
(a) 0, 1, 2, 3, 4,
(b) 1, 0.37, 0.13, 0.05, 0.007
3.
(a) 116,
(b) 0.75
4.
(a) (−1) _k_ −1,
(b) 5 _k_ ,
(c) 2.5 − 0.5 _k_
5.
(a) 0, 1, 4, 9, 16,
(b) 1, 2.72, 7.39, 20.09, 54.60,
(c) 0, 2.5, 6, 10.5, 16
6. 13
7. 0.5
8.
(a) 0.25 _k_ ,
(b) 2(−1) _k_ ,
(c) 3 + 0.1 _k_
9.
(a) 0.1, 0.01, 0.001,
(b) 5.1, 5.01, 5.001,
(c) −1, +1, −1
10.
(a) 222,
(b) 9.998,
(c) 28.70
11.
(a) 7.5,
(b) 23.98,
(c) 1023
12.
(a) 12,
(b) 16,
(c) 48
13.
(a) Convergent,
(b) divergent
14.
(a) Convergent,
(b) divergent
15.
(a) Divergent,
(b) convergent,
(c) convergent,
(d) convergent,
(e) convergent,
(f) divergent
16.
(a)
(b)
(c)
(d) 1 − 0.25 + 0.062 − 0.015,
(e)
17.
(a)
(b)
(c)
(d)
(e)
(f)
18. 1.013 2
19. As given in the problem
20.
21.
22.
(a)
(b)
(c)
(d)
(e)
(f)
23. As given in the problem
24. As given in the problem
25. As given in the problem
26. Reduced by 6%
27. Increased by 1%
### 7.2
1. Second and fourth (and a d.c. term)
2.
3.
4.
5.
(a) Only even harmonics,
(b) only odd harmonics
6.
7.
(a) Odd sines,
(b) _a_ 0, even sines and cosines
8.
(a) 10/6,
(b) 0
9.
(a)
(b)
(d) odd,
(c)
(e) even
10.
(a) sine,
(b) _a_ 0 and cosine,
(c) _a_ 0 and cosine
11.
12.
(a) 0.5, 0.31, 0.16, 0.11, 180°, 180°, 180°,
(b) 1.57, 1.19, 0.5, 0.13, −32°, 180°, −12°
13. 0.32 + 0.5 cos 100 _t_ \+ 0.21 cos 200 _t_ mA
14. 0.5 cos(100 _t_ − 90°) + 0.21 cos(200 _t_ − 90°) A
15. 3.2 sin(100 _t_ \+ 90°) + 3.2 sin(200 _t_ \+ 90°) mA
## Chapter 8
8.2
1.
(a) 1,
(b) 1,
(c) 0
2.
(a) _a_ · _b_
(b) _a_ \+ _b_ \+ _c_ ,
(c) _b_ ,
(d)
(e)
### 8.3
1.
(a) ( _b_ \+ _c_ ) · _d_ \+ _a_ ,
(b)
(c)
2.
(a)
(b)
3.
(a) _A·B_ \+ _C·_ ( _D_ \+ _E_ ),
(b) _A_ \+ ( _B_ \+ _C_ ),
(c) _A_ \+ _B_ \+ _C_
4.
5.
(a) See Figure S.3(a),
Figure S.3
(b) see Figure S.3(b),
(c) see Figure S.3(c)
6.
(a) ( _A_ \+ _B_ ) _·B·C_ \+ _A, B·C_ \+ _A_ , see Figure S.4,
Figure S.4
(b) see Figure S.5
Figure S.5
7.
(a) See Figure S.6(a),
Figure S.6
(b) see Figure S.6(b),
(c) see Figure S.6(c),
(d) see Figure S.6(d)
8.
(a)
(b)
9.
(a) _A · B ·_ ( _A_ \+ _B_ \+ _C_ ), _A · B · C,_
(b)
See Figure S.7
Figure S.7
10.
(a)
(b)
(c)
(d)
(e) _C_ ,
(f) _C_ · _D_
(g)
## Chapter 9
9.1
1. 4/50
2. 4/52
3.
(a) 0.004,
(b) 0.96
4. 0.85
5. 0.97
6.
(a) 0.01,
(b) 0.02,
(c) 0.03,
(d) 0.96
7.
(a) 1/3,
(b) 2/3
8.
(a) 40 320,
(b) 840,
(c) 30
9. 1260
10.
(a) 21,
(b) 21,
(c) 1
11.
(a) 77/92,
(b) 11/69,
(c) 1/276
12.
(a) 1/15,
(b) 7/15
13.
(a) 1/15,
(b) 8/15
14. 1000
15. 0.5
### 9.2
1. 3.0
2. 4/3
3. 40, 31.6
4. 5
5. 0.237
6. 0.24
7.
(a) 0.02, 0.06, 0.22, 0.32, 0.28, 0.08, 0.02,
(b) 69.3, 2.3
8.
(a) 0.4,
(b) 7
9. 0.122, 0.270, 0.285, 0.190, 0.090, 0.032, 0.009, 0.002, 0.000 4, 0.000 1
10. 0.132
11.
(a) 0.817,
(b) 0.016
12. 0.366
13. 0.005, 0.029, 0.078, 0.138, 0.181, 0.185, 0.154, 0.108, 0.064, 0.033
14. 0.016
15. 100,9.9
16. 0.001
17. 0.135
18. 0.297
19. 20
20.
(a) 6.68,
(b) 17.75
21.
(a) 9.68,
(b) 11.5
22.
(a) 0.091 3,
(b) 0.091 3,
(c) 0.817 4
23. 50 ±6.6 Ω
24. 0.34
25. 0.954
26. 0.185 9
27.
(a) 0.315 6,
(b) 0.726 0
### 9.3
1. 2.134 mm, 0.011 mm
2. 0.05 A
3. 51.12 Ω, 0.08 Ω
4. 72
5. 4.9 ± 0.3 mm2
6. 10 000
7. 39.0 kV, 0.11 kV
8.
(a) 50 ± 3.6,
(b) 10 000 ± 500,
(c) 1 000 000 ± 52 000,
(d) 2 ± 0.1
9. Diameter
10.
(a) 150 ± 3.6,
(b) 5000 ± 250,
(c) 2 ± 0.1
# Index
A
Accuracy,
Alternating current, x
Angular frequency,
Approximations, making,
Argand diagram,
Argument,
Attributes,
B
Beam, deflection, ix, , , ,
Boolean algebra,
complementary law,
De Morgan laws,
product of sums,
sum of products,
Building, wind deflection,
C
Cables, suspended,
Capacitor
charging, ,
discharging, ,
time constant,
Car suspension system,
Centre of gravity,
Centroid,
Circular functions,
_a_ cos θ + _b_ sin θ,
cosine,
inverse,
manipulating,
sine,
sums of angles,
tangent,
Combinations,
Complex numbers,
addition,
Argand diagram,
argument,
conjugate, ,
division,
manipulation,
modulus,
multiplication,
polar coordinates,
subtraction,
Conjugate pair, ,
Cyclic functions,
D
Damping factor, , ,
Decibel,
Differential equation,
boundary conditions,
first-order, , ,
and step input,
general solution,
initial conditions,
ordinary,
particular solution,
second-order, , ,
auxiliary equation,
characteristic equation,
complementary function,
homogeneous,
non-homogeneous,
particular integral, ,
separation of variables,
solving,
by Laplace transform,
Differentiation,
chain rule,
D-operator,
function of a function rule,
inverse functions,
log functions,
of a constant,
of an exponential,
of a power,
of trigonometric function,
order,
product rule,
quotient rule,
rules,
sum rule,
Distributions,
binomial,
expected value,
frequency,
Gaussian,
mean,
normal,
Poisson,
probability,
probability density function,
standard deviation,
standard error,
true value,
D-operator,
E
Equations,
Euler's equation,
Experimental errors,
combining,
random,
reading,
statistical,
systematic,
Exponential functions,
e,
manipulating,
F
Factors,
Feedback control systems,
Fourier series,
coefficients,
even symmetry,
frequency spectrum,
half-wave symmetry,
odd symmetry,
rectified sinusoid,
rectangular waveform,
sawtooth waveform,
shift of origin,
Frequency,
Functions,
circular,
combinations,
cyclic,
defined,
exponential,
hyperbolic,
inverse, ,
linear,
log,
quadratic,
G
Gradient of graph, ,
Graphs
cyclic functions,
hyperbolic functions,
linear, ,
log,
H
Histogram,
Hyperbolic functions,
cosh,
graphs,
sinh,
tanh,
I
Imaginary number,
Impedance,
Inductance and a.c.,
Inflexion, point of,
Integral
definite, ,
indefinite, ,
Integration,
area under graph,
by partial fractions,
by parts,
by substitution,
by trig. substitution,
common functions,
constant of integration,
of a sum,
particular solution,
reverse of differentiation,
Inverse functions, ,
K
Karnaugh map,
Kirchhoff's laws, ,
L
Ladder programming,
Laplace transform,
derivatives,
differential equations,
electrical circuits,
final value theorem,
first-order systems,
first shift theorem,
impulse function, ,
initial value theorem,
integrals,
inverse,
additive property,
first shift theorem,
second shift theorem,
periodic functions,
properties,
_s_ -domain,
second-shift theorem,
standard,
step function, ,
sum of functions,
table,
time domain,
transfer function,
Limits,
Linear relationships,
Log functions,
changing bases,
natural,
Logic gates,
AND, ,
Boolean algebra,
combining,
De Morgan laws,
EXCLUSIVE OR,
logic level,
negative logic,
NAND,
NOR,
NOT, ,
OR, ,
positive logic,
symbols,
truth table,
XOR,
M
Maxima,
Mean, ,
Measurement, xi
Minima,
Model, mathematical, xi,
electrical systems, , , , , , ,
hydraulic systems, , ,
lumped element,
mechanical systems, , , , , ,
rotational systems,
thermal systems, , , ,
Moment of inertia,
Motor, model,
N
Newton's second law,
O
Oscillation, x,
damped, ,
undamped,
P
Parallelogram of vectors, ,
Partial fractions,
Permutations,
Periodic time,
Phase angle, ,
Phasor, x,
adding, , ,
complex representation,
defined,
division,
impedance, ,
Kirchhoff's laws,
multiplication,
polar notation,
subtraction,
PLC, x, , , ,
Polygon rule for vectors,
Power, maximum transfer,
Precision,
Probability,
conditional,
density function,
distributions,
mutually exclusive events,
Programmable logic controller, _See_ PLC
Proportionality,
constant,
Q
Quadratic functions,
completing the square,
factors,
formula,
roots,
R
Radioactive decay,
Reactance,
Rectifier filter,
Reliability,
Ripple factor,
Root-mean-square value,
Roots,
S
Scalars, defined,
Second moment of area,
Sensitivity, static system,
Series,
arithmetic,
binomial, , ,
Sequence,
arithmetic,
geometric,
harmonic,
convergent,
D'Alembert test,
divergent,
Fourier,
geometric,
Pascal's triangle,
power, ,
Simple harmonic motion, ,
Spring
force–extension, ,
oscillation,
Steady-state value, ,
Straight-line graph,
Summation symbol, ,
Superposition principle,
Switching circuits,
T
Thermometer, model, ,
Time constant, , , ,
Transfer function,
defined,
feedback systems,
series systems,
Transients, electrical,
Triangle rule for vectors,
Truth table,
Turning points,
V
Variable
continuous,
dependent,
discrete,
independent,
random,
Vectors,
adding, ,
components, ,
defined,
direction cosines, ,
free,
in space,
like,
multiplication by a number,
parallelogram rule,
polygon rule,
position,
subtraction, ,
triangle rule,
unit, ,
W
Work,
| {
"redpajama_set_name": "RedPajamaBook"
} | 7,203 |
When a container without a signed weight declaration shows up at a marine terminal as of July 1, when a new SOLAS rule takes effect, what happens next? Will the terminal allow the container in, hoping that the weight will arrive in time for the container to be handled and loaded without having to be pulled aside? Or does the terminal avoid the risk, telling the carrier and its customer that containers without the Verified Gross Mass won't be allowed in under any circumstances? | {
"redpajama_set_name": "RedPajamaC4"
} | 9,686 |
Q: Android ImageView wont clear I have this url being loaded into an imageview by picasso
Intent intent = getIntent();
sceneUrl = intent.getStringExtra("url");
image = (ImageView)findViewById(R.id.imageView1);
Picasso.with(this).load(sceneUrl).into(image);
This activity is being started from a broadcast receiver via intent...
Intent scene = new Intent( context, Image.class );
scene.putExtra("url", output);
scene.addFlags(Intent.FLAG_FROM_BACKGROUND);
scene.addFlags(Intent.FLAG_ACTIVITY_NEW_TASK);
scene.addFlags(Intent.FLAG_ACTIVITY_CLEAR_TOP);
context.startActivity(scene);
Every time I broadcast a new intent, meaning a new image, the same image shows when the activity pops up.
What isn't being cleared? is it picasso or the imageview or the activity... something is stuck and is keeping the old url/image around
A: Picasso.with(this).load(sceneUrl).skipMemoryCache().into(image);
| {
"redpajama_set_name": "RedPajamaStackExchange"
} | 1,803 |
The Groton and Stonington Street Railway was an interurban trolley line that extended from Groton, Connecticut, to Westerly, Rhode Island, with a later branch to Old Mystic, Connecticut, and an extension to New London. The line operated from 1904 to 1919 and 1923 to 1928, after which it was replaced by buses.
History
The Groton and Stonington Street Railway was chartered on August 17, 1903, with permission to build two lines. The loop line in Groton was never constructed, but work began on the line from Groton to Stonington in early 1904. The G&S opened from Groton to Mystic on December 19, 1904, to Stonington on April 8, 1905, and finally to the state line at Westerly on May 6, 1905.
The trolley line started at Thames Street in Groton, passing through Poquonnock and Noank, and ending in Mystic. The company was headquartered in Mystic, and the powerhouse was located in between Water Street and the west bank of the Mystic River. The powerhouse is still standing, and has been converted into condominiums. The adjacent carhouse was retrofitted with two additional stories in the 1980s and converted into condominiums as well.
A spur line was built in 1911 that extended to Old Mystic. In 1928, G&S ceased operating, and buses of the Groton-Stonington Traction Company began operating along the route.
Rolling stock
Eight open cars
Eight closed cars
One double-truck Taunton snowplow
One work car
References
Interurban railways in Connecticut
Streetcars in Connecticut
Defunct Connecticut railroads
Interurban railways in Rhode Island
Defunct public transport operators in the United States
Tram, urban railway and trolley companies
Railway companies established in 1904
1904 establishments in Connecticut
Railway companies disestablished in 1928
1928 disestablishments in Connecticut | {
"redpajama_set_name": "RedPajamaWikipedia"
} | 2,189 |
'use strict'
require('../styles/scroller.css')
require('scrolljs')
// lib.scroll events:
// - scrollstart
// - scrolling
// - pulldownend
// - pullupend
// - pullleftend
// - pullrightend
// - pulldown
// - pullup
// - pullleft
// - pullright
// - contentrefresh
var Component = require('./component')
var utils = require('../utils')
// attrs:
// - scroll-direciton: none|vertical|horizontal (default is vertical)
// - show-scrollbar: true|false (default is true)
function Scroller (data, nodeType) {
  var attrs = data.attr || {}
  // Child components in order, and the accumulated pixel width of their
  // nodes (used to size the inner element for horizontal scrolling).
  this.items = []
  this.totalWidth = 0
  this.scrollDirection = attrs.scrollDirection === 'horizontal'
    ? 'horizontal'
    : 'vertical'
  // Default to true, but honour an explicit `false`.
  // NOTE: the previous `attrs.showScrollbar || true` coerced an explicit
  // boolean `false` back to `true`, so the scrollbar could never be
  // disabled via the attribute.
  this.showScrollbar = attrs.showScrollbar !== false
  Component.call(this, data, nodeType)
}
Scroller.prototype = Object.create(Component.prototype)
// Build the DOM for the scroller: an outer wrapper produced by the base
// Component, plus an inner "scroll-element" container that lib.scroll
// actually moves. Returns the outer wrapper node.
Scroller.prototype.create = function (nodeType) {
  var Scroll = lib.scroll
  var wrapper = Component.prototype.create.call(this, nodeType)
  wrapper.classList.add('weex-container', 'scroll-wrap')

  var inner = document.createElement('div')
  inner.classList.add(
    'weex-container',
    'scroll-element',
    this.scrollDirection
  )
  wrapper.appendChild(inner)
  this.scrollElement = inner

  // lib.scroll expects 'y' for vertical and 'x' for horizontal motion.
  this.scroller = new Scroll({
    scrollElement: inner,
    direction: this.scrollDirection === 'vertical' ? 'y' : 'x'
  })
  this.scroller.init()

  return wrapper
}
// Bind the standard component events, then bridge lib.scroll's
// 'scrolling' notifications to a bubbling 'scroll' event so listeners
// (e.g. image lazy-loading) can react to scroll position changes.
Scroller.prototype.bindEvents = function (evts) {
  Component.prototype.bindEvents.call(this, evts)
  var self = this
  this.scroller.addEventListener('scrolling', function (e) {
    var scrollObj = e.scrollObj
    self.dispatchEvent('scroll', {
      originalType: 'scrolling',
      scrollTop: scrollObj.getScrollTop(),
      scrollLeft: scrollObj.getScrollLeft()
    }, {
      bubbles: true
    })
  })
}
// Create a child component from `data`, append its node to the scroll
// container, and keep this.data.children / this.items in sync.
// Returns the newly created child component.
Scroller.prototype.appendChild = function (data) {
  var componentManager = this.getComponentManager()
  var child = componentManager.createElement(data)
  this.scrollElement.appendChild(child.node)

  this.totalWidth += child.node.getBoundingClientRect().width
  // When scrolling horizontally the inner element's width must be set
  // explicitly: under flexbox, children shrink to fit the box instead of
  // overflowing it, which would make the content unscrollable.
  if (this.scrollDirection === 'horizontal') {
    this.scrollElement.style.width = this.totalWidth + 'px'
  }

  // Mirror the change in this.data.children.
  var children = this.data.children
  if (children && children.length) {
    children.push(data)
  } else {
    this.data.children = [data]
  }
  this.items.push(child)
  return child
}
// Insert `child` immediately before the `before` component, falling back
// to an append when `before` is absent or unknown. Keeps the DOM,
// this.data.children and this.items in sync.
//
// Fixes two defects in the previous version:
//  - it searched this.data.children for `child.data.ref` instead of
//    `before.data.ref`, so the data array got out of step with the DOM
//    (the DOM call correctly used `before.node`);
//  - the append path called `children.push(...)` even when
//    this.data.children was undefined, throwing a TypeError
//    (appendChild guards this case; now insertBefore does too).
Scroller.prototype.insertBefore = function (child, before) {
  var children = this.data.children
  if (!children) {
    children = this.data.children = []
  }

  // Locate the reference node's position in our data array.
  var idx = -1
  if (before && children.length) {
    for (var k = 0, l = children.length; k < l; k++) {
      if (children[k].ref === before.data.ref) {
        idx = k
        break
      }
    }
  }

  if (idx === -1) {
    // No (known) reference node: behave like appendChild.
    this.scrollElement.appendChild(child.node)
    children.push(child.data)
    this.items.push(child)
  } else {
    this.scrollElement.insertBefore(child.node, before.node)
    children.splice(idx, 0, child.data)
    this.items.splice(idx, 0, child)
  }
}
// Remove `child` from this scroller: drop it from this.data.children and
// this.items, unregister it (recursively) from the component map, and
// detach its node from the DOM.
//
// Fixes a defect: the function was declared with NO parameter yet read a
// free variable `child`, so every call raised a ReferenceError.
Scroller.prototype.removeChild = function (child) {
  var children = this.data.children
  var componentManager = this.getComponentManager()
  // Remove the matching entry (by ref) from our bookkeeping arrays.
  if (children && children.length) {
    for (var i = 0, l = children.length; i < l; i++) {
      if (children[i].ref === child.data.ref) {
        children.splice(i, 1)
        this.items.splice(i, 1)
        break
      }
    }
  }
  // Remove from componentMap recursively.
  componentManager.removeElementByRef(child.data.ref)
  this.scrollElement.removeChild(child.node)
}
module.exports = Scroller
| {
"redpajama_set_name": "RedPajamaGithub"
} | 7,263 |
{"url":"http:\/\/mathhelpforum.com\/calculus\/162622-finding-dy-dx-equation-using-both-chain-rule-product-rule.html","text":"# Thread: Finding dy\/dx of equation using both chain rule and product rule\n\n1. ## Finding dy\/dx of equation using both chain rule and product rule\n\nHello everyone,\n\nRecently I've been learning about the chain rule and the product rule, which by themselves is fairly straightforward to solve. However, it becomes a little more complex when attempting to solve an equation with both of them together. So any help in solving the following equation using the chain rule and product rule will be greatly appreciated.\n\nHere is the equation:\n\ny = (3x + 5) ^2 x (2x - 2)\n\nI solved the chain rule part (I think), which is the (3x + 5) ^2 segment. Using the chain rule, I got the answer 6(3x + 5), leaving me with the equation of:\n\ny = 6(3x + 5)(2x - 2) to be solved using the product rule. But I am not entirely confident with the steps involved, as I believe it involves factorizing and such.\n\nThanks!\n\nNathaniel\n\n2. That's not how it is done...\n\ny = 6(3x + 5)(2x - 2) is wrong, because y = (3x + 5)^2 . (2x - 2) [use . if necessary to show product]\n\nUsing product rule first:\n\nLet us break it up.\n\nLet u = (3x+5)^2, let v = (2x-2)\n\nthen, we get:\n\n$\\dfrac{du}{dx} = 6(3x + 5) = 18x + 30$\n\n$\\dfrac{dv}{dx} = 2$\n\nThen, y = uv\n\ny' = udv\/dx + vdu\/dx\n\n$y'= (3x+5)^2 \\cdot 2 + (2x-2) \\cdot (18x + 30)$\n\nNow, you simplify:\n\n$y'= 2(3x+5)^2 + (2x-2)(18x + 30)$\n\nYou can expand and simplify further itf you want.\n\n3. $\\displaystyle y = (3x + 5)^2(2x - 2)$\n\n$\\displaystyle \\frac{dy}{dx} = (3x + 5)^2\\,\\frac{d}{dx}(2x - 2) + (2x -2)\\,\\frac{d}{dx}[(3x + 5)^2]$\n\n$\\displaystyle = 2(3x + 5)^2 + 6(3x + 5)(2x - 2)$\n\n$\\displaystyle = (3x + 5)[2(3x + 5) + 6(2x - 2)]$\n\n$\\displaystyle = (3x + 5)(6x + 10 + 12x - 12)$\n\n$\\displaystyle = (3x + 5)(18x - 2)$\n\n$\\displaystyle = 2(3x + 5)(9x - 1)$.\n\n4. 
Thank you both so much for the speedy replies, Unknown008, and Prove It. Okay, so I get both methods, but there is just one thing that I think I need clarification on:\n\nProve It, can you please explain the reasoning behind the factorizing. As in I do not understand why the 6 suddenly moved in front of the (2x + 2), and why the (3x + 5) is out of the front of what seems to be a factorized equation.\n\nOnce again, thanks so much for all the help!\n\nEDIT:\n\nActually, I get it now, I think!\n\nThat factorizing is just like\n\nx ( 2x + 6y) and when you expand you get 2x ^ 2 + 6xy\n\nAnd subbing the values of x and y, I get: 2(3x + 5) ^ 2 + 6(3x + 5)(2x - 2)\n\nI think that is correct reasoning. Please do clarify if I am wrong though.\n\n5. Yes, that's it\n\nOnce you get the hang of it, you can differentiate directly to what Prove It posted.\n\n6. Actually, there's just one last thing.\n\nThe second last line of Prove It's method reads: (3x + 5)(18x - 2)\n\nAnd the final answer is: 2(3x + 5)(9x -1)\n\nI'm just wondering if I need to use the product rule to achieve that final answer?\n\nThanks again.\n\nNathaniel.\n\n7. The conversion from the before last line to the last line is simply the factorisation of (18x -2) = 2(9x -1)\n\nThe product rule is used only on the line where dy\/dx appeared first.\n\n8. Very good indeed, Unknown008.\n\nSo the equation would still be correct if I wrote it as (3x + 5) . 2(9x -1) ? It was just confusing, because the nature of (3x + 5) didn't change with the 2 stuck out the front.\n\n9. It's algebraically the same thing. That's why in my post, I told you that you could simplify further if you want.\n\nSometimes, for the problem, it's better to simplify for the other parts to be easier.\n\nSometimes, it's just a matter of substituting values of x and\/or y. Then, simplification doesn't necessaily makes it easier than before.\n\n10. Excellent. I think my troubles regarding this problem have been solved now. 
I'll just do some practice with similar problems. This has been a great help, and yet again a wonderful resource.\n\nCheers!","date":"2013-12-12 01:06:21","metadata":"{\"extraction_info\": {\"found_math\": true, \"script_math_tex\": 0, \"script_math_asciimath\": 0, \"math_annotations\": 0, \"math_alttext\": 0, \"mathml\": 0, \"mathjax_tag\": 0, \"mathjax_inline_tex\": 0, \"mathjax_display_tex\": 0, \"mathjax_asciimath\": 0, \"img_math\": 0, \"codecogs_latex\": 11, \"wp_latex\": 0, \"mimetex.cgi\": 0, \"\/images\/math\/codecogs\": 0, \"mathtex.cgi\": 0, \"katex\": 0, \"math-container\": 0, \"wp-katex-eq\": 0, \"align\": 0, \"equation\": 0, \"x-ck12\": 0, \"texerror\": 0, \"math_score\": 0.7692594528198242, \"perplexity\": 543.8962281801486}, \"config\": {\"markdown_headings\": true, \"markdown_code\": true, \"boilerplate_config\": {\"ratio_threshold\": 0.18, \"absolute_threshold\": 10, \"end_threshold\": 15, \"enable\": true}, \"remove_buttons\": true, \"remove_image_figures\": true, \"remove_link_clusters\": true, \"table_config\": {\"min_rows\": 2, \"min_cols\": 3, \"format\": \"plain\"}, \"remove_chinese\": true, \"remove_edit_buttons\": true, \"extract_latex\": true}, \"warc_path\": \"s3:\/\/commoncrawl\/crawl-data\/CC-MAIN-2013-48\/segments\/1386164128316\/warc\/CC-MAIN-20131204133528-00049-ip-10-33-133-15.ec2.internal.warc.gz\"}"} | null | null |
USE BDPetCenter
GO
-- Seed dbo.GHA_Lugar with 40 slots named CAN01 .. CAN40
-- (three-letter prefix plus a two-digit, zero-padded counter).
DECLARE @i INT
SET @i = 1
WHILE @i <= 40
BEGIN
    INSERT INTO dbo.GHA_Lugar (DescripcionLugar)
    VALUES ('CAN' + RIGHT('0' + LTRIM(STR(@i)), 2))
    SET @i = @i + 1
END
GO
-- Sample customers.
INSERT INTO dbo.GCP_Cliente (NombreCliente, DireccionCliente) VALUES
    ('Ramos Conde, Juan', 'Av. Las Américas 7676'),
    ('Ramos Santos, Irvin', 'Av. Benavides 4500'),
    ('Peña Rios, Luis', 'Jr. Cusco 206'),
    ('Marino Alvarez, Victor', 'Av. Angamos 2208'),
    ('Davila Campos, Francisco', 'Jr. Puno 101'),
    ('Campos Vasquez, Carlos', 'Av. Aviacion 2905')
GO
-- Species catalogue.
INSERT INTO dbo.GCP_Especie (DescripcionEspecie) VALUES
    ('Perro'),
    ('Gato')
GO
-- Breeds, keyed to their species (1 = Perro, 2 = Gato).
INSERT INTO dbo.GCP_Raza (CodigoEspecie, NombreRaza) VALUES
    (1, 'Pequinez'),
    (2, 'Siamez')
GO
-- Pets, keyed to owner (CodigoCliente) and breed (CodigoRaza).
INSERT INTO dbo.GCP_Mascota (CodigoCliente, CodigoRaza, NombreMascota) VALUES
    (1, 1, 'Bobby'),
    (1, 2, 'Mayra')
"redpajama_set_name": "RedPajamaGithub"
} | 6,364 |
Midwest Rewind
Relive, Review, Rewind
What's Readable
Live Events Main Page
Consistency Ruled the Day for Foreigner's Show Wednesday in Saint Louis
July 21, 2018 July 21, 2018 Sean Derrick 0 Comments David Coverdale, Foreigner, Hollywood Casino Amphitheatre Saint Louis, Jeff Pilson, Kelly Hansen, Mick Jones, Rock show, Saint Louis concert review and photos, Whitesnake
Foreigner performing Wednesday at Hollywood Casino Amphitheatre. Photo by Sean Derrick/Thyrd Eye Photography.
–By Sean Derrick
While the rock group Foreigner's lineup may have changed several times over the years, one thing has remained consistent: the quality of their live shows. Fans who attended their Juke Box Heroes Tour stop with Whitesnake Wednesday at the Hollywood Casino Amphitheatre in Saint Louis were reminded of that fact, and judging by the fans' reaction to the show, they loved every minute of it.
Let's get this out of the way right now; You know who I am talking to – the people who say they won't see or listen to a band if the original lineup isn't the same. While it is true that this isn't the original lineup, you must have stopped listening to the band in 1980. Because by the time their huge hit album 4 came out in 1981 half the band had parted ways.
And if you stopped listening then, well, it is a shame since you missed great songs like "Urgent", "Juke Box Hero", "I Want to Know What Love Is", 'Waiting For a Girl Like You", "That Was Yesterday" and others.
Get over it, the original lineup is probably not getting back together. Drummer Dennis Elliot isn't even an active musician anymore, Bassist Ed Gagliardi has passed away, keyboardist Al Greenwood and guitarist Ian McDonald did a few songs with guitarist Mick Jones and vocalist Lou Gramm to celebrate the band's 40th anniversary last year, but highly unlikely a full tour would happen with them. (Speaking of Gramm, after his successful battle with a brain tumor 20 years ago his voice and stamina took a huge hit and he does not have the same quality he once had, unfortunately.)
Since Jones is a founding member and wrote the vast majority of the songs (yes, many were co-written with Gramm) and the fact he owns the rights to the name Foreigner plays a huge part.
Jones has put together a solid lineup of musicians who have shown time and again that they have the energy and charisma and talent to carry the Foreigner name. This is most evident in vocalist Kelly Hansen and bassist Jeff Pilson, both of whom have been with the band for over 13 years.
In Hansen you have an Energizer Bunny wrapped around an astounding voice with a fan friendly attitude, who is consistent whether he is on stage, running laps through the crowd, or performing Juke Box Hero from a 20 foot high platform in the middle of the venue. Bassist Jeff Pilson brings multi-dimensional talent and a rock star enthusiasm who leaves everything onstage every night in his sweat drenched clothes.
As a photographer, I always love shooting this band as I know there will be great shots and action from all the members, but especially Hansen and Pilson.
Kicking through hit after hit the band (which now features guitarist/saxophonist Thom Gimbel, keyboardist Michael Bluestein, Bruce Watson, and drummer Chris Frazier to play with Jones, Hansen and Pilson) sounded superb and spot on. Just like they do year after year, show after show.
The set ran almost identical to their 40th Anniversary Tour show last year with a few minor changes. (They added "Long, Long Way From Home and kicked off with that while dropping "Blue Morning, Blue Day").
Personally, I have wanted to see them perform "That Was Yesterday" again, but I haven't since I saw them perform in at Fair Saint Louis in 1995, eight shows in all.
Unlike last year, where heavy rains forced the local choir that was supposed to perform with them to cancel their appearance, this year the Park Hills, Missouri Central High School Concert Choir was able to back up Foreigner during the encore of their massive hit "I Want To Know What Love Is". It was a nice to see the kids get a chance to share the stage and perform well with a band that, in my opinion, should be in the Rock and Roll Hall of Fame.
Whitesnake is celebrating their 40th anniversary this year and the band (with founding member David Coverdale-vocals, Tommy Aldridge – drums, Reb Beach – guitar, Michael Devin – bass, Joel Hoekstra – guitar, Michael Luppi – keyboards) brings it like it is still 1987 in true Rockstar fashion.
Whitesnake performing Wednesday at Hollywood Casino Amphitheatre. Photo by Sean Derrick/Thyrd Eye Photography.
With guitarist Joel Hoekstra's continuous rock star poses and hair whipping (not to mention is great work on the axe) it was hard not to focus on him during their set.
Coverdale did his signature mic stand poses to complement his larger than life stage presence. Even though his vocals have dropped a bit (he makes up for it with more growling and allows Michael Devin to sing some of the more strenuous parts – along with audience sing-a-longs), he makes up for it in attitude, charisma, stage presence, and energy.
The band played an hour-long set that finally got the blood pumping in the crowd, which was mostly subdued during opener Jason Bonham's Led Zeppelin Experience. Unfortunate, because they sounded great performing Jason's dad's songs from Led Zeppelin. As Jason said "I guess it's too early to get the Led out".
Jason Bonham's Led Zeppelin Experience performing Wednesday at Hollywood Casino Amphitheatre. Photo by Sean Derrick/Thyrd Eye Photography.
C'mon Saint Louis, you are better than that.
Solid show, top to bottom.
Be sure to check out the complete gallery of images from the evening after the setlists.
Foreigner Setlist:
Long, Long Way From Home
Head Games
Waiting for a Girl Like You
Dirty White Boy
Feels Like the First Time
Starrider
Juke Box Hero
I Want to Know What Love Is (With the Central High School Concert Choir)
Hot Blooded
Whitesnake Setlist:
Give Me All Your Love
Love Ain't No Stranger
Slow an' Easy
Guitar solos/duel (with Reb Beach and Joel Hoekstra)
Crying in the Rain
Slide It In
Still of the Night
← Grits to Glory is Good Enough to Eat
The Pretenders Still Sharp in Performance in Saint Louis Wednesday →
Past Articles by Month
Past Articles by Month Select Month October 2020 (1) June 2020 (3) May 2020 (1) April 2020 (1) March 2020 (10) February 2020 (31) January 2020 (19) December 2019 (15) November 2019 (16) October 2019 (14) September 2019 (11) August 2019 (16) July 2019 (8) June 2019 (6) May 2019 (6) April 2019 (4) March 2019 (16) February 2019 (7) January 2019 (3) December 2018 (7) November 2018 (15) October 2018 (11) September 2018 (8) August 2018 (14) July 2018 (13) June 2018 (12) May 2018 (7) April 2018 (9) March 2018 (11) February 2018 (6) January 2018 (15) December 2017 (12) November 2017 (8) October 2017 (12) September 2017 (10) August 2017 (11) July 2017 (15) June 2017 (8) May 2017 (15) April 2017 (13) March 2017 (19) February 2017 (21) January 2017 (20) December 2016 (8) November 2016 (14) October 2016 (10) September 2016 (10)
sean@midwestrewind.com
Copyright © 2021 Midwest Rewind. All rights reserved. | {
"redpajama_set_name": "RedPajamaCommonCrawl"
} | 8,556 |
\section{Introduction}
Numerical solution of time dependent wave equations is an important
problem in physics, engineering and mathematics. To solve the wave
equation on ${\mathbb{R}^{3+1}}} \newcommand{\kmax}{K_{max}$, one must truncate the domain to a finite region
due to the limited memory of most computers. Of course, on a finite
region, boundary conditions must be specified in such a way as to minimize spurious reflections.
Boundary conditions of this form were first described in
\cite{MR596431,MR658635,MR0471386,MR517938,MR611807}, although rigorous error bounds would
wait until more recently \cite{MR1819643,MR2032866}.
In \cite{MR1819643}, a family of absorbing
boundary conditions based on rational function approximation to the
Dirichlet-to-Neumann operator in the frequency domain are reviewed. Boundary
conditions for the half-space (a boundary at $x=0$), as well as
cylindrical and spherical coordinates are also constructed.
Error bounds are proved for this family by inverting the
Fourier-Laplace transform for both the true solution and his
approximation and bounding the difference. Due to poles on the
imaginary line in $s$ ($s$ being the variable dual to $t$), the difference is bounded on a contour separated from the singular points, namely a line in the right half plane $\gamma + i \mathbb{R}$. This shows that the error is bounded by $C_{\gamma} e^{\gamma t}$, with
$C_{\gamma}$ left implicit.
A careful examination of the poles of the rational function reveal that they approximate the
branch cut of the true solution in the sense of hyperfunctions
\cite{sterninshatalov:resurgentanalysis}. Instead of using the
machinery of hyperfunctions, we take an elementary approach. The true
solution can be represented as a certain integral over a compact
region. The approximate solution, after we collect the residues
associated to the poles on the imaginary line, turns out to be a
quadrature for this integral. By computing the difference between the
quadrature and the true integral, we can compute an optimal error
bound.
Let us now state our results precisely. Let $u(x,y,t)$ solve:
\begin{equation}
\label{eq:wave}
\partial_t^2 u(x,y,t) = \partial_x^2 u(x,y,t)+\Delta_y u(x,y,t)
\end{equation}
where $x \in \mathbb{R}$ and $y \in \mathbb{R}^{N-1}$ ($x$ is the normal direction,
$y$ the tangential directions).
We wish to solve \eqref{eq:wave} on $\mathbb{R}^{N+1}$. The boundary will be
taken to be the surface $x=0$, and thus the approximation region will
be the region $\{ (x,y,z,t) : x \geq 0 \}$. We let $u_b(x,y,t)$ be the
approximation, solving \eqref{eq:wave} on the half-space. The boundary
conditions imposed are Hagstrom's:
\begin{equation}
\label{eq:hagstromBoundary}
\prod_{j=1}^n \left(\cos\left(\frac{j\pi}{n+1}\right) \partial_t - \partial_x \right)u_b(x,y,t)=0
\end{equation}
The main theorem is the following:
\begin{thm}
\label{thm:main}
Let $u(x,y,t)$ solve \eqref{eq:wave} on $\mathbb{R}^{N+1}$, and
$u_b(x,y,t)$ solve \eqref{eq:wave} with boundary conditions
\eqref{eq:hagstromBoundary}. Then we have the following error bound:
\begin{multline}
\label{eq:errorBound}
\abs{u(x,y,t) - u_b(x,y,t)}\\
\leq \frac{\kmax}{3}\frac{\pi^4}{(n+1)^3}M(x)\left(2nt^2+9nt+n\kmax+8n+3\right) \\
= O\left(\frac{\kmax}{n^{2}} (\kmax+t^{2}) \right)
\end{multline}
\end{thm}
\section{Proof}
\subsection{The Exact Boundary}
We begin by reviewing the exact boundary conditions described in
\cite{MR1819643}. Applying the Laplace transform of \eqref{eq:wave} with respect to time
(letting $s$ be dual to $t$) and the Fourier transform with respect to
$y$ (with $\K$ dual to $y$) yields:
\begin{equation}
\label{eq:FLwave}
s^2 \U = \partial_x^2 \U - \K^2 \U
\end{equation}
The solution to \eqref{eq:FLwave} is:
\begin{equation}
\U(x)=A(s,\K)e^{\sqrt{s^2+|\K|}x}+B(s,\K)e^{-\sqrt{s^2+|\K|^2}x}
\end{equation}
The solutions with nonzero $A(s,\K)$ are nonphysical, since they
correspond to a wave coming from infinity to the object. Thus our
boundary conditions must imply $A(s,\K)= 0$. Such a boundary condition
is (in the frequency domain):
\begin{equation}
\label{eq:exactD2N}
\partial_x \U(x,\K,s)+\sqrt{s^2+|\K|^2}\U(x,\K,s)=0
\end{equation}
Of course, the operator $\sqrt{s^2+|\K|^2}$ is non-local in time and
space, so we will approximate it.
To reduce the dependence to a single variable, we make the
substitution $z=s/ \abs{\K}$, yielding:
\begin{equation*}
\partial_{x} \U(x,\K,s)+ |\K|\sqrt{1+z^2} \U(x,\K,s) = 0
\end{equation*}
This boundary condition can be rewritten as:
\begin{equation}
\partial_{x} \U(x,\K,s) + \abs{\K}\left(z+\frac{1}{z+\sqrt{1+z^2}} \right)\U(x,\K,s)=0
\end{equation}
Let $h(z) \equiv \abs{\K}/(z+\sqrt{1+z^2})$. We will invert the
Laplace transform, and shift the contour to surround the singularities
of $h(z)$. The following lemma summarizes the necessary analyticity
properties of $h(z)$:
\begin{lemma}
The function $h(z)$ is analytic on $\mathbb{C} \setminus [-i,i]$. In
addition, the difference across the branch cut is given by:
\begin{equation}
\label{eq:1}
\lim_{\epsilon\to 0}\left(h(z+\epsilon)-h(z-\epsilon)\right)=2|\K|\sqrt{1+z^2}
\end{equation}
\end{lemma}
\begin{proof}
The function $h(z)$ is well defined and analytic for $\Re z > 0$. It
  is strictly imaginary on $\{z : \Re z = 0 \text{ and } \abs{z} > 1\}$.
By the Schwartz reflection principle, it can be analytically
continued to the left half plane, with a discontinuity along the
line $[-i,i]$.
An explicit calculation shows \eqref{eq:1}.
\end{proof}
We now reconstruct $u(x,y,t)$. This is done by inverting the Laplace
transform:
\begin{subequations}
\begin{equation}
\label{eq:2}
u(x,y,t) = \frac{1}{(2\pi)^{(N+1)/2}} \int_{a+i \mathbb{R}} e^{s t} \int_{\mathbb{R}^{N-1}} e^{i y \cdot \K} \U(x,\K, s) d\K ds
\end{equation}
\begin{equation}
\label{eq:3}
\overline{u}(x,\K,t) = \frac{1}{2\pi} \int_{a + i \mathbb{R}} e^{s t} \U(x,\K,s) ds
\end{equation}
\end{subequations}
And so, the integral we must approximate is
\begin{equation*}
\int_{-i}^i 2|\K|\sqrt{1+z^2}f(z)e^{zt}dz.
\end{equation*}
\subsection{The Approximation}
We review the approximation itself, and how
\eqref{eq:hagstromBoundary} was derived. Our description follows
\cite{MR1819643} quite closely. We approximate
$\abs{\K}\sqrt{1+z^{2}}$ by:
\begin{equation}
\abs{\K} \sqrt{1+z^2} = \abs{\K}\left(z+\frac{1}{z+\sqrt{1+z^2}} \right) \approx \abs{\K}\left(z+\frac{1}{2z+\frac{1}{\ddots 2z}} \right)
\end{equation}
where the right hand side is the $n$'th iteration of the continued
fraction.
A straightforward computation shows that in the frequency domain,
\begin{equation*}
\partial_x \U(x,\K,s) + \abs{\K}\left(z+\frac{1}{2z+\frac{1}{\ddots 2z}} \right)\U(x,\K,s)=0
\end{equation*}
corresponds to the boundary condition \eqref{eq:hagstromBoundary}. We
simplify this:
\begin{lemma}
Let $\theta_{j}=j\pi/(n+1)$. Then we have the following formula:
\begin{equation}
\label{eq:continuedFractionApproximation}
\frac{1}{2z+\frac{1}{\ddots 2z}}=\sum_{j=1}^n \frac{\sin^2\theta_j}{(n+1)(z-i\cos\theta_j)}
\end{equation}
\end{lemma}
\begin{proof}
Let $U_n(x)$ be the $n^{th}$ Chebyshev Polynomial of the $2^{nd}$
kind and $P_n(z)$ be the successive numerators of the sequence of
finite continued fractions ($P_0(z)=1, P_1(z)=2z, \ldots$).
If we take $U_n(iz)$ then for $n$ odd we get $iP_n(z)$ and for $n$
even we get $P_n(z)$, and the sequence of finite continued fractions
is $(P_{n-1}(z))/(P_n(z))$ for $n\geq 1$.
  We consider the case where $n$ is even; in this case, the
  finite continued fraction is $(iP_{n-1}(z))/(P_n(z))$. Thus, ratios
  of Chebyshev polynomials of the $2^{nd}$ kind only differ from the
finite continued fraction by multiplication by $i$, and will
therefore have the same zeros. \ The continued fraction will have
poles where $P_n(z)$ is zero, for $n$ even. That is, when
$U_n(iz)=0$.
We will take $z=i\cos\theta$. Thus, we are looking for zeroes of
$U_n(-\cos\theta)$ where $U_{n-1}(-\cos\theta)\neq 0$.
\begin{equation*}
U_n(-\cos\theta)=U_n(\cos\theta)=U_n(x)=\frac{\sin(n+1)\theta}{\sin\theta},\quad \theta\neq 0,\pi,2\pi,\ldots
\end{equation*}
So $\sin(n+1)\theta=0$ are our solutions, and that is
$\theta=\frac{j\pi}{n+1}=\theta_j$ as claimed.
Thus, $U_n(i \cos \theta_{j})=0$ and hence $z=i \cos \theta_{j}$ are the
only poles of the continued fraction approximation. A direct
computation shows that the residue at the pole $i \cos \theta_{j}$ is
$(\sin^2\theta_j)/(n+1)$.
\end{proof}
As the continued fraction is a close approximation to $\sqrt{1+z^2}$, we can use it to approximate an integral involving $\sqrt{1+z^2}$ by substituting the approximation, which is a rational function. And so, in evaluating the integral around the branch cut, a finite sum which approximates this integral is given by the sum of the residues at the poles of the rational function above.
\subsection{The Error Bound}
First, we make the definition $|k|g(s/|k|)=\U(s,k)$.
\begin{prop}
\label{prop:maintheorem}
The following error bound holds.
\begin{multline}
\left|2|\K|\int_{-i}^i \sqrt{1+z^2} e^{zt}g(z)dz-2\pi i|\K|\sum_{j=1}^n g(z_j)e^{z_jt}\alpha_j\right|\\
\leq\frac{\kmax}{3}\frac{\pi^4}{(n+1)^3}M(x)(2nt^2+9nt+n\kmax+8n+3)
\end{multline}
Here, $z_j=i \cos \theta_{j}$ are the positions of the poles,
$\alpha_j$ the residue at $\theta_j$, $\kmax$ the maximal frequency
under consideration, $M(x)$ is a pointwise upper bound on
$\U,\partial_s\U$ and $\partial_s^2\U$ and $n$ is the order of the
continued fraction approximation.
\end{prop}
We will need the following lemma
\begin{lemma}
\begin{equation*}
\left|\int_0^{\Delta x} f(x)dx-f(0)\Delta x\right|\leq \Delta x^2 f'(\xi),\quad \xi\in [0,\Delta x]
\end{equation*}
\end{lemma}
\begin{proof}
  This follows immediately from the Mean Value Theorem.
\end{proof}
We will prove Lemma \ref{prop:maintheorem} above in several
intermediate steps.
\begin{prop}
\begin{multline}
\label{eq:big}
\left|2k\int_{-i}^i\sqrt{1+z^2}e^{zt}g(z)dz-2\pi ik\sum_{j=1}^n g(z_j)e^{z_j t}\alpha_j\right|\\
\leq\frac{2}{3}|\K|\Delta\theta^3\Bigg(3\left|\max_{s\in[i|\K|,i|\K|\cos\Delta\theta]}g\left(\frac{s}{|\K|},|\K|\right)\right|+3\left|\max_{s\in[-i|\K|\cos\Delta\theta,-i|\K|]}g\left(\frac{s}{|\K|},|\K|\right)\right|\\
+\sum_{j=1}^n\Bigg|\max_{s\in[i|\K|\cos(\theta_j-\Delta\theta),i|\K|\cos(\theta_j+\Delta\theta)]}e^{\frac{s}{|\K|}t}\Bigg(g''\left(\frac{s}{|\K|},|\K|\right)\left(\frac{s}{|\K|}\right)^4\\
+2g''\left(\frac{s}{|\K|},|\K|\right)\left(\frac{s}{|\K|}\right)^2+g''\left(\frac{s}{|\K|},|\K|\right)+2tg'\left(\frac{s}{|\K|},|\K|\right)\left(\frac{s}{|\K|}\right)^4\\
+4tg'\left(\frac{s}{|\K|},|\K|\right)\left(\frac{s}{|\K|}\right)^2+2tg'\left(\frac{s}{|\K|},|\K|\right)+t^2g\left(\frac{s}{|\K|},|\K|\right)\left(\frac{s}{|\K|}\right)^4\\
+2t^2g\left(\frac{s}{|\K|},|\K|\right)\left(\frac{s}{|\K|}\right)^2+t^2g\left(\frac{s}{|\K|},|\K|\right)+5\left(\frac{s}{|\K|}\right)^3g'\left(\frac{s}{|\K|},|\K|\right)\\
+5g'\left(\frac{s}{|\K|},|\K|\right)\frac{s}{|\K|}+5\left(\frac{s}{|\K|}\right)^3tg\left(\frac{s}{|\K|},|\K|\right)+5tg\left(\frac{s}{|\K|},|\K|\right)\frac{s}{|\K|}\\
+4\left(\frac{s}{|\K|}\right)^2g\left(\frac{s}{|\K|},|\K|\right)+2g\left(\frac{s}{|\K|},|\K|\right)\Bigg)\Bigg|\Bigg)
\end{multline}
In this equation $z_j$ are the positions of the poles, $\alpha_j$ are the
residues at the poles, $\Delta \theta=\frac{\pi}{2(n+1)}$, and
$\alpha(\theta)=g(i\cos\theta)e^{it\cos\theta}\sin^2\theta$.
\end{prop}
\begin{proof}
We first change variables to $z=i\cos\theta$. Using the fact that
\begin{equation*}
\sqrt{1+z^2}=\frac{1}{2z+\frac{1}{2z+\ddots}}
\end{equation*}
we can approximate $\sqrt{1+z^{2}}$ by taking a truncated continued
fraction. This yields:
\begin{equation}
\label{eq:15}
|k|\left|2\int_0^\pi g(i\cos\theta)e^{it\cos\theta}\sin^2\theta
d\theta-2\pi i\sum_{j=1}^n
g(i\cos\theta_j)e^{it\cos\theta_j}\frac{\sin^2\theta_j}{n+1}\right|
\end{equation}
We define $f(z)=g(z)e^{zt}$ and $\Delta\theta=\frac{\pi}{2(n+1)}$
and expand the integral around each pole to obtain
\begin{multline}
\label{eq:16}
\eqref{eq:15} =
|k|\Bigg|2i\int_0^{\Delta\theta}f(i\cos\theta)\sin^2\theta d\theta+2i\int_{\pi-\Delta\theta}^\pi f(i\cos\theta)\sin^2\theta d\theta \\
+2i\sum_{j=1}^n\Bigg(\int_{\theta_j-\Delta\theta}^{\theta_j+\Delta\theta}f(i\cos\theta)\sin^2\theta
d\theta-\pi g(i\cos\theta_j)\frac{\sin^2\theta_j}{n+1}\Bigg)\Bigg|
\end{multline}
To simplify further, we substitute
$\alpha(\theta)=f(i\cos\theta)\sin^2\theta$ cancel terms, and use
the fact that
$\int_{\theta_j-\Delta\theta}^{\theta_j+\Delta\theta}\alpha'(\theta_j)(\theta-\theta_j)d\theta=0$
to get
\begin{multline}
\label{eq:calc1}
\eqref{eq:16} =
|k|\Bigg|2i\int_0^{\Delta\theta}\alpha(\theta) d\theta+2i\int_{\pi-\Delta\theta}^\pi \alpha(\theta) d\theta+2i\sum_{j=1}^n\Bigg(\int_{\theta_j-\Delta\theta}^{\theta_j+\Delta\theta}\big(\alpha(\theta)-\alpha(\theta_j)\\
-\alpha'(\theta_j)(\theta-\theta_j)\big)\Bigg)d\theta\Bigg|
\end{multline}
By the triangle inequality, and the mean value theorem, we have
\begin{multline}
\label{eq:calc2}
\eqref{eq:calc1}\leq2|k|\Bigg(\int_0^{\Delta\theta}\left|\alpha(\theta)\right|d\theta+\int_{\pi-\Delta\theta}^\pi \left|\alpha(\theta)\right|d\theta\\
+\frac{1}{3}\sum_{j=1}^n\left|\max_{\xi\in[\theta_j-\Delta\theta,\theta_j+\Delta\theta]}\alpha''(\xi)\right|\Delta\theta^3\Bigg)
\end{multline}
To deal with the ends of the integral, we substitute $\alpha$ and $f$
back into the integrals near the endpoints. We then use the fact that $\int_a^b f\leq \max_{x\in[a,b]}f(x)(b-a)$ and $\abs{\sin \theta} \leq \abs{\theta}$ and $\abs{\sin (\pi-\theta)} \leq \abs{\pi-\theta}$ to obtain:
\begin{multline}
\eqref{eq:calc2}
\leq
2|k|\Bigg(\left|\max_{[0,\Delta\theta]}g(i\cos\theta)\right|\Delta\theta^3+\left| \max_{[\pi-\Delta\theta,\pi]}g(i\cos\theta)\right|\Delta\theta^3\\
+\frac{1}{3}\sum_{j=1}^n\left|\max_{\xi\in[\theta_j-\Delta\theta,\theta_j+\Delta\theta]}\alpha''(\xi)\right|\Delta\theta^3\Bigg)
\end{multline}
Upon substitution back and simplification, this becomes
\begin{multline}
\label{eq:14}
\eqref{eq:calc2} \leq
\frac{2}{3}|k|\Delta\theta^3\Bigg(3\left|\max_{[i,i\cos\Delta\theta]}g(z)\right|+3\left|\max_{[-i\cos\Delta\theta,-i]}g(z)\right|\\
+\sum_{j=1}^n\Bigg|\max_{[i\cos(\theta_j-\Delta\theta),i\cos(\theta_j+\Delta\theta)]}e^{zt}\Bigg(g''(z)z^4+2g''(z)z^2+g''(z)+2tg'(z)z^4\\
+4tg'(z)z^2+2tg'(z)+t^2g(z)z^4+2t^2g(z)z^2+t^2g(z)+5z^3g'(z)+5g'(z)z+5z^3tg(z)\\
+5tg(z)z+4z^2g(z)+2g(z)\Bigg)\Bigg|\Bigg)
\end{multline}
We also know that $g(z)=g\left(s/|\K|,k\right)$. All the derivatives in \eqref{eq:13} are in $s/|\K|$. And so we get:
\begin{multline}
\label{eq:13}
\eqref{eq:14} \leq
\frac{2}{3}|\K|\Delta\theta^3\Bigg(3\left|\max_{s\in[i|\K|,i|\K|\cos\Delta\theta]}g\left(\frac{s}{|\K|},|\K|\right)\right|+3\left|\max_{s\in[-i|\K|\cos\Delta\theta,-i|\K|]}g\left(\frac{s}{|\K|},|\K|\right)\right|\\
+\sum_{j=1}^n\Bigg|\max_{s\in[i|\K|\cos(\theta_j-\Delta\theta),i|\K|\cos(\theta_j+\Delta\theta)]}e^{\frac{s}{|\K|}t}\Bigg(g''\left(\frac{s}{|\K|},|\K|\right)\left(\frac{s}{|\K|}\right)^4\\
+2g''\left(\frac{s}{|\K|},|\K|\right)\left(\frac{s}{|\K|}\right)^2
+g''\left(\frac{s}{|\K|},|\K|\right)+2tg'\left(\frac{s}{|\K|},|\K|\right)\left(\frac{s}{|\K|}\right)^4\\
+4tg'\left(\frac{s}{|\K|},|\K|\right)\left(\frac{s}{|\K|}\right)^2+2tg'\left(\frac{s}{|\K|},|\K|\right)+t^2g\left(\frac{s}{|\K|},|\K|\right)\left(\frac{s}{|\K|}\right)^4\\
+2t^2g\left(\frac{s}{|\K|},|\K|\right)\left(\frac{s}{|\K|}\right)^2+t^2g\left(\frac{s}{|\K|},|\K|\right)+5\left(\frac{s}{|\K|}\right)^3g'\left(\frac{s}{|\K|},|\K|\right)\\
+5g'\left(\frac{s}{|\K|},|\K|\right)\frac{s}{|\K|}+5\left(\frac{s}{|\K|}\right)^3tg\left(\frac{s}{|\K|},|\K|\right)+5tg\left(\frac{s}{|\K|},|\K|\right)\frac{s}{|\K|}\\
+4\left(\frac{s}{|\K|}\right)^2g\left(\frac{s}{|\K|},|\K|\right)+2g\left(\frac{s}{|\K|},|\K|\right)\Bigg)\Bigg|\Bigg)
\end{multline}
\end{proof}
Now we can prove the final bound, and complete the proof of the main
theorem. Once Proposition \ref{prop:maintheorem} is proven, this
implies the main result by \eqref{eq:2} and \eqref{eq:3}.
\begin{proofof}{Proposition \ref{prop:maintheorem}}
$\partial_s \U(s,k)=\partial_s kg(s/k,k)=kD_1 \frac{1}{k} g(s/k,k)=D_1
g(\frac{s}{|\K|},k)$.
$\partial_s^2 \U(s,k)=\partial_s^2(kg(s/k,k))=k\partial_s^2 g(s/k,k)=1/k D_1^2
g(\frac{s}{|\K|},k)$
So $D_1 g=\partial_s \U$ and $D_1^2 g=|\K|\partial_s^2\U$
Thus, \eqref{eq:big} above can be simplified to:
\begin{multline}
\label{eq:4}
\frac{2}{3}\Delta\theta^3\Bigg(3\Bigg|\max_{s\in[i|\K|,i|\K|\cos\Delta\theta]}\U(s,|\K|)\Bigg|+3\Bigg|\max_{s\in[-i|\K|\cos\Delta\theta,-i|\K|]}\U(s,|\K|)\Bigg|\\
+\sum_{j=1}^n\Bigg|\max_{s\in[i|\K|\cos(\theta_j-\Delta\theta),i|\K|\cos(\theta_j+\Delta\theta)]}e^{\frac{s}{|\K|}t}\Bigg(|\K|\partial_s^2\U(s,|\K|)\left(\frac{s}{|\K|}\right)^4\\
+2|\K|\partial_s^2\U(s,|\K|)\left(\frac{s}{|\K|}\right)^2+|\K|\partial_s^2\U(s,|\K|)+2t\partial_s \U(s,|\K|)\left(\frac{s}{|\K|}\right)^4\\
+4t\partial_s\U(s,|\K|)\left(\frac{s}{|\K|}\right)^2+2t\partial_s\U(s,|\K|)+t^2\U\left(s,|\K|\right)\left(\frac{s}{|\K|}\right)^4+2t^2\U(s,|\K|)\left(\frac{s}{|\K|}\right)^2\\
+t^2\U(s,|\K|)+5\left(\frac{s}{|\K|}\right)^3\partial_s\U(s,|\K|)+5\partial_s\U(s,|\K|)\frac{s}{|\K|}+5\left(\frac{s}{|\K|}\right)^3t\U(s,|\K|)\\
+5t\U(s,|\K|)\frac{s}{|\K|}+4\left(\frac{s}{|\K|}\right)^2\U(s,|\K|)+2\U(s,|\K|)\Bigg)\Bigg|\Bigg)
\end{multline}
If we find the maximum for each term independently, we will obtain an
upper bound for this. Noting that $\cos\theta$ is monotonic
decreasing on $[0,\pi]$, we obtain:
\begin{multline}
\label{eq:5}
\eqref{eq:4} \leq
\frac{2}{3}\Delta\theta^3\Bigg(3\left|\max_{s\in[i|\K|,i|\K|\cos\Delta\theta]}\U\left(s,|\K|\right)\right|+3\left|\max_{s\in[-i|\K|\cos\Delta\theta,-i|\K|]}\U\left(s,|\K|\right)\right|\\
+\sum_{j=1}^n\Bigg|\max_{s\in[i|\K|\cos(\theta_j-\Delta\theta),i|\K|\cos(\theta_j+\Delta\theta)]}e^{i\cos(\theta_j-\Delta\theta)t}\bigg(|\K|\partial_s^2\U\left(s,|\K|\right)\cos^4(\theta_j-\Delta\theta)\\
-2|\K|\partial_s^2\U\left(s,|\K|\right)\cos^2(\theta_j-\Delta\theta)+|\K|\partial_s^2\U\left(s,|\K|\right)+2t\partial_s \U\left(s,|\K|\right)\cos^4(\theta_j-\Delta\theta)\\
-4t\partial_s\U\left(s,|\K|\right)\cos^2(\theta_j-\Delta\theta)+2t\partial_s\U\left(s,|\K|\right)+t^2\U\left(s,|\K|\right)\cos(\theta_j-\Delta\theta)\\
-2t^2\U\left(s,|\K|\right)\cos^2(\theta_j-\Delta\theta)+t^2\U\left(s,|\K|\right)-5i\cos^3(\theta_j-\Delta\theta)\partial_s\U\left(s,|\K|\right)\\
+5\partial_s\U\left(s,|\K|\right)i\cos(\theta_j-\Delta\theta)-5i\cos^3(\theta_j-\Delta\theta)t\U\left(s,|\K|\right)+5t\U\left(s,|\K|\right)i\cos(\theta_j-\Delta\theta)\\
-4\cos^2(\theta_j-\Delta\theta)\U\left(s,|\K|\right)+2\U(s,|\K|)\bigg)\Bigg|\Bigg)
\end{multline}
We also know that $\U(s,x,k)$ is bounded, so let $U(x)$ be an upper
bound, let $U'(x)$ be an upper bound of $\partial_s\U$ and $U''(x)$ an upper
bound of $\partial_s^2 \U$. Then let $M(x)$ be the maximum of these
functions. Now, things simplify further to:
\begin{multline}
\label{eq:6}
\eqref{eq:5} \leq
\frac{2}{3}\Delta\theta^3M(x)\Bigg(6+\sum_{j=1}^n\Bigg|e^{i\cos(\theta_j-\Delta\theta)t}\bigg(|\K|\cos^4(\theta_j-\Delta\theta)-2|\K|\cos^2(\theta_j-\Delta\theta)+|\K|\\
+2t\cos^4(\theta_j-\Delta\theta)-4t\cos^2(\theta_j-\Delta\theta)+2t+t^2\cos(\theta_j-\Delta\theta)-2t^2\cos^2(\theta_j-\Delta\theta)+t^2\\
-5i\cos^3(\theta_j-\Delta\theta)+5i\cos(\theta_j-\Delta\theta)-5i\cos^3(\theta_j-\Delta\theta)t+5ti\cos(\theta_j-\Delta\theta)\\
-4\cos^2(\theta_j-\Delta\theta)+2\bigg)\Bigg|\Bigg)
\end{multline}
Applying the triangle inequality, we obtain the following as a bound.
\begin{multline}
\label{eq:7}
\eqref{eq:6} \leq
\frac{2}{3}\Delta\theta^3M(x)\Bigg(6+\sum_{j=1}^n\big(|\K|\cos^4(\theta_j-\Delta\theta)-2|\K|\cos^2(\theta_j-\Delta\theta)+|\K|\\
+2t\cos^4(\theta_j-\Delta\theta)-4t\cos^2(\theta_j-\Delta\theta)+2t+t^2|\cos(\theta_j-\Delta\theta)|-2t^2\cos^2(\theta_j-\Delta\theta)\\
+t^2-5|\cos^3(\theta_j-\Delta\theta)|+5|\cos(\theta_j-\Delta\theta)|-5|\cos^3(\theta_j-\Delta\theta)|t\\
+5t|\cos(\theta_j-\Delta\theta)|-4\cos^2(\theta_j-\Delta\theta)+2\big)\Bigg)
\end{multline}
Introducing $\beta_j=|\cos(\theta_j-\Delta\theta)|$, this can be
written as
\begin{multline}
\label{eq:8}
\eqref{eq:7} =
\frac{2}{3}\Delta\theta^3M(x)\Bigg(6+\sum_{j=1}^n\big(|\K|\beta_j^4-2|\K|\beta_j^2+|\K|+2t\beta^4_j-4t\beta^2_j+2t+t^2\beta_j\\
-2t^2\beta^2_j +t^2-5\beta_j^3
+5\beta_j-5\beta^3_jt+5t\beta_j-4\beta^2_j+2\big)\Bigg)
\end{multline}
Now, we integrate in $\K$ over the circle of radius $\kmax$. This
translates to integrating $|\K|$ from $0$ to $\kmax$ and multiplying
by $2\pi$. This gives us
\begin{multline}
\label{eq:9}
\eqref{eq:8} \leq
\frac{4\pi}{3}\Delta\theta^3M(x)\Bigg(6\kmax +\frac{\kmax}{2}\sum_{j=1}^n\Big(\kmax\beta_j^4-2\kmax\beta_j^2\\
+\kmax+4t\beta^4_j-8t\beta^2_j+4t+2t^2\beta_j-4t^2\beta^2_j\\
+2t^2-10\beta_j^3+10\beta_j-10\beta^3_jt+10t\beta_j-8\beta^2_j+4\Big)\Bigg)
\end{multline}
And this becomes
\begin{multline}
\label{eq:10}
\eqref{eq:9} \leq
\frac{2\kmax\pi}{3}\Delta\theta^3M(x)\Bigg[12+4n+2nt^2+4nt+\kmax n \\
+\sum_{j=1}^n\Big((\kmax+4t)\beta_j^4-10(t+1)\beta_j^3\\
-2(2t^2+4t+\kmax+4)\beta_j^2+2(t^2+5t+5)\beta_j\Big)\Bigg]
\end{multline}
Breaking up the sum yields:
\begin{multline}
\label{eq:11}
\eqref{eq:10} =
\frac{2\kmax\pi}{3}\Delta\theta^3M(x)(12+4n+2nt^2+4nt+\kmax
n\\+(\kmax+4t)\sum_{j=1}^n\beta_j^4
-10(t+1)\sum_{j=1}^n\beta_j^3\\-2(2t^2+4t+\kmax+4)
\sum_{j=1}^n\beta_j^2+2(t^2+5t+5)\sum_{j=1}^n\beta_j)
\end{multline}
Now, we notice that $\beta_j=|\cos\phi|$ for some $\phi$ and that
$|\cos\phi|\leq 1$. This finally allows us to remove the $j$
dependence of the terms inside the sum, and we obtain, after
substituting $\Delta\theta$ back in:
\begin{equation}
\label{eq:12}
\eqref{eq:11} \leq
\frac{\kmax}{3}\frac{\pi^4}{(n+1)^3}M(x)\left(2nt^2+9nt+n\kmax+8n+3\right)
\end{equation}
\end{proofof}
\subsection{Improving the quadrature}
The result we describe here depends on the following idea: $\sqrt{1+z^{2}}$ has a branch cut on the region $[-i,i]$. The rational function approximation can be expanded as a sum of first order poles, as per \eqref{eq:continuedFractionApproximation}. Integrating an analytic function against this sum of poles (around a contour encircling $[-i,i]$) yields a sum of the form $\sum_{n} w_{n} f(z_{n})$, which approximates the integral of $f(z) \sqrt{1+z^{2}}$ around a contour encircling $[-i,i]$. In particular, this is a second order quadrature.
A natural line of inquiry is to ask whether higher order quadratures can be used, simply by discarding the rational function approximation, and merely choosing a sum of poles according to some appropriate quadrature rule. We conjecture that this can be done.
{\bf Acknowledgements: } A. Soffer and C. Stucchio were supported by NSF grant DMS01-00490. C.M. Siegel was supported by the 2005 DIMACS/Rutgers Research Experience for Undergraduates. We also acknowledge that some of this work may duplicate recent results of Tom Hagstrom, Bradley Alpert and Leslie Greengard.
\bibliographystyle{plain}
| {
"redpajama_set_name": "RedPajamaArXiv"
} | 5,226 |
Die Dutch Open 1992 im Badminton fanden vom 30. September bis zum 4. Oktober 1992 im Sportcentrum de Maaspoort in Den Bosch statt. Das Preisgeld betrug 35.000 US-Dollar, was dem Turnier zu einem Zwei-Sterne-Status im Grand Prix verhalf.
Resultate
Weblinks
Offizielle Website
tournamentsoftware.com
1992
World Badminton Grand Prix 1992
Badmintonwettbewerb in 's-Hertogenbosch | {
"redpajama_set_name": "RedPajamaWikipedia"
} | 7,978 |
This casserole dish is a huge hit at our table.
I am always creating and trying new, fast dishes at our place. With the kids being in school and after school activities I don't have much time to wait on supper. I really like this dish because I can put it in the oven and in 30 minutes it will be ready to eat. Another thing I do is cook up hamburger on the weekends, so then I can just add the cooked hamburger to any dish and it cuts the cooking time in half. Let me know what you think of this recipe.
My family gave it "Thumbs Up"!
Pour into a 12 x 18 casserole dish that has been sprayed with cooking spray.
Cover and bake at 350 degrees for 30-45 minutes.
This recipe could be doubled or halved.
I served it with Fresh Spinach.
Let me know if your family likes it! | {
"redpajama_set_name": "RedPajamaC4"
} | 8,260 |
DPBS may refer to:
Dulbecco's phosphate-buffered saline, a buffer solution used in biological research
DPBS (CONFIG.SYS directive), a configuration directive in DOS | {
"redpajama_set_name": "RedPajamaWikipedia"
} | 4,147 |
// Initialize the budgets ("presupuestos") list as a jQuery DataTable.
// NOTE(review): this script mixes server-side PHP short tags (<?= ... ?>)
// into the JavaScript, so it must be rendered through PHP before it reaches
// the browser; the literal tags below are placeholders for generated URLs.
var table = $('#lista_presupuestos').dataTable( {
// Layout template: TableTools export bar, length/filter controls, the
// scrollable table body, then info + pagination in the footer row.
"dom": "T<'row'<'col-md-6 col-sm-12'l><'col-md-6 col-sm-12'f>r><'table-scrollable't><'row dataTableFooter'<'col-md-6'i><'col-md-6'p>>",
// TableTools (Flash-based) export buttons: copy, print preview, PDF, Excel.
// Each button exports only columns 0-3 (the actions column is excluded).
"tableTools": {
"sSwfPath": "<?=base_url().'assets/global/plugins/datatables/extensions/TableTools'?>/swf/copy_csv_xls_pdf.swf",
"aButtons": [ {"sExtends": "copy", "sButtonText": "Copiar", "mColumns": [0,1,2,3]},
{"sExtends": "print", "sButtonText": "Vista de impresión", "mColumns": [0,1,2,3]},
{"sExtends": "pdf", "title": "Ingeniería Vialda C.A. - Listado de presupuestos", "sPdfSize": "letter", "sPdfOrientation": "landscape", "sButtonText": "Guardar PDF", "mColumns": [0,1,2,3]},
{"sExtends": "xls", "title": "Ingeniería Vialda C.A. - Listado de presupuestos", "sButtonText": "Guardar MS Excel", "mColumns": [0,1,2,3]} ]
},
// Default sort: newest first by the hidden created_at column (index 4).
"order": [[4, 'desc']],
"processing": true,
"ordering": true,
"searching": true,
// Rows are fetched from the server; Spanish UI strings come from a
// translation file shipped with the DataTables assets.
"ajax": '<?=base_url().'presupuestos/lista'?>',
"language": {
"url": "<?=base_url().'assets/global/plugins/datatables/extensions/es_ES.txt'?>"
},
"columnDefs": [
{ "width": "14%", "title": "Cliente", "data": "cliente", "targets": [0] },
{ "width": "16%", "title": "Proyecto", "data": "proyecto_id", "targets": [1] },
{ "width": "30%", "title": "Descripción", "data": "descripcion", "targets": [2] },
{ "width": "8%", "title": "Monto", "data": "monto", "targets": [3] },
{ "data": "created_at", "type": "date", "visible": false, "targets": [4] },
// Actions column: for budgets whose project has advanced past editing
// (Aprobado/Finalizado/Activo/Cancelado) only a "view details" link is
// rendered; otherwise "edit" and a delete link with a confirmation popover.
{ "width": "12%", "title": "Acciones", "data": "id",
"render": function ( data, type, full, meta ) {
if(full.status_proyecto == "Aprobado" || full.status_proyecto == "Finalizado" || full.status_proyecto == "Activo" || full.status_proyecto == "Cancelado"){
return '<a href="<?=base_url()?>presupuestos/ver/'+data+'" class="btn btn-xs green"><i class="fa fa-eye"></i> Ver detalles</a>';
}else{
return '<a href="<?=base_url()?>presupuestos/editar/'+data+'" class="btn btn-xs green"><i class="fa fa-edit"></i> Editar</a> <a href="javascript:;" class="btn btn-xs red" data-container="body" data-toggle="popover" data-title="Eliminar presupuesto" data-content="<a id="confirm" class="btn btn-xs green" href="<?=base_url().'presupuestos/eliminar'?>/'+data+'"><i class="fa fa-check-square"></i> Confirmar</a> <a class="btn btn-xs default" href="javascript:;"><i class="fa fa-ban"></i> Cancelar</a>"><i class="fa fa-trash"></i> Eliminar</a>';
}
}, "targets": [5], "orderable": false }
],
"createdRow": function( nRow, aData, iDataIndex ) {
// Agregar id a tr en la tabla
// (stores the record id on the <tr>; the row-click handler reads it back)
$(nRow).attr('id', aData['id']);
},
// After every (re)draw, re-bind the delete-confirmation popovers, since the
// action cells are re-rendered, and keep at most one popover open at a time.
"drawCallback": function ( oSettings ) {
$("[data-toggle=popover]").popover({
html: true,
placement: "left"
});
$("[data-toggle=popover]").click(function(){
$("[data-toggle=popover]").not(this).popover('hide'); //hide all popover but this
});
},
// Move the TableTools export buttons into the external #filtro container.
initComplete: function ( settings, json ) {
$("#filtro").append( $('.DTTT')[0] );
}
});
// Global click handler: dismiss any open delete-confirmation popover when the
// user clicks anywhere outside the popover, its trigger, or the "Confirmar"
// link (whose click must still navigate to the delete URL).
$('body').on('click', function (e) {
$('[data-toggle="popover"]').each(function () {
//the 'is' for buttons that trigger popups
//the 'has' for icons within a button that triggers a popup
if (!$(this).is(e.target) && $(this).has(e.target).length === 0 && $('[data-toggle="popover"]').has(e.target).length === 0 && e.target.id !== "confirm"){
$(this).popover('hide');
}
});
});
// Clicking any cell except the last (actions) column navigates to the detail
// view of that budget. The record id was stored on the <tr> by createdRow.
$('#lista_presupuestos tbody').on('click', 'tr td:not(:last-child)', function(event){
var id = $(this).parent().attr("id");
window.location = "<?=base_url()?>presupuestos/ver/"+id;
});
// jQuery Validate configuration for the "new budget" form: project and
// description are required; the amount must be digits only (the #monto blur
// handler below strips/re-inserts thousands separators around validation).
$('#form_nuevo_presupuesto').validate({
errorElement: 'span', //default input error message container
errorClass: 'help-inline', // default input error message class
focusInvalid: false, // do not focus the last invalid input
ignore: "",
rules: {
proyecto_id: {
required: true
},
descripcion: {
required: true
},
monto: {
digits: true,
required: true
}
},
invalidHandler: function (event, validator) { //display error alert on form submit
$('.alert-success').hide();
$('.alert-error').show();
//App.scrollTo(error1, -200);
},
// On success, hide the error alert and submit the form normally.
submitHandler: function (form) {
//$('.alert-success').show();
$('.alert-error').hide();
form.submit();
},
highlight: function (element) { // hightlight error inputs
$(element).closest('.help-inline').removeClass('ok'); // display OK icon
$(element).closest('.control-group').removeClass('success').addClass('error'); // set error class to the control group
},
unhighlight: function (element) { // revert the change dony by hightlight
$(element).closest('.control-group').removeClass('error'); // set error class to the control group
},
success: function (label) {
label.addClass('valid').addClass('help-inline ok') // mark the current input as valid and display OK icon
.closest('.control-group').removeClass('error').addClass('success'); // set success class to the control group
}
});
// Format a numeric value with thousands separators, e.g. 1234567 -> "1,234,567".
//
// Fix: the previous implementation applied the comma-inserting regex to the
// whole string, which corrupted values carrying more than three decimal
// digits (e.g. "1234.5678" became "1,234.5,678"). We now split on the decimal
// point and group only the integer part, leaving any fractional part intact.
// (The existing call site uses toFixed(2), which was unaffected; this makes
// the helper safe for general use as well.)
//
// @param {number|string} x - value to format.
// @returns {string} the value with commas grouping the integer digits.
function numberWithCommas(x) {
var parts = x.toString().split(".");
parts[0] = parts[0].replace(/\B(?=(\d{3})+(?!\d))/g, ",");
return parts.join(".");
}
// On blur, normalize the amount field: strip existing commas, fix the value
// to two decimals, then re-insert thousands separators for display.
// NOTE(review): parseFloat of an empty field yields NaN, so "NaN" would be
// written back into the input — confirm empty input is prevented upstream.
$("#monto").on("blur", function(){
$(this).val(numberWithCommas(parseFloat($(this).val().replace(/,/g,'')).toFixed(2)));
});
"redpajama_set_name": "RedPajamaGithub"
} | 1,575 |
var mongo = require('mongodb').MongoClient;
// Sails.js installable hook that (re)creates MongoDB indexes for every loaded
// model when the app lifts. Index definitions are read from each model's
// `index` property (see iterCollection below).
module.exports = function indexes(sails) {
return {
//Hook config
// Build the list of MongoDB connection URLs from sails.config.connections,
// keeping only connections that use the sails-mongo adapter. Also forces
// `async` to be exposed as a global, which initialize() relies on.
configure: function () {
sails.log.info('Config monngoindexes');
var name = this.configKey;
// Defaults used when a connection omits host/port/database.
var host = 'localhost';
var port = 27017;
var database = 'test';
sails.config[name] = {
urls: []
};
sails.log.warn('async is active as global');
sails.config.globals.async = true;
if(sails.config.connections){
var keys = Object.keys(sails.config.connections);
keys.forEach(function(item) {
var connections = sails.config.connections[item];
if(connections.adapter==='sails-mongo'){
sails.log.info('Add base: %s', item);
// NOTE(review): host/port/database are declared outside this loop, so a
// connection that omits one of them silently inherits the value from a
// previous iteration instead of the default — confirm this is intended.
if(connections.host!==host) host= connections.host;
if(connections.port!==port) port=connections.port;
if(connections.database!==database) database=connections.database;
sails.config[name].urls.push(
'mongodb://'+host+':'+port+'/'+database
);
}
}, this);
}
},
//Start hook
// For each configured URL, create the indexes of every loaded model,
// one database at a time (mapLimit with concurrency 1). Calls back into
// the module-level helper mapModels().
initialize: function (cb) {
sails.log.info('Initialize indexes');
async.mapLimit(
sails.config.mongoindexes.urls,
1,
function (url, next) {
if(sails.models){
var nameModels = Object.keys(sails.models);
return mapModels(url,nameModels,next);
}else return next();
},
cb
);
}
};
};
// Create the declared indexes of each named model on the database at `url`,
// processing one collection at a time (concurrency 1).
function mapModels(url, names, cb) {
var indexOneCollection = iterCollection(url);
async.mapLimit(names, 1, indexOneCollection, cb);
}
// Returns an async.map iterator that, given a model/collection name, creates
// every index declared in `sails.models[name].index` on the database at `url`.
// Each index entry is expected to provide `ind` (the key specification) and
// `ops` (the index options) — TODO confirm against the models' definitions.
function iterCollection(url){
return function(name,cb){
async.map(sails.models[name].index, function iterIndex(item, next) {
mongo.connect(url, function (err, db) {
if(err) return next(err);
var collection = db.collection(name);
collection.createIndex(item.ind, item.ops, function (indexErr, result) {
// Fix: the original leaked one open connection per index because the
// db handle was never closed. Close it before reporting the outcome.
db.close(function () {
next(indexErr, result);
});
});
});
}, cb);
};
}
"redpajama_set_name": "RedPajamaGithub"
} | 5,226 |
\section*{}
\documentclass[prl,preprint,superscriptaddress]{revtex4-1}
\usepackage{color,amssymb,colortbl,subfigure,graphicx}
\usepackage[table]{xcolor}
\renewcommand{\thefigure}{S\arabic{figure}}
\newcommand{t_{\rm{mod}}}{t_{\rm{mod}}}
\newcommand{t_{\rm{hold}}}{t_{\rm{hold}}}
\newcommand{\red}[1]{{\color{red}{#1}}}
\newcommand{\blue}[1]{{\color{blue}{#1}}}
\usepackage{lineno}
\usepackage{bm}
\topmargin 0.0cm
\oddsidemargin 0.2cm
\textwidth 16cm
\textheight 21cm
\footskip 1.0cm
\begin{document}
\begin{center}
\Large{Supplementary Materials for} \\
\Large{Creation and Characterization of Matter-Wave Breathers} \\
\large{D. Luo, Y. Jin, J. H. V. Nguyen, B. A. Malomed, O. V. Marchukov}\\
\large{V. A. Yurovsky, V. Dunjko, M. Olshanii, R. G. Hulet}\\
\small{Edited Sep 2, 2020}
\end{center}
\subsection*{Error analysis}
The uncertainties in $N/N_{c}$ and in $A^2$ arise from the uncertainties in the measured quantities: $\omega_r$, $N$, and $a$. The radial frequency $\omega_{r}$ is measured by parametric excitation of a trap mode of the BEC at a frequency 2$\omega_r$ which produces observable heating and atom loss. The loss feature is fit to a Lorentzian, giving $\omega_r = (2\pi)297(1)$ Hz. The uncertainty in $N$ is due mainly to a 7\% systematic uncertainty in the imaging laser detuning. The scattering length is determined from the axial size of the BEC measured as a function of the magnetic field $B$ and compared to a 3D GPE simulation \cite{Pollack2009}. For $B$ between 536 G and 544 G, $a(B)$ is fit to a linear function, $a(B) = \alpha(B-B_0)$, where $\alpha = 0.091(4)$ $a_{0}$/G and $B_{0} = 543.8(2)$ G are the fitted parameters. The uncertainty in $B_0$, arising from the accuracy of the field calibration by RF spectroscopy, results in a systematic uncertainty of 0.02 $a_0$ in $a$, and the uncertainty in $\alpha$ gives an additional fractional uncertainty $\Delta a/a = 4.5$\%, where the former dominates the uncertainty in $a_{i}$, while the latter contribution dominates the uncertainty in $a_f$. The effect of the magnetic dipole interaction (MDI) is included in the calibration of the zero-crossing location, $B_0$ \cite{Pollack2009}. The atomic dipoles are aligned along the $z$-axis which produces an effective attraction for our quasi-1D geometry. In the analysis presented here, however, instead of solving the 3D GPE with MDI, we solve the 1D GPE without MDI. The neglect of MDI produces an effective $B_0$ shifted from the original. We found that the data in Fig. 2(a) are in best agreement with the 1D GPE simulations for $B_{0} = 544.0$ G. We justify using this effective value of $B_{0}$ to evaluate $a(B)$ since it is both consistent with the otherwise neglected MDI shift, and it is within our measurement uncertainty of the unshifted value.
\subsection*{Factorization ansatz for breathers beyond the one-dimensional regime}
Here we use a factorization ansatz to obtain an analytic approximation of the collapse threshold for the 2-soliton breather.
Consider $N$ atoms with mass $m$ trapped in the harmonic potential with frequency $\omega_{r}$ in the transverse ($xy$) direction. Let $\hbar\omega_{r}$ be the energy unit and $a_{r}=\sqrt{\hbar/m \omega_{r}}$ be the length unit. This system is described by the Schr\"{o}dinger equation
\begin{equation}\label{eq:schrodinger}
N E_{3D}\psi=\hat{H}_{3D}\psi,
\end{equation}
where
\begin{equation}
\hat{H}_{3D} = \sum_{j=1}^{N}\left(-\frac{1}{2}\frac{\partial^{2}}{\partial z_{j}^{2}}+\hat{H}_{\perp}(r_{j})\right)+\frac{4\pi a}{a_{r}}\sum_{j<j'}\delta( \bm{r} _{j}-\bm{r}_{j'}),
\label{eq:H3D}
\end{equation}
$a$ is the scattering length,
\begin{equation}
\hat{H}_{\perp}=-\frac{1}{2}\left(\frac{\partial^{2}}{\partial r^{2}}+\frac{1}{r}\frac{\partial}{\partial r}\right)+\frac{1}{2}r^{2},
\end{equation}
and $r_{j}=\sqrt{x_{j}^{2}+y_{j}^{2}}$ is the transverse radius.\\
Following \cite{Salasnich2006}, let us take the wavefunction in the form of a product of many-body axial and transverse wavefunctions
\begin{equation}
\psi=\varphi(\{z\})\prod_{j=1}^{N}\Phi (r_{j}),
\end{equation}
where $\{z\}=\{z_{1},...,z_{N}\}$ is the set of atom axial coordinates and the transverse function is a Hartree product of single-atom functions $\Phi (r_{j})$ of the transverse radius $r_{j}$ (the transverse ground state contains only axially symmetric functions). The single-atom functions are normalized i.e.
\begin{equation}
2\pi\int_{0}^{\infty}rdr|\Phi(r)|^{2}=1.
\end{equation}
Projection of the Schr\"{o}dinger equation (\ref{eq:schrodinger}) onto the transverse functions leads to the Lieb-Liniger-McGuire model \cite{Lieb1963,McGuire1963,Berezin1964} for the axial function
\begin{equation}
\left[-\frac{1}{2}\frac{\partial^{2}}{\partial z_{j}^{2}}+\tilde{g}_{1D}\frac{a}{a_{r}}\sum_{j<j'}\delta(z_{j}-z_{j'})\right]\varphi(\{z\})=N E_{\{N\}}\varphi(\{z\}),
\label{eq:schr1D}
\end{equation}
where
\begin{equation}\label{eq:g1D}
\tilde{g}_{1D}=8\pi^2 \int rdr|\Phi(r)|^{4}
\end{equation}
is the effective 1D interaction strength. When $\Phi(r)$ is the ground-state wavefunction of the transverse harmonic potential, we have $\tilde{g}_{1D}=2$ \cite{Yurovsky2008b}, in agreement with the nonlinear coupling constant $g_{1D}$ in Eq. (2) in the main text.
Assuming $a<0$, there exists multistring solutions \cite{McGuire1963}, in addition to the single-string solutions considered in \cite{Salasnich2006}. Due to the translational invariance of the Hamiltonian (\ref{eq:H3D}) and Eq. (\ref{eq:schr1D}) in the $z$-direction, these solutions are also translationally invariant and have homogeneous density. Localized solutions, corresponding to mean-field multi-solitons, can be constructed as a superposition of multistring solutions with different string velocities \cite{Lai1989a}. The mutistring energy tends to the multi-soliton energy in the mean-field limit, and for the $N_{s}$-soliton breather the energy per atom is given by
\begin{equation}
E_{\{N\}}=-\frac{1}{24}\left(\frac{\tilde{g}_{1D}aN}{a_{r}}\right)^2\epsilon_{\{N\}}.
\end{equation}
Here
\begin{equation}
\epsilon_{\{N\}} \approx \frac{1}{N^3}\sum_{i}N_{i}^3
\end{equation}
and the numbers of atoms in the constituent solitons are $\{N\}=\{N_{1},N_{2},...,N_{N_{s}}\}$ with
$\sum_{i}N_{i}=N$.\\
The transverse single-atom functions can be evaluated using the variational principle for the total energy
$\langle\psi|\hat{H}_{3D}|\psi\rangle=N\left(E_{\{N\}}+\langle\Phi|\hat{H}_{\perp}|\Phi\rangle\right)$. Unlike the Gaussian variational function, used in \cite{Salasnich2006}, here the variation over $\delta\Phi^*$ leads to the radial GPE
\begin{equation}\label{eq:radialGPE}
\left[\hat{H_{\perp}}-\frac{16}{3}\pi^3\left(\frac{a}{a_{r}}\right)^{2}N^{2}\epsilon_{\{N\}}\int r'dr'|\Phi(r')|^{4}|\Phi(r)|^{2}\right]\Phi(r)=E_{r}\Phi(r).
\end{equation}
It depends only on the universal parameter --- the scaled atom number
\begin{equation}\label{eq:tilN}
\tilde{N}=\frac{a}{a_{r}}\sqrt{\epsilon_{\{N\}}}N
\end{equation}
and was solved numerically. The solution diverges showing collapse at $\tilde{N}\ge 0.717$. Therefore, a collapse occurs at $N>N_{c}/\sqrt{\epsilon_{\{N\}}}$, where $N_{c}=0.717a_{r}/a$ is the critical number of atoms for the single string, corresponding to the fundamental soliton. The factor of 0.717 is closer to the value of 0.676, obtained in \cite{Gammal2001} by a numerical solution of 3D GPE, than the value of 0.76 in \cite{Salasnich2006} with the Gaussian transverse function.\\
The critical atom number depends on the axial state since the effective 2D interaction strength in (\ref{eq:radialGPE}) is proportional to the binding energy of the multi-soliton state. Then the collapse threshold increases with the number of solitons. For $N_{s}$-breather containing solitons with masses $N_{i}=(2i-1)N/N_{s}^{2}$ $(1\leq i\leq N_{s})$ we have
$\epsilon_{\{N\}}\approx(2N_{s}^2-1)/N_{s}^4$ and, therefore, collapse is predicted at
$N/N_{c}>N_{s}^2/\sqrt{2N_{s}^2-1}$. For the $N_{s}=2$ breather, the model gives the approximate estimate of $N/N_{c}=1.5$.
\newpage
| {
"redpajama_set_name": "RedPajamaArXiv"
} | 1,825 |
// Helper exposing a minimal, class-method-only API for managing an app's
// "launch at login" state. Every method is keyed off the bundle that should
// (or should not) be launched when the user logs in.
// NOTE(review): implementation is not visible here — the exact login-item
// mechanism (e.g. Service Management vs. shared file list) is defined in the
// corresponding .m file.
@interface SpectacleLoginItemHelper : NSObject

// Returns YES when a login item is currently registered for the given
// bundle, NO otherwise.
+ (BOOL)isLoginItemEnabledForBundle:(NSBundle *)bundle;

#pragma mark -

// Registers the given bundle as a login item.
+ (void)enableLoginItemForBundle:(NSBundle *)bundle;

// Removes any login item previously registered for the given bundle.
+ (void)disableLoginItemForBundle:(NSBundle *)bundle;

@end
| {
"redpajama_set_name": "RedPajamaGithub"
} | 487 |
Features Reviews Theater
Old People and Theater (VIGIL at Lantern)
Christopher Munden May 30, 2011 No Comments
I began to attend Philadelphia theater in earnest about a decade ago, in my early mid-twenties. Consistently, my companion and I would be the youngest people in an audience made up mostly of gray-haired, well-dressed white couples. Now in my mid-thirties, I feel less of a demographic anomaly, my increasingly salted salt-and-pepper hair blending into the aging crowd. But just as going to an indie rock show at the TLA can make me feel decidedly old, certain theater crowds still make me feel teenager-ly young.
Memorable cases of this included a show at the Walnut Street Theatre, during which several older audience members received (and took!) cell phone calls in the middle of the show, and I had to endure the distraction of several wives seated near me providing hearing-assisted recaps for their hard-of-hearing husbands. But this fact was really brought home to me at a Lantern Theater Company performance of Vigil by Morris Panych.*
It wasn't that the audience at the Lantern was particularly superannuated—several youngish couples were camped among the standard middle-aged theatergoers—it was that the dryly comic subject matter, which I found hilariously dark, fell flat to the older patrons around me.
Quickly paced with an abundance of droll one-liners ("Why are you putting on makeup? Why don't you let the mortician do that?"), absurd monologues (on cross-dressing, religious guilt, and a disillusioned magician father), and even farcical slapstick, Vigil won its author Vancouver's top award for best play. A British adaptation (the Lantern's source, judging by the language and cultural references, but thankfully not any attempted accents) won critical acclaim at the Edinburgh Fringe. So it's not as if the play didn't have what it needed to be a success, and it was hard to see how the production, which featured Lenny Haas and Cael Phelan in spot-on performances, could have better served the material. Perhaps Vigil would be better received at Philadelphia's own Fringe, where audiences are generally younger in age and more comically inclined. Because, unfortunately for the Lantern, much of the humor—laughter in the face of mortality—came at the expense of the old.
The audience's reaction was mixed, with some people laughing and others sitting in unamused silence. Of course, humor is subjective, and what is a series of laughs to one person can be interminably unfunny to another. If you only read published reviews, you might assume there has never been a good production of a comedy in the city, so anti-humor is the criticism in town. Certainly, when a play is advertised as "darkly funny," I often discover that means "not funny" (I thought the recent August: Osage County at the Arden was a good exception.) Perhaps I was just more sympathetic than most to Vigil's wicked humor, but the timing and absence of others' laughter makes me think it was a generational problem.
Last decade, my regular theater companion was a French live-in girlfriend, who criticized the age of Philadelphia play audiences with irritatingly justified Gallic arrogance, claiming that Parisian theaters offer five euro tickets to all buyers under 25-years-old. I'm sure hundreds of arts management theses have been written about this topic, and I suspect that outreach efforts need to reach even younger viewers. (My own love of theater, piqued by annual holiday pantomimes, was cemented when I saw a performance of the Royal Shakespeare Company at the age of eleven.)
Theater audiences are generally upper middle-aged, running to elderly at the more established (read: expensive) theaters in town. (Though even many of the smaller theaters can be obnoxiously overpriced.) This would seem inauspicious for the future of the craft, but it could just be that social activities which seem fun in our twenties (loud rock shows, dance clubs, near-death drinking experiences) become unappetizing, while a nice night at the theater or opera seems more refined and appealing. Still (and I haven't bothered to look up any statistics to back me up . . . mmm, could I run for the Republican presidential nomination on that claim alone?), audiences in London, where I irregularly see plays, seem younger on average, as do audiences for independent local theater.
Whatever the causes or potential cures, the age of the local theatergoing crowd is sure to remain troublingly high as long as the already old audiences reject fare that would appeal to younger-minded viewers, discouraging theaters from putting on such work in the first place. That was what was so worrying about being the most amused person in the Lantern's surprisingly sparse audience.
Vigil lacked profundity, and the one major (well-signposted) plot twist does not survive scrutiny, but it was the type of wickedly entertaining fare of which I'd like to see more. My experience at Lantern bodes poorly for established Philadelphia theaters. Thankfully, Philadelphia has an abundance of independent companies willing to take risks, shunning subscribers to put on shows that will appeal to new audiences. It is there that I will have to continue to look for more cutting-edge shows (and old people jokes).
But then, the admonition on the tomb in Masaccio's Florentine Renaissance masterpiece, "What you are I was, what I am you will become," works for aging as well as dying. Soon enough, I will see an old man on the bus and think "look at my reflection in that glass" and the Vigil character's thought in the same situation, "what a waste of a seat," will seem just plain unfunny.
*This piece is adapted from my Broad Street Review write-up of that show and was previously published by the Philadelphia Performing Arts Authority.
Arden Theatre CompanyCael PhelanLantern Theater CompanyLenny HaasMorris PanychVigilWalnut Street Theatre
Previous Previous post: Theater People vs. Theater Critics…The Ultimate Debate
Next Next post: Personal Thoughts Made Physical: Jaamil Kosoko talks dance, poetry, and the Gemini Show | {
"redpajama_set_name": "RedPajamaCommonCrawl"
} | 304 |
[ January 28, 2023 ] Pope Francis clarifies comments on sin and homosexuality News Briefs
Benedict XVI: thinker, preacher, saint? Scholars and former students discuss legacy
By Kevin J. Jones for CNA
January 1, 2023 Catholic News Agency Features, History, Interview 10 Print
Benedict XVI holds his final general audience, Feb. 27, 2013. / Mazur/www.thepapalvisit.org.uk (CC BY-NC-SA 2.0)
Denver, Colo., Jan 1, 2023 / 13:00 pm (CNA).
The death of Pope Emeritus Benedict XVI prompted his former students and other Catholic scholars to reflect on his importance for the Catholic Church as a theologian, a scholar and a preacher. Some even raised the prospect of the late pontiff's canonization and recognition as a Doctor of the Church.
"I don't know anyone who has worked closely with him who does not recognize his holiness and his brilliance," Father Joseph Fessio, S.J., the founder and editor of the San Francisco-based Ignatius Press, said Saturday. Fessio was a doctoral student under Benedict XVI when the future pope was known as Joseph Ratzinger, a professor and priest at the University of Regensburg.
George Weigel, a biographer of Pope St. John Paul II who holds the William E. Simon Chair in Catholic Studies at the Ethics and Public Policy Center, praised the late pontiff as "one of the most creative Catholic theologians of modern times and arguably the greatest papal preacher since Pope St. Gregory the Great."
"(N)o one I've ever met had a more lucid or orderly mind than Joseph Ratzinger," Weigel added. "He believed that the truth of the Gospel was the truth of the world, and he bent every effort to help others understand that truth."
Pope Emeritus Benedict XVI died on Saturday at the age of 95. He served as pope from 2005 to 2013, when he became the first pontiff in nearly 600 years to resign.
He was born in the German region of Bavaria on April 16, 1927. He grew up in the shadow of Nazi Germany, a regime he later deemed "sinister" and one that "banished God and thus became impervious to anything true and good."
As a young priest, he served as a theological expert during the Second Vatican Council. As Cardinal Joseph Ratzinger, he served as Archbishop of Munich and Freising and then was Prefect of the Congregation for the Doctrine of the Faith under Pope St. John Paul II. In that role, he played a key part in preparing the Catechism of the Catholic Church and clarifying and defending Catholic teaching against erroneous theologians and dissenting activist groups.
After his death accolades poured in from leaders and contributing authors to Ignatius Press, the primary English-language publisher of Pope Emeritus Benedict XVI's works. These works include his bestseller "Jesus of Nazareth" and his earlier works like "Introduction to Christianity" and "The Spirit of the Liturgy." Ignatius Press has published more than 80 books by Benedict XVI or about him.
Mark Brumley, president of Ignatius Press, said Benedict XVI was "a major figure" in the history of the Church and the world.
"He was one of the great theologians and churchmen of the 20th and early 21st centuries," Brumley said. "Along with Pope John Paul II, he served the Lord and his people mightily by helping the Catholic Church faithfully embrace reform in continuity, rather than either radical rupture or an uncritical return to the past. He was a major force for evangelical fidelity and engagement with the modern world."
"He was a man of God, a disciple of Jesus, and bearer of the Holy Spirit, who helped keep us on the right path," said Brumley. "Thanks be to God for Joseph Ratzinger/Benedict XVI."
In Fessio's view, the late pontiff's life showed heroic virtues and evidence he should be canonized.
"I don't believe being pope is a proof of sanctity, nor is it sufficient grounds for canonization. But being Joseph Ratzinger is," said the priest. Fessio went so far as to say he hoped for "santo subito," an Italian phrase roughly meaning "sainthood now." Many mourners of Pope St. John Paul II invoked the phrase at his death in 2005.
Fessio added that he looks forward to seeing Benedict XVI declared a "Doctor of the Church." This special title uses "doctor" in the sense of the Latin word meaning "teacher." The title is bestowed upon a saint whose writings are deemed to be of universal importance to the Church.
There are 37 official Doctors of the Church including Pope St. Gregory the Great, St. Augustine, St. Thomas Aquinas, St. John Chrysostom, St. Francis de Sales, St. Basil the Great, St. Catherine of Siena, St. Hildegard von Bingen, and St. Therese of Lisieux.
Peter Kreeft, a Boston College philosophy professor and author, said he considered Benedict "a shoo-in for saint and eventually a Doctor of the Church."
"Pope Benedict XVI was a gift of God, one of the very best teachers we have ever had, an equal to Gregory the Great, Leo the Great, and Leo XIII," said Kreeft.
Father D. Vincent Twomey, S.V.D., said it "will come as no surprise" to him if Benedict XVI is named a Doctor of the Church. Twomey, a friend and former doctoral student of the late pope, is a professor emeritus of theology at St Patrick's Pontifical University in Maynooth, Ireland.
The late pontiff, Twomey thought, will be remembered "above all for his literary and scholarly output."
"His writings on a vast spectrum of theological and philosophical topics have a clarity and a depth that make his theology inspiring and therefore liberating," said Twomey. "Future generations of all walks of life will find inspiration in his homilies and in his pastoral writings as pope; his encyclicals on love and hope must rank among the most outstanding ever to come from the pen of a pope."
For Tracey Rowland, John Paul II Chair of Theology at the University of Notre Dame (Australia), Benedict XVI was "one of the most learned men ever to occupy the Petrine Office."
"I believe that future generations will honor him with the title of Church Doctor," she said. "His intellectual legacy is immense and at least on par with St. John Henry Newman, one of the intellectual heroes of his youth."
"He never lost the faith of his Bavarian boyhood and he defended it intellectually on the stage of the world," Rowland added. "He understood the theological roots of the cultural crisis of the Western world better than any world leader of his generation."
Robert Royal, president of the Faith & Reason Institute, also praised the late pope.
"In his brilliance, imagination, humility and steady faith, he resembled the Church Fathers, whom he loved and studied and brought to bear on our troubled age," he said. "He belongs in their company and should be named a Doctor of the Church."
"All of us must die, and the passing of most of us is of little consequence in the vast sweep of sacred history. But Pope Benedict's death marks the end of a monumental life that changed the Church — and the world — and will continue to do so for many years to come," said Royal.
These calls to recognize the late pontiff's contributions echo the words of Cardinal Gerhard Müller, prefect emeritus of the Dicastery for the Doctrine of the Faith. In a Dec. 31 interview with the National Catholic Register, Müller called Benedict XVI a "true Doctor of the Church for today" and a "great thinker."
Tim Gray, president of the Denver-based theology graduate school the Augustine Institute, said Benedict XVI's ministry complemented that of his friend and predecessor Pope St. John Paul II.
"(H)e showed how the Second Vatican Council faithfully applied the Word of God and the gospel proclamation as the way for us to navigate the crisis of truth we now face," he said. For Gray, the late pontiff's writings exemplified the classic Christian description of theology as "faith seeking understanding."
"He spoke of the hope that challenges us to give up comfort in order to embrace the cross, striving forward for the hope Christ has stored up for us in heaven," said Gray. "I pray he may now realize the hope he cherished and the hope he challenged the Church to hold to above all others."
Fr. Joseph Fessio
Mark Brumley
Robert Royal
Tim Gray
About Catholic News Agency 6494 Articles
Catholic News Agency (www.catholicnewsagency.com)
Here are the last words spoken by Pope Emeritus Benedict XVI
U.S. lay Catholic organizations mourn the loss of Pope Emeritus Benedict
Pope Francis: 'The Lord is not looking for perfect Christians'
April 24, 2022 Catholic News Agency 7
Pope Francis gives his Regina Caeli reflections on Divine Mercy Sunday, April 24, 2022. / Vatican Media
Washington, D.C. Newsroom, Apr 24, 2022 / 07:06 am (CNA).
Jesus' merciful words to a doubting St. Thomas reminds us that the Lord does not e… […]
South Korean president seeks Pope's support in reconciliation efforts
May 31, 2017 CNA Daily News 0
Vatican City, May 31, 2017 / 12:29 am (CNA/EWTN News).- On the eve of President Trump's visit to the Holy See, the newly elected president of South Korea sent a special envoy to ask for Vatican support in efforts to foster reconciliation in the K… […]
A synod summary from the Polish synod fathers – Oct 5
October 5, 2018 CNA Daily News 0
Vatican City, Oct 5, 2018 / 11:01 am (CNA).-
The synod of bishops on young people, the faith, and vocational discernment is being held at the Vatican Oct. 3-28.
CNA plans to provide a brief daily summary of the sessions, provided by the synodal fathers from Poland.
Please find below the Polish fathers' summary of the Oct. 5 session:
Preparation for life in marriage, the father's role in the family, young immigrants, and the testimony of the life of young people are the topics on the synod's second day, during which representatives of different countries and continents spoke, exchanging their experiences.
Some of the young people evoked life in broken families. "Therefore, attention was paid to preparing for marriage, for example, pre-marital catecheses that exist in Poland. These catecheses should take into consideration the cultural context because the number of marriages of people from different cultures and religions is increasing," said Auxiliary Bishop Marian Florczyk of Kielce.
"During the discussion, the opinion was also expressed that the man's role has been lost. In the past, a man grew to be a father, to fulfill his tasks. The father's example most strongly draws children to the faith, observed one of the speakers," Bishop Florczyk emphasized.
Another topic was the issue of immigrants, in a broad sense. "If the Church herself in Europe is struggling with problems, what help should be given to the newcomers?" Attention was paid to the condition of immigrants and of those who receive them. "The religious condition of the latter is unfortunately poor," said Bishop Florczyk.
Attention was also paid to young people evangelizing their peers through the example of their lives. "The point is for young believers to lead other young people to Jesus Christ because He is the one who shapes their life. The Church is, indeed, a community that characterizes itself by faith in Jesus Christ, that lives by this faith," highlighted Bishop Florczyk.
"The issues raised show, on the one hand, all the wealth and opportunities, such as good liturgy, but also threats, such as sects that exist in some countries," said Auxiliary Bishop Marek Solarczyk of Warszawa-Praga.
The discussions also focused on the impact of social media on the lives of young people.
Peter D. Beaulieu
Peter Kreeft of Boston College hits the mark when he says "Pope Benedict XVI was a gift of God."…
Benedict once remarked that if stranded with only two books, one would be Scripture and the other St. Augustine's "Confessions"—where Augustine records that he named his son Adeodatus—"gift of God." Augustine now has two sons!
And we can be assured that, in their shared moment of eternity, Benedict and Augustine are delighting in Kreeft's words, and—more—that the 2,000 years of graced influence of Augustinian theology has just received its second wind. And just at the right time, as this fallen world of ours spins farther into the void—the "tyranny of relativism."
Benedict's influence, now from Heaven, has just begun.
Chris in Maryland
Agree Peter B.
Edward J Baker
Perhaps now he will intercede for our suffering at witnessing our Church now
increasingly engulfed in this very tyranny of relativism.
His talents will be well used in heaven as they were on earth. He set an example for us who remain. Praise God that He sends us steady guides to aid the believer. He was a great thinker and one with a great heart.
His prayers did more good than we can begin to know (in this life).
James 5:16 Therefore, confess your sins to one another and pray for one another, that you may be healed. The prayer of a righteous person has great power as it is working.
Jeremiah 29:12-13 Then you will call upon me and come and pray to me, and I will hear you. You will seek me and find me, when you seek me with all your heart.
Matthew 21:22 And whatever you ask in prayer, you will receive, if you have faith."
Proverbs 15:29 The Lord is far from the wicked, but he hears the prayer of the righteous.
John 9:31 We know that God does not listen to sinners, but if anyone is a worshiper of God and does his will, God listens to him.
Psalm 34:15 The eyes of the Lord are toward the righteous and his ears toward their cry.
God bless you as you proclaim His majesty.
Augustine is a bridge builder, an architect of righteous thought. His edifice is understanding and homage to God.
God bless you as you strive to proclaim His grace.
Dr.Cajetan Coelho
As teacher, Benedict XVI touched the hearts, minds, and lives of countless students and scholars. Long live the memory of his life-inspiring contributions.
Russell E. Snow
As one who has studied the modernist biblical scholars for many years, I found Pope Benedict XVI's talk at Saint Peter's Church in New York a devastating analysis of biblical scholarship detached from Catholic faith. The fact that he began with a reference to Vladimir Soloviev's Anti-Christ is very telling. The pope's comment I remember best as he demolished the presuppositions of modernist biblical scholars: "How can you deal with writings that are all about God as if he is not there."
Diane McHenry
Thank you Peter D. Beaulieu for a lovely post and especially with that comforting last sentence. Diane McHenry
"Saint?" When one has faith in Jesus Christ, Holy Scripture assures the believer that he/she is a saint. What the Lord gives let no one cast doubts over.
1 Corinthians 14:33 For God is not a God of confusion but of peace. As in all the churches of the saints,
2 Corinthians 5:17 Therefore, if anyone is in Christ, he is a new creation. The old has passed away; behold, the new has come.
Revelation 14:12 Here is a call for the endurance of the saints, those who keep the commandments of God and their faith in Jesus.
1 Corinthians 1:2 To the church of God that is in Corinth, to those sanctified in Christ Jesus, called to be saints together with all those who in every place call upon the name of our Lord Jesus Christ, both their Lord and ours:
Psalm 30:4 Sing praises to the Lord, O you his saints, and give thanks to his holy name.
Thank you Lord for the witness of Benedict the XVI.
Pope Emeritus Benedict XVI, Requiescat In Pace – Big Pulpit
All comments posted at Catholic World Report are moderated. While vigorous debate is welcome and encouraged, please note that in the interest of maintaining a civilized and helpful level of discussion, comments containing obscene language or personal attacks—or those that are deemed by the editors to be needlessly combative or inflammatory—will not be published. Thank you.
The Gadfly Letter Apostolate
Father Jerry J. Pokorsky January 26, 2023 20
Elias Galy: Pope Francis says "homosexuality is a sin" but is pushing its legalization? Pope Francis keeps allowing one James Martin to…
Crusader: Scandal there is- but it is not CWR that is seeking out scandal. It is Biden who gives scandal seemingly…
brineyman: Tell me, Frances. Are the one Billion-plus children whom Lyin' Biden and his fellow devout Catholics are responsible for killing…
Elias Galy on Pope Francis clarifies comments on sin and homosexuality
Crusader on Some questions about the Bidens' 1977 Catholic wedding
brineyman on Some questions about the Bidens' 1977 Catholic wedding
Hilde DiMarco on Some questions about the Bidens' 1977 Catholic wedding
MichaelR on The need for orthodoxy in a world of dangerous "orthodoxies"
A Pope Who Thinks in Centuries
Tracey Rowland April 18, 2010 1
In 1963 Columbia Pictures produced the movie The Cardinal. According to Wikipedia the Vatican's liaison officer on the project was a young Joseph Ratzinger. In the movie there is a dialogue between a couple of […] | {
"redpajama_set_name": "RedPajamaCommonCrawl"
} | 6,922 |
{-# LANGUAGE BangPatterns #-}
-- | This module defines a type for mutable, string-valued labels.
-- Labels are variable values and can be used to track e.g. the
-- command line arguments or other free-form values. All operations on
-- labels are thread-safe.
module System.Remote.Label
    (
      Label
    , set
    , modify
    ) where

import Data.IORef (atomicModifyIORef)
import qualified Data.Text as T

-- The 'Label' type and its 'C' constructor (a newtype around an
-- 'IORef' -- see the Internal module) are defined elsewhere.
import System.Remote.Label.Internal

-- | Set the label to the given value.
--
-- The bang pattern on @i@ forces the new value to WHNF before the
-- swap; the swap itself goes through 'atomicModifyIORef', so it is
-- safe against concurrent 'set'\/'modify' calls on the same label.
set :: Label -> T.Text -> IO ()
set (C ref) !i = atomicModifyIORef ref $ \ _ -> (i, ())

-- | Set the label to the result of applying the given function to the
-- value.
--
-- Per the 'atomicModifyIORef' contract the function may be applied
-- more than once under contention, so @f@ should be pure and cheap.
-- The new value is also returned as the snd component and bound with
-- a bang (@!_@) so it is forced eagerly, preventing repeated 'modify'
-- calls from accumulating a chain of unevaluated thunks in the label.
modify :: (T.Text -> T.Text) -> Label -> IO ()
modify f (C ref) = do
    !_ <- atomicModifyIORef ref $ \ i -> let i' = f i in (i', i')
    return ()
| {
"redpajama_set_name": "RedPajamaGithub"
} | 7,812 |
Michael K. Lillard is Head of Fixed Income and Chief Investment Officer for PGIM Fixed Income, responsible for portfolio management and trading for all products and strategies across the firm. Previously, Mr. Lillard was Senior Investment Officer for Insurance Portfolios, Liability-Driven Investment Strategies, and Hedge Strategies at PGIM Fixed Income, and was Head of the Quantitative Research and Risk Management Group. Earlier, he was head of PGIM Fixed Income's U.S. Liquidity Team, the group responsible for managing U.S. government and mortgage-backed securities, and also the portfolio manager for Core Fixed Income Strategies.
Mr. Lillard began his career with PFI in 1987 in the Portfolio Management Group. He serves as a member of the United States Treasury Borrowing Advisory Committee.
He earned a Bachelor of Science degree and a Master of Science degree in Computer Science from the Massachusetts Institute of Technology (MIT) and a Master of Science degree in Management from the Sloan School at MIT. He holds the Chartered Financial Analyst® designation. | {
"redpajama_set_name": "RedPajamaC4"
} | 1,662 |
\section{Introduction}
PKS 2155-304 (z=0.116, Falomo et al. 1991) is a prototype of high frequency peaked BL Lac objects. It has been
observed in the entire electromagnetic spectrum, from radio to TeV
gamma-rays. It was the target of several multifrequency campaigns,
the main scope of which was to study the variability of the spectral energy
distribution (SED), in order to constrain emission models.
In particular we
refer to the 1991 and 1994 campaigns involving IUE, ROSAT, ASCA, EUVE and
ground based telescopes (see Edelson et al. 1995, Urry et al. 1997, and
references therein). There were noticeable differences in source behaviour between these two epochs.
While in 1991 the multiwavelength variability was almost achromatic, and the X-ray
variation led that in the UV by a couple of hours, in 1994 the variability was
more pronounced in X-rays than in UV-optical, with a lag of the
latter by two days. The general pattern was that of a hardening of the spectrum
with increasing intensity. More recently Zhang et al. (2006b) studied a large set of
data covering the period 2000-2005 obtained with the XMM-Newton satellite, which
allowed a direct comparison of the X-ray and UV-optical band, the latter
deriving from the Optical Monitor on board the satellite. The complexity
of the variability pattern is confirmed. Some episodes of achromatic variation were
detected, but a general tendency of increasing variability amplitude with increasing
frequency, and spectral hardening with increasing intensity was found.
Optical photometry has been performed by several groups in several occasions (see e.g. Miller et al. (1983), Smith et al. (1992), Xie et al. (1996), Paltani et al. (1997), Pesce et
al. (1997), Fan \& Lin (2000), Tommasi et al. (2001) and references therein). All this
material is rather fragmented, consisting of few hours of observations during
few nights. The difficulty of a systematic observing campaign covering many
nights is partly
overcome by the possibility of observing using remotely guided or robotic
telescopes.
The REM telescope, originally designed for a prompt detection of
gamma ray bursts (see Molinari et al. (2006)), is particularly apt for photometric studies of BL Lacs
(see also the previous results for PKS 0537-441 by Dolcini et al. 2005, and
for 3C 454.3 by Fuhrmann et al. 2006)
and, being located at La Silla (Chile), it is ideally fit to study PKS 2155-304.
We report on extensive and intensive photometric campaign performed in 2005 in the V,
R, I, J, H, K bands. For the total number of photometric points, for the time
resolution (minutes) and spectral range this campaign seems to supersede
all the IR-optical photometric material presented thus far.
\section{REM, Photometric procedure, data analysis}
\subsection{REM}
The Rapid Eye Mount (REM) Telescope is a 60 cm fully robotic instrument. It has two cameras fed at the same time
by a dichroic filter that allows the telescope to observe in the NIR (z', J,
H, K) as well as
optical (I, R, V). Further information on the REM project may be found in Zerbi et al. (2001),
Chincarini et al. (2003) and Covino et al. (2004).
\subsection{Observations and data analysis}
REM observed the PKS 2155-304 field during May, September, October, November and
December 2005 in VRIH bands. Only during three nights in September the
telescope observed also in J and K filters. To allow intranight and
short time-scale variability monitoring, very intensive observations (2-3 h,
quasi-continuously) were made during five of the nights in November. An
outline of the observations is reported in Table 1, while the complete log is
only available in Table \ref{electronic_table} (see Appendix A): we report for each
photometric point the band, the epoch, the integration time, the intensity and its uncertainty. Typical
integration times are $\leq$100 s and statistical uncertainties are always $\leq$ 10$\%$ and $\leq$ 3$\%$ in the highest state
(November 2005, see following).
\begin{table}
\begin{center}
\begin{tabular}{|c|c|c|c|}
\hline
\textbf{Period of observation} & \textbf{Nights of observation} &
\textbf{Number of photometric points} & \textbf{Total exposure time} \\
\hline
May & 6 & 129 & 14520 s \\
\hline
September & 8 & 159 & 18080 s \\
\hline
October & 3 & 102 & 11590 s \\
\hline
November & 21 & 1581 & 173540 s \\
\hline
December & 6 & 64 & 7030 s \\
\hline
\end{tabular}
\caption{Outline of observations accomplished in 2005.}
\label{outline}
\end{center}
\end{table}
Reduction of the REM NIR and optical frames followed standard procedures.
Photometric analysis of the frames was done using the GAIA\footnote{http://star-www.dur.ac.uk/~pdraper/gaia/gaia.html} and DAOPHOT
packages (Stetson 1986).
Relative calibration was obtained by calculating magnitude shifts relative
to three bright isolated stars in the field, indicated by A, B, C in
Fig. \ref{frame} (image taken from ESO Digitized Sky Survey\footnote{http://archive.eso.org/dss/dss}).
\begin{figure}
\begin{center}
\includegraphics[width=8cm]{zpoint3.eps}
\caption{PKS2155-304 field (DSS-1 survey). Letters indicate stars used for calibration.}
\label{frame}
\end{center}
\end{figure}
The NIR frames were calibrated using the magnitudes of the A, B and C stars
as reported in the 2MASS catalogue\footnote{http://irsa.ipac.caltech.edu}. For
the optical, we exposed on 2006 June 29 the standard field
G156-31 (Landolt, 1992), and immediately after this the PKS 2155-304 field. We calculated the
zero points which were then used to calibrate all of our data. The observed magnitudes
in the REM filters for the reference objects A, B, and C are reported in
Table \ref{magabs}. We have monitored the relative intensities of the A, B, C
reference stars during the entire observation period, and we have detected no
indication of variability within 0.1 mag (error on the average $\leq$0.01
mag).
\begin{table}
\begin{center}
\begin{tabular}{|c|c|c|c|c|}
\hline
\textbf{} & \textbf{A} & \textbf{B} & \textbf{C} \\
\hline
RA & 21:58:46.505 & 21:58:43.807 & 21:58:42.337 \\
\hline
DEC & -30:17:51.29 & -30:17:15.71 & -30:10:27.41 \\
\hline
K & 11.171$\pm$0.024 & 12.475$\pm$0.030 & 12.648$\pm$0.024 \\
\hline
H & 11.182$\pm$0.027 & 12.556$\pm$0.026 & 12.769$\pm$0.027 \\
\hline
J & 11.510$\pm$0.027 & 12.838$\pm$0.026 & 13.091$\pm$0.029 \\
\hline
I & 12.184$\pm$0.005 & 13.421$\pm$0.009 & 13.216$\pm$0.006 \\
\hline
R & 12.981$\pm$0.004 & 13.434$\pm$0.006 & 13.671$\pm$0.010 \\
\hline
V & 13.179$\pm$0.005 & 13.822$\pm$0.009 & 13.899$\pm$0.013 \\
\hline
\end{tabular}
\caption{Coordinates, IR and optical magnitudes for the reference stars.}
\label{magabs}
\end{center}
\end{table}
Note that we found significant
deviations from the optical calibrations provided by the finding charts for AGN of the Heidelberg
University\footnote{http://www.lsw.uni-heidelberg.de/projects/extragalactic/charts/2155-304.html}
(Hamuy \& Maza, 1989). In particular the star C is also used as
a calibrator by these authors and our optical zeropoint differs by about 0.3 mag
from theirs.
Relative and absolute calibration errors have been
added in quadrature to the photometric error derived from the procedure.
\section{Results}
\subsection{Long term variability}
In this section we report the results of the long term photometric
analysis. The light curves in the H, R, I, V filters are given in Fig. \ref{curve_luce}.
\begin{figure}
\begin{center}
\includegraphics[width=12cm]{curve_luce_fin.eps}
\caption{Normalized light curves of PKS 2155-304. Flux
is reported in arbitrary units (a. u.). In each box a typical error bar
is plotted.}
\label{curve_luce}
\end{center}
\end{figure}
The intensity is normalized
with respect to the average over the entire observation period. These
averages are given in Table \ref{table1}. It is immediately apparent that the total
variability range is very different in the various filters, being a factor
$\approx$ 4 in H and a factor $\approx$ 2 in V (see Table \ref{table1}).
\begin{table}
\begin{center}
\begin{tabular}{|r|r|r|r|r|}
\hline
{\bf Filter} & {\bf H} & {\bf I} & {\bf R} & {\bf V} \\
\hline
{\bf Average} & 114.9$\pm$3.3 & 34.45$\pm$6.5 & 30.89$\pm$5.13 &
30.70$\pm$5.05 \\
\hline
{\bf Max value} & 156.5 & 46.4 & 38.3 & 37.4 \\
\hline
{\bf Min value} & 36.5 & 19.1 & 16.2 & 16.2 \\
\hline
{\bf Average ep.1} & 39.3$\pm$1.4 & 21.4$\pm$1.5 & 18.7$\pm$1.3 & 18.1$\pm$0.7
\\
\hline
{\bf Average ep.2} & 65.9$\pm$5.2 & 28.4$\pm$3.3 & 27.2$\pm$2.5 &
20.3$\pm$3.4\\
\hline
{\bf Average ep.3} & 122.9$\pm$6.1 & 38.8$\pm$1.9 & 34.1$\pm$1.5 & 33.5$\pm$1.7\\
\hline
\end{tabular}
\caption{Average intensities for all epochs and all filters. All data are in mJy
units. \textbf{Epoch 1} corresponds to May 2005 observations, \textbf{epoch 2} to
September-October 2005 observations and \textbf{epoch 3} to November-December 2005 observations.}
\label{table1}
\end{center}
\end{table}
The shapes of the light curves
are similar in the various filters. A flare-like structure is apparent in
all filters at t $\approx$ 680 (first days of November).
The ratio between the V- and H-band fluxes, designated as V/H, is reported in
Fig. \ref{vsuhtime}. In order not to introduce spurious effects due to small time scale
variability, the V/H ratio has been computed for pairs of V and H
measurements spaced apart in time by no more than 10 minutes.
\begin{figure}
\begin{center}
\includegraphics[width=8cm]{vsuhtime.eps}
\caption{V/H flux ratio evolution during 2005. Error bars are
comparable with symbol size.}
\label{vsuhtime}
\end{center}
\end{figure}
It seems that there
are two main colour states: the source softens rather abruptly, in response to the November flare. On the basis of the
light curve and the colour curve we divide the observations in three epochs:
\textbf{1} 500-525, \textbf{2} 640-660, \textbf{3} 670-725, expressed in
MJD\footnote{For the Modified Julian Date we use the convention
MJD=JD-2,453,000.5}.
\subsection{Short time-scale variability}
\label{short}
We report in Fig. \ref{curvenov} the light curves for five nights in
November 2005, when
the observations were more intensive. All the
nights belong to epoch \textbf{3}, corresponding to the high state of the source.
\begin{figure}
\begin{center}
\includegraphics[width=12cm]{new_fig4.eps}
\caption{Light curves in the H and V filters for five nights in November 2005,
when the observations were more intensive. Dates of observations are reported
in each box. The solid line in V band - 4 Nov box results from a linear
regression analysis. The solid line in H band - 8 Nov box connects the four
points of the flare-like structure. In each box it is given a typical
error bar. In V band - 4 Nov box the
light curve of one comparison star is also plotted, with a fixed enhancement of 9 mJy.}
\label{curvenov}
\end{center}
\end{figure}
\begin{figure}
\begin{center}
\includegraphics[width=12cm]{fratio.eps}
\caption{V/H flux ratio versus intensity for the five more intensively
observed nights of epoch \textbf{3}. In each box a typical
error bar is plotted.}
\label{vsuhsuv}
\end{center}
\end{figure}
The mean intensity and the 1-sigma values for each night are given in Table \ref{table4}.
\begin{table}
\begin{center}
\begin{tabular}{|r|r|r|r|r|r|}
\hline
{\bf Night} & {\bf 4/11} & {\bf 8/11} & {\bf 18/11} & {\bf 19/11} & {\bf 20/11} \\
\hline
{\bf Average H} & 119.3$\pm$1.7 & 119.3$\pm$3.0 & 120.4$\pm$2.1 & 124.5$\pm$2.8 &
130.8$\pm$3.0 \\
\hline
{\bf Average I} & 39.1$\pm$0.6 & 36.4$\pm$0.5 & 38.7$\pm$0.6 & 38.7$\pm$0.6 &
38.3$\pm$0.7 \\
\hline
{\bf Average R} & 38.5$\pm$0.8 & 36.4$\pm$0.8 & 37.3$\pm$0.5 & 38.0$\pm$1.6 &
37.1$\pm$0.5 \\
\hline
{\bf Average V} & 33.2$\pm$1.1 & 33.0$\pm$1.1 & 33.4$\pm$1.1 & 33.5$\pm$1.1 &
34.7$\pm$0.1 \\
\hline
\end{tabular}
\caption{Average intensities and 1-sigma values for all filters for all five nights with more intensive observations in November 2005. All values are in mJy units.}
\label{table4}
\end{center}
\end{table}
A $\chi^{2}$ analysis indicates that in each night the significance of
variability is very high, but for the nights of Nov 4 and Nov 18 for the
H band and Nov 19 for the V band. In the box of Nov 4 - V band we also
report the photometry of a comparison star which illustrates
directly the significance of the source variability.
Though the shapes of intensity curves are different (see Fig. \ref{curvenov}), there is a rather regular colour-intensity dependence (see Fig. \ref{vsuhsuv})
indicating harder states for higher intensities.
We adopt the usual definition of time scale variability
$\tau= \frac{1}{1+z} \frac{<f>} {df/dt}$. Following Montagni et
al. (2006), a variability time scale is taken as reliable
if the light curve can be approximated with a linear dependence, and it
contains at least 10 points. In particular this gives a time scale
of $\approx24$ h for the November 4 night (Fig. \ref{curvenov}, V band - Nov 4
box). The simultaneous H light curve does not show any regular variability. We note that on
November 8 in the H curve there is
a flare-like event. If one connects 4 points as suggested in
Fig. \ref{curvenov} H band - Nov 8 box,
the time scale variability is as short as 1.5 h. Unfortunately the V light curve is
too sparse to confirm the presence of the flare also in this band.
\subsection{The NIR-Optical spectral energy distribution}
We had six filter coverage (K,H,J,I,R,V) during three nights
of Sept. 2005 (epoch \textbf{2}) and representative SEDs for these nights are reported in Fig. \ref{spettri_set}.
\begin{figure}
\begin{center}
\includegraphics[width=8cm]{spettro_medio.eps}
\caption{September 2005 spectra for observations including the K and J filters.
The spectral fit on average data with a single power law yields a
spectral index $\alpha$=0.91$\pm$0.07.}
\label{spettri_set}
\end{center}
\end{figure}
The delays between exposures in the different filters are less than 20
minutes. Reddening corrections are less than 6\% in V and have been neglected.
A fit with a single power law yields $\alpha\approx$0.9 and it is clearly not
good. The main deviation derives from the J filter, exceeding substantially
our photometric precision of about 10\%.
An improvement in the fit is obtained by using a broken power law with
spectral indices $\alpha\approx$0.4 for the IR data and $\alpha\approx$0.9 for
the optical data.
For comparison, we report in Fig. \ref{spettro29giu} the SED of June 29, 2006,
exposure used for calibration purpose: its profile is rather similar to that of Sept. 2005.
\begin{figure}
\begin{center}
\includegraphics[width=8cm]{spettro_29giu.eps}
\caption{29 June 2006 spectrum. The spectral fit with a single power
law yields a spectral index $\alpha$=0.90$\pm$0.16.}
\label{spettro29giu}
\end{center}
\end{figure}
At the other epochs the SED consists of 4 points (H, I, R, V), and in
Figs. \ref{spettro13mag} and \ref{spettro4nov}
we give representative examples of SEDs acquired on epoch \textbf{1} and \textbf{3}. The
time differences between observations at various filters are less than 20 minutes.
\begin{figure}
\begin{center}
\includegraphics[width=8cm]{epoch1.eps}
\caption{13 May 2005 spectrum - epoch 1. We report also the
spectrum of the host galaxy (see text). The spectral fit with a single power law
yields a spectral index $\alpha$=0.77$\pm$0.16.}
\label{spettro13mag}
\end{center}
\end{figure}
\begin{figure}
\begin{center}
\includegraphics[width=8cm]{epoch2.eps}
\caption{19 September 2005 spectrum - epoch 2. For comparison we report the ESO 3.6m telescope
spectrophotometry which correspond to a slightly lower state of the
source. The spectral fit with a single power law yields a spectral index $\alpha$=0.88$\pm$0.05.}
\label{spettro19set}
\end{center}
\end{figure}
\begin{figure}
\begin{center}
\includegraphics[width=8cm]{new_fig10.eps}
\caption{4 November 2005 spectrum - epoch 3. The spectral fit with a
single power law yields a spectral index $\alpha$=1.32$\pm$0.25. Error bars are
comparable with symbols size.}
\label{spettro4nov}
\end{center}
\end{figure}
In Fig. \ref{spettro13mag}, which refers to a low state,
we report also the estimated contribution of the host galaxy, which was
calculated adopting the H magnitude of the galaxy measured by Kotilainen et
al. (1998) and the Mannucci et al. (2001) template spectrum for giant ellipticals. It is apparent that the
contribution of the galaxy never exceeds 20$\%$ of the BL Lac signal. At the other epochs
the contribution from galaxy is negligible and it is not relevant for
explaining the excess in J with respect to a single power law noted above. The epoch \textbf{2} photometry (Fig. \ref{spettro19set})
is compared with spectrophotometry obtained with the ESO 3.6m telescope by R. Falomo\footnote{spectrum available at the ZBLLAC online library,
http://www.oapd.inaf.it/zbllac} on July 25, 2001 (Sbarufatti et al. (2006)). The source was found in a similar, but
somewhat lower brightness state and some deviations from a power law are apparent.
The HRIV points at epoch \textbf{3} (Fig. \ref{spettro4nov}) are roughly fitted by a single power law of
$\alpha\approx$1.3. In any case the comparison of the SEDs at the three epochs clearly indicates
a softening with increasing intensity.
\section{Discussion}
A collection of near-IR/optical SEDs of PKS2155-304 obtained by various authors at different epochs
is presented in Fig. \ref{spettri_storici} and in Table \ref{indici_spettrali_V}. Our data encompass all those reported in the literature.
\begin{figure}
\begin{center}
\includegraphics[width=12cm]{new_fig11.eps}
\caption{Different spectra of PKS2155-304 from observations at other epochs reported in the
literature. Symbols correspond to following works: filled circles: this work (13/5/2005 data),
filled squares: this work (19/9/2005 data), filled up triangles: this work (4/11/2005
data), open diamonds: Bertone et al. (2000; 24/5/1996 data), open circles:
Pesce et al. (1997; 19/5/1994 data, the Hamuy \& Maza (1989) calibration is used), open up triangles: Zhang and Xie (1996;
5/7/1994 data), open squares: Bersanelli et al. (1992; 17/1/1987 data), open crosses:
Treves et al. (1989; 1/12/1983 data), open stars: Treves et
al. (1989; 11/11/1984 data), asterisks: Miller and McAlister (1983; 19/11/1981
data). Spectral
index values and V magnitudes for all data sets are reported in Table
\ref{indici_spettrali_V}.}
\label{spettri_storici}
\end{center}
\end{figure}
\begin{table}
\begin{center}
\begin{tabular}{|c|c|c|}
\hline
\textbf{Data set} & \textbf{$\alpha$} & V (mJy) \\
\hline
This work (13/5/2005) & 0.77$\pm$0.16 & 16.485$\pm$0.263 \\
\hline
This work (19/9/2005) & 0.88$\pm$0.05 & 24.370$\pm$0.238 \\
\hline
This work (4/11/2005) & 1.32$\pm$0.24 & 35.278$\pm$0.498 \\
\hline
Bertone et al. (2000) & 0.42$\pm$0.26 & 26.20$\pm$0.58 \\
\hline
Pesce et al. (1997) & 0.62$\pm$0.30 & 24.50$\pm$0.67 \\
\hline
Zhang \& Xie (1996) & 0.62$\pm$0.16 & 22.90$\pm$0.63 \\
\hline
Bersanelli et al. (1992) & 0.61$\pm$0.38 & 51.88$\pm$1.56 (J band) \\
\hline
Treves et al. (1989) (1/12/1983) & 0.51$\pm$0.31 & 19.80$\pm$0.36 \\
\hline
Treves et al. (1989) (11/11/1984) & 0.51$\pm$0.41 & 26.20$\pm$0.48 \\
\hline
Miller \& McAlister (1983) & 0.62$\pm$0.56 & 17.8 \\
\hline
\end{tabular}
\caption{Spectral index values and V values for all spectra plotted in
Fig. \ref{spettri_storici}. $\alpha$ vs V plot is reported in Fig. \ref{alpha_V}.}
\label{indici_spettrali_V}
\end{center}
\end{table}
\begin{figure}
\begin{center}
\includegraphics[width=10cm]{new_fig12.eps}
\caption{$\alpha$ vs V plot for data reported in
Fig. \ref{spettri_storici}. Symbols are the same as used in
Fig. \ref{spettri_storici}.}
\label{alpha_V}
\end{center}
\end{figure}
In the historical observations of PKS2155-304 the delays between exposures
at different filters are typically of the order of hours, instead of about
10 minutes as in our data set.
Comparing literature data with our data it is apparent that the maximum
we observed on 20 November 2005 in the H filter light curve
is the highest state ever reported in this band. Note that the V state was
comparable with states reported in the literature, likely because the
coverage of
the source in the optical band is less sparse than that in the NIR.
A most noticeable result of our photometry is the discovery of long term
H-band variability, the amplitude of which is much larger than that in the
optical.
In Fig. \ref{alpha_V} we plot the spectral index vs the V magnitude, as reported in table \ref{indici_spettrali_V}. There is no
apparent correlation. It is noticeable however that the highest state in all
bands (our observation of Nov 2005) corresponds to a rather soft spectral shape. This contrasts with the usual source behaviour of
hardening with increasing intensity, as found in the UV-X-ray band
(see Introduction).
It contrasts also with the short time scale variability, as reported
in section \ref{short}.
There is a general consensus that the blazar SED can be explained by
the superposition of a synchrotron component,
and an inverse Compton one due either to scattering off the synchrotron
photons (synchrotron-self Compton, SSC), or to external photons like those
of the broad line region or
of a thermal disk (e.g. Tavecchio et al. 1998, Katarzynski et al. 2005). This
results in a typical two-maxima shape of the
blazar SED. In Fig. \ref{Chiappetti99} we report examples of the SED
modeling proposed for PKS 2155-304,
on the basis of data taken in 1997. The models are detailed in
Chiappetti et al. (1999).
\begin{figure}
\begin{center}
\includegraphics[width=14cm]{new_fig13.eps}
\caption{ SED of PKS 2155-304 in two states, adapted from Chiappetti et
al. (1999) (see the paper for details). Data from this work are
also plotted. Filled triangles correspond to epoch \textbf{1} (13/5/2005
data), while filled hexagons belong to epoch \textbf{3} data
(20/11/2005). Optical, UV and REM data are dereddened using E(B-V)=0.026 and parameters given by Cardelli et
al. (1989).}
\label{Chiappetti99}
\end{center}
\end{figure}
The object is a typical HBL, with the synchrotron peak in the soft
X-rays.
A well known critical point of this model, is that the source
size is essentially constrained by variability, and variability itself
requires that the SED is constructed using simultaneous observations in
all bands. A further step of the modelling consists in identifying
the physical origin
of the relativistic jet and of its variability, see e.g. Katarzynski \&
Ghisellini (2006). With this
premise it is obvious that the optical-IR photometric study, non simultaneous
with that in other regions of the SED, has only a limited relevance in
clarifying
the overall picture. However we would like to make some remarks. If the
SSC models reported in Fig. \ref{Chiappetti99} truly represent the behaviour
of the SED in 1997, as suggested by the good match with the X-ray and TeV energy data, and if our 2005 optical-IR spectra are also due to the SSC mechanism, then the latter represent a
different condition in the jet and point to different critical parameters within the SSC scenario.
While the IR-optical spectrum in May 2005 (triangles) has the same shape as predicted in 1997, but different normalization, the November 2005 IR-optical spectrum is different in both shape and
normalization. The May 2005 observation
suggests that the synchrotron peak may be located at a frequency
similar to the one observed in 1997 (approximately between extreme UV and soft X-rays), the
total energy being somewhat higher (about a factor 2, see Figure 13) than observed in 1997. The
slope of the November 2005 spectrum suggests instead a much lower synchrotron peak energy, around
the IR-optical domain or even redward, i.e. about 2-3 orders of magnitude lower than observed in 1997 and inferred in May 2005. While a variation of the synchrotron peak energy of this amplitude and on this time scale
(the September 2005 slope is intermediate between those of May and November 2005, suggesting a
monotonic change) is not unprecedented in blazars (Mkn501 exhibited a similar variation on a much
more rapid time scale, Pian et al. 1998),
this would be the first observation of this kind in PKS~2155-304. Therefore, our interpretation is
only tentative, although supported by the large observed IR variability.
Alternatively, in order to explain the optical-IR flux excess we observe in 2005 with respect to the
SSC prediction based on the earlier multiwavelength data (Fig. 13), one could
invoke a thermal component, possibly from hot dust associated with the
``dusty torus'' surrounding the central region of the active nucleus, as suggested in the cases of other blazars with
excess in the optical-infrared band
(De Diego et al. 1997, for blazar 3C 66A; Pian et al. 1999 for 3C~279;
Pian et al. 2002, 2006, for blazar PKS 0537-441). However, this seems
somewhat less
likely, because high emission states, as observed by us, are expected to be
dominated by non-thermal beamed relativistic radiation.
The continuation of this and other similar optical-IR studies, which have
been proven to be promising but do not provide enough information for a
physical interpretation of the data, requires that the observations are
extended to other wavelengths. Simultaneous observations over a large
wavelength range is the only tool to provide the necessary information for a
physical interpretation of the observed variability of blazars.
REM
monitorings of the kind reported here
could be an effective trigger to X-ray satellites, and programs
along these lines are foreseen with SWIFT. Cross correlation procedures,
which up to now have been limited mainly to the X-ray band (Zhang et al. 2005,
2006a, 2006b, Sembay et al. 2002, Edelson et al. 1995), would be extended to
a much larger portion of the SED.
| {
"redpajama_set_name": "RedPajamaArXiv"
} | 2,478 |
San Dimas Parks Department Completes Projects, Commission Unable to Convene
Featured/Government/Parks & Recreation
The new playground installation in Loma Vista Park is one of many projects completed by the Parks and Recreation Department as the commission remains dormant. Photo: Rommel Alcantara
By Layla Abbas
UPDATE, Mar. 29, 4:30 p.m.: The special city council meeting regarding the policy about board, commission and committee appointments, membership and responsibilities was rescheduled from March 30 to April 8 at 6 p.m.
The San Dimas Parks and Recreation Commission has been unable to meet since November of 2020 primarily because of four vacancies, according to Parks and Recreation Commission Chair Jan Bartolo.
Hector Kistemann, director of the Parks and Recreation Department was removed from his position and placed on paid administrative leave on Oct. 14, 2020. According to a claim filed against the city, Kistemann was not told why he was being removed. Kistemann resigned from his position on Feb. 10, 2021.
The city received applicants for the Parks and Recreation Commission last year, but on Nov. 24, San Dimas City Council postponed scheduling interviews to fill those and other commission vacancies, preventing the Parks Commission from meeting.
"We currently do not have a quorum to conduct the commission meetings," Bartolo said.
A quorum is the minimum number of people that are required in order to conduct an official meeting.
"It has been hard to get together and convene anyway," Bartolo said. "Some cities have temporarily held up on board and commission meetings. We did have some nice momentum when the board was meeting, so it will be nice to regain the traction again."
Despite not being able to meet as a body, Assistant City Manager Brad McKinney said the department has continued its normal operations.
Assistant City Manager Brad McKinney said the Parks and Recreation Department continues its normal operations. Completed projects include resurfacing the basketball court at Pioneer Park. Photo: Rommel Alcantara
In an email on Feb. 5, McKinney said San Dimas Parks is monitoring and complying with LA County COVID-19 guidelines and continuing programs like San Dimas Cares.
"Senior and Recreation programs continue to be provided virtually and programs will expand as COVID-19 guidelines allow," McKinney said in the same email.
McKinney said some recently completed projects include the Loma Vista Park Playground installation, basketball court resurfacing, Pioneer Park court resurfacing, SPLEX tennis court resurfacing, rehabilitation to the SPLEX fields and maintenance to the dog park.
The San Dimas Dog Park at Horsethief Canyon Park reopened on Mar. 5 after regularly scheduled maintenance. Photo: Rommel Alcantara
The city council will hold a special meeting regarding the policy about board, commission and committee appointments, membership and responsibilities on April 8 at 6 p.m.
Neighboring Parks Departments Pivot During Pandemic
Yvonne Duran, recreation coordinator for the City of La Verne, said their top priority has been senior residents.
"We have set up virtual coffee talks, bingo, curbside meal drop-off, and more to check up on our senior residents."
Duran said the department has started a senior newsletter that is available online and handed out during curbside meal drop-offs.
In addition, Duran said their Preschool on the Go program has been a huge success.
"Our team delivers the educational items to the families that they can complete on their own time," Duran said. "We have about 50 to 60 participants every month."
Duran and her team are planning a special and safe graduation celebration when the preschoolers graduate in May.
Outdoor activities like agility, gymnastics and virtual classes offered through Mt. San Jacinto are still active.
Daisy Flores, human services leader for Claremont, said although this year limited in-person interactions, her team met bi-weekly to share new ideas and ways to keep up community involvement.
"We had to change up the process for our senior meal distribution and find a safe way to deliver meals," Flores said. "Seniors can either drive up to our building or, if they are unable, we safely drop off the meals at their doorstep."
Flores said the virtual classes like Zumba, coding and drawing have remained a popular hit throughout the year.
"The classes will continue to be held virtually through summer," Flores said. "We have not yet heard about the plans for fall and whether or not the online classes will continue."
Wilderness passes for the Claremont Loop, a popular hiking trail, have remained available to the public, Flores said.
Annie Warner, recreation superintendent for community services, said the Glendora commission took a six-month hiatus in 2020 because of no special events to discuss.
The commission resumed monthly meetings in February, which take place the third Thursday of each month over Zoom.
"One activity we had to adjust and ended up being well-accepted from the community was concerts in your front lawn instead of concerts in the park," Warner said. "Everyone gathered safely in their front yard and enjoyed hanging out."
Warner said outdoor activities like guided hikes, PE classes with recreation leaders and outdoor conditioning classes were still offered in a safe manner throughout the pandemic.
Recreation leaders also dropped off activity boxes to residents and would demonstrate the activity via Zoom. Some activities included how to build a lava lamp, how to make a pizza and how to paint.
Warner said it was important for her department to pivot and find new ways to ensure the community felt involved during a rather isolating time.
Disclaimer: Isabel Ebiner, managing editor for the San Dimas Community Post and daughter-in-law of Councilmember John Ebiner, edited this story for AP Style.
SUPPORT US SUBSCRIBE WRITE A LETTER TO THE EDITOR
Layla Abbas
School District Addresses COVID-19, Career and Technical Education Programs
Woman, Dog Fatally Stabbed at Lone Hill Park, Suspect in Custody
Latest from Featured
Senior Care Facilities Celebrate the Holidays
Senior care organizations such as Visiting Angels and The Terraces at Via Verde work hard to | {
"redpajama_set_name": "RedPajamaCommonCrawl"
} | 6,475 |
The Apprentice: Why Stuart Baggs wins despite a lack of authority
Benji Wilson assesses the seventh episode of The Apprentice, in which the self-titled "The Brand", Stuart Baggs, becomes a team leader for the first time.
Joanna Riley and Stuart Baggs, "The Brand", brainstorm in the seventh episode of the 2010 series of The Apprentice. Photo: BBC
By Benji Wilson
4:21PM GMT 17 Nov 2010
Episode seven of The Apprentice (BBC One) was supposed to be the one where viewers were treated to The Long-Awaited Comeuppance of Stuart Baggs ("The Brand"). Baggsy, the youngest contestant in the competition, began the series as a spiky-haired blowhard, never afraid of an astonishing overstatement when something merely crass would do, but in recent weeks he has gone into his shell. This has been disappointing. From the minute he coined his own nickname, to the moment where "LorShuga" started openly mocking him with it – a first for The Apprentice, six series in – we were waiting for Baggs to go the way of Greek heroes, heavyweight boxers and diminutive generals, and be repaid for his hubris with some appropriately Sophoclean downfall. But in the face of BBC compliance the pudgy finger of fate/Lord Sugar would suffice.
As the episode began, it all looked so promising. The task was to sell DVDs to people of themselves filmed against a green screen, and then overlaid on moving backdrops such as racetracks or ski slopes. Baggs laid on a 20-minute masterclass of how to a) not sell DVDs, b) get fired from The Apprentice and c) make TV reviewers' lives ridiculously easy by furnishing them with a plume of aphoristic nonsense. Almost every time Baggs opened his mouth, Sugar's sidekick Nick Hewer, the Greek chorus in this tragedy, made that face where he looks as if someone put bleach in his lemonade. Among the litany of Baggisms was, "I absolutely live adrenalin. If my heart's not racing, why be alive?" (This was meant to be a rhetorical question but several obvious answers did present themselves.)
Baggs bulldozed his team-mates, and was at one point reduced to shouting, "Spoony, spoony, spoony" in the back of his people carrier (he was trying to mock someone else, but shouting "spoony" repeatedly tends not to flatter the shouter). He implied that Stella, 30, was borderline geriatric because she wrote things down. "I remember everything in my head," was his boast. Later, in the boardroom, Sugar noted that having an apprentice who didn't write anything down might be bad for business. "What if you got run over by a number six bus? Where does that leave me?" he drawled with a half-smile, as if he couldn't think of a more exquisite scenario.
But wouldn't you know it, team Baggs won the task, so whatever Sugar felt was just deserts for The Brand it mattered not. Moreover, Baggs's team won by selling less product but at a higher price, which must be an affront to Sugar's lifelong business rationale and perhaps explained why he was so subdued – when the finger-wag of death came it was a half-hearted expulsion. "There's no luck in business," he reminded his wannabes, but if any business truths were to be gleaned from this instalment it was that there is a bottomless vat of luck in business – Brand Baggs had just spent 40 minutes splashing about in it.
TV and Radio »
BBC »
In The Apprentice
The Apprentice's 17 most idiotic candidates
The Apprentice: where are they now?
The 10 faces of Nick Hewer - in GIFs
The Apprentice: 'a tight finish'
The Apprentice 2014 candidates: ranked from worst to best
Oscars 2016 highlights
Chris Rock's funniest Oscars jokes
Come Dine with Me producer wins Oscar
Leo attends the Oscars after-party
Lady Gaga sings with sexual abuse survivors
Oscars 2016: Red carpet highlights | {
"redpajama_set_name": "RedPajamaCommonCrawl"
} | 1,482 |
Yoga is a form of exercise that combines both physical and mental workout and helps in stress release. In today's stressful life, yoga is the best way to stay fit. Yoga might not seem very easy for people who are not particularly habituated with it, but its benefits are so prominent that you cannot really overlook the needs of doing yoga regularly.
Yoga is a very systematic way of relaxing your muscles and releasing stress. While you are on the practice, you need to feel free and comfortable. The Yoga gear or the yoga tools you use in the process needs to be good ones, not only from the point of looks but also in quality.
The mat that you use at the time of yoga, yoga blocks, the clothes you wear and the towel that you use while doing the yoga comes under the essential Yoga gear or tools. While doing yoga you have to take care so that you can fully concentrate on the process and nothing else takes your concentration away. So, whatever you buy to use in your yoga class, should be comfortable.
The clothes you choose to wear must be of pure cotton that can soak sweat and give a soft feeling on the skin. At the same time, it is very essential that the dress fits you well. Too big or too tight clothing at the time of yoga can make it very uncomfortable for you.
To start with your yoga classes you really need not to spend 200 bucks just on these Yoga gear, you only have to act and choose wisely. You always need not to buy a high brand product for your yoga classes, but the comparatively less expensive brands also have some great accessories for this purpose. You can make shopping for your yoga classes easily on the net.
The online stores such as Amazon.com and ebay.com provide a great collection of the yoga tools, and at the same time they provides great discounts that can be very beneficial to save you some money.
http://TheYogaStore.com is a good site to look for all types of yoga gear items. Just visit the site and get the help. | {
"redpajama_set_name": "RedPajamaC4"
} | 8,724 |
Walter F. Tichy (born April 22, 1952, in Bad Reichenhall) is a German computer scientist. He was professor of computer science at the Karlsruhe Institute of Technology in Germany where he taught classes in software engineering until April 2022 when he retired.
To the larger software development community he is best known as the initial developer of the RCS revision control system. However, he has also written highly cited works on experimental software engineering, the string-to-string correction problem, software configuration management, and extreme programming.
References
External links
IPD Tichy - Prof. Dr. Walter F. Tichy - Website at the university department
Articles by Walter F. Tichy
German computer scientists
Living people
1952 births | {
"redpajama_set_name": "RedPajamaWikipedia"
} | 5,856 |
Nach Baliye 9: Mouni Roy To Be Seen Alongside Salman Khan In The Grand Premiere?
July 4th, 2019 | by vBollywood Author
Nach Baliye 9 is one of the most popular dance reality show which has been intriguing its huge fan base ever since its announcement. We all know that Salman Khan is producing this years' show and as per latest reports, Gold star Mouni Roy will be seen alongside the Blockbuster Khan for a segment in the grand premiere.
Mouni Roy will reportedly showcase her Kathak skills on the Kalank song, Ghar More Pardesiya, and while it's already been pre-recorded, there's another segment with Salman and contestants planned. According to a source close to Mumbai Mirror reports the same as, "While it has been pre-recorded for the premiere, she will join Salman and the contestants for the shoot again for a brief segment." The actress will also introduce a pair of contestants alongside the actor.
Mouni Roy had earlier shared her feelings about her love for the dance form as, "I would love to do something related to Kathak and other classical dance forms. That's something I have dreamt of doing since I was a little girl.
The actress will next be seen in Brahmastra alongside Alia Bhatt and Ranbir Kapoor.
While names of contestants like Yuvika Chaudhary – Prince Narula, Abhinav Shukla – Rubina Dilaik Gautam Rode – Pankhuri Awasthy, Anita Hassanandani – Rohit Reddy, Parth Samthaan – Erica Fernandes, are taking on the social media, Kasautii Zindagii Kay actress Urvashi Dholakia and Chandrakanta actor Vishal Singh have confirmed their participation in the show.
Presently an online portal had reported that Nach Baliye 8 winner Divyanka Tripathi will host the show. Confirming the same, Divyanka also had shared, "Vivek and I are doing it together for the pre-launch episode. I always wanted to host with Vivek. I'm very happy that we got this opportunity in Nach, which has been a very important part of our life."
Photo courtesy: www.koimoi.com
Grand Premiere Mouni Roy Nach Baliye 9 Salman Khan | {
"redpajama_set_name": "RedPajamaCommonCrawl"
} | 1,123 |
Q: El número de emails recibidos en Gmail no coincide con el que exportado por el código En mi bandeja de entrada de Gmail tengo un total de 1994 email en recibidos. Dispongo de un código que exporta 3 campos del email (Id, Asunto y Fecha) a una hoja de Google sheets. Con él descarga un total de 1809 filas. ¿Alguien sabría decirme por qué existe esa diferencia? He probado con otra cuenta y ocurre lo mismo.
function getMail1(){
var myspreadsheet =
SpreadsheetApp.openById('xxxxxxxxxxxxxxx');
var mysheet = myspreadsheet.getSheets()[0];
var start = 0;
var max = 249;
var count =0;
var row = mysheet.getLastRow()+1
while(count < 8)
{
var threads = GmailApp.getInboxThreads(start , max);
var messages = GmailApp.getMessagesForThreads(threads);
var froms = [];
messages.get
for(var i = 0; i < threads.length; i++)
{
froms.push([messages[i][0].getId(),messages[i][0].getSubject(),messages[i][0].getDate()]);
}
mysheet.getRange(mysheet.getLastRow()+1,1,threads.length,3).setValues(froms);
start = start + 250;
Logger.log(start);
count++;
Logger.log(count);
}}
Aquí el registro de la ejecución:
22:38:59 Aviso Se ha iniciado la ejecución
22:39:14 Información 250.0
22:39:14 Información 1.0
22:39:29 Información 500.0
22:39:29 Información 2.0
22:39:45 Información 750.0
22:39:45 Información 3.0
22:40:00 Información 1000.0
22:40:00 Información 4.0
22:40:17 Información 1250.0
22:40:17 Información 5.0
22:40:32 Información 1500.0
22:40:32 Información 6.0
22:40:48 Información 1750.0
22:40:48 Información 7.0
22:40:53 Información 2000.0
22:40:53 Información 8.0
A: He estado analizando la solución al problema de exportar mensajes en lugar de conversaciones de mi bandeja de recibidos de Gmail. Lo he conseguido, pero los procesos son bastante lentos. No sé cómo mejorar mi código. ¿alguien me podria ayudar?
function getMailToSheetMessages() {
var myspreadsheet =
SpreadsheetApp.openById('1sruWW6lDDkRtW7ksuFxRiPvwqkHFMnnf753LJjTSDtw'); // Doc Google de referencia
var mysheet = myspreadsheet.getSheets()[0]; // Hoja 1
var start = 0;
var max = 1;
var count =0;
while(count < 11)
{
// var firstThread = GmailApp.getInboxThreads(start , max)[0];
var firstThread = GmailApp.getInboxThreads(start , max)[0];
// var messages = threads.getMessages();
var messages = GmailApp.getMessagesForThread(firstThread);
var froms = [];
for (var i = 0; i < firstThread.getMessages().length; i++)
{
froms.push([messages[i].getSubject(),messages[i].getDate(),messages[i].getFrom()]);
}
start++;
count++;
mysheet.getRange(mysheet.getLastRow()+1,1, messages.length,3).setValues(froms);
}}
| {
"redpajama_set_name": "RedPajamaStackExchange"
} | 7,465 |
{"url":"http:\/\/wiki.nmr-relax.com\/CR72","text":"CR72\n\nThe Carver and Richards 1972 2-site relaxation dispersion model for SQ CPMG-type data for most time scales whereby the simplification R2A0 = R2B0 is assumed. This model is labelled as CR72 in relax.\n\nEquation\n\n$R_{2,\\textrm{eff}} = \\frac{R_2^A+R_2^B+k_{\\textrm{EX}}}{2} - \\nu_{\\textrm{cpmg}} \\cosh^{-1} (D_+\\cosh(\\eta_+) - D_-\\cos(\\eta_-)) \\\\ \\phantom{R_{2,\\textrm{eff}}} = R_2 + \\frac{k_{\\textrm{EX}}}{2} - \\nu_{\\textrm{cpmg}} \\cosh^{-1} (D_+\\cosh(\\eta_+) - D_-\\cos(\\eta_-))$\n\nWhich have the following definitions\n\n$\\zeta = 2 \\Delta \\omega \\, (R_2^A - R_2^B - p_A k_{\\textrm{EX}} + p_B k_{\\textrm{EX}}) \\\\ \\phantom{\\zeta} = - 2 \\Delta \\omega \\, ( p_A k_{\\textrm{EX}} - p_B k_{\\textrm{EX}}) \\\\ \\phantom{\\zeta} = - 2 \\Delta \\omega \\, ( k_{\\textrm{BA}} - k_{\\textrm{AB}}) \\\\ \\phantom{\\zeta} = - 2 \\Delta \\omega \\, k_{\\textrm{EX}} ( 2p_A - 1) \\\\ \\Psi = (p_B k_{\\textrm{EX}} - p_A k_{\\textrm{EX}})^2 + 4 p_A p_B k_{\\textrm{ex}}^2 - \\Delta \\omega^2 \\\\ \\phantom{\\Psi} = ( p_A k_{\\textrm{EX}} + p_B k_{\\textrm{EX}} )^2 - \\Delta \\omega^2 \\\\ \\phantom{\\Psi} = k_{\\textrm{ex}}^2 - \\Delta \\omega^2 \\\\ \\eta_+ = \\frac{1}{2\\sqrt{2} \\, \\nu_{\\textrm{cpmg}}}\\sqrt{+\\Psi + \\sqrt{\\Psi^2 + \\zeta^2}} \\\\ \\eta_- = \\frac{1}{2\\sqrt{2} \\, \\nu_{\\textrm{cpmg}}}\\sqrt{-\\Psi + \\sqrt{\\Psi^2 + \\zeta^2}} \\\\ D_+=\\frac{1}{2}\\left(1+\\frac{\\Psi+2\\Delta \\omega^2}{\\sqrt{\\Psi^2+\\zeta^2}} \\right) \\\\ D_-=\\frac{1}{2}\\left(-1+\\frac{\\Psi+2\\Delta \\omega^2}{\\sqrt{\\Psi^2+\\zeta^2}} \\right)$\n\nkex is the chemical exchange rate constant, pA and pB are the populations of states A and B, and \u0394\u03c9 is the chemical shift difference between the two states in ppm.\n\nParameters\n\nThe CR72 model has the parameters {R20, ..., pA, \u0394\u03c9, kex}.\n\nReference\n\nThe reference for the CR72 model is:\n\n\u2022 Carver, J. P. and Richards, R. 
E. (1972). General 2-site solution for chemical exchange produced dependence of T2 upon Carr-Purcell pulse separation. J. Magn. Reson., 6(1), 89-105. (DOI: 10.1016\/0022-2364(72)90090-X)\n\nRelated models\n\nThe CR72 model is a parametric restriction of the CR72 full model.","date":"2018-03-18 15:48:11","metadata":"{\"extraction_info\": {\"found_math\": true, \"script_math_tex\": 0, \"script_math_asciimath\": 0, \"math_annotations\": 0, \"math_alttext\": 0, \"mathml\": 0, \"mathjax_tag\": 0, \"mathjax_inline_tex\": 1, \"mathjax_display_tex\": 0, \"mathjax_asciimath\": 0, \"img_math\": 0, \"codecogs_latex\": 0, \"wp_latex\": 0, \"mimetex.cgi\": 0, \"\/images\/math\/codecogs\": 0, \"mathtex.cgi\": 0, \"katex\": 0, \"math-container\": 0, \"wp-katex-eq\": 0, \"align\": 0, \"equation\": 0, \"x-ck12\": 0, \"texerror\": 0, \"math_score\": 0.5140878558158875, \"perplexity\": 12602.272161685154}, \"config\": {\"markdown_headings\": false, \"markdown_code\": true, \"boilerplate_config\": {\"ratio_threshold\": 0.18, \"absolute_threshold\": 10, \"end_threshold\": 15, \"enable\": true}, \"remove_buttons\": true, \"remove_image_figures\": true, \"remove_link_clusters\": true, \"table_config\": {\"min_rows\": 2, \"min_cols\": 3, \"format\": \"plain\"}, \"remove_chinese\": true, \"remove_edit_buttons\": true, \"extract_latex\": true}, \"warc_path\": \"s3:\/\/commoncrawl\/crawl-data\/CC-MAIN-2018-13\/segments\/1521257645824.5\/warc\/CC-MAIN-20180318145821-20180318165821-00029.warc.gz\"}"} | null | null |
Q: Difficulty getting desired output file type in function After going through a couple of chapters of "Learn You A Haskell", I wanted to write something hands on and decided to implement a Sudoku solver.I am trying to implement the B2 function from here: http://www.cse.chalmers.se/edu/year/2013/course/TDA555/lab3.html
Here's my code:
data Sudoku = Sudoku { getSudoku :: [[Maybe Int]] } deriving (Show, Eq)
printSudoku :: Sudoku -> IO ()
printSudoku s = do
putStrLn . unlines . map (map (maybe '.' (head . show))) $ rows s
stringToSudoku :: String -> [[Maybe Int]]
stringToSudoku [] = []
stringToSudoku s = (f x):stringToSudoku y
where (x,y) = splitAt 9 s
f = map (\x -> if (digitToInt x)==0 then Nothing else Just (digitToInt x))
readSudoku :: FilePath -> IO Sudoku
readSudoku path = do
handle <- openFile path ReadMode
contents <- hGetContents handle
return $ Sudoku $ stringToSudoku contents
I am able to get the desired output:
readSudoku "sudoku.txt" >>= printSudoku
.......1.
4........
.2.......
....5.4.7
..8...3..
..1.9....
3..4..2..
.5.1.....
...8.6...
However, I had to convert [[Maybe Int]] to Sudoku in the readSudoku function. It should be possible to do this in the stringToSudoku function right?
The sudoku.txt file contains 1 line
000000010400000000020000000000050407008000300001090000300400200050100000000806000
A: If you want stringToSudoku to return Sudoku you could do:
stringToSudoku :: String -> Sudoku
stringToSudoku s = Sudoku $ stringToGrid s
where stringToGrid [] = []
stringToGrid s = let (x, y) = splitAt 9 s
f = map (\x -> if (digitToInt x)==0 then Nothing else Just (digitToInt x))
in (f x):stringToGrid y
| {
"redpajama_set_name": "RedPajamaStackExchange"
} | 2,149 |
Q: How to delete cookie after closing the tab while other tabs are open I'm implementing an asp.net core 3.1 project. My problem is I want the cookie to be deleted when the user closes the project tab while other tabs are still open. Right now after running the project, while other tabs are open, after I close the tab that shows my program and rerun my project again, I don't need to login and the cookie isn't deleted. But I want in that situation to login as well. I appreciate it if anyone tells me the expression I need to use to solve the issue. For implementing the project, I authenticate the user via ldap with the below expressions in Startup.cs:
public void ConfigureServices(IServiceCollection services)
{
services.AddControllersWithViews();
services.AddDbContext<CSDContext>(options =>
options.UseSqlServer(Configuration.GetConnectionString("CSDContext")));
services.AddScoped<IAuthenticationService, LdapAuthenticationService>();
services.AddAuthentication(CookieAuthenticationDefaults.AuthenticationScheme).AddCookie(options =>
{
// Cookie settings
//set the cookie name here
options.Cookie.Name = "UserLoginCookie"; // Name of cookie
options.Cookie.HttpOnly = true;
options.ExpireTimeSpan = TimeSpan.FromMinutes(15);
options.LoginPath = "/Account/Login";
options.AccessDeniedPath = "/Account/UserAccessDenied";
options.AccessDeniedPath = "/Account/AccessDenied";
options.SlidingExpiration = true;
});
services.AddSession();
services.AddSingleton<MySharedDataViewComponent>();
services.AddHttpContextAccessor();
}
public void Configure(IApplicationBuilder app, IWebHostEnvironment env, ILoggerFactory loggerFactory)
{
if (env.IsDevelopment())
{
app.UseDeveloperExceptionPage();
}
else
{
app.UseExceptionHandler("/Home/Error");
// The default HSTS value is 30 days. You may want to change this for production scenarios, see https://aka.ms/aspnetcore-hsts.
app.UseHsts();
}
app.UseSession();
app.UseHttpsRedirection();
app.UseStaticFiles();
app.UseRouting();
app.UseAuthentication();
app.UseAuthorization();
app.UseEndpoints(endpoints =>
{
endpoints.MapControllerRoute(
name: "default",
pattern: "{controller=Home}/{action=Index}/{id?}");
});
}
Now I used the below expression for deleting cookie after closing the browser.
var authProperties = new AuthenticationProperties
{
IsPersistent = false
};
The above expression works and it deletes cookie if all I close all the tabs and browser.
| {
"redpajama_set_name": "RedPajamaStackExchange"
} | 1,090 |
package org.tiscs.reststack.core.contexts;
import org.apache.commons.dbcp2.BasicDataSource;
import org.apache.ibatis.session.SqlSessionFactory;
import org.mybatis.spring.SqlSessionFactoryBean;
import org.mybatis.spring.annotation.MapperScan;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.ComponentScan;
import org.springframework.context.annotation.Configuration;
import org.springframework.core.io.support.PathMatchingResourcePatternResolver;
import org.springframework.jdbc.datasource.DataSourceTransactionManager;
import org.springframework.transaction.PlatformTransactionManager;
import org.springframework.transaction.annotation.EnableTransactionManagement;
@Configuration
@EnableTransactionManagement
@ComponentScan("org.tiscs.reststack.*.models.mappers")
@MapperScan("org.tiscs.reststack.*.models.mappers")
public class DbContext {
@Bean(destroyMethod = "close")
public BasicDataSource dataSource() {
BasicDataSource dataSource = new BasicDataSource();
dataSource.setDriverClassName("org.postgresql.Driver");
dataSource.setUsername("postgres");
dataSource.setPassword("postgres");
        dataSource.setUrl("jdbc:postgresql://localhost:5432/reststack?charSet=UTF-8&currentSchema=identity");
return dataSource;
}
@Bean
public PlatformTransactionManager transactionManager() {
return new DataSourceTransactionManager(dataSource());
}
@Bean
public SqlSessionFactory sessionFactory() throws Exception {
SqlSessionFactoryBean sessionFactoryBean = new SqlSessionFactoryBean();
// https://github.com/manniwood/mmpt 2.1
sessionFactoryBean.setTypeHandlersPackage("org.tiscs.reststack.core.postgres");
sessionFactoryBean.setTypeAliasesPackage("org.tiscs.reststack.core.postgres");
sessionFactoryBean.setDataSource(dataSource());
PathMatchingResourcePatternResolver resolver = new PathMatchingResourcePatternResolver();
sessionFactoryBean.setMapperLocations(resolver.getResources("classpath*:/org/tiscs/reststack/*/models/mappers/*.pgsql.xml"));
return sessionFactoryBean.getObject();
}
}
| {
"redpajama_set_name": "RedPajamaGithub"
} | 4,333 |
package org.apache.harmony.text.tests.java.text;
import junit.framework.TestCase;
import java.text.ChoiceFormat;
import java.text.DateFormat;
import java.text.FieldPosition;
import java.text.Format;
import java.text.MessageFormat;
import java.text.NumberFormat;
import java.text.ParseException;
import java.text.ParsePosition;
import java.text.SimpleDateFormat;
import java.util.Calendar;
import java.util.Date;
import java.util.GregorianCalendar;
import java.util.Locale;
import java.util.TimeZone;
public class MessageFormatTest extends TestCase {
private MessageFormat format1, format2, format3;
private Locale defaultLocale;
/**
* @tests java.text.MessageFormat#MessageFormat(java.lang.String,
* java.util.Locale)
*/
public void test_ConstructorLjava_lang_StringLjava_util_Locale() {
// Test for method java.text.MessageFormat(java.lang.String,
// java.util.Locale)
Locale mk = new Locale("mk", "MK");
MessageFormat format = new MessageFormat(
"Date: {0,date} Currency: {1, number, currency} Integer: {2, number, integer}",
mk);
assertTrue("Wrong locale1", format.getLocale().equals(mk));
assertTrue("Wrong locale2", format.getFormats()[0].equals(DateFormat
.getDateInstance(DateFormat.DEFAULT, mk)));
assertTrue("Wrong locale3", format.getFormats()[1].equals(NumberFormat
.getCurrencyInstance(mk)));
assertTrue("Wrong locale4", format.getFormats()[2].equals(NumberFormat
.getIntegerInstance(mk)));
}
/**
* @tests java.text.MessageFormat#MessageFormat(java.lang.String)
*/
public void test_ConstructorLjava_lang_String() {
// Test for method java.text.MessageFormat(java.lang.String)
MessageFormat format = new MessageFormat(
"abc {4,time} def {3,date} ghi {2,number} jkl {1,choice,0#low|1#high} mnop {0}");
assertTrue("Not a MessageFormat",
format.getClass() == MessageFormat.class);
Format[] formats = format.getFormats();
assertNotNull("null formats", formats);
assertTrue("Wrong format count: " + formats.length, formats.length >= 5);
assertTrue("Wrong time format", formats[0].equals(DateFormat
.getTimeInstance()));
assertTrue("Wrong date format", formats[1].equals(DateFormat
.getDateInstance()));
assertTrue("Wrong number format", formats[2].equals(NumberFormat
.getInstance()));
assertTrue("Wrong choice format", formats[3].equals(new ChoiceFormat(
"0.0#low|1.0#high")));
assertNull("Wrong string format", formats[4]);
Date date = new Date();
FieldPosition pos = new FieldPosition(-1);
StringBuffer buffer = new StringBuffer();
format.format(new Object[] { "123", new Double(1.6), new Double(7.2),
date, date }, buffer, pos);
String result = buffer.toString();
buffer.setLength(0);
buffer.append("abc ");
buffer.append(DateFormat.getTimeInstance().format(date));
buffer.append(" def ");
buffer.append(DateFormat.getDateInstance().format(date));
buffer.append(" ghi ");
buffer.append(NumberFormat.getInstance().format(new Double(7.2)));
buffer.append(" jkl high mnop 123");
assertTrue("Wrong answer:\n" + result + "\n" + buffer, result
.equals(buffer.toString()));
assertEquals("Simple string", "Test message", new MessageFormat("Test message").format(
new Object[0]));
result = new MessageFormat("Don't").format(new Object[0]);
assertTrue("Should not throw IllegalArgumentException: " + result,
"Dont".equals(result));
try {
new MessageFormat("Invalid {1,foobar} format descriptor!");
fail("Expected test_ConstructorLjava_lang_String to throw IAE.");
} catch (IllegalArgumentException ex) {
// expected
}
try {
new MessageFormat(
"Invalid {1,date,invalid-spec} format descriptor!");
} catch (IllegalArgumentException ex) {
// expected
}
// Regression for HARMONY-65
try {
new MessageFormat("{0,number,integer");
fail("Assert 0: Failed to detect unmatched brackets.");
} catch (IllegalArgumentException e) {
// expected
}
}
/**
* @tests java.text.MessageFormat#applyPattern(java.lang.String)
*/
public void test_applyPatternLjava_lang_String() {
// Test for method void
// java.text.MessageFormat.applyPattern(java.lang.String)
MessageFormat format = new MessageFormat("test");
format.applyPattern("xx {0}");
assertEquals("Invalid number", "xx 46", format.format(
new Object[] { new Integer(46) }));
Date date = new Date();
String result = format.format(new Object[] { date });
String expected = "xx " + DateFormat.getInstance().format(date);
assertTrue("Invalid date:\n" + result + "\n" + expected, result
.equals(expected));
format = new MessageFormat("{0,date}{1,time}{2,number,integer}");
format.applyPattern("nothing");
assertEquals("Found formats", "nothing", format.toPattern());
format.applyPattern("{0}");
assertNull("Wrong format", format.getFormats()[0]);
assertEquals("Wrong pattern", "{0}", format.toPattern());
format.applyPattern("{0, \t\u001ftime }");
assertTrue("Wrong time format", format.getFormats()[0]
.equals(DateFormat.getTimeInstance()));
assertEquals("Wrong time pattern", "{0,time}", format.toPattern());
format.applyPattern("{0,Time, Short\n}");
assertTrue("Wrong short time format", format.getFormats()[0]
.equals(DateFormat.getTimeInstance(DateFormat.SHORT)));
assertEquals("Wrong short time pattern",
"{0,time,short}", format.toPattern());
format.applyPattern("{0,TIME,\nmedium }");
assertTrue("Wrong medium time format", format.getFormats()[0]
.equals(DateFormat.getTimeInstance(DateFormat.MEDIUM)));
assertEquals("Wrong medium time pattern",
"{0,time}", format.toPattern());
format.applyPattern("{0,time,LONG}");
assertTrue("Wrong long time format", format.getFormats()[0]
.equals(DateFormat.getTimeInstance(DateFormat.LONG)));
assertEquals("Wrong long time pattern",
"{0,time,long}", format.toPattern());
format.setLocale(Locale.FRENCH); // use French since English has the
// same LONG and FULL time patterns
format.applyPattern("{0,time, Full}");
assertTrue("Wrong full time format", format.getFormats()[0]
.equals(DateFormat.getTimeInstance(DateFormat.FULL,
Locale.FRENCH)));
assertEquals("Wrong full time pattern",
"{0,time,full}", format.toPattern());
format.setLocale(Locale.getDefault());
format.applyPattern("{0, date}");
assertTrue("Wrong date format", format.getFormats()[0]
.equals(DateFormat.getDateInstance()));
assertEquals("Wrong date pattern", "{0,date}", format.toPattern());
format.applyPattern("{0, date, short}");
assertTrue("Wrong short date format", format.getFormats()[0]
.equals(DateFormat.getDateInstance(DateFormat.SHORT)));
assertEquals("Wrong short date pattern",
"{0,date,short}", format.toPattern());
format.applyPattern("{0, date, medium}");
assertTrue("Wrong medium date format", format.getFormats()[0]
.equals(DateFormat.getDateInstance(DateFormat.MEDIUM)));
assertEquals("Wrong medium date pattern",
"{0,date}", format.toPattern());
format.applyPattern("{0, date, long}");
assertTrue("Wrong long date format", format.getFormats()[0]
.equals(DateFormat.getDateInstance(DateFormat.LONG)));
assertEquals("Wrong long date pattern",
"{0,date,long}", format.toPattern());
format.applyPattern("{0, date, full}");
assertTrue("Wrong full date format", format.getFormats()[0]
.equals(DateFormat.getDateInstance(DateFormat.FULL)));
assertEquals("Wrong full date pattern",
"{0,date,full}", format.toPattern());
format.applyPattern("{0, date, MMM d {hh:mm:ss}}");
assertEquals("Wrong time/date format", " MMM d {hh:mm:ss}", ((SimpleDateFormat) (format
.getFormats()[0])).toPattern());
assertEquals("Wrong time/date pattern",
"{0,date, MMM d {hh:mm:ss}}", format.toPattern());
format.applyPattern("{0, number}");
assertTrue("Wrong number format", format.getFormats()[0]
.equals(NumberFormat.getNumberInstance()));
assertEquals("Wrong number pattern",
"{0,number}", format.toPattern());
format.applyPattern("{0, number, currency}");
assertTrue("Wrong currency number format", format.getFormats()[0]
.equals(NumberFormat.getCurrencyInstance()));
assertEquals("Wrong currency number pattern",
"{0,number,currency}", format.toPattern());
format.applyPattern("{0, number, percent}");
assertTrue("Wrong percent number format", format.getFormats()[0]
.equals(NumberFormat.getPercentInstance()));
assertEquals("Wrong percent number pattern",
"{0,number,percent}", format.toPattern());
format.applyPattern("{0, number, integer}");
assertEquals("Wrong integer number pattern",
"{0,number,integer}", format.toPattern());
format.applyPattern("{0, number, {'#'}##0.0E0}");
/*
* TODO validate these assertions
* String actual = ((DecimalFormat)(format.getFormats()[0])).toPattern();
* assertEquals("Wrong pattern number format", "' {#}'##0.0E0", actual);
* assertEquals("Wrong pattern number pattern", "{0,number,' {#}'##0.0E0}", format.toPattern());
*
*/
format.applyPattern("{0, choice,0#no|1#one|2#{1,number}}");
assertEquals("Wrong choice format",
"0.0#no|1.0#one|2.0#{1,number}", ((ChoiceFormat) format.getFormats()[0]).toPattern());
assertEquals("Wrong choice pattern",
"{0,choice,0.0#no|1.0#one|2.0#{1,number}}", format.toPattern());
assertEquals("Wrong formatted choice", "3.6", format.format(
new Object[] { new Integer(2), new Float(3.6) }));
try {
format.applyPattern("WRONG MESSAGE FORMAT {0,number,{}");
fail("Expected IllegalArgumentException for invalid pattern");
} catch (IllegalArgumentException e) {
}
// Regression for HARMONY-65
MessageFormat mf = new MessageFormat("{0,number,integer}");
String badpattern = "{0,number,#";
try {
mf.applyPattern(badpattern);
fail("Assert 0: Failed to detect unmatched brackets.");
} catch (IllegalArgumentException e) {
// expected
}
}
/**
* @tests java.text.MessageFormat#clone()
*/
public void test_clone() {
// Test for method java.lang.Object java.text.MessageFormat.clone()
MessageFormat format = new MessageFormat("'{'choice'}'{0}");
MessageFormat clone = (MessageFormat) format.clone();
assertTrue("Clone not equal", format.equals(clone));
assertEquals("Wrong answer",
"{choice}{0}", format.format(new Object[] {}));
clone.setFormat(0, DateFormat.getInstance());
assertTrue("Clone shares format data", !format.equals(clone));
format = (MessageFormat) clone.clone();
Format[] formats = clone.getFormats();
((SimpleDateFormat) formats[0]).applyPattern("adk123");
assertTrue("Clone shares format data", !format.equals(clone));
}
/**
* @tests java.text.MessageFormat#equals(java.lang.Object)
*/
public void test_equalsLjava_lang_Object() {
// Test for method boolean
// java.text.MessageFormat.equals(java.lang.Object)
MessageFormat format1 = new MessageFormat("{0}");
MessageFormat format2 = new MessageFormat("{1}");
assertTrue("Should not be equal", !format1.equals(format2));
format2.applyPattern("{0}");
assertTrue("Should be equal", format1.equals(format2));
SimpleDateFormat date = (SimpleDateFormat) DateFormat.getTimeInstance();
format1.setFormat(0, DateFormat.getTimeInstance());
format2.setFormat(0, new SimpleDateFormat(date.toPattern()));
assertTrue("Should be equal2", format1.equals(format2));
}
/**
* @tests java.text.MessageFormat#formatToCharacterIterator(java.lang.Object)
*/
public void failing_test_formatToCharacterIteratorLjava_lang_Object() {
// Test for method formatToCharacterIterator(java.lang.Object)
new Support_MessageFormat("test_formatToCharacterIteratorLjava_lang_Object").t_formatToCharacterIterator();
}
/**
* @tests java.text.MessageFormat#format(java.lang.Object[],
* java.lang.StringBuffer, java.text.FieldPosition)
*/
public void test_format$Ljava_lang_ObjectLjava_lang_StringBufferLjava_text_FieldPosition() {
// Test for method java.lang.StringBuffer
// java.text.MessageFormat.format(java.lang.Object [],
// java.lang.StringBuffer, java.text.FieldPosition)
MessageFormat format = new MessageFormat("{1,number,integer}");
StringBuffer buffer = new StringBuffer();
format.format(new Object[] { "0", new Double(53.863) }, buffer,
new FieldPosition(0));
assertEquals("Wrong result", "54", buffer.toString());
format
.applyPattern("{0,choice,0#zero|1#one '{1,choice,2#two {2,time}}'}");
Date date = new Date();
String expected = "one two "
+ DateFormat.getTimeInstance().format(date);
String result = format.format(new Object[] { new Double(1.6),
new Integer(3), date });
assertTrue("Choice not recursive:\n" + expected + "\n" + result,
expected.equals(result));
}
/**
* @tests java.text.MessageFormat#format(java.lang.Object,
* java.lang.StringBuffer, java.text.FieldPosition)
*/
public void test_formatLjava_lang_ObjectLjava_lang_StringBufferLjava_text_FieldPosition() {
// Test for method java.lang.StringBuffer
// java.text.MessageFormat.format(java.lang.Object,
// java.lang.StringBuffer, java.text.FieldPosition)
new Support_MessageFormat(
"test_formatLjava_lang_ObjectLjava_lang_StringBufferLjava_text_FieldPosition")
.t_format_with_FieldPosition();
}
/**
* @tests java.text.MessageFormat#getFormats()
*/
public void test_getFormats() {
// Test for method java.text.Format []
// java.text.MessageFormat.getFormats()
// test with repeating formats and max argument index < max offset
Format[] formats = format1.getFormats();
Format[] correctFormats = new Format[] {
NumberFormat.getCurrencyInstance(),
DateFormat.getTimeInstance(),
NumberFormat.getPercentInstance(), null,
new ChoiceFormat("0#off|1#on"), DateFormat.getDateInstance(), };
assertEquals("Test1:Returned wrong number of formats:",
correctFormats.length, formats.length);
for (int i = 0; i < correctFormats.length; i++) {
assertEquals("Test1:wrong format for pattern index " + i + ":",
correctFormats[i], formats[i]);
}
// test with max argument index > max offset
formats = format2.getFormats();
correctFormats = new Format[] { NumberFormat.getCurrencyInstance(),
DateFormat.getTimeInstance(),
NumberFormat.getPercentInstance(), null,
new ChoiceFormat("0#off|1#on"), DateFormat.getDateInstance() };
assertEquals("Test2:Returned wrong number of formats:",
correctFormats.length, formats.length);
for (int i = 0; i < correctFormats.length; i++) {
assertEquals("Test2:wrong format for pattern index " + i + ":",
correctFormats[i], formats[i]);
}
// test with argument number being zero
formats = format3.getFormats();
assertEquals("Test3: Returned wrong number of formats:", 0,
formats.length);
}
/**
* @tests java.text.MessageFormat#getFormatsByArgumentIndex()
*/
public void test_getFormatsByArgumentIndex() {
// Test for method java.text.Format [] test_getFormatsByArgumentIndex()
// test with repeating formats and max argument index < max offset
Format[] formats = format1.getFormatsByArgumentIndex();
Format[] correctFormats = new Format[] { DateFormat.getDateInstance(),
new ChoiceFormat("0#off|1#on"), DateFormat.getTimeInstance(),
NumberFormat.getCurrencyInstance(), null };
assertEquals("Test1:Returned wrong number of formats:",
correctFormats.length, formats.length);
for (int i = 0; i < correctFormats.length; i++) {
assertEquals("Test1:wrong format for argument index " + i + ":",
correctFormats[i], formats[i]);
}
// test with max argument index > max offset
formats = format2.getFormatsByArgumentIndex();
correctFormats = new Format[] { DateFormat.getDateInstance(),
new ChoiceFormat("0#off|1#on"), null,
NumberFormat.getCurrencyInstance(), null, null, null, null,
DateFormat.getTimeInstance() };
assertEquals("Test2:Returned wrong number of formats:",
correctFormats.length, formats.length);
for (int i = 0; i < correctFormats.length; i++) {
assertEquals("Test2:wrong format for argument index " + i + ":",
correctFormats[i], formats[i]);
}
// test with argument number being zero
formats = format3.getFormatsByArgumentIndex();
assertEquals("Test3: Returned wrong number of formats:", 0,
formats.length);
}
/**
* @tests java.text.MessageFormat#setFormatByArgumentIndex(int,
* java.text.Format)
*/
public void test_setFormatByArgumentIndexILjava_text_Format() {
// test for method setFormatByArgumentIndex(int, Format)
MessageFormat f1 = (MessageFormat) format1.clone();
f1.setFormatByArgumentIndex(0, DateFormat.getTimeInstance());
f1.setFormatByArgumentIndex(4, new ChoiceFormat("1#few|2#ok|3#a lot"));
// test with repeating formats and max argument index < max offset
// compare getFormatsByArgumentIndex() results after calls to
// setFormatByArgumentIndex()
Format[] formats = f1.getFormatsByArgumentIndex();
Format[] correctFormats = new Format[] { DateFormat.getTimeInstance(),
new ChoiceFormat("0#off|1#on"), DateFormat.getTimeInstance(),
NumberFormat.getCurrencyInstance(),
new ChoiceFormat("1#few|2#ok|3#a lot") };
assertEquals("Test1A:Returned wrong number of formats:",
correctFormats.length, formats.length);
for (int i = 0; i < correctFormats.length; i++) {
assertEquals("Test1B:wrong format for argument index " + i + ":",
correctFormats[i], formats[i]);
}
// compare getFormats() results after calls to
// setFormatByArgumentIndex()
formats = f1.getFormats();
correctFormats = new Format[] { NumberFormat.getCurrencyInstance(),
DateFormat.getTimeInstance(), DateFormat.getTimeInstance(),
new ChoiceFormat("1#few|2#ok|3#a lot"),
new ChoiceFormat("0#off|1#on"), DateFormat.getTimeInstance(), };
assertEquals("Test1C:Returned wrong number of formats:",
correctFormats.length, formats.length);
for (int i = 0; i < correctFormats.length; i++) {
assertEquals("Test1D:wrong format for pattern index " + i + ":",
correctFormats[i], formats[i]);
}
// test setting argumentIndexes that are not used
MessageFormat f2 = (MessageFormat) format2.clone();
f2.setFormatByArgumentIndex(2, NumberFormat.getPercentInstance());
f2.setFormatByArgumentIndex(4, DateFormat.getTimeInstance());
formats = f2.getFormatsByArgumentIndex();
correctFormats = format2.getFormatsByArgumentIndex();
assertEquals("Test2A:Returned wrong number of formats:",
correctFormats.length, formats.length);
for (int i = 0; i < correctFormats.length; i++) {
assertEquals("Test2B:wrong format for argument index " + i + ":",
correctFormats[i], formats[i]);
}
formats = f2.getFormats();
correctFormats = format2.getFormats();
assertEquals("Test2C:Returned wrong number of formats:",
correctFormats.length, formats.length);
for (int i = 0; i < correctFormats.length; i++) {
assertEquals("Test2D:wrong format for pattern index " + i + ":",
correctFormats[i], formats[i]);
}
// test exceeding the argumentIndex number
MessageFormat f3 = (MessageFormat) format3.clone();
f3.setFormatByArgumentIndex(1, NumberFormat.getCurrencyInstance());
formats = f3.getFormatsByArgumentIndex();
assertEquals("Test3A:Returned wrong number of formats:", 0,
formats.length);
formats = f3.getFormats();
assertEquals("Test3B:Returned wrong number of formats:", 0,
formats.length);
}
/**
* @tests java.text.MessageFormat#setFormatsByArgumentIndex(java.text.Format[])
*
* Covers three scenarios against the fixtures built in setUp():
* 1) format1 (repeating arguments, max argument index < max offset),
* 2) format2 (argument indices 8 and 6 that exceed the input array length,
*    plus unused indices that must be ignored),
* 3) format3 (a pattern with no arguments at all, so nothing is set).
*/
public void test_setFormatsByArgumentIndex$Ljava_text_Format() {
// test for method setFormatByArgumentIndex(Format[])
MessageFormat f1 = (MessageFormat) format1.clone();
// test with repeating formats and max argument index < max offset
// compare getFormatsByArgumentIndex() results after calls to
// setFormatsByArgumentIndex(Format[])
// inputFormats[i] is the format for argument index i; argument 0 repeats
// in the pattern, so one entry serves several placeholders.
Format[] correctFormats = new Format[] { DateFormat.getTimeInstance(),
new ChoiceFormat("0#off|1#on"), DateFormat.getTimeInstance(),
NumberFormat.getCurrencyInstance(),
new ChoiceFormat("1#few|2#ok|3#a lot") };
f1.setFormatsByArgumentIndex(correctFormats);
Format[] formats = f1.getFormatsByArgumentIndex();
// getFormatsByArgumentIndex() must echo back exactly what was set.
assertEquals("Test1A:Returned wrong number of formats:",
correctFormats.length, formats.length);
for (int i = 0; i < correctFormats.length; i++) {
assertEquals("Test1B:wrong format for argument index " + i + ":",
correctFormats[i], formats[i]);
}
// compare getFormats() results after calls to
// setFormatByArgumentIndex()
// getFormats() is ordered by position in the pattern (offset order),
// not by argument index, hence the re-ordered expectation below.
formats = f1.getFormats();
correctFormats = new Format[] { NumberFormat.getCurrencyInstance(),
DateFormat.getTimeInstance(), DateFormat.getTimeInstance(),
new ChoiceFormat("1#few|2#ok|3#a lot"),
new ChoiceFormat("0#off|1#on"), DateFormat.getTimeInstance(), };
assertEquals("Test1C:Returned wrong number of formats:",
correctFormats.length, formats.length);
for (int i = 0; i < correctFormats.length; i++) {
assertEquals("Test1D:wrong format for pattern index " + i + ":",
correctFormats[i], formats[i]);
}
// test setting argumentIndexes that are not used
// format2 uses indices 8 and 6; entries for indices the pattern does not
// reference (including the nulls) must simply be ignored.
MessageFormat f2 = (MessageFormat) format2.clone();
Format[] inputFormats = new Format[] { DateFormat.getDateInstance(),
new ChoiceFormat("0#off|1#on"),
NumberFormat.getPercentInstance(),
NumberFormat.getCurrencyInstance(),
DateFormat.getTimeInstance(), null, null, null,
DateFormat.getTimeInstance() };
f2.setFormatsByArgumentIndex(inputFormats);
formats = f2.getFormatsByArgumentIndex();
// Compare against the untouched fixture: the call must not disturb the
// formats of arguments it does not cover.
correctFormats = format2.getFormatsByArgumentIndex();
assertEquals("Test2A:Returned wrong number of formats:",
correctFormats.length, formats.length);
for (int i = 0; i < correctFormats.length; i++) {
assertEquals("Test2B:wrong format for argument index " + i + ":",
correctFormats[i], formats[i]);
}
formats = f2.getFormats();
correctFormats = new Format[] { NumberFormat.getCurrencyInstance(),
DateFormat.getTimeInstance(), DateFormat.getDateInstance(),
null, new ChoiceFormat("0#off|1#on"),
DateFormat.getDateInstance() };
assertEquals("Test2C:Returned wrong number of formats:",
correctFormats.length, formats.length);
for (int i = 0; i < correctFormats.length; i++) {
assertEquals("Test2D:wrong format for pattern index " + i + ":",
correctFormats[i], formats[i]);
}
// test exceeding the argumentIndex number
// format3 has no arguments, so setting formats is a no-op and both
// accessors must return empty arrays.
MessageFormat f3 = (MessageFormat) format3.clone();
f3.setFormatsByArgumentIndex(inputFormats);
formats = f3.getFormatsByArgumentIndex();
assertEquals("Test3A:Returned wrong number of formats:", 0,
formats.length);
formats = f3.getFormats();
assertEquals("Test3B:Returned wrong number of formats:", 0,
formats.length);
}
/**
* @tests java.text.MessageFormat#parse(java.lang.String,
* java.text.ParsePosition)
*
* Exercises parsing with a non-zero start index, multi-argument patterns,
* a repeated argument (last occurrence wins), and a ParsePosition whose
* error index was pre-set.
*/
public void test_parseLjava_lang_StringLjava_text_ParsePosition() {
MessageFormat format = new MessageFormat("date is {0,date,MMM d, yyyy}");
// Start parsing at index 2, skipping the leading "xx".
ParsePosition pos = new ParsePosition(2);
Object[] result = (Object[]) format
.parse("xxdate is Feb 28, 1999", pos);
assertTrue("No result: " + result.length, result.length >= 1);
assertTrue("Wrong answer", ((Date) result[0])
.equals(new GregorianCalendar(1999, Calendar.FEBRUARY, 28)
.getTime()));
// Three comma-separated string arguments.
MessageFormat mf = new MessageFormat("vm={0},{1},{2}");
result = mf.parse("vm=win,foo,bar", new ParsePosition(0));
assertTrue("Invalid parse", result[0].equals("win")
&& result[1].equals("foo") && result[2].equals("bar"));
// Argument {0} appears three times; the value parsed last ("c") must win.
mf = new MessageFormat("{0}; {0}; {0}");
String parse = "a; b; c";
result = mf.parse(parse, new ParsePosition(0));
assertEquals("Wrong variable result", "c", result[0]);
// A stale error index on the reused ParsePosition must not affect parsing.
mf = new MessageFormat("before {0}, after {1,number}");
parse = "before you, after 42";
pos.setIndex(0);
pos.setErrorIndex(8);
result = mf.parse(parse, pos);
assertEquals(2, result.length);
}
/**
 * @tests java.text.MessageFormat#setLocale(java.util.Locale)
 *
 * Verifies that setLocale() updates getLocale() and that the new locale
 * is applied to sub-formats created by a later applyPattern() call.
 */
public void test_setLocaleLjava_util_Locale() {
    MessageFormat mf = new MessageFormat("date {0,date}");
    mf.setLocale(Locale.CHINA);
    assertEquals("Wrong locale1", Locale.CHINA, mf.getLocale());
    // Re-applying a pattern must build its date sub-format with the
    // locale set above, not the JVM default.
    mf.applyPattern("{1,date}");
    DateFormat expected = DateFormat.getDateInstance(DateFormat.DEFAULT,
            Locale.CHINA);
    assertEquals("Wrong locale3", expected, mf.getFormats()[0]);
}
/**
 * @tests java.text.MessageFormat#toPattern()
 *
 * toPattern() must round-trip the pattern given to the constructor, and
 * must not throw for a choice sub-format (regression for HARMONY-59).
 */
public void test_toPattern() {
    String input = "[{0}]";
    MessageFormat formatter = new MessageFormat(input);
    String roundTripped = formatter.toPattern();
    assertTrue("Wrong pattern", roundTripped.equals(input));
    // Regression for HARMONY-59: calling toPattern() on a pattern with a
    // choice format used to fail.
    MessageFormat choice = new MessageFormat("CHOICE {1,choice}");
    choice.toPattern();
}
/**
 * Sets up the fixture, for example, open a network connection. This method
 * is called before a test is executed.
 *
 * Pins the default locale to US (restored in tearDown()) and builds the
 * three MessageFormat fixtures shared by the tests.
 */
protected void setUp() {
    defaultLocale = Locale.getDefault();
    Locale.setDefault(Locale.US);
    // Fixture 1: repeating formats and max argument index < max offset.
    format1 = new MessageFormat(
            "A {3, number, currency} B {2, time} C {0, number, percent} D {4} E {1,choice,0#off|1#on} F {0, date}");
    // Fixture 2: max argument index (8) > max offset.
    format2 = new MessageFormat(
            "A {3, number, currency} B {8, time} C {0, number, percent} D {6} E {1,choice,0#off|1#on} F {0, date}");
    // Fixture 3: a pattern with no arguments at all.
    format3 = new MessageFormat("A B C D E F");
}
/**
 * Tears down the fixture, for example, close a network connection. This
 * method is called after a test is executed.
 */
protected void tearDown() {
// Restore the JVM default locale saved in setUp() so the Locale.US
// override (or any per-test locale change) does not leak into other tests.
Locale.setDefault(defaultLocale);
}
/**
 * @tests java.text.MessageFormat(java.util.Locale)
 *
 * The constructor must reject a pattern with an unmatched '{'
 * (regression for HARMONY-65).
 */
public void test_ConstructorLjava_util_Locale() {
    // Regression for HARMONY-65
    boolean rejected = false;
    try {
        new MessageFormat("{0,number,integer", Locale.US);
    } catch (IllegalArgumentException e) {
        // expected
        rejected = true;
    }
    if (!rejected) {
        fail("Assert 0: Failed to detect unmatched brackets.");
    }
}
/**
 * Regression for HARMONY-1875: a date pattern with a 9-digit year field
 * must zero-pad the year; a null argument array leaves the placeholder
 * untouched; a null element formats as the literal string "null".
 */
public void test_format_Object() {
    // Regression for HARMONY-1875
    // Save the process-wide defaults so this test does not leak its
    // Locale/TimeZone overrides into other tests (the original code
    // changed TimeZone.getDefault() permanently).
    Locale savedLocale = Locale.getDefault();
    TimeZone savedZone = TimeZone.getDefault();
    try {
        Locale.setDefault(Locale.CANADA);
        TimeZone.setDefault(TimeZone.getTimeZone("UTC"));
        String pat = "text here {0, date, yyyyyyyyy } and here";
        // 1198141737640L is 2007-12-20T08:28:57.640Z; the 9-digit year
        // field pads the year to "000002007".
        String etalon = "text here 000002007 and here";
        MessageFormat obj = new MessageFormat(pat);
        assertEquals(etalon, obj.format(new Object[] { new Date(1198141737640L) }));
        // Null argument array: placeholders are echoed back verbatim.
        assertEquals("{0}", MessageFormat.format("{0}", (Object[]) null));
        // Null element: formatted as the string "null".
        assertEquals("nullABC",
                MessageFormat.format("{0}{1}", (Object[]) new String[] { null, "ABC" }));
    } finally {
        TimeZone.setDefault(savedZone);
        Locale.setDefault(savedLocale);
    }
}
/**
 * Regression for HARMONY-5323: an argument index with more than one digit
 * (here {10}) must be parsed as a single index, not as {1} followed by a
 * literal '0'.
 */
public void testHARMONY5323() {
    Object[] messageArgs = new Object[11];
    for (int i = 0; i < messageArgs.length; i++) {
        messageArgs[i] = "dumb" + i;
    }
    String res = MessageFormat.format("bgcolor=\"{10}\"", messageArgs);
    // JUnit convention: expected value first, actual second (the original
    // call had them reversed, which produces a misleading failure message).
    assertEquals("bgcolor=\"dumb10\"", res);
}
}
| {
"redpajama_set_name": "RedPajamaGithub"
} | 6,237 |
\section{Introduction}
The emergence and popularization of social networking services constitutes an unprecedented social phenomenon that has transformed the way people communicate, get access to different kinds of information, establish communities and many other things. These novel communication channels allow for the fast and massive diffusion of both information and disinformation, a feature that has been well exploited by marketing agencies, social movements, political parties and government agencies, among others. It is therefore relevant to understand the process of information diffusion over this kind of networks.
Among the most popular social networking services, such as Facebook, YouTube or Instagram, the microblogging site Twitter stands as particularly effective for information diffusion purposes. According to 2016 data (\url{about.twitter.com}), Twitter has approximately 320 million active users (accounts that show activity at least once a month), which represent approximately 9 $\%$ of total Internet users worldwide (\url{www.itu.int}). According to these same sources, approximately 500 million messages are sent over this network everyday.
The growing interest in modeling and understanding different dynamical processes that occur on this social network is manifested in the large number of studies on this matter in recent years. Kawamoto et al. have proposed a multiplicative process model for information spread \cite{kawamoto2013,kawamoto2014}. Kwon et al. have proposed models for the evolution of the number of messages, the propensity to send or resend messages and have categorized messages according to predictability and sustainability \cite{kwon2012,kwon2013,ko2014}. Weng et al. have elaborated an agent-based model for information overflow and have discovered similarities between images diffusion over Twitter and epidemic spreads \cite{weng2012,weng2013}. Mathiesen et al. have studied scaling laws of big brands tweet-rates, which have been modeled through classic stochastic equations \cite{mathiesen2013,mollgaard2015}. Sutton et al. have made statistical analysis for the diffusion of official warnings during disasters and have identified some factors that contribute to information diffusion \cite{sutton2015}. There are also some works that model topic popularity and information spread with SIR or SIRI-like equations \cite{xiong2012,jin2013,skaza2017}. Bao et al. have studied the predictability of the number of times a message will be shared or resent \cite{bao2019}. Bauman et al. have modeled community polarization on social networks and specifically analyzed this with Twitter data \cite{baumann2020}. Yook et al. have developed models to account for the observed probability distributions and scaling laws of images and topics popularity \cite{yook2020}. There are as well many other studies for different kinds of phenomena that occur on this social network, other than dynamical processes, see for example \cite{gonccalves2011,bovet2018,bovet2019,zhang2018}. 
Finally, there are many other studies for this kind of phenomena on other social networks, see for example \cite{crane2008,hogg2009,wu2007,miotto2014,miotto2017,wang2018}.
In this paper we propose and validate a model, based on master equations, for the temporal evolution of the number of times a certain topic or label appears on the Twitter network (these labels are called \emph{hashtags}, as we explain in the next section). Notice this is not a model for the number of times a message is sent or shared, but for the number of times it appears on the network, which depends on the number of links the nodes that are sending this message have (the degree distribution of the network). Clearly, a label being shared by nodes with a few links will behave differently, on a global scale, than a label being shared by nodes with many links. We use this as measure of popularity for the label or topic and construct our model under the hypotheses that this popularity is influenced by the degree distribution (a feature that is intrinsic to the network) and also by the extrinsic popularity of the topic (see \cite{bandari2012} for a discussion on this subject). Data obtained through the Twitter API show that our model is indeed plausible. As far as we know, this is the first attempt to approach this phenomenon with semi-deterministic models.
This paper is organized as follows: in section \ref{section-network} we describe the phenomenon we want to study on Twitter in terms of network theory; in section \ref{section-model} we develop the model, based on master equations; we show in section \ref{section-solutions} how to obtain solutions for the mean number of messages and its variance; in section \ref{section-popularity} we explain how we modeled the extrinsic topic-popularity function; in section \ref{section-validation} we show how we calibrated the model data and demonstrate that the model is consistent with empirical data from Twitter; finally, section \ref{section-discussion} discusses implications and limitations of the model, as well as future research paths.
\section{Twitter as a directed network}
\label{section-network}
From a network perspective, Twitter is a directed network where nodes are Twitter users and links represent a follower/friend relationship between them. Users interact on the network by sending messages called \emph{tweets}. Not every user on the network receives all messages. A \emph{follower} of user $i$ is a user that receives all messages sent by $i$. If $j$ is a follower of $i$, then $j$ receives messages sent by $i$ but not the other way around. If $j$ is a follower of $i$, then we say that $i$ is a \emph{friend} of $j$, and represent this in the adjacency matrix of the network through $a_{ij}=1$. In this way, there is a directed link in the network from node $i$ to node $j$, through which a message can flow. Every time $i$ sends a message, all of its followers receive it. If a user receives a message and decides to resend it to his or her followers, we say that this user \emph{retweets} the message. We say that the original message is a tweet and the resent message is a \emph{retweet}. In this way, a specific message can propagate through the network via retweets.
A \emph{hashtag} is a keyword or phrase used to describe a certain topic or theme. Hashtags are preceded by the hash sign ($\#$) and they are widely used because they categorize tweets in a way that is easy for other users to find. Many different messages can be categorized by a common hashtag; if this is the case, all these messages usually speak about a common topic or theme. A certain hashtag propagates through the network if users retweet messages that contain it, or if they send new messages categorized by the same hashtag. A hashtag propagates and popularizes when many users are sending messages about a topic of current interest.
A word, phrase, topic or hashtag that is mentioned at a greater rate than others is said to be a \emph{trending topic}. Trending topics become popular either through a concerted effort by users or because of an event that prompts people to talk about a specific topic. We recall that the purpose of this work is to model with master equations the popularity evolution of a hashtag or topic. We develop this model in the next section.
\section{The model}
\label{section-model}
For simplicity, we assume that users read all messages they receive from their friends immediately after these are sent. Therefore, if a user with $n$ followers sends a message, we say that this message has $n$ \emph{reads} (indicating that $n$ users have received it). We want to model the time evolution for the number of reads $X(t)$ of all messages categorized by a specific hashtag. In this way, $X(t)$ is a measure of the popularity of a certain topic, phrase or news on the network at time $t$. At any fixed time, we consider $X(t)$ to be a random variable; our goal is to find an equation for the probability of having exactly $X$ reads of a certain hashtag at time $t$, which we denote $P(X=x,t)$.
We say that a user \emph{shoots} every time he or she sends or resends a message with the hashtag of interest. Let $N$ be the total number of users in the community. Let $w(t)$ be the average rate at which users shoot. This means that the average probability for every user to shoot in the time interval $(t,t+dt)$ is $w(t)dt$. Finally, let $f(y)$ be the out-degree distribution of the network, so the probability of a randomly picked user to have $y$ followers is $f(y)$. The contributions to $P(X=x,t)$ are the following:
\begin{itemize}
\item There were $x$ reads at time $t$ and nobody shot (which happens with probability $1-Nw(t)dt$),
\item there were $x-1$ reads at time $t$ and exactly one user with $y=1$ follower shot (which happens with probability $Nw(t)dtf(1)$),
$$\vdots$$
\item there were $0$ reads at time $t$ and exactly one user with $y=x$ followers shot (which happens with probability $Nw(t)dtf(x)$).
\end{itemize}
Since we will consider the limit of very short time intervals, $dt\rightarrow 0$, other possible contributions, such as more than one user shooting during the interval $(t,t+dt)$, do not need to be included, as their contribution will be of higher order in $dt$. Summing up all contributions we get the equation, from the law of total probability,
$$
P(x,t+dt)=\displaystyle
P(x,t)[1-Nw(t)dt]+Nw(t)dt\sum_{i=1}^xP(x-i,t)f(i) + O(dt^2).
$$
Rearranging terms and taking the continuous-time limit $dt\rightarrow 0$ we obtain the partial differential equation for $P(x,t)$,
$$
\displaystyle \frac{\partial P(x,t)}{\partial t} =
-Nw(t)\left[P(x,t)-\sum_{i=1}^xP(x-i,t)f(i)\right].
$$
We can further approximate the out-degree distribution $f(y)$ to be a continuous distribution
with support $[m,\infty)$ so there is a minimum of (possibly zero) $m$ followers per user. With this approximation, we get the equation
$$
\displaystyle \frac{\partial P(x,t)}{\partial t} =
-Nw(t)\left[P(x,t)-\int_m^x P(x-y,t)f(y)dy\right].
$$
After a change of variable and rearranging terms, we finally get the equation
\begin{equation}
\label{master}
\displaystyle \frac{\partial P(x,t)}{\partial t} =
-Nw(t)P(x,t) + Nw(t)\int_0^{x-m}P(y,t)f(x-y)dy.
\end{equation}
This equation, along with the initial condition of zero reads at time $t=0$,
\begin{equation}
\label{delta}
P(x,0) = \delta(x)
\end{equation}
constitute a master equation for the evolution of the number of reads containing a certain hashtag in the network. In a mean-field framework, $w(t)$ is the probability density of an average user in the network to send or resend a message at time $t$; therefore, this function represents a measure of the popularity that the topic categorized by the hashtag has at time $t$. If the hashtag under consideration is very popular, then it has a high probability of being mentioned in new messages and the messages that contain it have a high probability of being resent. We will refer to this function $w(t)$ as the \emph{hashtag-popularity function}.
\section{Solutions for the mean and variance}
\label{section-solutions}
Explicit solutions for Eq.(\ref{master}) will depend on the forms of the popularity function $w(t)$ and the out-degree or followers distribution $f(y)$ and will be generally not available. However, we can get an equivalent equation for the moment generating function (mgf) of $X(t)$, which we will denote $M_X(s,t)$ and we will be able to utilize it to derive equations for the mean and variance of $X(t)$.
Consider the Laplace transform with respect to $x$,
$$
L_s^{(x)}[g(x)]= \displaystyle \int_0^\infty e^{-sx}g(x)dx.
$$
Direct integration shows that the Laplace transform of the integral on the right-hand side of Eq.(\ref{master}) is
$$
\begin{array}{lll}
\displaystyle L_s^{(x)} \left[\int_0^{x-m}P(y,t)f(x-y)dy\right]
& = & \displaystyle \int_0^\infty e^{-sy}P(y,t)dy \int_m^\infty e^{-sy}f(y)dy\\
& & \\
& = &
L_s^{(x)}[P(x,t)]E_f[e^{-sx}].
\end{array}
$$
From the relationship between the moment generating function and the Laplace transform $L_{-s}^{(x)}[P(x,t)] = M_X(s,t)$ we can derive an equation for $M_X(s,t)$ by taking the Laplace transform of Eq.(\ref{master}),
\begin{equation}
\label{mgf}
\frac{\partial M_X(s,t)}{\partial t} = N(M_f(s)-1)w(t)M_X(s,t).
\end{equation}
Here, $M_f(s)$ is the mgf of the out-degree or followers distribution $f(y)$. Taking the Laplace transform of the initial condition Eq.(\ref{delta}) we get
\begin{equation}
M_X(s,0)=1.
\end{equation}
Because of the popularity function $w(t)$, Eq.(\ref{mgf}) will be in general a non-linear differential equation for $M_X(s,t)$ and we cannot give a general explicit solution. We can, however, utilize the fact that the n-th moment of a distribution, if it exists, is given by the n-th derivative of the mgf evaluated at zero,
$$
E[X(t)^n] =\left. \frac{\partial ^n M_X(s,t)}{\partial s^n} \right|_{s=0}.
$$
For $n=1$, we obtain a very simple equation for the expectation of $X(t)$,
$$
\frac{dE[X(t)]}{dt}=Nw(t)\langle f \rangle, \qquad E[X(0)]=0,
$$
where $\langle f \rangle$ is the first moment of the out-degree distribution, i.e. the mean number of followers of users in the community. This equation has the solution
\begin{equation}
\label{expectation}
\displaystyle E[X(t)]=N\langle f \rangle \int_0^t w(s)ds.
\end{equation}
In a similar way, we can get an initial value problem for the second moment,
$$
\frac{dE[X^2(t)]}{dt}=Nw(t)[2\langle f \rangle E[X(t)]+\langle f^2 \rangle ], \qquad
E[X^2(0)]=0,
$$
where $\langle f^2 \rangle$ is the second moment of the followers distribution. Thus,
$$
E[X^2(t)]= \displaystyle N \int_0^t w(s)[2\langle f \rangle E[X(s)]+\langle f^2 \rangle ]ds.
$$
Finally, we can have an expression for the variance of $X(t)$,
$$
Var[X(t)]= \displaystyle N \int_0^t w(s)\left[2E[X(t)]\langle f \rangle + \langle f^2 \rangle \right]ds - \left(E[X(t)]\right)^2.
$$
Integrating by parts the first term of the variance, rearranging terms and simplifying, we get
\begin{equation}
\label{variance}
Var[X(t)] = \displaystyle N \langle f^2 \rangle \int_0^t w(s)ds = \frac{\langle f^2 \rangle}{\langle f \rangle} E[X(t)].
\end{equation}
\section{Modeling the popularity function}
\label{section-popularity}
Consider the simplest possible case, where the interest a hashtag produces remains constant over time, thus $w(t)$ is a constant function. Recall that $w(t)$ is a probability for any fixed time, so it must always lie in the interval $[0,1]$. By using $w(t)=c$ with $c\in [0,1]$, we obtain from Eqs.(\ref{expectation}) and (\ref{variance})
$$
E[X(t)] = Nc\langle f \rangle t, \qquad Var[X(t)]=Nc\langle f^2 \rangle t.
$$
A more realistic consideration is that the interest grows until it reaches a peak, then decays and vanishes for very large times. This behavior can be represented in several ways. Here we will examine one simple possibility, which is a function proportional to a gamma distribution kernel,
\begin{equation}
\label{w}
w(t) = c \frac{e^a}{(ab)^a}t^a e^{-t/b},
\end{equation}
where $a,b>0$ are parameters that control the shape of the interest function and $c\in[0,1]$ is the value of $w(t)$ at its peak. Notice that $w(t)$ reaches its maximum value $w_{max}=c$ at $t_{max}=a\cdot b$ and has an inflection point at $t_{inf}=a\cdot b + b\sqrt{a}$. With this popularity function, we get from Eqs.(\ref{expectation}) and (\ref{variance})
\begin{equation}
\begin{array} {c}
\displaystyle E[X(t)] = \frac{Ncbe^a \langle f \rangle}{a^a}\gamma (t/b,a+1), \\
\\
\displaystyle Var[X(t)] = \frac{Ncbe^a \langle f^2 \rangle}{a^a}\gamma (t/b,a+1).
\end{array}
\end{equation}
Here, $\gamma (x,s)$ is the lower incomplete gamma function, $\gamma (x,s) = \int_0^x e^{-t}t^{s-1}dt$. By utilizing the Stirling approximation for the gamma function
$$
\Gamma (z) = \sqrt{\frac{2\pi}{z}}\left(\frac{z}{e}\right)^z \left(1+O\left(\frac{1}{z}\right)\right),
$$
we can approximate the limits for the expectation and variance for very large times,
$$
\begin{array}{c}
\displaystyle E[X(t)] \longrightarrow \frac{Ncbe^a \langle f \rangle}{a^a} \Gamma (a+1) \simeq Nbc\langle f \rangle \sqrt{2\pi(a+1)},\\
\\
\displaystyle Var[X(t)] \longrightarrow \frac{Ncbe^a \langle f^2 \rangle}{a^a} \Gamma (a+1) \simeq Nbc\langle f^2 \rangle \sqrt{2\pi(a+1)}\\
\end{array}
$$
for large values of the parameter $a$.\\
Notice that this is not the only way in which we can model the popularity function, but it constitutes a relatively simple function that yields acceptable fits, as we will see in the following section.
\section{Model calibration and validation}
\label{section-validation}
In order to corroborate the validity of the model, we analyzed time evolution of popular trends and hashtags in Twitter during the first half of February 2020. We obtained data through the Twitter API with the \emph{rtweet} library for the statistical software R \cite{rtweet-package}. We implemented the following pipeline to contrast empirical observations with model predictions:
\begin{enumerate}
\item From the sample of tweets, directly compute number of different users $N$, mean number of followers $\langle f \rangle$ and mean square number of followers $\langle f^2 \rangle$.
\item Divide time interval of the sample into $n$ equal length sub-intervals, then compute fraction of different users that sent a message within each sub-interval. This gives us the empirical popularity function $w(t)$, since it approximates the probability for each user to send a message at any time.
\item Empirical $w(t)$ is usually very noisy, so we smooth this time series with a simple $k$-point moving average filter. This gives us a smoothed empirical popularity function.
\item Fit parameters for theoretical $w(t)$ with Levenberg-Marquardt non-linear least squares.
\item Utilize the cumulative sum of followers as an empirical approximation for the time evolution of $X(t)$.
\item With fitted $w(t)$ parameters, and knowing theoretical $E[X(t)]$ and $Var[X(t)]$,
construct $95 \%$ approximate confidence regions for $X(t)$ and contrast with empiric observations.
\end{enumerate}
We show in Fig. \ref{results}, the results of our analyses for three different trends and hashtags. From the database we collected, we chose three worldwide trending topics on three different time scales: first, the trending topic ``\emph{José Luis Cuerda}'', following the decease of this Spanish film director on February 4, 2020; this was a world trend for approximately two days. Second, the hashtag $\#KirkDouglasRIP$, which was a world trend for approximately one day after the decease of the American actor and film producer on February 5, 2020. Third, the hashtag $\#festivalsanremo2020$, which was a world trend for approximately three hours during the grand final of the San Remo Music Festival 2020 on February 8, 2020. The first three panels on this figure show the empiric popularity function, computed directly from the Twitter data, as well as the smoothed and fitted popularity functions. Notice how the empiric $w(t)$ is somewhat noisy, yet fitted and smoothed functions are very close to each other. The last three panels show in red the empiric number of reads $X(t)$, dashed lines are the expected number of reads predicted by the model, we show in shaded blue the approximate confidence region for $X(t)$ and the dotted blue line is the long-term expectation predicted by the model. Notice how in these tree cases the observed $X(t)$ stays within the confidence region for almost the entire time intervals. See, for example, how wide the confidence region is for $\#KirkDouglasRIP$ in comparison with the other two, which is a consequence of a relatively larger variance on the degree distribution for this community.
\begin{figure}[h]
\includegraphics[width=0.99\linewidth]{fig1.eps}
\caption{ A, B and C) Empiric, smoothed and fitted popularity function $w(t)$, D, E and F) Observed evolution of reads $X(t)$ and approximate confidence region predicted by the model for three different trends and hashtags. Last three figures also show the limit expectation for very large times.}
\label{results}
\end{figure}
\section{Discussion and conclusions}
\label{section-discussion}
We have presented a mathematical model, based on master equations, for the temporal evolution of the popularity of a certain hashtag or topic on the Twitter network. The measure we utilize for the popularity of a hashtag is the number of times it appears on the network, which depends on how many users have posted it and how many followers these users have. According to our model, there are two main components that influence this dynamics: on one side, certain characteristics of the community and the network such as number of nodes and mean and variance of the degree distribution; these are components that are intrinsic to the network. On the other side, we have the time evolution of the interest people have on the topic or hashtag we are modeling, which we quantify as the probability each user in the community has of sending a message as a function of time. This popularity function is an extrinsic component influencing this dynamics.
We utilized actual Twitter data, that we got from the public API, to calibrate the model (fit parameters from the empiric popularity function) and to compare its predictions and the actual observations. Even though we are not able to give an explicit solution for the master equation, we can compute the mean and variance and therefore construct approximate confidence regions. The examples we show in this paper confirm that our model is plausible and consistent with the observations.
We have used only one possibility to model the popularity function, one that is relatively simple and yields acceptable fits. However, other functions with similar behaviors may be used. More important than this is the fact that the parameters of this function are fixed, ignoring the possibility that the shape of the popularity function varies with time, for example through a back-feeding process (a popular hashtag gets more and more popular over time). The possibility of a popularity function that updates and that is itself an unknown function is a matter of future study.
The Twitter public API we utilized to gather our data base has some limitations: we can only make 18 thousand requests every 15 minutes and we can only access tweets that are 10 days old or newer. We believe that a more comprehensive data base would be helpful and illustrating to see the performance of our model on a more global scale. In spite of these limitations, we observed that our model is consistent with the observations. This is also a matter of future study.
Accurately predicting the evolution and impact a certain tweet or hashtag will have on the network is a difficult task and it is currently a matter of great interest. With this model, we hope to contribute to the understanding of this phenomenon. Finally, the activity on Twitter may not be completely different from dynamics on other social networks, online or offline; we believe that the present model, though very simple, can give interesting insights into the behavior of other networks.\\
\noindent \textbf{Author contributions:} Conceptualization, data collection, methodology, visualizations, writing, review and editing by OF and RM.
\noindent \textbf{Competing interests:} The authors declare that they have no known competing financial interests or personal relationships that could have appeared to influence the work reported in this paper.
\noindent \textbf{Acknowledgments:} Suggestions and comments by Mario Alejandro López Pérez and Ricardo Mansilla Sánchez are gratefully acknowledged. This study was supported by the UNAM-DGAPA Postdoctoral Scholarships Program at CEIICH, UNAM.
\bibliographystyle{unsrt}
| {
"redpajama_set_name": "RedPajamaArXiv"
} | 8,132 |
{"url":"http:\/\/mathoverflow.net\/revisions\/15510\/list","text":"There exist two binary trees with rotation distance $2n-6$. The proof is unexpected and based on hyperbolic geometry (Sleator, Tarjan, Thurston (1988), \"Rotation distance, triangulations, and hyperbolic geometry\").","date":"2013-05-23 05:53:03","metadata":"{\"extraction_info\": {\"found_math\": true, \"script_math_tex\": 0, \"script_math_asciimath\": 0, \"math_annotations\": 0, \"math_alttext\": 0, \"mathml\": 0, \"mathjax_tag\": 0, \"mathjax_inline_tex\": 1, \"mathjax_display_tex\": 0, \"mathjax_asciimath\": 0, \"img_math\": 0, \"codecogs_latex\": 0, \"wp_latex\": 0, \"mimetex.cgi\": 0, \"\/images\/math\/codecogs\": 0, \"mathtex.cgi\": 0, \"katex\": 0, \"math-container\": 0, \"wp-katex-eq\": 0, \"align\": 0, \"equation\": 0, \"x-ck12\": 0, \"texerror\": 0, \"math_score\": 0.3677423298358917, \"perplexity\": 2966.6185042970756}, \"config\": {\"markdown_headings\": false, \"markdown_code\": true, \"boilerplate_config\": {\"ratio_threshold\": 0.18, \"absolute_threshold\": 10, \"end_threshold\": 15, \"enable\": true}, \"remove_buttons\": true, \"remove_image_figures\": true, \"remove_link_clusters\": true, \"table_config\": {\"min_rows\": 2, \"min_cols\": 3, \"format\": \"plain\"}, \"remove_chinese\": true, \"remove_edit_buttons\": true, \"extract_latex\": true}, \"warc_path\": \"s3:\/\/commoncrawl\/crawl-data\/CC-MAIN-2013-20\/segments\/1368702900179\/warc\/CC-MAIN-20130516111500-00029-ip-10-60-113-184.ec2.internal.warc.gz\"}"} | null | null |
N-Acetylaspartic acid, or ''N''-acetylaspartate (NAA), is a derivative of aspartic acid with a formula of C6H9NO5 and a molecular weight of 175.139.
NAA is the second-most-concentrated molecule in the brain after the amino acid glutamate. It is detected in the adult brain in neurons, oligodendrocytes and myelin and is synthesized in the mitochondria from the amino acid aspartic acid and acetyl-coenzyme A.
Function
The various functions served by NAA are under investigation, but the primary proposed functions include:
Neuronal osmolyte that is involved in fluid balance in the brain
Source of acetate for lipid and myelin synthesis in oligodendrocytes, the glial cells that myelinate neuronal axons
Precursor for the synthesis of the neuronal dipeptide N-Acetylaspartylglutamate
Contributor to energy production from the amino acid glutamate in neuronal mitochondria
In the brain, NAA was thought to be present predominantly in neuronal cell bodies, where it acts as a neuronal marker, but it is also free to diffuse throughout neuronal fibers.
Applications
However, the recent discovery of a higher concentration of NAA in myelin and oligodendrocytes than in neurons raises questions about the validity of the use of NAA as a neuronal marker. NAA gives off the largest signal in magnetic resonance spectroscopy of the human brain. The levels measured there are decreased in numerous neuropathological conditions ranging from brain injury to stroke to Alzheimer's disease. This fact makes NAA a potential diagnostic molecule for doctors treating patients with brain damage or disease.
NAA may be a marker of creativity. High NAA levels in the hippocampus are related to better working memory performance in humans.
NAA may function as a neurotransmitter in the brain by acting on metabotropic glutamate receptors.
See also
Aspartoacylase
Canavan disease
References
Further reading
External links
GeneReviews/NCBI/UW/NIH entry on Canavan disease
Amino acid derivatives | {
"redpajama_set_name": "RedPajamaWikipedia"
} | 9,426 |
Johanna (Jeannette) Sofia Fredrika Silfverstolpe, född 1 juli 1808 i Åbo, död 18 januari 1869 i Stockholm, var en svensk friherrinna och tecknare.
Hon var dotter till geheimerådet friherre Knut von Troil och Johanna Margareta Groen och från 1837 gift med generallöjtnanten David Ludvig Silfverstolpe. Hon finns representerad med teckningen Målarkonstens ursprung i Christian Eichhorns samling.
Tryckta källor
Svenskt konstnärslexikon del V, sid 147, Allhems Förlag, Malmö.
Svenska tecknare under 1800-talet
Kvinnor
Födda 1808
Avlidna 1869
Personer från Åbo
Jeannette | {
"redpajama_set_name": "RedPajamaWikipedia"
} | 7,658 |
\section{Introduction}
A basic theme in dynamics is the investigation of the measure-theoretic entropy and its maximizing measures known as the measures of maximal entropy. By the pioneering work of R.~Bowen, D.~Ruelle, P.~Walters, Ya.~Sinai, M.~Lyubich, R.~Ma\~n\'e and many others, existence and uniqueness results of the measure of maximal entropy are known for uniformly expansive continuous dynamical systems, distance expanding continuous dynamical systems, uniformly hyperbolic smooth dynamical systems and rational maps on the Riemann sphere. In many cases, the measure of maximal entropy is also the asymptotic distribution of the period points (see \cite{Pa64, Si72,Bo75,Ly83,FLM83,Ru89,PU10}).
In this paper, we discuss a class of dynamical systems that are not among the classical dynamical systems mentioned above, namely, expanding Thurston maps on a topological $2$-sphere $S^2$. Thurston maps are branched covering maps on a sphere $S^2$ that generalize rational maps with finitely many post-critical points on the Riemann sphere. More precisely, a branched covering map $f\: S^2 \rightarrow S^2$ is a \emph{Thurston map} if its topological degree is at least 2 and if each of its finitely many critical points is preperiodic. These maps arose in W.P.~Thurston's study of a characterization of rational maps on the Riemann sphere in a general topological context (see \cite{DH93}). We will give a more detailed introduction to Thurston maps in Section~\ref{sctThurstonMap}.
In order to obtain the existence and uniqueness of the measure of maximal entropy of an Thurston map, one has to impose a condition of expansion for the map. More generally, P.~Ha\"issinsky and K.~Pilgrim introduced a notion of expansion for any finite branched coverings between two suitable topological spaces (see \cite[Section~2.1 and Section~2.2]{HP09}). We will use an equivalent definition in the context of Thurston maps formulated by M.~Bonk and D.~Meyer in \cite{BM10}. We will discuss the precise definition in Section~\ref{sctThurstonMap}. We call Thurston maps with this property \emph{expanding Thurston maps}. For a list of equivalent definitions of expanding Thurston maps, we refer to \cite[Proposition~8.2]{BM10}.
As mentioned earlier, words like ``expanding'' and ``expansive'' have been used in different contexts to describe various notions of expansion. Our notion of expansion differs from all of the classical notions (except that of \cite{HP09}), with the closest notion being that of \emph{piecewise expanding maps} from \cite{BS03}. Even though an expanding Thurston map $f$ is expanding, in the sense of \cite{BS03}, when restricted to any $1$-tile $X$ in the cell decompositions that we will discuss in Section~\ref{sctThurstonMap}, it is still not clear why $f$ is piecewise expanding in their sense.
As a consequence of their general results in \cite{HP09}, P.~Ha\"issinsky and K.~Pilgrim proved that for each expanding Thurston map, there exists a measure of maximal entropy and that the measure of maximal entropy is unique for an expanding Thurston map without periodic critical points. M.~Bonk and D.~Meyer then proved the existence and uniqueness of the measure of maximal entropy for all expanding Thurston maps using an explicit combinatorial construction \cite{BM10}.
In \cite{BM10}, M.~Bonk and D.~Meyer studied various properties of expanding Thurston maps and gave a wealth of combinatorial and analytical tools for such maps. Using the framework set in \cite{BM10}, we investigate in this paper the numbers and locations of the fixed points, periodic points, and preperiodic points of such maps. We establish equidistribution results of preimages of any point, of preperiodic points, and of periodic points, with respect to the measure of maximal entropy. We also generalize some of the results from \cite{BM10} in the development of this paper.
\smallskip
We will now give a brief description of the structure and main results of this paper.
After fixing notation in Section~\ref{sctNotation}, we introduce Thurston maps in Section~\ref{sctThurstonMap} and record, in some cases generalize, a few key concepts and results from \cite{BM10}.
In Section~\ref{sctFixedPts}, we study the fixed points, periodic points, and preperiodic points of the expanding Thurston maps. For the convenience of the reader, we first provide a direct proof in Proposition~\ref{propNoFixedPtsRational}, using knowledge from complex dynamics, of the fact that a rational expanding Thurston map $R$ on the Riemann sphere has exactly $1+\deg R$ fixed points. Then we set out to generalize this result to the class of expanding Thurston maps, and get our first main theorem.
\begin{theorem} \label{thmNoFixedPts}
Every expanding Thurston map $f\: S^2 \rightarrow S^2$ has $1+\deg f$ fixed points, counted with weight given by the local degree of the map at each fixed point.
\end{theorem}
Here $\deg f$ denotes the topological degree of the map $f$. The local degree is a natural weight for points on $S^2$ for expanding Thurston maps. P.~Ha\"issinsky and K.~Pilgrim also used the same weight in the general context they considered in \cite{HP09}. For a more detailed discussion on the local degree, we refer to Section~\ref{sctThurstonMap}.
We first observe that the statement of Theorem~\ref{thmNoFixedPts} agrees with what can be concluded from the Lefschetz fixed-point theorem (see for example, \cite[Chapter~3]{GP10}) if the map $f$ is smooth and the graph of $f$ intersects the diagonal of $S^2\times S^2$ transversely at each fixed point of $f$. However, an expanding Thurston map may not satisfy either of these conditions. It is not clear how to give a proof by using the Lefschetz fixed-point theorem.
The proof of Theorem~\ref{thmNoFixedPts} that we adopt here is quite different from that of the rational case. It uses the correspondence between the fixed points of $f$ and the $1$-tiles in some cell decomposition of $S^2$ induced by $f$ and its invariant Jordan curve $\mathcal{C}\subseteq S^2$, for the special case when $f$ has a special invariant Jordan curve $\mathcal{C}$. In fact, $f$ may not have such a Jordan curve, but by a main result of \cite{BM10}, for each $n$ large enough there exists an $f^n$-invariant Jordan curve. We will need a slightly stronger result as formulated in Lemma~\ref{lmCexists}. Then the general case follows from an elementary number-theoretic argument. One of the advantages of this proof is that we also exhibit an almost one-to-one correspondence between the fixed points and the $1$-tiles in the cell decomposition of $S^2$, which leads to precise information on the location of each fixed point. This information is essential later in the proof of the equidistribution of preperiodic and periodic points of expanding Thurston maps in Section~\ref{sctEquidistribution}. As a corollary of Theorem~\ref{thmNoFixedPts}, we give a formula in Corollary~\ref{corNoPrePeriodicPts} for the number of preperiodic points when counted with the corresponding weight.
In Section~\ref{sctEquidistribution}, the concepts of topological entropy, measure-theoretic entropy, and the measure of maximal entropy are reviewed. Then a number of equidistribution results are proved. More precisely, we first prove in Theorem~\ref{thmWeakConv} the equidistribution of the $n$-tiles in the tile decompositions discussed in Section~\ref{sctThurstonMap} with respect to the measure of maximal entropy $\mu_f$ of an expanding Thurston map $f$. The proof uses a combinatorial characterization of $\mu_f$ due to M.~Bonk and D.~Meyer \cite{BM10} that we will state explicitly in Theorem~\ref{thmBMCharactMOME}.
We then formulate the equidistribution of preimages with respect to the measure of maximal entropy $\mu_f$ in Theorem~\ref{thmWeakConvPreImg} below. Here we denote by $\delta_x$ the Dirac measure supported on a point $x$ in $S^2$.
\begin{theorem}[Equidistribution of preimages] \label{thmWeakConvPreImg}
Let $f\: S^2 \rightarrow S^2$ be an expanding Thurston map with its measure of maximal entropy $\mu_f$. Fix $p\in S^2$ and define the Borel probability measures
\begin{equation} \label{eqDistrPreImg}
\nu_i=\frac{1}{(\deg f)^i}\sum\limits_{q\in f^{-i}(p)} \deg_{f^i}(q) \delta_q, \qquad \widetilde{\nu}_i=\frac{1}{Z_i}\sum\limits_{q\in f^{-i}(p)}\delta_q,
\end{equation}
for each $i\in\N_0$, where $Z_i=\card\(f^{-i}(p)\)$. Then
\begin{equation} \label{eqWeakConvPreImgWithWeight}
\nu_i \stackrel{w^*}{\longrightarrow} \mu_f \text{ as } i\longrightarrow +\infty,
\end{equation}
\begin{equation} \label{eqWeakConvPreImgWoWeight}
\widetilde{\nu}_i \stackrel{w^*}{\longrightarrow} \mu_f \text{ as } i\longrightarrow +\infty.
\end{equation}
\end{theorem}
Here $\deg_{f^i}(x)$ denotes the local degree of the map $f^i$ at a point $x\in S^2$. In (\ref{eqWeakConvPreImgWithWeight}), (\ref{eqWeakConvPreImgWoWeight}), and similar statements below, the convergence of Borel measures is in the weak$^*$ topology, and we use $w^*$ to denote it. Note that the difference of $\nu_i$ and $\widetilde\nu_i$ is the weight at each preimage of $p$ under $f^i$. As mentioned earlier, the local degree is a natural weight for a point in $S^2$ in the context of Thurston maps. On the other hand, it is also natural to assign the same weight for each preimage.
After generalizing Lemma~\ref{lmCoverEdgesBM}, which is due to M.~Bonk and D.~Meyer \cite[Lemma~20.2]{BM10}, in Lemma~\ref{lmCoverEdges}, we prove the equidistribution of preperiodic points with respect to $\mu_f$.
\begin{theorem}[Equidistribution of preperiodic points] \label{thmWeakConvPrePerPts}
Let $f\: S^2 \rightarrow S^2$ be an expanding Thurston map with its measure of maximal entropy $\mu_f$. For each $m\in\N_0$ and each $n\in\N$ with $m<n$, we define the Borel probability measures
\begin{equation} \label{eqDistrPrePerPts}
\xi_n^m = \frac{1}{s_n^m} \sum\limits_{f^m(x)=f^n(x)} \deg_{f^n}(x) \delta_x, \qquad \widetilde\xi_n^m = \frac{1}{\widetilde s_n^m} \sum\limits_{f^m(x)=f^n(x)} \delta_x,
\end{equation}
where $s_n^m,\widetilde s_n^m$ are the normalizing factors defined in (\ref{eqSetPrePeriodicPts}) and (\ref{eqNoPrePeriodicPts}). If $\{m_n\}_{n\in\N}$ is a sequence in $\N_0$ such that $m_n <n$ for each $n\in\N$, then
\begin{equation} \label{eqWeakConvPrePerPtsWithWeight}
\xi_n^{m_n} \stackrel{w^*}{\longrightarrow} \mu_f \text{ as } n\longrightarrow +\infty,
\end{equation}
\begin{equation} \label{eqWeakConvPrePerPtsWoWeight}
\widetilde \xi_n^{m_n} \stackrel{w^*}{\longrightarrow} \mu_f \text{ as } n\longrightarrow +\infty.
\end{equation}
\end{theorem}
We prove in Corollary~\ref{corNoPrePeriodicPts} that $s_n^m = (\deg f)^n + (\deg f)^m$ for $m\in\N_0$, $n\in\N$ with $m<n$.
As a special case of Theorem~\ref{thmWeakConvPrePerPts}, we get the equidistribution of periodic points with respect to $\mu_f$.
\begin{cor}[Equidistribution of periodic points] \label{corWeakConvPerPts}
Let $f\: S^2 \rightarrow S^2$ be an expanding Thurston map with its measure of maximal entropy $\mu_f$. Then
\begin{equation} \label{eqWeakConvPerPts1}
\frac{1}{1+(\deg f)^n} \sum\limits_{x=f^n(x)} \deg_{f^n} (x) \delta_x \stackrel{w^*}{\longrightarrow} \mu_f \text{ as } n\longrightarrow +\infty,
\end{equation}
\begin{equation} \label{eqWeakConvPerPts2}
\frac{1}{\card \{x\in S^2 \,|\, x=f^n(x)\}} \sum\limits_{x=f^n(x)} \delta_x \stackrel{w^*}{\longrightarrow} \mu_f \text{ as } n\longrightarrow +\infty,
\end{equation}
\begin{equation} \label{eqWeakConvPerPts3}
\frac{1}{(\deg f)^n} \sum\limits_{x=f^n(x)} \delta_x \stackrel{w^*}{\longrightarrow} \mu_f \text{ as } n\longrightarrow +\infty.
\end{equation}
\end{cor}
The equidistribution (\ref{eqWeakConvPreImgWithWeight}), (\ref{eqWeakConvPreImgWoWeight}), (\ref{eqWeakConvPerPts1}), and (\ref{eqWeakConvPerPts2}) are analogs of corresponding results for rational maps on the Riemann sphere by M.~Lyubich \cite{Ly83}. Some ideas from \cite{Ly83} are used in the proofs of Theorem~\ref{thmWeakConvPreImg} and Theorem~\ref{thmWeakConvPrePerPts} as well. P.~Ha\"issinsky and K.~Pilgrim also proved (\ref{eqWeakConvPreImgWithWeight}) and (\ref{eqWeakConvPerPts1}) in their general context \cite{HP09}, which includes expanding Thurston maps.
The equidistribution (\ref{eqWeakConvPrePerPtsWithWeight}) and (\ref{eqWeakConvPrePerPtsWoWeight}) are inspired by the recent work of M.~Baker and L.~DeMarco \cite{BD11}. They used some equidistribution result of preperiodic points of rational maps on the Riemann sphere in the context of arithmetic dynamics.
We show in Corollary~\ref{corAsympRatioNoPerPts} that for each expanding Thurston map $f$, the exponential growth rate of the cardinality of the set of fixed points of $f^n$ is equal to the topological entropy $h_{\operatorname{top}}(f)$ of $f$, which is known to be equal to $\log(\deg f)$ (see for example, \cite[Corollary~20.8]{BM10}). This is analogous to the corresponding result for expansive homeomorphisms on compact metric spaces with the \emph{specification property} (see for example, \cite[Theorem~18.5.5]{KH95}).
In Section~\ref{sctFactor}, we prove in Theorem~\ref{thmLfactor} that for each expanding Thurston map $f$ with its measure of maximal entropy $\mu_f$, the measure-preserving dynamical system $(S^2,f,\mu_f)$ is a factor, in the category of measure-preserving dynamical systems, of the measure-preserving dynamical system of the left-shift operator on the one-sided infinite sequences of $\deg f$ symbols together with its measure of maximal entropy. This generalizes the corresponding result in \cite{BM10} in the category of topological dynamical systems, reformulated in Theorem~\ref{thmBMfactor}.
Finally, in Section~\ref{sctIteration}, we follow the idea of J.~Hawkins and M.~Taylor \cite{HT03} to prove in Theorem~\ref{thmRandomIntConv} that for each $p\in S^2$, the measure of maximal entropy $\mu_f$ of an expanding Thurston map $f$ is almost surely the limit of
$$
\frac1n \sum\limits_{i=0}^{n-1} \delta_{q_i}
$$
as $n\longrightarrow+\infty$ in the weak$^*$ topology, where $q_i$ is one of the points in $f^{-1}(q_{i-1})$, chosen with probability proportional to the weight given by the local degree of $f$ at each point in $f^{-1}(q_{i-1})$, for all $i\in\N$, and $q_0=p$. A similar result for certain hyperbolic rational maps on $S^2$ was proved by M.~Barnsley \cite{Bar88}. J.~Hawkins and M.~Taylor generalized it to any rational map on the Riemann sphere of degree $d\geq 2$ \cite{HT03}.
\bigskip
\noindent
\textbf{Acknowledgments.} The author wants to express his gratitude to M.~Bonk for introducing him to the subject of expanding Thurston maps and patiently teaching and guiding him as an advisor.
\section{Notation} \label{sctNotation}
Let $\C$ be the complex plane and $\widehat{\C}$ be the Riemann sphere. Let $\D$ be the open unit disk on $\C$. We use the convention that $\N=\{1,2,3,\dots\}$ and $\N_0 = \{0\} \cup \N$. We always use base $e$ for logarithm unless otherwise specified.
The cardinality of a set $A$ is denoted by $\card{A}$. For each $x\in\mathbb{R}$, we define $\lfloor x\rfloor$ as the greatest integer smaller than or equal to $x$, and $\lceil x \rceil$ the smallest integer greater than or equal to $x$.
Let $(X,d)$ be a metric space. For subsets $A,B\subseteq X$, we set $d(A,B)=\inf \{d(x,y)\,|\, x\in A,y\in B\}$, and $d(A,x)=d(x,A)=d(A,\{x\})$ for $x\in X$. For each subset $S\subseteq X$, we denote the diameter of $S$ by $\diam_d(S)=\sup\{d(x,y)\,|\,x,y\in S\}$. For each $r>0$, we set $N^r_d(A)$ to be the open $r$-neighborhood $\{y\in X \,|\, d(y,A)<r\}$ of $A$, and $\overline{N^r_d}(A)$ the closed $r$-neighborhood $\{y\in X \,|\, d(y,A)\leq r\}$ of $A$. The identity map $\id_X\: X\rightarrow X$ maps each $x\in X$ to $x$ itself. We denote by $C(X)$ the space of continuous functions from $X$ to $\mathbb{R}$, by $\mathcal{M}(X)$ the set of finite signed Borel measures, and $\mathcal{P}(X)$ the set of Borel probability measures on $X$. We use $\Norm{\cdot}$ to denote the total variation norm on $\mathcal{M}(X)$. For a point $x\in X$, we define $\delta_x$ as the Dirac measure supported on $\{x\}$. For $g\in C(X)$ we set $\mathcal{M}(X,g)$ to be the set of $g$-invariant Borel probability measures on $X$.
\section{Thurston maps} \label{sctThurstonMap}
In this section, we briefly review some key concepts and results on Thurston maps, and expanding Thurston maps in particular. For a more thorough treatment of the subject, we refer to \cite{BM10}. Towards the end of this section, we state and prove a slightly stronger version of one of the main theorems in \cite{BM10}, which we will repeatedly use in the following sections.
Let $S^2$ denote an oriented topological $2$-sphere. A continuous map $f\:S^2\rightarrow S^2$ is called a \defn{branched covering map} on $S^2$ if for each point $x\in S^2$, there exists $d\in \N$, open neighborhoods $U$ of $x$ and $V$ of $y=f(x)$, $U'$ and $V'$ of $0$ in $\widehat{\C}$, and orientation-preserving homeomorphisms $\varphi\:U\rightarrow U'$ and $\eta\:V\rightarrow V'$ such that $\varphi(x)=0$, $\eta(y)=0$ and
$$
(\eta\circ f\circ\varphi^{-1})(z)=z^d
$$
for each $z\in U'$. The positive integer $d$ above is called the \defn{local degree} of $f$ at $x$ and is denoted by $\deg_f (x)$. The \defn{(global) degree} of $f$ is defined as
\begin{equation} \label{eqDeg=SumLocalDegree}
\deg f=\sum\limits_{x\in f^{-1}(y)} \deg_f (x)
\end{equation}
for each $y\in S^2$. It is independent of $y\in S^2$. It is true that if $f\:S^2\rightarrow S^2$ and $g\:S^2\rightarrow S^2$ are two branched covering maps on $S^2$, then
\begin{equation} \label{eqLocalDegreeProduct}
\deg_{f\circ g}(x) = \deg_g(x)\deg_f(g(x)), \qquad \text{for each } x\in S^2,
\end{equation}
and moreover,
\begin{equation} \label{eqDegreeProduct}
\deg(f\circ g) = (\deg f)( \deg g).
\end{equation}
A point $x\in S^2$ is a \defn{critical point} of $f$ if $\deg_f(x) \geq 2$. The set of critical points of $f$ is denoted by $\operatorname{crit} f$. A point $y\in S^2$ is a \defn{postcritical point} of $f$ if $y \in \bigcup\limits_{n\in\N}\{f^n(x)\,|\,x\in\operatorname{crit} f\}$. The set of postcritical points of $f$ is denoted by $\operatorname{post} f$. Note that $\operatorname{post} f=\operatorname{post} f^n$ for all $n\in\N$.
\begin{definition} [Thurston maps] \label{defThurstonMap}
A Thurston map is a branched covering map $f\:S^2\rightarrow S^2$ on $S^2$ with $\deg f\geq 2$ and $\card(\operatorname{post} f)<+\infty$.
\end{definition}
We define two notions of equivalence for Thurston maps. The first one is the usual topological conjugation. We call the maps $f$ and $g$ \defn{topologically conjugate} if there exists a homeomorphism $h\: S^2\rightarrow S^2 $ such that $h\circ f = g\circ h$. The second one is a weaker notion due to W.P.~Thurston \cite{DH93}.
\begin{definition}[Thurston equivalence]\label{defThequiv}
Two Thurston maps $f\: S^2\rightarrow S^2$ and $g\: S^2\rightarrow S^2$ are called \defn{Thurston equivalent} if there exist homeomorphisms
$h_0,h_1 \:S^2\rightarrow S^2 $ that are isotopic rel.\ $\operatorname{post} f$ and satisfy $ h_0\circ f = g\circ h_1$.
\end{definition}
For the usual definition of an isotopy, we refer to \cite[Section~3]{BM10}.
We now set up the notation for cell decompositions of $S^2$. A \defn{cell of dimension $n$} in $S^2$, $n \in \{1,2\}$, is a subset $c\subseteq S^2$ that is homeomorphic to the closed unit ball $\overline{\B^n}$ in $\mathbb{R}^n$. We define the \defn{boundary of $c$}, denoted by $\partial c$, to be the set of points corresponding to $\partial\B^n$ under such a homeomorphism between $c$ and $\overline{\B^n}$. The \defn{interior of $c$} is defined to be $\inte c = c \setminus \partial c$. A cell $c$ of dimension 0 is a singleton set $\{x\}$ for some point $x\in S^2$. For cells $c$ with dimension $0$, we adopt the convention that $\partial c=\emptyset$ and $\inte c =c$.
The following three definitions are from \cite{BM10}.
\begin{definition}[Cell decompositions]\label{defcelldecomp}
Let $\mathbf{D}$ be a collection of cells in $S^2$. We say that $\mathbf{D}$ is a \defn{cell decomposition of $S^2$} if the following conditions are satisfied:
\begin{itemize}
\smallskip
\item[(i)]
the union of all cells in $\mathbf{D}$ is equal to $S^2$,
\smallskip
\item[(ii)] for $c_1,c_2 \in \mathbf{D}$ with $c_1 \ne c_2$, we have $\inte c_1 \cap \inte c_2= \emptyset$,
\smallskip
\item[(iii)] if $c\in \mathbf{D}$, then $\partial c$ is a union of cells in $\mathbf{D}$,
\smallskip
\item[(iv)] every point in $S^2$ has a neighborhood that meets only finitely many cells in $\mathbf{D}$.
\end{itemize}
\end{definition}
\begin{definition}[Refinements]\label{defrefine}
Let $\mathbf{D}'$ and $\mathbf{D}$ be two cell decompositions of $S^2$. We
say that $\mathbf{D}'$ is a \defn{refinement} of $\mathbf{D}$ if the following conditions are satisfied:
\begin{itemize}
\smallskip
\item[(i)] for every cell $c'\in \mathbf{D}'$ there exists a cell $c\in \mathbf{D}$ with $c'\subseteq c$,
\smallskip
\item[(ii)] every cell $c\in \mathbf{D}$ is the union of all cells $c'\in \mathbf{D}'$ with $c'\subseteq c$.
\end{itemize}
\end{definition}
\begin{definition}[Cellular maps and cellular Markov partitions]\label{defcellular}
Let $\mathbf{D}'$ and $\mathbf{D}$ be two cell decompositions of $S^2$. We say that a continuous function $f \: S^2 \rightarrow S^2$ is \defn{cellular} for $(\mathbf{D}', \mathbf{D})$ if for every cell $c\in \mathbf{D}'$, the restriction $f|_c$ is a homeomorphism of $c$ onto a cell in $\mathbf{D}$. We say that $(\mathbf{D}',\mathbf{D})$ is a \defn{cellular Markov partition} for $f$ if $f$ is cellular for $(\mathbf{D}',\mathbf{D})$ and $\mathbf{D}'$ is a refinement of $\mathbf{D}$.
\end{definition}
Let $f\:S^2 \rightarrow S^2$ be a Thurston map, and $\mathcal{C}\subseteq S^2$ be a Jordan curve such that $\operatorname{post} f\subseteq \mathcal{C}$. Then the pair $(f,\mathcal{C})$ induces natural cell decompositions $\mathbf{D}^n(f,\mathcal{C})$ of $S^2$, for $n\in\N_0$, in the following way:
By the Jordan curve theorem, the set $S^2\setminus\mathcal{C}$ has two connected components. We call the closure of one of them the \defn{white $0$-tile} for $(f,\mathcal{C})$, denoted by $X^0_w$, and the closure of the other one the \defn{black $0$-tile} for $(f,\mathcal{C})$, denoted by $X^0_b$. The set of \defn{$0$-tiles} is $\X^0(f,\mathcal{C})=\{X_b^0,X_w^0\}$. The set of \defn{$0$-vertices} is $\V^0(f,\mathcal{C})=\operatorname{post} f$. We define $\overline\V^0(f,\mathcal{C})$ to be $\{ \{x\} \,|\, x\in \V^0(f,\mathcal{C}) \}$. The set of \defn{$0$-edges} $\E^0(f,\mathcal{C})$ is the set of connected components of $\mathcal{C} \setminus \operatorname{post} f$. Then we get a cell decomposition
$$
\mathbf{D}^0(f,\mathcal{C})=\X^0(f,\mathcal{C}) \cup \E^0(f,\mathcal{C}) \cup \overline\V^0(f,\mathcal{C})
$$
of $S^2$ consisting of \defn{$0$-cells}.
One can recursively define, for each $n\in\N$, the unique cell decomposition $\mathbf{D}^n(f,\mathcal{C})$ consisting of \defn{$n$-cells} such that $f$ is cellular for $(\mathbf{D}^{n+1}(f,\mathcal{C}),\mathbf{D}^n(f,\mathcal{C}))$. For details, we refer to \cite[Lemma~5.4]{BM10}. We denote by $\X^n(f,\mathcal{C})$ the set of $n$-cells of dimension 2, called \defn{$n$-tiles}; by $\E^n(f,\mathcal{C})$ the set of $n$-cells of dimension 1, called \defn{$n$-edges}; by $\overline\V^n(f,\mathcal{C})$ the set of $n$-cells of dimension 0; and by $\V^n(f,\mathcal{C})$ the set $\{x\,|\, \{x\}\in \overline\V^n(f,\mathcal{C})\}$, called \defn{$n$-vertices}.
For the convenience of the reader, we record Proposition~6.1 of \cite{BM10} here in order to summarize properties of the cell decompositions $\mathbf{D}^n(f,\mathcal{C})$.
\begin{prop} \label{propCellDecomp}
Let $k,n\in \N_0$, let $f\: S^2\rightarrow S^2$ be a Thurston map, $\mathcal{C}\subseteq S^2$ be a Jordan curve with $\operatorname{post} f \subseteq \mathcal{C}$, and $m=\card(\operatorname{post} f)$.
\smallskip
\begin{itemize}
\smallskip
\item[(i)] The map $f^k$ is cellular for $(\mathbf{D}^{n+k}(f,\mathcal{C}), \mathbf{D}^n(f,\mathcal{C}))$. In particular, if $c$ is any $(n+k)$-cell, then $f^k(c)$ is an $n$-cell, and $f^k|_c$ is a homeomorphism of $c$ onto $f(c)$.
\smallskip
\item[(ii)] Let $c$ be an $n$-cell. Then $f^{-k}(c)$ is equal to the union of all
$(n+k)$-cells $c'$ with $f^k(c')=c$.
\smallskip
\item[(iii)] The $0$-skeleton of $\mathbf{D}^n(f,\mathcal{C})$ is the set $\V^n(f,\mathcal{C})=f^{-n}(\operatorname{post} f )$, and we have $\V^n(f,\mathcal{C}) \subseteq \V^{n+k}(f,\mathcal{C})$. The $1$-skeleton of $\mathbf{D}^n(f,\mathcal{C})$ is equal to $f^{-n}(\mathcal{C})$.
\smallskip
\item[(iv)] $\card (\V^n(f,\mathcal{C})) \leq m (\deg f)^n$, $\card(\E^n(f,\mathcal{C}))=m(\deg f)^n$, and
$\card(\X^n(f,\mathcal{C}))=2(\deg f)^n$.
\smallskip
\item[(v)] The $n$-edges are precisely the closures of the connected components of $f^{-n}(\mathcal{C})\setminus f^{-n}(\operatorname{post} f )$. The $n$-tiles are precisely the closures of the connected components of $S^2\setminus f^{-n}(\mathcal{C})$.
\smallskip
\item[(vi)] Every $n$-tile is an $m$-gon, i.e., the number of $n$-edges and the number of $n$-vertices contained in its boundary are equal to $m$.
\end{itemize}
\end{prop}
Here the \defn{$n$-skeleton}, for $n\in\{0,1,2\}$, of a cell decomposition of $S^2$ is the union of all $n$-cells in this cell decomposition.
For $n\in \N_0$, we define \defn{the set of black $n$-tiles} as
$$
\X_b^n(f,\mathcal{C})=\{X\in\X^n(f,\mathcal{C}) \, |\, f^n(X)=X_b^0\},
$$
and the \defn{set of white $n$-tiles} as
$$
\X_w^n(f,\mathcal{C})=\{X\in\X^n(f,\mathcal{C}) \, |\, f^n(X)=X_w^0\}.
$$
Moreover, for $n\in\N$, we define \defn{the set of black $n$-tiles contained in a white $(n-1)$-tile} as
$$
\X_{bw}^n(f,\mathcal{C}) = \{ X\in \X_b^n(f,\mathcal{C}) \, |\, \exists X'\in \X_w^{n-1}(f,\mathcal{C}), \, X\subseteq X' \},
$$
\defn{the set of black $n$-tiles contained in a black $(n-1)$-tile} as
$$
\X_{bb}^n(f,\mathcal{C}) = \{ X\in \X_b^n(f,\mathcal{C}) \, |\, \exists X'\in \X_b^{n-1}(f,\mathcal{C}),\, X\subseteq X' \},
$$
\defn{the set of white $n$-tiles contained in a black $(n-1)$-tile} as
$$
\X_{wb}^n(f,\mathcal{C}) = \{ X\in \X_w^n(f,\mathcal{C}) \, |\, \exists X'\in \X_b^{n-1}(f,\mathcal{C}),\, X\subseteq X' \},
$$
\defn{and the set of white $n$-tiles contained in a white $(n-1)$-tile} as
$$
\X_{ww}^n(f,\mathcal{C}) = \{ X\in \X_w^n(f,\mathcal{C}) \, |\, \exists X'\in \X_w^{n-1}(f,\mathcal{C}),\, X\subseteq X' \}.
$$
In other words, for example, a black $n$-tile is an $n$-tile that is mapped by $f^n$ to the black $0$-tile, and a black $n$-tile contained in a white $(n-1)$-tile is an $n$-tile that is contained in some white $(n-1)$-tile as a set, and is mapped by $f^n$ to the black $0$-tile.
From now on, we will say the cell decompositions induced by the pair $(f,\mathcal{C})$ and induced by $f$ and $\mathcal{C}$ interchangeably. If the pair $(f,\mathcal{C})$ is clear from the context, we will sometimes omit $(f,\mathcal{C})$ in the notation above.
We now define two notions of expansion by M.~Bonk and D.~Meyer \cite{BM10}.
It is proved in \cite[Corollary~6.4]{BM10} that for each expanding Thurs\-ton map $f$ (see Definition \ref{defExpanding} below), we have $\card(\operatorname{post} f) \geq 3$.
\begin{definition}[Joining opposite sides] \label{defConnectop}
Fix a Thurston map $f$ with $\card(\operatorname{post} f) \geq 3$ and an $f$-invariant Jordan curve $\mathcal{C}$ containing $\operatorname{post} f$. A set $K\subseteq S^2$ \defn{joins opposite sides} of $\mathcal{C}$ if $K$ meets two disjoint $0$-edges when $\card( \operatorname{post} f)\geq 4$, or $K$ meets all three $0$-edges when $\card(\operatorname{post} f)=3$.
\end{definition}
\begin{definition}[Combinatorial expansion]\label{defCombExpanding}
Let $f$ be a Thurston map. We say that $f$ is \defn{combinatorially expanding} if $\card (\operatorname{post} f)\geq 3$, and there exists an $f$-invariant Jordan curve $\mathcal{C}\subseteq S^2$ (i.e., $f(\mathcal{C})\subseteq \mathcal{C}$) with $\operatorname{post} f \subseteq \mathcal{C}$, and there exists a number $n_0\in \N$ such that none of the $n_0$-tiles in $\X^{n_0}(f,\mathcal{C})$ joins opposite sides of $\mathcal{C}$.
\end{definition}
\begin{definition} [Expansion] \label{defExpanding}
A Thurston map $f\:S^2\rightarrow S^2$ is called \defn{expanding} if there exist a metric $d$ on $S^2$ that induces the standard topology on $S^2$ and a Jordan curve $\mathcal{C}\subseteq S^2$ containing $\operatorname{post} f$ such that $\lim\limits_{n\to+\infty}\max \{\diam_d(X) \,|\, X\in \X^n(f,\mathcal{C})\}=0$.
\end{definition}
\begin{rems} \label{rmExpanding}
We observe that being expanding is a purely topological property of a Thurston map and independent of the choice of the metric $d$ that generates the standard topology on $S^2$. By Lemma~8.1 in \cite{BM10}, it is also independent of the choice of the Jordan curve $\mathcal{C}$ containing $\operatorname{post} f$. More precisely, if $f$ is an expanding Thurston map, then
$$
\lim\limits_{n\to+\infty}\max \{\diam_{\widetilde{d}}(X) \,|\, X\in \X^n(f,\widetilde{\mathcal{C}})\}=0,
$$
for each metric $\widetilde{d}$ that generates the standard topology on $S^2$ and each Jordan curve $\widetilde{\mathcal{C}}\subseteq S^2$ that contains $\operatorname{post} f$. From the definition, it is also clear that if $f$ is an expanding Thurston map, so is $f^n$ for each $n\in\N$.
\end{rems}
P. Ha\"{\i}ssinsky and K. Pilgrim developed a more general notion of expansion for finite branched coverings between two Hausdorff, locally compact, locally connected topological spaces (see \cite[Section~2.1 and Section~2.2]{HP09}). When restricted to Thurston maps, their notion of expansion is equivalent to our notion defined above (see \cite[Proposition~8.2]{BM10}). Such notions of expansion are the natural analogs in the context of finite branched coverings and Thurston maps to some of the more classical notions of expansion, such as expansive homeomorphisms and forward-expansive continuous maps between compact metric spaces (see for example, \cite[Definition~3.2.11]{KH95}), and distance-expanding maps between compact metric spaces (see for example, \cite[Chapter~4]{PU10}). Our notion of expansion is not equivalent to any of such classical notions in the context of Thurston maps.
M.~Bonk and D.~Meyer proved that if two expanding Thurston maps are Thurston equivalent, then they are topologically conjugate (see \cite[Theorem~10.4]{BM10}).
For an expanding Thurston map $f$, we can fix a metric $d$ for $f$ on $S^2$ called a visual metric. For the existence and properties of such metrics, see \cite[Chapter~8]{BM10}. In particular, we will need the fact that $d$ induces the standard topology on $S^2$ (\cite[Proposition~8.9]{BM10}). One major advantage of visual metrics $d$ is that in $(S^2,d)$ we have good quantitative control over the sizes of the cells in the cell decompositions discussed above, see \cite[Lemma~8.10]{BM10}. More precisely,
\begin{lemma}[M.~Bonk \& D.~Meyer, 2010] \label{lmBMCellSizeBounds}
Let $f\: S^2\rightarrow S^2$ be an expanding Thurston map, $\mathcal{C}\subseteq S^2$ be a Jordan curve with $\operatorname{post} f\subseteq \mathcal{C}$, and $d$ a visual metric for $f$. Then there exists a constant $\Lambda>1$ called the expansion factor, and a constant $C\geq 1$ such that for each $n\in\N_0$,
\begin{itemize}
\smallskip
\item[(i)] $d(\delta,\tau) \geq \frac{1}{C} \Lambda^{-n}$ whenever $\delta$ and $\tau$ are disjoint $n$-cells,
\smallskip
\item[(ii)] $\frac{1}{C} \Lambda^{-n} \leq \diam_d(\tau) \leq C\Lambda^{-n}$ for all $n$-edges and all $n$-tiles $\tau$.
\end{itemize}
\end{lemma}
A Jordan curve $\mathcal{C}\subseteq S^2$ is $f$-invariant if $f(\mathcal{C})\subseteq \mathcal{C}$. We are interested in $f$-invariant Jordan curves that contain $\operatorname{post} f$, since for such a curve $\mathcal{C}$, the partition $(\mathbf{D}^1(f,\mathcal{C}),\mathbf{D}^0(f,\mathcal{C}))$ is then a cellular Markov partition for $f$. According to Example~15.5 in \cite{BM10}, $f$-invariant Jordan curves containing $\operatorname{post}{f}$ need not exist. However, M.~Bonk and D.~Meyer proved in \cite[Theorem~1.2]{BM10} that for each sufficiently large $n$ depending on $f$, an $f^n$-invariant Jordan curve $\mathcal{C}$ containing $\operatorname{post}{f}$ always exists. We will need a slightly stronger version in this paper. Its proof is almost the same as that of \cite[Theorem~1.2]{BM10}. For the convenience of the reader, we include the proof here.
\begin{lemma} \label{lmCexists}
Let $f\:S^2\rightarrow S^2$ be an expanding Thurston map, and $\widetilde{\mathcal{C}}\subseteq S^2$ be a Jordan curve with $\operatorname{post} f\subseteq \widetilde{\mathcal{C}}$. Then there exists an integer $N(f,\widetilde{\mathcal{C}}) \in \N$ such that for each $n\geq N(f,\widetilde{\mathcal{C}})$ there exists an $f^n$-invariant Jordan curve $\mathcal{C}$ isotopic to $\widetilde{\mathcal{C}}$ rel.\ $\operatorname{post} f$ such that no $n$-tile in $\mathbf{D}^n(f,\mathcal{C})$ joins opposite sides of $\mathcal{C}$.
\end{lemma}
\begin{proof}
By \cite[Lemma~15.9]{BM10}, there exists an integer $N(f,\widetilde{\mathcal{C}})\in\N$ such that for each $n \geq N(f,\widetilde{\mathcal{C}})$, there exists a Jordan curve $\mathcal{C}' \subseteq f^{-n} (\widetilde{\mathcal{C}})$ that is isotopic to $\widetilde{\mathcal{C}}$ rel.\ $\operatorname{post} f$, and no $n$-tile for $(f,\widetilde{\mathcal{C}})$ joins opposite sides of $\mathcal{C}'$. Let $H\:S^2 \times [0,1] \rightarrow S^2$ be this isotopy rel.\ $\operatorname{post} f$. We set $H_t(x)=H(x,t)$ for $x\in S^2, t\in [0,1]$. We have $H_0=\id_{S^2}$ and $\mathcal{C}'=H_1(\widetilde{\mathcal{C}}) \subseteq f^{-n}(\widetilde{\mathcal{C}})$.
If we set $F=f^n$, then $\operatorname{post} F=\operatorname{post} f$ and $F$ is also an expanding Thurston map (\cite[Lemma~8.4]{BM10}). Note that $F$ is cellular for $(\mathbf{D}^n(f,\widetilde{\mathcal{C}}), \mathbf{D}^0(f,\widetilde{\mathcal{C}}))$. So $\mathbf{D}^1(F,\widetilde{\mathcal{C}})=\mathbf{D}^n(f,\widetilde{\mathcal{C}})$ (see \cite[Lemma~5.4]{BM10}). Thus no $1$-cell for $(H_1 \circ F, \mathcal{C}')$ joins opposite sides of $\mathcal{C}'$, and thus $H_1 \circ F$ is combinatorially expanding for $\mathcal{C}'$. Note that $\mathcal{C}'$ contains $\operatorname{post}(H_1\circ F)=\operatorname{post} F=\operatorname{post} f$. By Corollary~13.18 in \cite{BM10}, there exists a homeomorphism $\phi\:S^2\rightarrow S^2$ that is isotopic to the identity rel.\ $\operatorname{post}{(H_1\circ F)}$ such that $\phi(\mathcal{C}')=\mathcal{C}'$ and $G=\phi \circ H_1 \circ F$ is an expanding Thurston map. Since $\phi\circ H_1$ is isotopic to the identity on $S^2$ rel.\ $\operatorname{post} F$, the pair $F$ and $G$ are Thurston equivalent. By Theorem~10.4 in \cite{BM10}, there exists a homeomorphism $h\:S^2\rightarrow S^2$ that is isotopic to the identity on $S^2$ rel.\ $F^{-1}(\operatorname{post} F)$ with $F\circ h=h\circ G$. Set $\mathcal{C} = h(\mathcal{C}')$. Then $\mathcal{C}$ is a Jordan curve in $S^2$ that is isotopic to $\mathcal{C}'$ rel.\ $F^{-1}(\operatorname{post} F)$ and thus isotopic to $\widetilde{\mathcal{C}}$ rel.\ $\operatorname{post} F$. Since $F(\mathcal{C})=F(h(\mathcal{C}'))=h(G(\mathcal{C}'))=h(\phi(H_1(F(\mathcal{C}')))) \subseteq h(\phi(\mathcal{C}'))=h(\mathcal{C}')=\mathcal{C}$, we get that $\mathcal{C}$ is $F$-invariant.
Moreover, since no $1$-cell for $(H_1 \circ F, \mathcal{C}')$ joins opposite sides of $\mathcal{C}'$, $H_1\circ F (\mathcal{C}')\subseteq H_1(\widetilde{\mathcal{C}})=\mathcal{C}'$, $\phi\:S^2 \rightarrow S^2$ is a homeomorphism with $\phi(\mathcal{C}')=\mathcal{C}'$, $G=\phi \circ H_1 \circ F$, we can conclude that $G(\mathcal{C}') \subseteq \mathcal{C}'$ and no $1$-cell for $(G,\mathcal{C}')$ joins opposite sides of $\mathcal{C}'$. Since $h\:S^2\rightarrow S^2$ is a homeomorphism, $\mathcal{C}=h(\mathcal{C}')$, and $F\circ h=h \circ G$, we can finally conclude that no $1$-cell for $(F,\mathcal{C})$ joins opposite sides of $\mathcal{C}$. Therefore no $n$-cell for $(f,\mathcal{C})$ joins opposite sides of $\mathcal{C}$.
\end{proof}
In fact, we will only need the following corollary of Lemma~\ref{lmCexists} in the following sections.
\begin{cor} \label{corCexists}
Let $f\:S^2\rightarrow S^2$ be an expanding Thurston map. Then there exists a constant $N(f)\in\N$ such that for each $n \geq N(f)$, there exists an $f^n$-invariant Jordan curve $\mathcal{C}$ containing $\operatorname{post} f$ such that no $n$-tile in $\mathbf{D}^n(f,\mathcal{C})$ joins opposite sides of $\mathcal{C}$.
\end{cor}
\begin{proof}
We can choose an arbitrary Jordan curve $\widetilde{\mathcal{C}}\subseteq S^2$ containing $\operatorname{post} f$ and set $N(f)= N(f,\widetilde{\mathcal{C}})$, and let $\mathcal{C}$ be an $f^n$-invariant Jordan curve containing $\operatorname{post} f$ as in Lemma~\ref{lmCexists}.
\end{proof}
\begin{lemma} \label{lmPreImageDense}
Let $f\:S^2\rightarrow S^2$ be an expanding Thurston map. Then for each $p\in S^2$, the set $\bigcup\limits_{n=1}^{+\infty} f^{-n}(p)$ is dense in $S^2$, and
\begin{equation} \label{eqCardn-preimgGoToInfty}
\lim\limits_{n\to +\infty} \card(f^{-n}(p)) = +\infty.
\end{equation}
\end{lemma}
\begin{proof}
Let $\mathcal{C} \subseteq S^2$ be a Jordan curve containing $\operatorname{post} f$. Let $d$ be any metric on $S^2$ that generates the standard topology on $S^2$.
Without loss of generality, we assume that $p\in X^0_w$ where $X^0_w \in \X^0_w(f,\mathcal{C})$ is the white $0$-tile in the cell decompositions induced by $(f,\mathcal{C})$. The proof for the case when $p\in X^0_b$ where $X^0_b \in \X^0_b(f,\mathcal{C})$ is the black $0$-tile is similar.
By Proposition~\ref{propCellDecomp}(ii), for each $n\in\N$ and each white $n$-tile $X^n_w\in\X^n_w(f,\mathcal{C})$, there is a point $q\in X^n_w$ with $f^n(q)=p$. Since $f$ is an expanding Thurston map,
\begin{equation} \label{eqMeshGoTo0}
\lim\limits_{n\to+\infty}\max \{\diam_d(X) \,|\, X\in \X^n(f,\mathcal{C})\}=0.
\end{equation}
Then the density of the set $\bigcup\limits_{n=1}^{+\infty} f^{-n}(p)$ follows from the observation that for each $n\in\N$, each black $n$-tile $X^n_b\in\X^n_b(f,\mathcal{C})$ intersects nontrivially with some white $n$-tile $X^n_w \in \X^n_w(f,\mathcal{C})$.
By the above observation, the triangle inequality, and the fact that $\diam_d(S^2) > 0$ and $S^2$ is connected in the standard topology, the equation (\ref{eqCardn-preimgGoToInfty}) follows from (\ref{eqMeshGoTo0}).
\end{proof}
\section{Fixed points of expanding Thurston maps} \label{sctFixedPts}
The main goal of this section is to prove Theorem~\ref{thmNoFixedPts}; namely, that the number of fixed points, counted with an appropriate weight, of an expanding Thurston map $f$ is exactly $1+\deg f$. In order to prove Theorem~\ref{thmNoFixedPts}, we first establish in Lemma~\ref{lmAtLeast1} and Lemma~\ref{lmAtMost1} an almost one-to-one correspondence between fixed points and $1$-tiles in the cell decomposition $\mathbf{D}^1(f,\mathcal{C})$ for an expanding Thurston map $f$ with an $f$-invariant Jordan curve $\mathcal{C}$ containing $\operatorname{post} f$. As a consequence, we establish in Corollary~\ref{corNoPrePeriodicPts} an exact formula for the number of preperiodic points, counted with appropriate weight. We end this section by establishing a formula for the exact number of periodic points with period $n$, $n\in\N$, for expanding Thurston maps without periodic critical points.
Let $f$ be a Thurston map and $p\in S^2$ a periodic point of $f$ of period $n\in\N$. We define \defn{the weight of $p$ (with respect to $f$)} as the local degree $\deg_{f^n} (p)$ of $f^n$ at $p$. When $f$ is understood from the context and $p$ is a fixed point of $f$, we abbreviate it as \defn{the weight of $p$}. We will prove in this section that each expanding Thurston map $f$ has exactly $1+\deg f$ fixed points, counted with weight.
Note the difference between the weight and \defn{the multiplicity} of a fixed point of a rational map (see \cite[Chapter 12]{Mi06}). In comparison, the multiplicity of a fixed point $p\in\C$ of a rational map $R\:\widehat\C\rightarrow\widehat\C$ is $\deg_{\widetilde R}(p)$, where $\widetilde R(z) = R(z)-z$. For every expanding rational Thurston map $R$, M.~Bonk and D.~Meyer proved that $R$ has no periodic critical points (see \cite[Proposition~19.1]{BM10}). So the weight of every fixed point of $R$ is 1. We can prove that $R$ has exactly $1+\deg R$ fixed points by using basic facts in complex dynamics, even though it will follow as a special case of our general result in Theorem~\ref{thmNoFixedPts}. For the relevant definitions and general background of complex dynamics, see \cite{CG93} and \cite{Mi06}.
\begin{prop} \label{propNoFixedPtsRational}
Let $R\:\widehat\C\rightarrow\widehat\C$ be an expanding rational Thurston map. Then $R$ has exactly $1+\deg R$ fixed points. Moreover, the weight $\deg_R(q)$ of each fixed point $q$ of $R$ is equal to 1.
\end{prop}
\begin{proof}
Conjugating $R$ by a fractional linear automorphism of the Riemann sphere if necessary, we may assume that the point at infinity is not a fixed point of $R$.
Since $R$ is expanding, $R$ is not the identity map. By Lemma~12.1 in \cite{Mi06}, which is basically an application of the fundamental theorem of algebra, we can conclude that $R$ has $1+\deg R$ fixed points, counted with multiplicity. For rational Thurston maps, being expanding is equivalent to having no periodic critical points (see \cite[Proposition~19.1]{BM10}). So the weight $\deg_R(q)$ of every fixed point $q$ of $R$ is exactly 1. Thus it suffices now to prove that each fixed point $q$ of $R$ has multiplicity 1.
Suppose a fixed point $q$ of $R$ has multiplicity $m>1$. In the terminology of complex dynamics, $q$ is then a parabolic fixed point with multiplier $1$ and multiplicity $m$. Then by the Leau-Fatou flower theorem (see for example, \cite[Chapter~10]{Mi06} or \cite[Theorem~2.12]{Br10}), there exists an open set $U\subseteq S^2$ such that $R(U)\subseteq U$ and $U\neq S^2$ (by letting $U$ be one of the attracting petals, for example). This contradicts the fact that the function $R$, as an expanding Thurston map, is \emph{eventually onto}, i.e., for each nonempty open set $V\subseteq S^2$, there exists a number $m\in\N$ such that $R^m(V)=S^2$.
In order to see that $R$ is eventually onto, let $d$ be a metric on $S^2$ and $\mathcal{C}\subseteq S^2$ be a Jordan curve, as given in Definition~\ref{defExpanding}. Since $V$ is open, it contains some open ball in the metric space $(S^2,d)$. Then since $R$ is expanding, by Definition~\ref{defExpanding}, we can conclude that there exists a constant $m\in\N$, a black $m$-tile $X^m_b\in\X^m_b(R,\mathcal{C})$ and a white $m$-tile $X^m_w\in\X^m_w(R,\mathcal{C})$ such that $X^m_b\cup X^m_w \subseteq V$. Thus $R^m(V)\supseteq R^m(X^m_b\cup X^m_w) = S^2$. Therefore, $R$ is eventually onto.
\end{proof}
For general expanding Thurston maps, we need to use the combinatorial information from \cite{BM10}. Recall that cells in the cell decompositions are by definition closed sets.
\begin{lemma} \label{lmAtLeast1}
Let $f$ be an expanding Thurston map with an $f$-invariant Jordan curve $\mathcal{C}$ containing $\operatorname{post} f$. If $X\in \X^1_{ww}(f,\mathcal{C}) \cup \X^1_{bb}(f,\mathcal{C})$ is a white $1$-tile contained in the white $0$-tile $X^0_w$ or a black $1$-tile contained in the black $0$-tile $X^0_b$, then $X$ contains at least one fixed point of $f$. If $X\in \X^1_{wb}(f,\mathcal{C}) \cup \X^1_{bw}(f,\mathcal{C})$ is a white $1$-tile contained in the black $0$-tile $X^0_b$ or a black $1$-tile contained in the white $0$-tile $X^0_w$, then $\inte X$ contains no fixed points of $f$.
\end{lemma}
Recall the set of $0$-tiles $\X^0(f,\mathcal{C})$ consists of the white $0$-tile $X^0_w$ and the black $0$-tile $X^0_b$.
\begin{proof}
If $X\in \X^1_{ww}(f,\mathcal{C}) \cup \X^1_{bb}(f,\mathcal{C})$, then $X\subseteq f(X)$. By Proposition~\ref{propCellDecomp}(i), $f|_X$ is a homeomorphism from $X$ to $f(X)$, which is one of the two $0$-tiles. Hence, $f(X)$ is homeomorphic to the closed unit disk. So by Brouwer's fixed point theorem, $(f|_X)^{-1}$ has a fixed point $p$. Thus $p$ is also a fixed point of $f$.
If $X\in \X^1_{wb}(f,\mathcal{C})$, then $\inte X \subseteq \inte X_b^0$ and $f(X)=X_w^0$. Since $ X_w^0 \cap \inte X_b^0 = \emptyset$, the map $f$ has no fixed points in $\inte X$. The case when $X\in \X^1_{bw}(f,\mathcal{C})$ is similar.
\end{proof}
\begin{lemma} \label{lmAtMost1}
Let $f$ be an expanding Thurston map with an $f$-invariant Jordan curve $\mathcal{C}$ containing $\operatorname{post} f$ such that no $1$-tile in $\mathbf{D}^1(f,\mathcal{C})$ joins opposite sides of $\mathcal{C}$. Then for every $n\in\N$, each $n$-tile $X^n \in\X^n(f,\mathcal{C})$ contains at most one fixed point of $f^n$.
\end{lemma}
\begin{proof}
Fix an arbitrary $n\in\N$. We denote $F=f^n$ and consider the cell decompositions induced by $F$ and $\mathcal{C}$ in this proof. Note that $F$ is also an expanding Thurston map and there is no $1$-tile in $\mathbf{D}^1(F,\mathcal{C})$ joining opposite sides of $\mathcal{C}$.
It suffices to prove that each $1$-tile $X^1\in\X^1$ contains at most one fixed point of $F$.
Suppose that there are two distinct fixed points $p,q$ of $F$ in a $1$-tile $X^1$. We prove that there is a contradiction in each of the following cases.
\smallskip
Case 1: one of the fixed points, say $p$, is in $\inte X^1 $. Then $X^1 \in \X^1_{ww}\cup \X^1_{bb}$ by Lemma \ref{lmAtLeast1}. Since $p$ is contained in the interior of $X^1\cap F(X^1)$, we get that $X^1\subset F(X^1)$. Since $F|_{X^1}$ is a homeomorphism from $X^1$ to $F(X^1)$ (see Proposition~\ref{propCellDecomp}(i)), we define a $2$-tile $X^2= (F|_{X^1})^{-1}(X^1) \subseteq X^1$. Then we get that $p\in \inte X^2$ and $F(X^2)=X^1$. On the other hand, the point $q$ must be in $X^2$ as well for otherwise there exists $q'\neq q$ such that $q'\in X^2$ and $F(q')=q$, thus $q'$ and $q$ are two distinct points in $X^1$ whose images under $F$ are $q$, contradicting the fact that $F|_{X^1}$ is a homeomorphism from $X^1$ to $F(X^1)$ and $X^1\subseteq F(X^1)$. Thus we can inductively construct a $(k+1)$-tile $X^{k+1}\subseteq X^k$ such that $F(X^{k+1})=X^k$, $p \in \inte(X^{k+1})$, and $q\in X^{k+1}$, for each $k\in\N$. This contradicts the fact that $F$ is an expanding Thurston map, see Remark~\ref{rmExpanding}.
\smallskip
Case 2: there exists a $1$-edge $e\in\E^1$ such that $p,q\in e$. Note that $e\subseteq X^1$. Then one of the fixed points $p$ and $q$, say $p$, must be contained in the interior of $e$, for otherwise $p$, $q$ are distinct $1$-vertices that are fixed by $F$, thus they are both 0-vertices, hence $X^1$ joins opposite sides, a contradiction. Since $F(e)$ is a $0$-edge by Proposition~\ref{propCellDecomp}, and $p\in F(e)$, there exists a $1$-edge $e'\subseteq F(e)$ with $p\in e'$. Thus $e'$ intersects with $e$ at the point $p$, which is an interior point of $e$. So $e'=e$, and $e\subseteq F(e)$. Then by the same argument as when $p \in \inte X^1 $ in Case 1, we can get a contradiction to the fact that $F$ is an expanding Thurston map.
\smallskip
Case 3: the points $p$, $q$ are contained in two distinct $1$-edges $e_1,e_2$ of $X^1$, respectively, and $e_1\cap e_2 \neq \emptyset$. Since $F$ is an expanding Thurston map, we have $m=\card(\operatorname{post} F)\geq 3$ (see \cite[Corollary 6.4]{BM10}). So $X^1$ is an $m$-gon (see Proposition \ref{propCellDecomp}(vi)). Since $e_1\cap e_2 \neq \emptyset$, we get $\card(e_1\cap e_2)=1$, say $e_1\cap e_2 =\{v\}$. By Case 2, we get that $v\neq p$ and $v\neq q$. Note that $p\in F(e_1)$, $q\in F(e_2)$, and $F(e_1),F(e_2)$ are $0$-edges. If at least one of $p$ and $q$ is a $1$-vertex, thus a 0-vertex as well, then since Proposition~\ref{propCellDecomp}(i) implies that $F(e_1)\neq F(e_2)$, we can conclude that $X^1$ touches at least three $0$-edges, thus joins opposite sides of $\mathcal{C}$, a contradiction. Hence $p\in\inte e_1$ and $q\in\inte e_2$. So $e_1\subseteq F(e_1)$, $e_2\subseteq F(e_2)$, and
$$
\{v\} = e_1 \cap e_2 \subseteq F(e_1)\cap F(e_2) = F(e_1\cap e_2) = F(\{v\}),
$$
by Proposition~\ref{propCellDecomp}(i). Thus $F(v)=v$. Then $e_1$ contains two distinct fixed points $p$ and $v$ of $F$, which was already proven to be impossible in Case 2.
\smallskip
Case 4: the points $p$, $q$ are contained in two distinct $1$-edges $e_1,e_2$ of $X^1$, respectively, and $e_1\cap e_2 = \emptyset$. Thus $F(e_1)$ and $F(e_2)$ are a pair of disjoint edges of $F(X^1)$. But $p=F(p)\in F(e_1)$, $q=F(q)\in F(e_2)$, so $X^1$ joins opposite sides of $\mathcal{C}$, a contradiction.
\smallskip
Combining all cases above, we can conclude, therefore, that each $1$-tile $X^1\in\X^1$ contains at most one fixed point of $F$.
\end{proof}
We can immediately get an upper bound of the number of periodic points of an expanding Thurston map from Lemma~\ref{lmAtMost1}.
\begin{cor} \label{corNoFixedPtsUpperBound}
Let $f$ be an expanding Thurston map. Then for each $n\in\N$ sufficiently large, the number of fixed points of $f^n$ is $\leq 2(\deg f)^n$. In particular, the number of fixed points of $f$ is finite.
\end{cor}
\begin{proof}
By Corollary~\ref{corCexists}, for each $n\geq N(f)$, where $N(f)\in\N$ is a constant as given in Corollary~\ref{corCexists}, there exists an $f^n$-invariant Jordan curve $\mathcal{C}$ containing $\operatorname{post} f$ such that no $n$-tile in $\mathbf{D}^n(f,\mathcal{C})$ joins opposite sides of $\mathcal{C}$. Let $F=f^n$. So $F$ is an expanding Thurston map, and $\mathcal{C}$ is an $F$-invariant Jordan curve containing $\operatorname{post} F$ such that no $1$-tile in $\mathbf{D}^1(F,\mathcal{C})$ joins opposite sides of $\mathcal{C}$. By Proposition~\ref{propCellDecomp}(iv), the number of $1$-tiles in $\X^1(F,\mathcal{C})$ is exactly $2\deg F= 2 (\deg f)^n$. By Lemma~\ref{lmAtMost1}, we can conclude that there are at most $2(\deg f)^n$ fixed points of $F=f^n$.
Since each fixed point of $f$ is also a fixed point of $f^n$, for each $n\in\N$, the number of fixed points of $f$ is finite.
\end{proof}
\begin{lemma} \label{lmDeg_f_C}
Let $f$ be an expanding Thurston map with an $f$-invariant Jordan curve $\mathcal{C}$ containing $\operatorname{post} f$. Then
\begin{align}
\deg(f|_{\mathcal{C}}) &= \card (\X_{ww}^1(f,\mathcal{C})) - \card (\X_{bw}^1(f,\mathcal{C})) \label{eqDeg_f_C} \\
&= \card (\X_{bb}^1(f,\mathcal{C})) - \card (\X_{wb}^1(f,\mathcal{C})). \notag
\end{align}
\end{lemma}
Here $\deg(f|_\mathcal{C})$ is the \emph{degree} of the map $f|_\mathcal{C}\: \mathcal{C}\rightarrow \mathcal{C}$ (see for example, \cite[Section~2.2]{Ha02}).
Note that the first equality in (\ref{eqDeg_f_C}), for example, says that the degree of $f$ restricted to $\mathcal{C}$ is equal to the number of white $1$-tiles contained in the white $0$-tile minus the number of black $1$-tiles contained in the white $0$-tile.
Recall that for each continuous path $\gamma\: [a,b]\rightarrow \C\setminus \{0\}$ on the Riemann sphere $\widehat\C$, with $a,b\in\mathbb{R}$ and $a<b$, we can define the \emph{variation of the argument along $\gamma$}, denoted by $V(\gamma)$, as the change of the imaginary part of the logarithm along $\gamma$. Note that $V(\gamma)$ is invariant under an orientation-preserving reparametrization of $\gamma$ and if $\widetilde\gamma\: [a,b]\rightarrow \C\setminus\{0\}$ reverses the orientation of $\gamma$, i.e., $\widetilde\gamma(t)=\gamma(b-t)$, then $V(\widetilde\gamma)=-V(\gamma)$. We also note that if $\gamma$ is a loop, then $V(\gamma)= 2\pi \operatorname{Ind}_\gamma(0)$, where $\operatorname{Ind}_\gamma(0)$ is the \emph{winding number of $\gamma$ with respect to $0$} \cite[Chapter~IV]{Bu79}.
\begin{proof}
Consider the cell decompositions induced by $(f,\mathcal{C})$. Let $X_w^0$ be the white $0$-tile.
We start with proving the first equality in (\ref{eqDeg_f_C}).
By the Schoenflies theorem (see, for example, \cite[Theorem~10.4]{Mo77}), we can assume that $S^2$ is the Riemann sphere $\widehat{\C}$, and $X_w^0$ is the unit disk with the center $0$ disjoint from $f^{-1}(\mathcal{C})$.
For each $1$-edge $e\in\E^1$, we choose a parametrization $\gamma^+_e\: [0,1]\rightarrow \C\setminus\{0\}$ of $e$ with positive orientation (i.e., with the white $1$-tile on the left), and a parametrization $\gamma^-_e\: [0,1]\rightarrow \C\setminus\{0\}$ of $e$ with negative orientation. Then $f\circ\gamma^+_e$ and $f\circ\gamma^-_e$ are parametrizations of one of the $0$-edges on the unit circle $\mathcal{C}$, with positive orientation and negative orientation, respectively.
We claim that
\begin{align} \label{eqSumVarArg}
& \sum\limits_{X\in\X^1_{ww}} \sum\limits_{e\in\E^1,e\subseteq\partial X} V(f\circ\gamma^+_e) - \sum\limits_{X\in\X^1_{bw}} \sum\limits_{e\in\E^1,e\subseteq\partial X} V(f\circ\gamma^+_e) \\
= & \sum\limits_{e\in\E^1, e\subseteq\mathcal{C}} V(f\circ\gamma_e), \notag
\end{align}
where on the right-hand side, $\gamma_e=\gamma^+_e$ if $e\subseteq\mathcal{C}\cap X$ for some $X\in\X^1_{ww}$ and $\gamma_e=\gamma^-_e$ if $e\subseteq\mathcal{C}\cap X$ for some $X\in\X^1_{bw}$, or equivalently, $\gamma_e$ parametrizes $e$ in such a way that $X_w^0$ is always on the left of $e$ for each $e\in \E^1$ with $e\subseteq\mathcal{C}$.
We observe that the left-hand side of (\ref{eqSumVarArg}) is the sum of $V(f\circ\gamma^+_e)$ over all $1$-edges $e$ in the boundary of a white $1$-tile $X\subseteq X_w^0$ plus the sum of $V(f\circ\gamma^-_e)$ over all $1$-edges $e$ in the boundary of a black $1$-tile $X\subseteq X_w^0$. Since each $1$-edge $e$ with $\inte e\subseteq X_w^0$ is the intersection of exactly one $1$-tile in $\X^1_{ww}$ and one $1$-tile in $\X^1_{bw}$, the two terms corresponding to a $1$-edge $e$ that is not contained in $\mathcal{C}$ cancel each other. Moreover, there is exactly one term for each $1$-edge $e\subseteq X^0_w$ that is contained in $\mathcal{C}$, and $e$ that corresponds to such a term is parametrized in such a way that $X_w^0$ is on the left of $e$. The claim now follows.
We then note that by Proposition~\ref{propCellDecomp}(i) and the definition of branched covering maps on $S^2$ in the beginning of Section~\ref{sctThurstonMap}, the map $f$ is an orientation-preserving local homeomorphism. Thus the left-hand side of (\ref{eqSumVarArg}) is equal to
\begin{equation*}
\sum\limits_{X\in\X^1_{ww}} 2 \pi - \sum\limits_{X\in\X^1_{bw}} 2 \pi = 2\pi \(\card (\X_{ww}^1) - \card (\X_{bw}^1) \),
\end{equation*}
and the right-hand side of (\ref{eqSumVarArg}) is equal to
\begin{equation*}
2\pi \operatorname{Ind}_{f\circ\gamma_\mathcal{C}}(0) = 2\pi \deg (f|_\mathcal{C}),
\end{equation*}
where $\gamma_\mathcal{C}$ is a parametrization of $\mathcal{C}$ with positive orientation. Hence the first equality in (\ref{eqDeg_f_C}) follows.
The second equality in (\ref{eqDeg_f_C}) follows from the fact that
$$
\card (\X_{ww}^1) + \card (\X_{wb}^1) = \deg f = \card (\X_{bb}^1) + \card (\X_{bw}^1).
$$
\end{proof}
Let $f$ be an expanding Thurston map with an $f$-invariant Jordan curve $\mathcal{C}$ containing $\operatorname{post} f$. We orient $\mathcal{C}$ in such a way that the white $0$-tile lies on the left of $\mathcal{C}$. Let $p\in \mathcal{C}$ be a fixed point of $f$. We say that $f|_\mathcal{C}$ \defn{preserves the orientation at $p$} (resp.\ \defn{reverses the orientation at $p$}) if there exists an open arc $l\subseteq\mathcal{C}$ with $p\in l$ such that $f$ maps $l$ homeomorphically to $f(l)$ and $f|_\mathcal{C}$ preserves (resp.\ reverses) the orientation on $l$. More concretely, when $p$ is a $1$-vertex, let $l_1,l_2\subseteq \mathcal{C}$ be the two distinct $1$-edges on $\mathcal{C}$ containing $p$; when $p\in\inte e$ for some $1$-edge $e\subseteq\mathcal{C}$, let $l_1,l_2$ be the two connected components of $e \setminus \{p\}$. Then $f|_\mathcal{C}$ preserves the orientation at $p$ if $l_1\subseteq f(l_1)$ and $l_2\subseteq f(l_2)$, and reverses the orientation at $p$ if $l_2\subseteq f(l_1)$ and $l_1\subseteq f(l_2)$. Note that it may happen that $f|_\mathcal{C}$ neither preserves nor reverses the orientation at $p$, because $f|_\mathcal{C}$ need not be a local homeomorphism near $p$, where it may behave like a ``folding map''.
\begin{lemma} \label{lmNoFixedPts_f_C}
Let $f$ be an expanding Thurston map with an $f$-invariant Jordan curve $\mathcal{C}$ containing $\operatorname{post} f$. Then the number of fixed points of $f|_\mathcal{C}$ where $f|_\mathcal{C}$ preserves the orientation minus the number of fixed points of $f|_\mathcal{C}$ where $f|_\mathcal{C}$ reverses the orientation is equal to $\deg (f|_\mathcal{C}) -1$.
\end{lemma}
\begin{proof}
Let $\psi\:[0,1]\rightarrow\mathcal{C}$ be a continuous map such that $\psi|_{(0,1)}\:(0,1)\rightarrow \mathcal{C}\setminus\{x_0\}$ is an orientation-preserving homeomorphism, and $\psi(0)=\psi(1)=x_0$ for some $x_0\in\mathcal{C}$ that is not a fixed point of $f|_\mathcal{C}$. Note that for each $x\in\mathcal{C}$ with $x\neq x_0$, $\psi^{-1}(x)$ is a well-defined number in $(0,1)$. In particular, $\psi^{-1}(y)$ is a well-defined number in $(0,1)$ for each fixed point $y$ of $f|_\mathcal{C}$. Define $\pi\:\mathbb{R}\rightarrow\mathcal{C}$ by $\pi(x)=\psi(x-\lfloor x \rfloor)$. Then $\pi$ is a covering map. We lift $f|_\mathcal{C}\circ \psi$ to $G\:[0,1]\rightarrow\mathbb{R}$ such that $\pi\circ G = f|_\mathcal{C}\circ\psi$ and $G(0)=\psi^{-1}(f(x_0))\in(0,1)$. So we get the following commutative diagram:
\begin{equation*}
\xymatrix{ & \mathbb{R} \ar[d]^\pi \\
[0,1] \ar[r]_{f|_\mathcal{C} \circ \psi} \ar[ur]^G & \mathcal{C}.}
\end{equation*}
Then $G(1)-G(0)\in\Z$ and
\begin{equation}
\deg(f|_\mathcal{C})=G(1)-G(0).
\end{equation}
Observe that $y\in\mathcal{C}$ is a fixed point of $f|_\mathcal{C}$ if and only if $G(\psi^{-1}(y))-\psi^{-1}(y)\in\Z$. Indeed, if $y\in\mathcal{C}$ is a fixed point of $f|_\mathcal{C}$, then $\pi\circ G \circ \psi^{-1}(y)=f|_\mathcal{C}(y)=y$. Thus $G \circ \psi^{-1}(y)- \psi^{-1}(y) \in \Z$. Conversely, if $G \circ \psi^{-1}(y)- \psi^{-1}(y) \in \Z$, then $y \neq x_0$, thus
$$
f|_\mathcal{C}(y)=f|_\mathcal{C} \circ \psi \circ \psi^{-1} (y) = \pi \circ G \circ \psi^{-1} (y) = \pi \circ \psi^{-1} (y) = y.
$$
For each $m\in\Z$, we define the line $l_m$ to be the graph of the function $x\mapsto x+m$ from $\mathbb{R}$ to $\mathbb{R}$.
Let $y\in\mathcal{C}$ be any fixed point of $f|_\mathcal{C}$. Since by Corollary~\ref{corNoFixedPtsUpperBound} fixed points of $f$ are isolated, there exists a neighborhood $(s,t)\subseteq (0,1)$ such that $\psi^{-1}(y)\in(s,t)$ and for each fixed point $z\in\mathcal{C} \setminus\{y\}$ of $f|_\mathcal{C}$, $\psi^{-1}(z)\notin(s,t)$. Define $k=G(\psi^{-1}(y))-\psi^{-1}(y)$; then $k\in\Z$. Moreover, $z\in\mathcal{C}$ is a fixed point of $f|_\mathcal{C}$ if and only if the graph of $G$ intersects with $l_m$ at the point $\(\psi^{-1}(z), G(\psi^{-1}(z))\)$ for some $m\in\Z$.
\begin{figure}
\centering
\begin{overpic}
[width=10cm,
tics=20]{PlotG.eps}
\end{overpic}
\caption{The lines $l_k$ for $k\in\Z$ and an example of the graph of $G$.}
\label{figPlotG}
\end{figure}
Depending on the orientation of $f|_\mathcal{C}$ at the fixed point $y\in\mathcal{C}$, we get one of the following cases:
\begin{enumerate}
\smallskip
\item If $f|_\mathcal{C}$ preserves the orientation at $y$, then the graph of $G|_{(s,\psi^{-1}(y))}$ lies strictly between the lines $l_{k-1}$ and $l_k$, and the graph of $G|_{(\psi^{-1}(y),t)}$ lies strictly between the lines $l_k$ and $l_{k+1}$.
\smallskip
\item If $f|_\mathcal{C}$ reverses the orientation at $y$, then the graph of $G|_{(s,\psi^{-1}(y))}$ lies strictly between the lines $l_k$ and $l_{k+1}$, and the graph of $G|_{(\psi^{-1}(y),t)}$ lies strictly between the lines $l_{k-1}$ and $l_k$.
\smallskip
\item If $f|_\mathcal{C}$ neither preserves nor reverses the orientation at $y$, then the graph of $G|_{(s,t)\setminus\{\psi^{-1}(y)\}}$ either lies strictly between the lines $l_{k-1}$ and $l_k$ or lies strictly between the lines $l_k$ and $l_{k+1}$.
\end{enumerate}
Thus the number of fixed points of $f|_\mathcal{C}$ where $f|_\mathcal{C}$ preserves the orientation is exactly the number of intersections between the graph of $G$ and the lines $l_m$ with $m\in\Z$, where the graph of $G$ crosses the lines from below, and the number of fixed points of $f|_\mathcal{C}$ where $f|_\mathcal{C}$ reverses the orientation is exactly the number of intersections between the graph of $G$ and the lines $l_m$ with $m\in\Z$, where the graph of $G$ crosses the lines from above. Therefore the number of fixed points of $f|_\mathcal{C}$ where $f|_\mathcal{C}$ preserves the orientation minus the number of fixed points of $f|_\mathcal{C}$ where $f|_\mathcal{C}$ reverses the orientation is equal to $G(1)-G(0)-1=\deg (f|_\mathcal{C}) -1$.
\end{proof}
For each $n\in\N$ and each expanding Thurston map $f\:S^2\rightarrow S^2$, we denote by
\begin{equation}
P_{n,f}=\{x\in S^2\,|\, f^n(x)=x, f^k(x)\neq x,k\in\{1,2,\dots,n-1\}\}
\end{equation}
the \defn{set of periodic points of $f$ with period $n$}, and by
\begin{equation}
p_{n,f}=\sum\limits_{x\in{P_{n,f}}} \deg_{f^n}(x), \qquad \widetilde{p}_{n,f}= \card P_{n,f}
\end{equation}
the numbers of periodic points $x$ of $f$ with period $n$, counted with and without weight $\deg_{f^n}(x)$, respectively, at each $x$. In particular, $P_{1,f}$ is the set of fixed points of $f$ and $p_{1,f}=1+\deg f$ as we will see in the proof of Theorem~\ref{thmNoFixedPts} below. More generally, for all $m\in\N_0$ and $n\in \N$ with $m < n$, we denote by
\begin{equation} \label{eqSetPrePeriodicPts}
S_{n}^m = \{ x\in S^2 \,|\, f^m(x)=f^n(x) \}
\end{equation}
the \defn{set of preperiodic points of $f$ with parameters $m,n$} and by
\begin{equation} \label{eqNoPrePeriodicPts}
s_{n}^m = \sum\limits_{x\in S_{n}^m} \deg_{f^n} (x), \qquad \widetilde{s}_{n}^m = \card S_{n}^m
\end{equation}
the numbers of preperiodic points of $f$ with parameters $m,n$, counted with and without weight $\deg_{f^n}(x)$, respectively, at each $x$. Note that in particular, for each $n\in\N$, $S_{n}^0 = P_{1,f^n}$ is the set of fixed points of $f^n$.
\begin{proof}[Proof of Theorem~\ref{thmNoFixedPts}]
The idea of the proof is to first prove the theorem for $F=f^n$ for sufficiently large $n$ so that we can assume the existence of some $F$-invariant Jordan curve containing $\operatorname{post} F$. This enables us to make use of the combinatorial information from the cell decompositions induced by $(F,\mathcal{C})$. Then we can generalize the conclusion to arbitrary expanding Thurston maps by an elementary number-theoretic argument.
We first prove the theorem for $F=f^n$ for $n \geq N(f)$ where $N(f)$ is a constant as given in Corollary~\ref{corCexists} depending only on $f$. Let $\mathcal{C}$ be an $f^n$-invariant Jordan curve containing $\operatorname{post} f$ such that no $n$-tile in $\mathbf{D}^n(f,\mathcal{C})$ joins opposite sides of $\mathcal{C}$ as given in Corollary~\ref{corCexists}. So $\mathcal{C}$ is an $F$-invariant Jordan curve containing $\operatorname{post} F$ such that no $1$-tile in $\mathbf{D}^1(F,\mathcal{C})$ joins opposite sides of $\mathcal{C}$.
Unless otherwise stated, we consider the cell decompositions induced by $(F,\mathcal{C})$ in this proof. Let $w_w=\card \X^1_{ww} $ be the number of white $1$-tiles contained in the white $0$-tile, $b_w=\card \X^1_{bw} $ be the number of black $1$-tiles contained in the white $0$-tile, $w_b=\card \X^1_{wb} $ be the number of white $1$-tiles contained in the black $0$-tile, and $b_b=\card \X^1_{bb} $ be the number of black $1$-tiles contained in the black $0$-tile. Note that $w_w+w_b=b_w+b_b=\deg F$.
By Corollary~\ref{corNoFixedPtsUpperBound}, we know that fixed points of $F$ are isolated.
\smallskip
Note that
\begin{equation} \label{eqPfThmNoFixedPts}
w_w+b_b=\deg F +\deg (F|_\mathcal{C}),
\end{equation}
which follows from the equation $w_w-b_w=\deg(F|_\mathcal{C})$ by Lemma~\ref{lmDeg_f_C}, and the equation $b_w + b_b=\deg F$.
\smallskip
We define sets
$$A=\{X \in \X^1_{ww}\,|\,\text{there exists $p\in\mathcal{C}\cap X$ with } F(p)=p\},$$
$$B=\{X \in \X^1_{bw}\,|\,\text{there exists $p\in\mathcal{C}\cap X$ with } F(p)=p\},$$
and let $a=\card A$, $b=\card B$.
\smallskip
We then claim that
\begin{equation}
a-b=\deg(F|_\mathcal{C})-1.
\end{equation}
In order to prove this claim, we will first prove that $a-b$ is equal to the number of fixed points of $F|_\mathcal{C}$ where $F|_\mathcal{C}$ preserves the orientation minus the number of fixed points of $F|_\mathcal{C}$ where $F|_\mathcal{C}$ reverses the orientation.
So let $p\in\mathcal{C}$ be a fixed point of $F|_\mathcal{C}$.
\begin{figure}
\centering
\begin{overpic}
[width=6cm,
tics=20]{Plota.eps}
\put(75,40){$bw$}
\put(25,14){$ww$}
\put(120,14){$ww$}
\put(25,-8){$e_1$}
\put(120,-8){$e_2$}
\put(80,-8){$p$}
\end{overpic}
\caption{Case (2)(a) where $F(e_1)\supseteq e_1$ and $F(e_2)\supseteq e_2$.}
\label{figPlota}
\centering
\begin{overpic}
[width=6cm,
tics=20]{Plotb.eps}
\put(75,40){$ww$}
\put(25,14){$bw$}
\put(120,14){$bw$}
\put(25,-8){$e_1$}
\put(120,-8){$e_2$}
\put(80,-8){$p$}
\end{overpic}
\caption{Case (2)(b) where $F(e_1)\supseteq e_2$ and $F(e_2)\supseteq e_1$.}
\label{figPlotb}
\centering
\begin{overpic}
[width=6cm,
tics=20]{Plotc.eps}
\put(55,40){$bw$}
\put(95,40){$ww$}
\put(25,14){$ww$}
\put(120,14){$bw$}
\put(25,-8){$e_1$}
\put(120,-8){$e_2$}
\put(80,-8){$p$}
\end{overpic}
\caption{Case (2)(c) where $F(e_1)=F(e_2)\supseteq e_1$.}
\label{figPlotc}
\centering
\begin{overpic}
[width=6cm,
tics=20]{Plotd.eps}
\put(55,40){$ww$}
\put(95,40){$bw$}
\put(25,14){$bw$}
\put(120,14){$ww$}
\put(25,-8){$e_1$}
\put(120,-8){$e_2$}
\put(80,-8){$p$}
\end{overpic}
\caption{Case (2)(d) where $F(e_1)=F(e_2)\supseteq e_2$.}
\label{figPlotd}
\end{figure}
\begin{enumerate}
\smallskip
\item If $p$ is not a critical point of $F$, then either $F|_\mathcal{C}$ preserves or reverses the orientation at $p$. In this case, the point $p$ is contained in exactly one white $1$-tile and one black $1$-tile.
\begin{enumerate}
\smallskip
\item If $F|_\mathcal{C}$ preserves the orientation at $p$, then $p$ is contained in exactly one white $1$-tile that is contained in the white $0$-tile, and $p$ is not contained in any black $1$-tile that is contained in the white $0$-tile.
\smallskip
\item If $F|_\mathcal{C}$ reverses the orientation at $p$, then $p$ is contained in exactly one black $1$-tile that is contained in the white $0$-tile, and $p$ is not contained in any white $1$-tile that is contained in the white $0$-tile.
\end{enumerate}
\smallskip
\item If $p$ is a critical point of $F$, then $p=F(p)\in\operatorname{post} f$ and so there are two distinct $1$-edges $e_1,e_2\subseteq\mathcal{C}$ such that $\{p\}=e_1\cap e_2$. We refer to Figures \ref{figPlota} to \ref{figPlotd}.
\begin{enumerate}
\smallskip
\item If $e_1\subseteq F(e_1)$ and $e_2\subseteq F(e_2)$, then $p$ is contained in exactly $k$ white and $k-1$ black $1$-tiles that are contained in the white $0$-tile, for some $k\in\N$. Note that in this case $F|_\mathcal{C}$ preserves the orientation at $p$.
\smallskip
\item If $e_2\subseteq F(e_1)$ and $e_1\subseteq F(e_2)$, then $p$ is contained in exactly $k-1$ white and $k$ black $1$-tiles that are contained in the white $0$-tile, for some $k\in\N$. Note that in this case $F|_\mathcal{C}$ reverses the orientation at $p$.
\smallskip
\item If $e_1\subseteq F(e_1)= F(e_2)$, then $p$ is contained in exactly $k$ white and $k$ black $1$-tiles that are contained in the white $0$-tile, for some $k\in\N$. Note that in this case $F|_\mathcal{C}$ neither preserves nor reverses the orientation at $p$.
\smallskip
\item If $e_2\subseteq F(e_1)= F(e_2)$, then $p$ is contained in exactly $k$ white and $k$ black $1$-tiles that are contained in the white $0$-tile, for some $k\in\N$. Note that in this case $F|_\mathcal{C}$ neither preserves nor reverses the orientation at $p$.
\end{enumerate}
\end{enumerate}
It follows then that $a-b$ is equal to the number of fixed points of $F|_\mathcal{C}$ where $F|_\mathcal{C}$ preserves the orientation minus the number of fixed points of $F|_\mathcal{C}$ where $F|_\mathcal{C}$ reverses the orientation.
Then the claim follows from Lemma \ref{lmNoFixedPts_f_C}.
\smallskip
Next, we are going to prove that the number of fixed points of $F$, counted with weight given by the local degree, is equal to
\begin{equation} \label{eqDegF}
w_w+b_b-a+b,
\end{equation}
which, by (\ref{eqPfThmNoFixedPts}) and the claim above, is equal to
\begin{equation*}
\deg F +\deg(F|_\mathcal{C})-(\deg(F|_\mathcal{C})-1)=1+\deg F.
\end{equation*}
Indeed, by Lemma~\ref{lmAtLeast1} and Lemma~\ref{lmAtMost1}, each $1$-tile that contributes in (\ref{eqDegF}), i.e., each $1$-tile in $\X^1_{ww}\cup\X^1_{bb}\cup B \cup A$, contains exactly one fixed point (not counted with weight) of $F$. On the other hand, each fixed point is contained in at least one of the $1$-tiles in $\X^1_{ww}\cup\X^1_{bb}\cup B \cup A$. Let $p$ be a fixed point of $F$, then one of the following happens:
\begin{enumerate}
\smallskip
\item If $p\notin\mathcal{C}$, then $p$ is not contained in any $1$-edge $e$ since $F(e)\subseteq\mathcal{C}$. So $p\in\inte X$ for some $X \in \X^1_{ww}\cup \X^1_{bb}\setminus \(A\cup B\)$, by Lemma~\ref{lmAtLeast1}. So each such $p$ contributes $1$ to (\ref{eqDegF}).
\smallskip
\item If $p\in\mathcal{C}$ but $p\notin\operatorname{crit} F$, then $p$ is not a $1$-vertex, so either $p$ is contained in exactly two $1$-tiles $X\in\X^1_{ww}$ and $X'\in\X^1_{bb}$, or $p$ is contained in exactly two $1$-tiles $X\in\X^1_{bw}$ and $X'\in\X^1_{wb}$. In either case, $p$ contributes $1$ to (\ref{eqDegF}).
\smallskip
\item If $p\in\mathcal{C}$ and $p\in\operatorname{crit} F$, then $p$ is a $0$-vertex, so the part that $p$ contributes in (\ref{eqDegF}) counts the number of black $1$-tiles that contain $p$, which is exactly the weight $\deg_F(p)$ of $p$.
\end{enumerate}
Hence we have proved the theorem with $f$ replaced by $F=f^n$, for each $n \geq N(f)$ where $N(f)$ is the constant as given in Corollary~\ref{corCexists} depending only on $f$. We are now going to remove this restriction by an elementary number-theoretic argument.
Choose a prime $r\geq N(f)$. Note that the set of fixed points of $f^r$ can be decomposed into orbits under $f$ of length $r$ or 1, since $r$ is a prime. Let $p$ be a fixed point of $f^r$. By using the following formula derived from (\ref{eqLocalDegreeProduct}),
\begin{equation}
\deg_{f^r}(p) = \deg_f(p)\deg_f(f(p))\deg_f(f^2(p))\cdots \deg_f(f^{r-1}(p)),
\end{equation}
we can conclude that
\begin{enumerate}
\smallskip
\item[(1)] if $p\notin\operatorname{crit}(f^r)$, or equivalently, $\deg_{f^r}(p)=1$, and
\begin{enumerate}
\smallskip
\item[(i)] if $p$ is in an orbit of length $r$, then $p,f(p),\dots,f^{r-1}(p)\notin \operatorname{crit} f$, or equivalently, the local degrees of $f^r$ at these points are all 1;
\smallskip
\item[(ii)] if $p$ is in an orbit of length 1, then $p\notin\operatorname{crit} f$, or equivalently, $\deg_f(p)=1$;
\end{enumerate}
\smallskip
\item[(2)] if $p\in\operatorname{crit}(f^r)$, and
\begin{enumerate}
\smallskip
\item[(i)] if $p$ is in an orbit of length $r$, then all $p,f(p),\dots,f^{r-1}(p)$ are fixed points of $f^r$ with the same weight $\deg_{f^r}(p)=\deg_{f^r}(f^k(p))$ for each $k\in\N$;
\smallskip
\item[(ii)] if $p$ is in an orbit of length 1, then $p\in\operatorname{crit} f$ and the weight of $f^r$ at $p$ is $\deg_{f^r}(p)=(\deg_f (p))^r$.
\end{enumerate}
\end{enumerate}
Note that a fixed point $p\in S^2$ of $f^r$ is a fixed point of $f$ if and only if $p$ is in an orbit of length 1 under $f$. So by first summing the weight of the fixed points of $f^r$ in the same orbit then summing over all orbits and applying Fermat's Little Theorem, we can conclude that
\begin{align*}
p_{1,f^r} & = \sum\limits_{x\in P_{1,f^r}} \deg_{f^r}(x) \\
& = \sum\limits_{\text{(1)(i)}} r + \sum\limits_{\text{(1)(ii)}} 1 + \sum\limits_{\text{(2)(i)}} r\deg_{f^r}(p) + \sum\limits_{\text{(2)(ii)}} (\deg_f(p))^r \\
& \equiv \sum\limits_{\text{(1)(ii)}} 1 + \sum\limits_{\text{(2)(ii)}} \deg_f(p)\\
& = p_{1,f} \pmod{r},
\end{align*}
where on the second line, the first sum ranges over all orbits in Case (1)(i), the second sum ranges over all orbits in Case (1)(ii), the third sum ranges over all orbits $\{p, f(p),\dots,f^{r-1}(p)\}$ in Case (2)(i), the last sum ranges over all orbits $\{p\}$ in Case (2)(ii). Thus by (\ref{eqDegreeProduct}) and Fermat's Little Theorem again, we have
\begin{align*}
0 = & \deg(f^r)+1-p_{1,f^r} \\
\equiv & (\deg f)^r+1-p_{1,f} \\
\equiv & 1+\deg f-p_{1,f} \pmod{r}.
\end{align*}
By choosing the prime $r$ larger than
$$\Abs{1+\deg f-p_{1,f} },$$
we can conclude that
$$
p_{1,f} = 1+\deg f.
$$
\end{proof}
In particular, we have the following corollary in which the weights of all points are trivial.
\begin{cor}
If $f$ is an expanding Thurston map with no critical fixed points, then there are exactly $1+\deg f$ distinct fixed points of $f$. Moreover, if $f$ is an expanding Thurston map with no periodic critical points, then there are exactly $1+(\deg f)^n$ distinct fixed points of $f^n$, for each $n\in\N$.
\end{cor}
\begin{proof}
The first statement follows immediately from Theorem \ref{thmNoFixedPts}.
To prove the second statement, we first recall that if $f$ is an expanding Thurston map, so is $f^n$ for each $n\in\N$. Next we note that for each fixed point $p\in S^2$ of $f^n$, $n\in\N$, we have $\deg_{f^n}(p)=1$. For otherwise, suppose $\deg_{f^n}(p)>1$ for some $n\in\N$, then
$$
1< \deg_{f^n}(p) = \deg_f(p)\deg_f(f(p))\deg_f(f^2(p))\cdots \deg_f(f^{n-1}(p)).
$$
Thus at least one of the points $p, f(p), f^2(p),\dots, f^{n-1}(p)$ is a periodic critical point of $f$, a contradiction. The second statement now follows.
\end{proof}
We recall the definitions of $S_n^m$ and $s_n^m$ in (\ref{eqSetPrePeriodicPts}) and (\ref{eqNoPrePeriodicPts}).
\begin{cor} \label{corNoPrePeriodicPts}
Let $f$ be an expanding Thurston map. For each $m\in\N_0$ and $n\in\N$ with $m<n$, we have
\begin{equation}
s_{n}^m = (\deg f)^n + (\deg f)^m.
\end{equation}
\end{cor}
\begin{proof}
For all $m\in\N_0$ and $n\in\N$ with $m<n$, we have
\begin{align*}
s_{n}^m & = \sum\limits_{x\in S_{n}^m} \deg_{f^n}(x) = \sum\limits_{y=f^{n-m}(y)} \sum\limits_{x\in f^{-m}(y)} \deg_{f^n}(x) \\
& = \sum\limits_{y=f^{n-m}(y)} \deg_{f^{n-m}}(y) \sum\limits_{x\in f^{-m}(y)} \deg_{f^m}(x) \\
& = \( (\deg f)^{n-m} + 1 \) (\deg f)^m.
\end{align*}
The last equality follows from (\ref{eqDeg=SumLocalDegree}), (\ref{eqDegreeProduct}), and Theorem~\ref{thmNoFixedPts}.
\end{proof}
Finally, for expanding Thurston maps with no periodic critical points, we derive a formula for $p_{n,f}$, $n\in\N$, from Theorem~\ref{thmNoFixedPts} and the M\"obius inversion formula (see for example, \cite[Section~2.4]{Bak85}).
\begin{definition} \label{defMobiusFn}
The \defn{M\"obius function}, $\mu(u)$, is defined by
$$
\mu(n) =\begin{cases} 1 & \text{if } n=1; \\ (-1)^r & \text{if } n=p_1 p_2 \dots p_r, \text{ and $p_1,\dots,p_r$ are distinct primes}; \\ 0 & \text{otherwise.} \end{cases}
$$
\end{definition}
\begin{cor}
Let $f$ be an expanding Thurston map without any periodic critical points. Then for each $n\in\N$, we have
$$
p_{n,f}=\sum\limits_{d|n} \mu(d)p_{1,f^{n/d}} = \begin{cases} \sum\limits_{d|n} \mu(d) (\deg f)^{n/d} & \text{if } n>1; \\ 1 + \deg f & \text{if } n=1. \end{cases}
$$
\end{cor}
\begin{proof}
The first equality follows from the M\"obius inversion formula and the equation $p_{1,f^n}=\sum\limits_{d|n} p_{d,f}$, for $n\in\N$. The second equality follows from Theorem~\ref{thmNoFixedPts} and the following fact (see for example, \cite[Section~2.4]{Bak85}):
$$
\sum\limits_{d|n} \mu(d) =\begin{cases} 1 & \text{if } n=1, \\ 0 & \text{if } n>1. \end{cases}
$$
\end{proof}
\section{Equidistribution} \label{sctEquidistribution}
In this section, we derive various equidistribution results as stated in Theorem~\ref{thmWeakConvPreImg}, Theorem~\ref{thmWeakConvPrePerPts}, and Corollary~\ref{corWeakConvPerPts}. We prove these results by first establishing a general statement in Theorem~\ref{thmWeakConv} on the convergence of the distributions of the white $n$-tiles in the tile decompositions discussed in Section~\ref{sctThurstonMap}, in the weak* topology, to the unique measure of maximal entropy of an expanding Thurston map.
Let us now review the concept of measure of maximal entropy for dynamical systems. Then we prove in Theorem~\ref{thmWeakConv} that the distributions of the points, each of which is located ``near'' its corresponding white $n$-tile where the correspondence is a bijection, converge in the weak* topology to $\mu_f$ as $n\longrightarrow+\infty$. Then Theorem~\ref{thmWeakConvPreImg} follows from Theorem~\ref{thmWeakConv}. Theorem~\ref{thmWeakConvPrePerPts} finally follows after we prove a technical bound in Lemma~\ref{lmCoverEdges} generalizing a corresponding lemma from \cite{BM10}. As a special case, we obtain Corollary~\ref{corWeakConvPerPts}.
We start with recalling concepts of entropy for dynamical systems. We follow closely the notation from \cite[Chapter 20]{BM10}.
Let $(X,d)$ be a compact metric space and $g\:X\rightarrow X$ a continuous map. For each $n\in\N$ and $x,y\in X$,
$$
d^n_g(x,y)=\operatorname{max}\{d(g^k(x),g^k(y))\,|\,k=0,\dots,n-1\}
$$
defines a metric on $X$. Let $D(g,\epsilon,n)$ be the minimum number of $\epsilon$-balls in $(X,d^n_g)$ whose union covers $X$.
One can show that the \defn{topological entropy} $h_{\operatorname{top}}(g)$ of $g$, defined as
$$
h_{\operatorname{top}}(g)=\lim\limits_{\epsilon\to 0}\lim\limits_{n\to+\infty} \frac1n \log(D(g,\epsilon,n)),
$$
is well-defined and independent of $d$ as long as the topology on $X$ defined by $d$ remains the same \cite[Proposition 3.1.2]{KH95}.
Let $\mu\in \mathcal{M}(X,g)$. Let $I, J$ be countable index sets. A \defn{measurable partition} $\xi$ for $(X,\mu)$ is a countable collection $\xi=\{A_i\,|\,i\in I\}$ of Borel sets with $\mu(A_i\cap A_j)=0$ for all $i,j\in I$ with $i\neq j$, and
$$
\mu \bigg( X\setminus\bigcup\limits_{i\in I}A_i \bigg) =0.
$$
Let $\xi=\{A_i\,|\,i\in I\}$ and $\eta=\{B_j\,|\,j\in J\}$ be measurable partitions of $(X,\mu)$. Then the \defn{common refinement} $\xi \vee \eta$ of $\xi$ and $\eta$ defined as
$$
\xi \vee \eta = \{A_i\cap B_j \,|\, i\in I, j\in J\}
$$
is also a measurable partition. Let $g^{-1}(\xi)=\{g^{-1}(A_i) \,|\,i\in I\}$, and define for each $n\in\N$,
$$
\xi^n_g=\xi\vee g^{-1}(\xi)\vee\cdots\vee g^{-(n-1)}(\xi).
$$
The \defn{entropy} of $\xi$ is
$$
H_{\mu}(\xi)= - \sum\limits_{i\in I} \mu(A_i)\log\(\mu (A_i)\),
$$
where $0\log 0$ is equal to $0$ by convention. One can show (see \cite[Chapter 4]{Wa82}) that if $H_{\mu}(\xi)<+\infty$, then the following limit exists
$$
h_{\mu}(g,\xi)=\lim\limits_{n\to+\infty} \frac{1}{n} H_{\mu}(\xi^n_g) \in[0,+\infty).
$$
Then we denote the \defn{measure-theoretic entropy} of $g$ for $\mu$ by
\begin{align*}
h_{\mu}(g)=\sup\{h_{\mu}(g,\xi)\,|\, & \xi \text{ is a measurable partition of }\\
&(X,\mu) \text{ with } H_{\mu}(\xi)<+\infty\}.
\end{align*}
By the variational principle (see \cite[Theorem~8.6]{Wa82}), we have
$$
h_{\operatorname{top}}(g)=\sup\{h_{\mu}(g)\,|\,\mu\in \mathcal{M}(X,g)\}.
$$
A measure $\mu$ that achieves the supremum above is called \defn{a measure of maximal entropy} of $g$.
If $f$ is an expanding Thurston map, then
\begin{equation} \label{eqTopEntropy}
h_{\operatorname{top}} (f)=\log(\deg f),
\end{equation}
and there exists a unique measure of maximal entropy $\mu_f$ for $f$ (see \cite[Theorem~20.9]{BM10} and \cite[Section~3.4 and Section~3.5]{HP09}). Moreover, for each $n\in\N$, the unique measure of maximal entropy $\mu_{f^n}$ of the expanding Thurston map $f^n$ is equal to $\mu_f$ (see \cite[Theorem~20.7 and Theorem~20.9]{BM10}).
We recall that in a compact metric space $(X,d)$, a sequence of finite Borel measures $\mu_n$ converges in the weak$^*$ topology to a finite Borel measure $\mu$, or $\mu_n \stackrel{w^*}{\longrightarrow} \mu$, as $n\longrightarrow +\infty$ if and only if $\lim\limits_{n\to+\infty} \int\! u\,\mathrm{d}\mu_n = \int\! u\,\mathrm{d}\mu$ for each $u\in C(X)$.
We need the following lemmas for weak$^*$ convergence.
\begin{lemma} \label{lmPushforwardConv}
Let $X$ and $\widetilde{X}$ be two compact metric spaces and $\phi \:X \rightarrow \widetilde{X}$ a continuous map. Let $\mu$ and $\mu_i$, for $i\in\N$, be finite Borel measures on $X$. If
$$
\mu_i \stackrel{w^*}{\longrightarrow} \mu \text{ as } i\longrightarrow +\infty,
$$
then $\phi_*(\mu)$ and $\phi_*(\mu_i)$, $i\in\N$, are finite Borel measures on $\widetilde{X}$, and
$$
\phi_*(\mu_i) \stackrel{w^*}{\longrightarrow} \phi_*(\mu) \text{ as } i\longrightarrow +\infty.
$$
\end{lemma}
Recall for a continuous map $\phi\: X\rightarrow \widetilde X$ between two metric spaces and a Borel measure $\nu$ on $X$, the \emph{push-forward} $\phi_*(\nu)$ of $\nu$ by $\phi$ is defined to be the unique Borel measure that satisfies $(\phi_*(\nu))(B) = \nu\(\phi^{-1}(B)\)$ for each Borel set $B\subseteq \widetilde X$.
\begin{proof}
By the Riesz representation theorem (see for example, \cite[Chapter~7]{Fo99}), the lemma follows if we observe that for each $h\in C(X)$, we have
$$
\int_{\widetilde{X}} \! h \, \mathrm{d}\phi_*\mu_i=\int_X \! (h\circ \phi) \,\mathrm{d}\mu_i \stackrel{i\longrightarrow +\infty}{\longrightarrow} \int_{X} \! (h\circ \phi)\,\mathrm{d}\mu = \int_{\widetilde{X}} \!h\,\mathrm{d}\phi_*\mu.
$$
\end{proof}
\begin{lemma} \label{lmConvexCombConv}
Let $(X,d)$ be a compact metric space, and $I$ be a finite set. Suppose that $\mu$ and $\mu_{i,n}$, for $i\in I$ and $n\in\N$, are finite Borel measures on $X$, and $w_{i,n}\in [0,+\infty)$, for $i\in I$ and $n\in\N$ such that
\begin{enumerate}
\smallskip
\item $\mu_{i,n} \stackrel{w^*}{\longrightarrow} \mu$, as $n\longrightarrow +\infty$, for each $i\in I$,
\smallskip
\item $\lim\limits_{n\to+\infty} \sum\limits_{i\in I} w_{i,n} = r$ for some $r\in\mathbb{R}$.
\end{enumerate}
Then $\sum\limits_{i\in I} w_{i,n}\mu_{i,n} \stackrel{w^*}{\longrightarrow} r\mu$ as $n\longrightarrow +\infty$.
\end{lemma}
\begin{proof}
For each $u\in C(X)$ and each $n\in\N$,
\begin{align*}
& \Abs{\int \! u \,\mathrm{d}\bigg(\sum\limits_{i\in I} w_{i,n}\mu_{i,n} \bigg) - r \int \! u \,\mathrm{d}\mu } \\
\leq & \sum\limits_{i\in I} w_{i,n} \Abs{\int \! u \,\mathrm{d}\mu_{i,n} - \int \! u \,\mathrm{d}\mu } + \bigg\lvert r-\sum\limits_{i\in I} w_{i,n} \bigg\rvert \Norm{\mu}.
\end{align*}
Since $\mu_{i,n} \stackrel{w^*}{\longrightarrow} \mu$, as $n\longrightarrow +\infty$, for each $i\in I$, and $\lim\limits_{n\to+\infty} \sum\limits_{i\in I} w_{i,n} = r$, we can conclude that the right-hand side of the inequality above tends to $0$ as $n\longrightarrow +\infty$.
\end{proof}
We record the following well-known lemma, sometimes known as the Portmanteau Theorem, and refer the reader to \cite[Theorem~2.1]{Bi99} for the proof.
\begin{lemma} \label{lmPortmanteau}
Let $(X,d)$ be a compact metric space, and $\mu$ and $\mu_i$, for $i\in\N$, be Borel probability measures on $X$. Then the following are equivalent:
\begin{enumerate}
\smallskip
\item $\mu_i \stackrel{w^*}{\longrightarrow} \mu$ as $i\longrightarrow +\infty$;
\smallskip
\item $\limsup\limits_{i\to+\infty} \mu_i(F) \leq \mu(F)$ for each closed set $F\subseteq X$;
\smallskip
\item $\liminf\limits_{i\to+\infty} \mu_i(G) \geq \mu(G)$ for each open set $G\subseteq X$;
\smallskip
\item $\lim\limits_{i\to+\infty} \mu_i(B)= \mu(B)$ for each Borel set $B\subseteq X$ with $\mu(\partial B) = 0$.
\end{enumerate}
\end{lemma}
\begin{lemma} \label{lmLocalPerturb}
Let $(X,d)$ be a compact metric space. Suppose that $A_i \subseteq X$, for $i\in\N$, are finite subsets of $X$ with maps $\phi_i\:A_i\rightarrow X$ such that
\begin{equation*}
\lim\limits_{i\to+\infty} \max\{d(x,\phi_i(x))\,|\, x\in A_i\}=0.
\end{equation*}
Let $m_i\:A_i\rightarrow \mathbb{R}$, for $i\in\N$, be functions that satisfy
\begin{equation*}
\sup_{i\in\N} \Norm{m_i}_1 = \sup_{i\in\N} \sum\limits_{x\in A_i}\abs{m_i(x)} <+\infty.
\end{equation*}
Define for each $i\in\N$,
\begin{equation*}
\mu_i=\sum\limits_{x\in A_i} m_i(x)\delta_x, \quad \widetilde{\mu}_i=\sum\limits_{x\in A_i} m_i(x)\delta_{\phi_i(x)}.
\end{equation*}
If
\begin{equation*}
\mu_i \stackrel{w^*}{\longrightarrow} \mu \text{ as } i\longrightarrow +\infty,
\end{equation*}
for some finite Borel measure $\mu$ on $X$, then
\begin{equation*}
\widetilde\mu_i \stackrel{w^*}{\longrightarrow} \mu \text{ as } i\longrightarrow +\infty.
\end{equation*}
\end{lemma}
\begin{proof}
It suffices to prove that for each continuous function $g\in C(X)$,
$$
\int \! g \, \mathrm{d} \mu_i - \int \! g \,\mathrm{d} \widetilde\mu_i \longrightarrow 0 \text{ as } i \longrightarrow +\infty.
$$
Indeed, $g$ is uniformly continuous, so for each $\epsilon > 0$, there exists $N\in\N$ such that for each $n>N$ and for each $x\in A_n$, we have $\Abs{g(x)-g(\phi_n(x))}<\epsilon$. Thus
$$
\Abs{\int \! g \, \mathrm{d} \mu_n - \int \! g \,\mathrm{d} \widetilde\mu_n} \leq \sum\limits_{x\in A_n} \Abs{g(x)-g(\phi_n(x))}\Abs{m_n(x)} \leq \epsilon \sup_{n\in\N}\Norm{m_n}_1.
$$
\end{proof}
The following lemma is a reformulation of Lemma~20.2 in \cite{BM10}. We will later generalize it in Lemma \ref{lmCoverEdges}.
\begin{lemma}[M.~Bonk \& D.~Meyer, 2010] \label{lmCoverEdgesBM}
Let $f$ be an expanding Thurston map, and $\mathcal{C}\subseteq S^2$ be an $f^N$-invariant Jordan curve containing $\operatorname{post}{f}$ for some $N\in\N$. Then there exists a constant $L_0 \in [1,\deg{f})$ with the following property:
For each $m\in\N_0$ with $m \equiv 0 \pmod N$, there exists a constant $C_0>0$ such that for each $k\in\N_0$ with $k \equiv 0 \pmod N$ and each $m$-edge $e$, there exists a collection $M_0$ of $(m+k)$-tiles with $\card{M_0}\leq C_0 L_0^k$ and $e \subseteq \inte\Big( \bigcup\limits_{X\in M_0} X \Big)$.
\end{lemma}
Let $F$ be an expanding Thurston map with an $F$-invariant Jordan curve $\mathcal{C}\subseteq S^2$ containing $\operatorname{post}{F}$. As before, we let $w_w=\card \X^1_{ww} $ denote the number of white $1$-tiles contained in the white $0$-tile, $b_w=\card \X^1_{bw} $ the number of black $1$-tiles contained in the white $0$-tile, $w_b=\card \X^1_{wb} $ the number of white $1$-tiles contained in the black $0$-tile, and $b_b=\card \X^1_{bb} $ the number of black $1$-tiles contained in the black $0$-tile. We define
\begin{equation} \label{eqDefwb}
w=\frac{b_w}{b_w+w_b}, \quad b=\frac{w_b}{b_w+w_b}.
\end{equation}
Note that (see the discussion in \cite{BM10} preceding Lemma~20.1 in Chapter 20) $b_w,w_b, w,b>0$, $w+b=1$, and
\begin{equation} \label{eqWw-Bw<deg}
\abs{w_w-b_w} < \deg F.
\end{equation}
M.~Bonk and D.~Meyer give the following characterization of the unique measure of maximal entropy of $F$ (see \cite[Proposition~20.7 and Theorem~20.9]{BM10}):
\begin{theorem}[M.~Bonk \& D.~Meyer, 2010] \label{thmBMCharactMOME}
Let $F$ be an expanding Thurston map with an $F$-invariant Jordan curve $\mathcal{C}\subseteq S^2$. Then there is a unique measure of maximal entropy $\mu_F$ of $F$, which is characterized among all Borel probability measures by the following property:
for each $n\in\N_0$ and each $n$-tile $X^n\in\X^n(F,\mathcal{C})$,
\begin{equation}
\mu(X^n) = \begin{cases} w(\deg F)^{-n} & \text{if } X^n \in \X^n_w(F,\mathcal{C}), \\ b(\deg F)^{-n} & \text{if } X^n \in \X^n_b(F,\mathcal{C}). \end{cases}
\end{equation}
\end{theorem}
We now state our first characterization of the measure of maximal entropy $\mu_f$ of an expanding Thurston map $f$.
\begin{theorem} \label{thmWeakConv}
Let $f$ be an expanding Thurston map with its measure of maximal entropy $\mu_f$. Let $\mathcal{C} \subseteq S^2$ be an $f^n$-invariant Jordan curve containing $\operatorname{post}{f}$ for some $n\in\N$. Fix a visual metric $d$ for $f$. Consider any sequence of non-negative numbers $\{\alpha_i\}_{i\in\N_0}$ with $\lim\limits_{i\to +\infty} \alpha_i = 0$, and any sequence of functions $\{\beta_i\}_{i\in\N_0}$ with $\beta_i$ mapping each white $i$-tile $X^i\in\X^i_w(f,\mathcal{C})$ to a point $\beta_i(X^i) \in N_d^{\alpha_i}(X^i)$. Let
$$
\mu_i = \frac{1}{(\deg f)^i} \sum\limits_{X^i\in\X^i_w(f,\mathcal{C})} \delta_{\beta_i(X^i)}, \quad i \in \N_0.
$$
Then
$$
\mu_i \stackrel{w^*}{\longrightarrow} \mu_f \text{ as } i\longrightarrow +\infty.
$$
\end{theorem}
Recall that $N_d^{\alpha_i}(X^i)$ denotes the open $\alpha_i$-neighborhood of $X^i$ in $(S^2,d)$. This theorem says that a sequence of probability measures $\{\mu_i\}_{i\in\N}$, with $\mu_i$ assigning the same weight to a point near each white $i$-tile, converges in the weak$^*$ topology to the measure of maximal entropy. In some sense, it asserts the equidistribution of the white $i$-tiles with respect to the measure of maximal entropy.
In order to prove the above theorem, we first prove a weaker version of it.
\begin{prop} \label{propWeakConv}
Let $F$ be an expanding Thurston map with its measure of maximal entropy $\mu_F$ and an $F$-invariant Jordan curve $\mathcal{C}\subseteq S^2$ containing $\operatorname{post}{F}$. Consider any sequence of functions $\{\beta_i\}_{i\in\N_0}$ with $\beta_i$ mapping each white $i$-tile $X^i\in\X^i_w(F,\mathcal{C})$ to a point $\beta_i(X^i) \in \inte X^i$ for each $i\in\N_0$. Let
$$
\mu_i = \frac{1}{(\deg F)^i} \sum\limits_{X^i\in\X^i_w(F,\mathcal{C})} \delta_{\beta_i(X^i)}, \quad i\in \N_0.
$$
Then
$$
\mu_i \stackrel{w^*}{\longrightarrow} \mu_F \text{ as } i\longrightarrow +\infty.
$$
\end{prop}
\begin{proof}
Note that $\card \X^i_w =(\deg F)^i$, so $\mu_i$ is a probability measure for each $i\in\N_0$. Thus by Alaoglu's theorem, it suffices to prove that for each Borel measure $\mu$ which is a subsequential limit of $\{\mu_i\}_{i\in\N_0}$ in the weak$^*$ topology, we have $\mu=\mu_F$.
Let $\{i_n\}_{n\in\N} \subseteq \N$ be an arbitrary strictly increasing sequence such that
$$
\mu_{i_n} \stackrel{w^*}{\longrightarrow} \mu \text{ as } n\longrightarrow +\infty,
$$
for some Borel measure $\mu$. Clearly $\mu$ is also a probability measure.
Recall the definitions of $w,b\in(0,1)$ and $w_w,b_w,w_b,b_b$ (see (\ref{eqDefwb})). For each $m,i\in\N_0$ with $0 \leq m \leq i$, each white $m$-tile $X^m_w\in\X^m_w$, and each black $m$-tile $X^m_b\in\X^m_b$, by the formulas in Lemma~20.1 in \cite{BM10}, we have
\begin{align}
\mu_i(X^m_w) = & \frac{1}{(\deg F)^i} \card\{X^i\in\X^i_w \,|\,X^i\subseteq X^m_w\} \notag \\
= & \frac{1}{(\deg F)^i}\(w(\deg F)^{i-m}+b(w_w-b_w)^{i-m}\) \label{eqMu_iX^kW}
\end{align}
and similarly,
\begin{equation} \label{eqMu_iX^kB}
\mu_i(X^m_b) = \frac{1}{(\deg F)^i}\(b(\deg F)^{i-m}-b(w_w-b_w)^{i-m}\).
\end{equation}
\smallskip
We claim that for each $m$-tile $X^m\in\X^m$ with $m\in\N_0$, we have $\mu(\partial X^m)=0$.
To establish the claim, it suffices to prove that $\mu(e)=0$ for each $m$-edge $e$ with $m\in\N_0$. Applying Lemma \ref{lmCoverEdgesBM} in the case $f=F$ and $n=1$, we get that there exist constants $1\leq L_0<\deg F$ and $C_0>0$ such that for each $k\in\N_0$, there is a collection $M_0^k$ of $(m+k)$-tiles with $\card M_0^k\leq C_0L_0^k$ such that $e$ is contained in the interior of the set $\bigcup\limits_{X\in M_0^k} X$. So by (\ref{eqWw-Bw<deg}), (\ref{eqMu_iX^kW}), (\ref{eqMu_iX^kB}), and Lemma~\ref{lmPortmanteau}, we get
\begin{align*}
\mu(e) & \leq \mu \bigg(\inte \big(\bigcup\limits_{X\in M_0^k} X\big)\bigg) \\
& \leq \limsup_{l\to +\infty} \mu_{m+k+l} \bigg(\inte \big(\bigcup\limits_{X\in M_0^k} X\big)\bigg)\\
& \leq \limsup_{l\to +\infty} \sum\limits_{X\in M_0^k} \mu_{m+k+l}(X)\\
& \leq \sum\limits_{X\in M_0^k} \limsup_{l\to +\infty} \mu_{m+k+l}(X) \\
& \leq C_0 L_0^k\frac{w+b}{(\deg F)^{m+k}}.
\end{align*}
By letting $k\longrightarrow +\infty$, we get $\mu(e)=0$, proving the claim.
\smallskip
Thus by (\ref{eqWw-Bw<deg}), (\ref{eqMu_iX^kW}), (\ref{eqMu_iX^kB}), the claim, and Lemma~\ref{lmPortmanteau}, we can conclude that for each $m\in\N_0$, and each white $m$-tile $X^m_w\in\X^m_w$, each black $m$-tile $X^m_b\in\X^m_b$, we have that
\begin{equation*}
\mu(X^m_w) = \lim\limits_{n\to+\infty} \mu_{i_n}(X^m_w) = w(\deg F)^{-m},
\end{equation*}
\begin{equation*}
\mu(X^m_b) = \lim\limits_{n\to+\infty} \mu_{i_n}(X^m_b)= b(\deg F)^{-m}.
\end{equation*}
By Theorem~\ref{thmBMCharactMOME}, therefore, the measure $\mu$ is equal to the unique measure of maximal entropy $\mu_F$ of $F$.
\end{proof}
As a consequence of the above proposition, we have
\begin{cor} \label{corWeakConvPreimageInt}
Let $f$ be an expanding Thurston map with its measure of maximal entropy $\mu_f$. Let $\mathcal{C}\subseteq S^2$ be an $f^n$-invariant Jordan curve containing $\operatorname{post}{f}$ for some $n\in\N$. Fix an arbitrary $p \in \inte X_w^0$ where $X_w^0$ is the white $0$-tile for $(f,\mathcal{C})$. Define, for $i\in\N$,
$$
\nu_i=\frac{1}{(\deg f)^i}\sum\limits_{q\in f^{-i}(p)}\delta_q.
$$
Then
$$
\nu_i \stackrel{w^*}{\longrightarrow} \mu_f \text{ as } i\longrightarrow +\infty.
$$
\end{cor}
\begin{proof}
First observe that since $p$ is contained in the interior of the white $0$-tile, each $q\in f^{-n}(p)$ is contained in the interior of one of the white $n$-tiles, and each white $n$-tile contains exactly one $q$ with $f^n(q)=p$. So by Proposition~\ref{propWeakConv},
\begin{equation} \label{eqvni}
\nu_{ni} \stackrel{w^*}{\longrightarrow} \mu_{f^n} \text{ as } i\longrightarrow +\infty,
\end{equation}
where $\mu_{f^n}$ is the unique measure of maximal entropy of $f^n$, which is equal to $\mu_f$ (see \cite[Theorem~20.7 and Theorem~20.9]{BM10}).
Then note that for $k>1$,
\begin{equation} \label{eqfnu}
f_*\nu_k =\frac{1}{(\deg f)^k}\sum\limits_{q\in f^{-k}(p)} \delta_{f(q)}=\frac{1}{(\deg f)^{k-1}}\sum\limits_{q\in f^{-k+1}(p)} \delta_q =\nu_{k-1}.
\end{equation}
The second equality above follows from the fact that the number of preimages of each point in $f^{-k+1}(p)$ is exactly $\deg f$.
So by (\ref{eqvni}), (\ref{eqfnu}), Lemma~\ref{lmPushforwardConv}, and the fact that $\mu_f$ is invariant under pushforward of $f$ from Theorem~20.9 in \cite{BM10}, for each $k\in \{0,1,\dots,n-1\}$, we get
$$
\nu_{ni-k} = (f_*)^k \nu_{ni} \stackrel{w^*}{\longrightarrow} (f_*)^k \mu_f = \mu_f \text{ as } i\longrightarrow +\infty.
$$
Therefore
$$
\nu_i \stackrel{w^*}{\longrightarrow} \mu_f \text{ as } i\longrightarrow +\infty.
$$
\end{proof}
\begin{rems} \label{rmEquidistributionTilesWhiteBlack}
We can replace ``white'' by ``black'', $\X^i_w$ by $\X^i_b$, and $X^0_w$ by $X^0_b$ in the statements of Theorem~\ref{thmWeakConv}, Proposition~\ref{propWeakConv}, and Corollary~\ref{corWeakConvPreimageInt}. The proofs are essentially the same.
\end{rems}
\begin{proof} [Proof of Theorem~\ref{thmWeakConv}]
Fix an arbitrary $p \in \inte X_w^0$ in the interior of the white $0$-tile $X_w^0$ for the cell decomposition induced by $(f,\mathcal{C})$.
As in the proof of Corollary~\ref{corWeakConvPreimageInt}, for each $i\in\N_0$, there is a bijective correspondence between points in $f^{-i}(p)$ and the set of white $i$-tiles, namely, each $q\in f^{-i} (p)$ corresponds to the unique white $i$-tile, denoted as $X_q$, containing $q$. Then we define functions $\phi_i\: f^{-i}(p)\rightarrow S^2$ by setting $\phi_i(q)=\beta_i(X_q)$.
For our fixed visual metric $d$, there exists $C \geq 1$ and $\Lambda>1$ such that for each $n\in\N_0$ and each $n$-tile $X^n\in\X^n$, $\diam_d(X^n) \leq C \Lambda^{-n}$ (see Lemma~\ref{lmBMCellSizeBounds}). So for each $i\in \N_0$ and each $q\in f^{-i}(p)$, we have
$$
d(q,\phi_i(q)) \leq d(\phi_i(q),X_q)+\diam_d(X_q) \leq \alpha_i + C\Lambda^{-i}.
$$
Thus $\lim\limits_{i\to+\infty} \max\{d(x,\phi_i(x))\,|\, x\in f^{-i}(p)\}=0$.
For $i\in\N_0$, define
$$
\widetilde\mu_i = \frac{1}{(\deg f)^i} \sum\limits_{q\in f^{-i}(p)} \delta_q.
$$
Note that for $i\in\N_0$,
$$
\mu_i = \frac{1}{(\deg f)^i} \sum\limits_{X^i\in\X^i_w(f,\mathcal{C})} \delta_{\beta_i(X^i)} = \frac{1}{(\deg f)^i} \sum\limits_{q\in f^{-i}(p)} \delta_{\phi_i(q)}.
$$
Then by Corollary~\ref{corWeakConvPreimageInt},
$$
\widetilde\mu_i \stackrel{w^*}{\longrightarrow} \mu_f \text{ as } i\longrightarrow +\infty.
$$
Therefore, by Lemma~\ref{lmLocalPerturb} with $A_i=f^{-i}(p)$ and $m_i(x) = \frac{1}{(\deg f)^i}, i\in \N_0$, we can conclude that
$$
\mu_i \stackrel{w^*}{\longrightarrow} \mu_f \text{ as } i\longrightarrow +\infty.
$$
\end{proof}
We are now ready to prove the equidistribution of preimages of an arbitrary point with respect to the measure of maximal entropy $\mu_f$.
\begin{proof}[Proof of Theorem~\ref{thmWeakConvPreImg}]
By Theorem~1.2 in \cite{BM10} or Corollary~\ref{corCexists}, we can fix an $f^n$-invariant Jordan curve $\mathcal{C} \subseteq S^2$ containing $\operatorname{post}{f}$ for some $n\in\N$. We consider the cell decompositions induced by $(f,\mathcal{C})$.
We first prove (\ref{eqWeakConvPreImgWithWeight}).
We assume that $p$ is contained in the (closed) white $0$-tile. The proof for the case when $p$ is contained in the black $0$-tile is exactly the same except that we need to use a version of Theorem~\ref{thmWeakConv} for black tiles instead of using Theorem~\ref{thmWeakConv} literally, see Remark~\ref{rmEquidistributionTilesWhiteBlack}.
Observe that for each $i\in\N_0$ and each $q\in f^{-i}(p)$, the number of white $i$-tiles that contains $q$ is exactly $\deg_{f^i}(q)$. On the other hand, each white $i$-tile contains exactly one point $q$ with $f^i(q)=p$. So we can define $\beta_i\:\X_w^i \rightarrow S^2$ by mapping a white $i$-tile to the point $q$ in it that satisfies $f^i(q)=p$. Define $\alpha_i\equiv 0$. Theorem~\ref{thmWeakConv} applies, and thus (\ref{eqWeakConvPreImgWithWeight}) is true.
Next, we prove (\ref{eqWeakConvPreImgWoWeight}). The proof breaks into three cases.
\smallskip
Case 1. Assume that $p \notin \operatorname{post} f$. Then $\deg_f(x)=1$ for all $x\in\bigcup\limits_{n=1}^{+\infty} f^{-n}(p)$. So $\widetilde{\nu}_i = \nu_i$ for each $i\in\N$. Then (\ref{eqWeakConvPreImgWoWeight}) follows from (\ref{eqWeakConvPreImgWithWeight}) in this case.
\smallskip
Case 2. Assume that $p\in \operatorname{post} f$ and $p$ is not periodic. Then there exists $N\in\N$ such that $f^{-N}(p) \cap \operatorname{post} f = \emptyset$. For otherwise, there exists a point $z\in\operatorname{post} f$ which belongs to $f^{-c}(p)$ for infinitely many distinct $c\in\N$. In particular, there exist two integers $a>b>0$ such that $z\in f^{-a}(p)\cap f^{-b}(p)$. Then $f^{a-b}(p)= p$, a contradiction. So $\deg_f(q)=1$ for each $q\in \bigcup\limits_{x\in f^{-N}(p)}\bigcup\limits_{i=1}^{+\infty} f^{-i}(x)$. Note that for each $x\notin \operatorname{post} f$ and each $i\in\N$, the number of preimages of $x$ under $f^i$ is exactly $(\deg f)^i$. Then for each $i\in\N$, $Z_{i+N}=Z_N (\deg f)^i$, and
$$
\widetilde{\nu}_{i+N} = \frac{1}{Z_{i+N}} \sum\limits_{q\in f^{-(i+N)}(p)} \delta_q = \frac{1}{Z_N} \sum\limits_{x\in f^{-N}(p)} \bigg( \frac{1}{(\deg f)^i} \sum\limits_{q\in f^{-i}(x)} \delta_q \bigg).
$$
For each $x\in f^{-N}(p)$, by Case 1,
$$
\frac{1}{(\deg f)^i} \sum\limits_{q\in f^{-i}(x)} \delta_q \stackrel{w^*}{\longrightarrow} \mu_f \text{ as } i\longrightarrow +\infty.
$$
Thus each term in the sequence $\{\widetilde{\nu}_{i+N}\}_{i\in\N}$ is a convex combination of the corresponding terms in sequences of measures, each of which converges to $\mu_f$ in the weak$^*$ topology. Hence by Lemma~\ref{lmConvexCombConv}, the sequence $\{\widetilde{\nu}_{i+N}\}_{i\in\N}$ also converges to $\mu_f$ in the weak$^*$ topology in this case.
\smallskip
Case 3. Assume that $p\in\operatorname{post} f$ and $p$ is periodic with period $k\in\N$. Let $l=\card(\operatorname{post} f)$. We first note that for each $m,N\in\N$, the inequality
$$
Z_{m+N} \geq (Z_m - l)(\deg f)^N,
$$
and equivalently,
$$
\frac{Z_m}{Z_{m+N}} \leq \frac{1}{(\deg f)^N} + \frac{l}{Z_{m+N}}
$$
hold, since there are at most $l$ points in $f^{-m}(p)\cap \operatorname{post} f$. So by Lemma~\ref{lmPreImageDense}, for each $\epsilon > 0$ and each $N$ large enough such that ${1}/{(\deg f)^N} < {\epsilon}/{2}$ and ${l}/{Z_{m+N}} < {\epsilon}/{2}$, we get ${Z_m}/{Z_{m+N}} <\epsilon$ for each $m\in\N$. We fix $j\in\N$ large enough such that ${Z_{m-jk}}/{Z_m} < \epsilon$ for each $m>jk$. Observe that for each $m>jk$,
\begin{align} \label{eqPfthmWeakConvPreImg}
\widetilde{\nu}_m = & \frac{1}{Z_m} \sum\limits_{q\in f^{-m}(p)} \delta_q \notag \\
= & \frac{1}{Z_m} \bigg(\sum\limits_{q\in f^{-(m-jk)}(p)} \delta_q + \sum\limits_{x\in f^{-jk}(p)\setminus\{p\}} \sum\limits_{q\in f^{-(m-jk)}(x)} \delta_q \bigg) \\
= & \frac{Z_{m-jk}}{Z_m} \bigg( \frac{1}{Z_{m-jk}} \sum\limits_{q\in f^{-(m-jk)}(p)} \delta_q \bigg) + \frac{ 1}{Z_m} \sum\limits_{x\in f^{-jk}(p)\setminus\{p\}} \notag \\
& \card\(f^{-(m-jk)}(x)\) \bigg( \frac{1}{\card\(f^{-(m-jk)}(x)\) } \sum\limits_{q\in f^{-(m-jk)}(x)} \delta_q \bigg). \notag
\end{align}
Note that no point $x\in f^{-jk}(p) \setminus\{p\}$ is periodic. Indeed, if $x\in f^{-jk}(p) \setminus\{p\}$ were periodic, then $x\in \bigcup\limits_{i=0}^{k-1}f^i(p)$, and so $x$ would have period $k$ as well. Thus $x= f^{jk}(x)=p$, a contradiction. Hence by Case~1 and Case~2, for each $x\in f^{-jk}(p) \setminus\{p\}$,
$$
\frac{1}{\card\(f^{-(m-jk)}(x)\) } \sum\limits_{q\in f^{-(m-jk)}(x)} \delta_q \stackrel{w^*}{\longrightarrow} \mu_f \text{ as } m\longrightarrow +\infty.
$$
Let $\mu\in\mathcal{P}(S^2)$ be an arbitrary subsequential limit of $\{\widetilde\nu_m\}_{m\in\N}$ in the weak$^*$ topology. For each strictly increasing sequence $\{m_i\}_{i\in\N}$ in $\N$ that satisfies
$$
\widetilde{\nu}_{m_i} \stackrel{w^*}{\longrightarrow} \mu \text{ as } i\longrightarrow +\infty,
$$
we can assume, due to Alaoglu's Theorem, by choosing a subsequence if necessary, that
$$
\frac{Z_{m_i-jk}}{Z_{m_i}} \bigg( \frac{1}{Z_{m_i-jk}} \sum\limits_{q\in f^{-(m_i-jk)}(p)} \delta_q \bigg) \stackrel{w^*}{\longrightarrow} \eta \text{ as } i\longrightarrow +\infty,
$$
for some Borel measure $\eta$ with total variation $\Norm{\eta} \leq \epsilon$. Observe that for each $i\in \N$,
$$
\frac{ 1}{Z_{m_i}} \sum\limits_{x\in f^{-jk}(p)\setminus\{p\}} \card\(f^{-(m_i-jk)}(x)\) = 1- \frac{Z_{m_i-jk}}{Z_{m_i}},
$$
since $p\in f^{-jk}(p)$ and $\card\(f^{-(m_i-jk)}(p)\) = Z_{m_i-jk}$. By choosing a subsequence of $\{m_i\}_{i\in\N}$ if necessary, we can assume that there exists $r\in [0,\epsilon]$ such that
\begin{equation*}
\lim\limits_{i\to+\infty} \frac{Z_{m_i-jk}}{Z_{m_i}} = r.
\end{equation*}
So by taking the limits of both sides of (\ref{eqPfthmWeakConvPreImg}) in the weak$^*$ topology along the subsequence $\{m_i\}_{i\in\N}$, we get from Lemma~\ref{lmConvexCombConv} that $\mu = \eta + (1-r)\mu_f$. Thus
$$
\Norm{\mu-\mu_f} \leq \Norm{\eta} + r \Norm{\mu_f} \leq 2 \epsilon.
$$
Since $\epsilon$ is arbitrary, we can conclude that $\mu= \mu_f$. We have proven in this case that each subsequential limit of $\{\widetilde{\nu}_m\}_{m\in\N}$ in the weak$^*$ topology is equal to $\mu_f$. Therefore (\ref{eqWeakConvPreImgWoWeight}) is true in this case.
\end{proof}
In order to prove Theorem~\ref{thmWeakConvPrePerPts}, we will need Lemma~\ref{lmCoverEdges} which is a generalization of Lemma \ref{lmCoverEdgesBM}.
\begin{lemma} \label{lmSumLocalDegreeBound}
Let $f$ be an expanding Thurston map and $d=\deg f$. Then there exist constants $C>0$ and $\alpha\in (0,1]$ such that for each nonempty finite subset $M$ of $S^2$ and each $n\in\N$, we have
\begin{equation} \label{eqSumLocalDegreeBound}
\frac{1}{d^n} \sum\limits_{x\in M} \deg_{f^n}(x) \leq C \max \bigg\{ \( \frac{\card M}{d^n} \)^\alpha, \frac{\card M}{d^n} \bigg\}.
\end{equation}
\end{lemma}
Note that when $\card M \leq d^n$, the right-hand side of (\ref{eqSumLocalDegreeBound}) becomes $C \( \frac{\card M}{d^n} \)^\alpha$.
\begin{proof}
Let $m=\card M$. Set
\begin{equation*}
D=\prod\limits_{x\in \operatorname{crit} f} \deg_f(x).
\end{equation*}
In order to establish the lemma, we consider the following three cases.
\smallskip
Case 1: Suppose that $f$ has no periodic critical points. Then since for each $x\in S^2$ and each $n\in\N$,
\begin{equation} \label{eqDegFn}
\deg_{f^n}(x) = \deg_f(x)\deg_f(f(x))\cdots \deg_f(f^{n-1}(x)),
\end{equation}
it is clear that $\deg_{f^n}(x)\leq D$. So
\begin{equation*}
\frac{1}{d^n} \sum\limits_{x\in M} \deg_{f^n}(x) \leq D \frac{m}{d^n}.
\end{equation*}
Thus in this case, $C=D$ and $\alpha=1$.
\smallskip
Case 2: Suppose that $f$ has periodic critical points, but all periodic critical points are fixed points of $f$.
Let $T_0 = \{ x\in\operatorname{crit} f \,|\, f(x)=x \}$ be the set of periodic critical points of $f$. Then define recursively for each $i\in\N$,
$$
T_i = f^{-1}(T_{i-1}) \setminus \bigcup\limits_{j=0}^{i-1} T_j.
$$
Define $T_{-1}=S^2\setminus \bigcup\limits_{j=0}^{+\infty} T_j$, and $\widetilde T_i = S^2\setminus \bigcup\limits_{j=0}^{i} T_j$ for each $i\in\N_0$. Set $t_0 = \card T_0$. Since $\operatorname{crit} f$ is a finite set, we have $1\leq t_0 < +\infty$. Then for each $i\in\N$, we have
$$
\card T_i \leq d^i t_0.
$$
We note that if $\deg_f(x)=d$ for some $x\in T_0$, then $f^{-i}(x)=\{x\}$ for each $i\in\N$, contradicting Lemma~\ref{lmPreImageDense}. So $\deg_f(x)\leq d-1$ for each $x\in T_0$. Thus for each $x\in T_0$ and each $m\in\N$, we have
$$
\deg_{f^m} (x)\leq (d-1)^m.
$$
Moreover, for each $i,m\in\N$ with $i<m$ and each $x\in T_i$, we get
\begin{align*}
\deg_{f^m}(x) & = \deg_f(x)\deg_f(f(x))\cdots\deg_f(f^{i-1}(x))\deg_{f^{m-i}}(f^i(x)) \\
& \leq D (d-1)^{m-i}.
\end{align*}
Similarly, for each $i,m\in\N$ with $i \geq m$ and each $x\in \widetilde T_i$, we have
$$
\deg_{f^m} (x) \leq D.
$$
Thus for each $n\in\N$,
\begin{align*}
& \frac{1}{d^n}\sum\limits_{x\in M} \deg_{f^n}(x) \\
= & \frac{1}{d^n}\sum\limits_{j=-1}^{+\infty} \sum\limits_{x\in M\cap T_j} \deg_{f^{n}}(x)\\
\leq & \frac{1}{d^n} \bigg( \sum\limits_{j=0}^{n} \sum\limits_{x\in M\cap T_j} D(d-1)^{n-j} + \sum\limits_{x\in M\cap \widetilde T_n} D \bigg).
\end{align*}
Note that the more points in $M$ lie in $T_j$ with $j\in [0,n]$ as small as possible, the larger the right-hand side of the last inequality is. So the right-hand side of the last inequality is
\begin{align*}
\leq & \frac{1}{d^n} \Bigg( \sum\limits_{j=0}^{\lceil \log_d \lceil\frac{m}{t_0}\rceil\rceil} (\card T_j)D(d-1)^{n-j} +mD \Bigg) \\
\leq & \frac{D t_0}{d^n}\sum\limits_{j=0}^{\lceil \log_d \lceil\frac{m}{t_0}\rceil\rceil} d^j (d-1)^{n-j} + \frac{mD}{d^n} \\
\leq & D t_0 \( \frac{d-1}{d} \)^n \sum\limits_{j=0}^{\lceil \log_d m \rceil} \( \frac{d}{d-1} \)^j + \frac{mD}{d^n}\\
= & D t_0 \( \frac{d-1}{d} \)^n \frac{\( \frac{d}{d-1} \)^{\lceil \log_d m \rceil+1} -1 }{\frac{d}{d-1} -1 } + \frac{mD}{d^n} \\
\leq & D t_0 \( \frac{d-1}{d} \)^n \( \frac{d}{d-1} \)^{2+ \log_d m } (d-1) + \frac{mD}{d^n}\\
\leq & D t_0 \frac{d^2}{d-1} \( \( \frac{d-1}{d} \)^{n- \log_d m } + \frac{m}{d^n} \) \\
= & \frac12 E_f \( d^{(n- \log_d m ) \log_d \frac{d-1}{d}} + \frac{m}{d^n} \) \\
\leq & E_f \max \Big\{\( \frac{m}{d^n} \)^{\log_d \frac{d}{d-1}}, \frac{m}{d^n} \Big\},
\end{align*}
where $E_f= 2 D t_0 \frac{d^2}{d-1}$ is a constant that only depends on $f$. Thus in this case, $C=E_f$ and $\alpha= \log_d \frac{d}{d-1}\in(0,1]$.
\smallskip
Case 3: Suppose that $f$ has periodic critical points that may not be fixed points of $f$.
Set $\kappa$ to be the product of the periods of all periodic critical points of $f$.
We claim that each periodic critical point of $f^\kappa$ is a fixed point of $f^\kappa$. Indeed, if $x$ is a periodic critical point of $f$ satisfying $f^{\kappa p}(x)=x$ for some $p\in \N$, then by (\ref{eqDegFn}), there exists an integer $i\in\{0,1,\dots,\kappa -1\}$ such that $f^i(x)\in \operatorname{crit} f$. Then $f^i(x)$ is a periodic critical point of $f$, so $f^\kappa(f^i(x)) = f^i(x)$. Thus
\begin{align*}
f^\kappa(x) & = f^{\kappa-i}(f^i(x)) = f^{\kappa-i + \kappa}(f^i(x)) = \dots \\
& = f^{\kappa-i + (p-1)\kappa}(f^i(x)) = f^{\kappa p} (x) = x.
\end{align*}
The claim now follows.
Note that for each $n\in\N$,
\begin{equation*}
\frac{1}{d^n}\sum\limits_{x\in M} \deg_{f^n}(x)
\leq d^\kappa \frac{1}{d^{\kappa \lceil \frac{n}{\kappa}\rceil }}\sum\limits_{x\in M} \deg_{f^{\kappa \lceil \frac{n}{\kappa}\rceil }}(x).
\end{equation*}
Hence by applying Case 2 for $f^\kappa$, we get a constant $E_{f^\kappa}$ that depends only on $f$, such that the right-hand side of the above inequality is
\begin{align*}
\leq & d^\kappa E_{f^\kappa} \max \Bigg\{ \(\frac{m}{d^{\kappa \lceil \frac{n}{\kappa}\rceil }} \)^{\log_{d^\kappa} \frac{d^\kappa}{d^\kappa -1} } , \frac{m}{d^{\kappa \lceil \frac{n}{\kappa}\rceil }} \Bigg\} \\
\leq & d^\kappa E_{f^\kappa} \max \Big\{ \(\frac{m}{d^n} \)^{\log_{d^\kappa} \frac{d^\kappa}{d^\kappa -1} } , \frac{m}{d^n} \Big\}.
\end{align*}
Thus in this case $C= d^\kappa E_{f^\kappa} $ and $\alpha = \log_{d^\kappa} \frac{d^\kappa}{d^\kappa -1} \in (0,1] $.
\end{proof}
Now we formulate a generalization of Lemma~\ref{lmCoverEdgesBM}.
\begin{lemma} \label{lmCoverEdges}
Let $f$ be an expanding Thurston map, and $\mathcal{C}\subseteq S^2$ be an $f^N$-invariant Jordan curve containing $\operatorname{post}{f}$ for some $N\in\N$. Then there exists a constant $L \in [1, \deg{f})$ with the following property:
For each $m\in\N_0$, there exists a constant $D>0$ such that for each $k\in\N_0$ and each $m$-edge $e$, there exists a collection $M$ of $(m+k)$-tiles with $\card{M}\leq D L^k$ and $e \subseteq \inte \Big( \bigcup\limits_{X\in M} X\Big)$.
\end{lemma}
\begin{proof}
We denote $d=\deg f$, and consider the cell decompositions induced by $(f,\mathcal{C})$ in this proof.
\smallskip
Step 1: We first assume that for some $m\in \N$, there exist constants $L\in [1,d)$ and $D>0$ such that for each $k\in\N_0$ and each $m$-edge $e$, there exists a collection $M$ of $(m+k)$-tiles with $\card M \leq DL^k$ and $e\subseteq \inte \Big( \bigcup\limits_{X\in M} X\Big)$. Then by Proposition~\ref{propCellDecomp}(i), for each $(m-1)$-edge $e$, we can choose an $m$-edge $e'$ such that $f(e')=e$. For each $k\in\N_0$, there exists a collection $M'$ of $(m+k)$-tiles with $\card M' \leq DL^k$ and $e'\subseteq \inte \Big( \bigcup\limits_{X\in M'} X\Big)$. We set $M$ to be the collection $\{ f(X) \,|\, X\in M'\}$ of $(m-1+k)$-tiles. Then $\card M \leq \card M' \leq DL^k$ and $e\subseteq \inte \Big( \bigcup\limits_{X\in M} X\Big)$. Hence, it suffices to prove the lemma for ``each $m\in\N_0$ with $m\equiv 0 \pmod N$'' instead of ``each $m\in\N_0$''.
\smallskip
Step 2: We will prove the following statement by induction on $\kappa$:
\begin{itemize}
\smallskip
\item[] For each $\kappa \in \{0,1,\dots,N-1\}$, there exists a constant $L_\kappa \in [1, d)$ with the following property:
For each $m\in\N_0$ with $m \equiv 0 \pmod N$, there exists a constant $D_\kappa>0$ such that for each $k\in\N_0$ with $k \equiv \kappa \pmod N$ and each $m$-edge $e$, there exists a collection $M_{m,k,e}$ of $(m+k)$-tiles that satisfies $\card{M_{m,k,e}}\leq D_\kappa L_\kappa^k$ and $e \subseteq \inte \Big( \bigcup\limits_{X\in M_{m,k,e}} X\Big)$.
\smallskip
\end{itemize}
Lemma~\ref{lmCoverEdgesBM} gives the case for $\kappa=0$. For the induction step, we assume the above statement for some $\kappa \in[0,N-1]$.
Let $i\in\N_0$ and $p\in S^2$ be an $i$-vertex. We define the \defn{$i$-flower} $W^i(p)$ as in \cite{BM10} by
$$
W^i(p) = \bigcup \{ \inte c \,|\, c \in \mathbf{D}^i, p\in c \}.
$$
Note that the number of $i$-tiles in $W^i(p)$ is $2\deg_{f^i}(p)$, i.e.,
\begin{equation} \label{eqCardFlower}
\card \{X\in\X^i \,|\, p\in X \} = 2 \deg_{f^i}(p).
\end{equation}
By \cite[Lemma~7.11]{BM10}, there exists a constant $\beta\in\N$, which depends only on $f$ and $\mathcal{C}$, such that for each $i\in\N$ and each $i$-tile $X\in\X^i$, $X$ can be covered by a union of at most $\beta$ $(i+1)$-flowers.
Fix an arbitrary $m\in\N_0$ with $m\equiv 0 \pmod N$, and fix an arbitrary $m$-edge $e$.
By the induction hypothesis, there exist constants $D_\kappa >0$ and $L_\kappa \in [1,d)$ such that for each $k\in\N_0$ with $k\equiv \kappa +1 \pmod N$, there exists a collection $M_{m,k-1,e}$ of $(m+k-1)$-tiles with $\card M_{m,k-1,e} \leq D_\kappa L_\kappa^{k-1}$ and $e \subseteq \inte \Big(\bigcup\limits_{X\in M_{m,k-1,e}} X\Big)$. Each $X\in M_{m,k-1,e}$ can be covered by at most $\beta$ $(m+k)$-flowers $W^{m+k}(p)$. We can then construct a set $F\subseteq \V^{m+k}$ of $(m+k)$-vertices such that
\begin{equation} \label{eqPflmCoverEdges1}
\card F \leq \beta D_\kappa L_\kappa^{k-1}
\end{equation}
and
\begin{equation}
\bigcup\limits_{X\in M_{m,k-1,e}} X \subseteq \bigcup\limits_{p\in F} W^{m+k}(p).
\end{equation}
We define
\begin{equation}
M_{m,k,e} = \{ X\in \X^{m+k} \,|\, X\cap F\neq \emptyset \}.
\end{equation}
Then $e \subseteq \inte \Big( \bigcup\limits_{X\in M_{m,k,e}} X\Big)$, and by (\ref{eqCardFlower}),
\begin{equation} \label{eqPflmCoverEdges2}
\card M_{m,k,e} \leq \sum\limits_{p\in F} 2 \deg_{f^{m+k}}(p).
\end{equation}
Since $L_\kappa \in [1,d)$, there exists $K\in\N$, depending only on $f,\mathcal{C}, m$, and $\kappa$, such that for each $i\geq K$, we have $\beta D_\kappa L_\kappa^{i-1} \leq d^{m+i}$.
Thus by (\ref{eqPflmCoverEdges1}), (\ref{eqPflmCoverEdges2}), and Lemma~\ref{lmSumLocalDegreeBound}, for each $k\geq K$ with $k\equiv \kappa + 1 \pmod N$, there exist constants $C>0$ and $\alpha\in(0,1]$, both of which depend only on $f$, such that
\begin{align} \label{eqPflmCoverEdges3}
\card M_{m,k,e} &\leq 2 \sum\limits_{p\in F} \deg_{f^{m+k}}(p) \notag\\
&\leq 2 C d^{(m+k)(1-\alpha)} \( \beta D_\kappa L_\kappa^{k-1} \)^\alpha \\
& = 2 C d^{m(1-\alpha)} \beta^\alpha D_\kappa^\alpha L_\kappa^{-\alpha} \( d^{1-\alpha} L_\kappa^{\alpha} \)^k. \notag
\end{align}
Let $L_{\kappa +1} = d^{1-\alpha} L_\kappa^{\alpha}$. Since $L_\kappa \in [1,d)$, we get $L_{\kappa +1}\in [L_\kappa,d)\subseteq [1,d)$. Note that $L_{\kappa+1}$ only depends on $f,\mathcal{C}$, and $\kappa$. We define
\begin{equation*}
\tau = \max \bigg\{ 2 \sum\limits_{p\in V} \deg_{f^{m+i}}(p) \,|\, i \leq K, V\subseteq \V^{m+i}, \card V\leq \beta D_\kappa L_\kappa^{i-1} \bigg\}.
\end{equation*}
Since $\tau$ is the maximum over a finite set of numbers, $\tau< +\infty$. We set
\begin{equation} \label{eqPflmCoverEdges4}
D_{\kappa +1} = \max \{ \tau, 2 C d^{m(1-\alpha)} \beta^\alpha D_\kappa^\alpha L_\kappa^{-\alpha} \}.
\end{equation}
Then by (\ref{eqPflmCoverEdges2}), (\ref{eqPflmCoverEdges3}), and (\ref{eqPflmCoverEdges4}), we get that for each $k\in\N_0$ with $k\equiv \kappa+1 \pmod N$,
\begin{equation}
\card M_{m,k,e} \leq \sum\limits_{p\in F} 2 \deg_{f^{m+k}}(p) \leq D_{\kappa+1} L_{\kappa +1}^k.
\end{equation}
We note that $\tau$ only depends on $f,\mathcal{C}, m$, and $\kappa$, so $D_{\kappa+1}$ also only depends on $f,\mathcal{C},m$, and $\kappa$.
This completes the induction.
\smallskip
Step 3: Now we define
$$
L=\max\{L_\kappa \,|\, \kappa\in\{0,1,\dots,N-1\} \}.
$$
For each fixed $m\in\N_0$ with $m\equiv 0 \pmod N$, we set
$$
D=\max\{D_\kappa \,|\, \kappa\in\{0,1,\dots,N-1\} \},
$$
and for each given $k\in\N_0$ and $e\in\E^m$, let $M=M_{m,k,e}$. Then we have $\card M \leq DL^k$ and $e\subseteq \inte \Big(\bigcup\limits_{X\in M} X\Big)$. We note that here $L$ only depends on $f$ and $\mathcal{C}$, and on the other hand, $D$ only depends on $f,\mathcal{C}$, and $m$. The proof is now complete.
\end{proof}
\begin{rems}
It is also possible to prove the previous lemma by observing that $\mathcal{C}$ equipped with the restriction of a visual metric $d$ for $f$ is a quasicircle (see \cite[Theorem~1.8]{BM10}), and $S^2$ equipped with $d$ is linearly locally connected (see \cite[Proposition~16.3]{BM10}). A metric space $X$, that is homeomorphic to the plane and with $\overline{X}$ linearly locally connected and $\partial X$ a Jordan curve, has the property that $\partial X$ is porous in $\overline{X}$ (see \cite[Theorem~IV.14]{Wi07}). Then we can mimic the original proof of Lemma~20.2 in \cite{BM10}. Our proof adopted above is more elementary and self-contained.
\end{rems}
We are finally ready to prove the equidistribution of the preperiodic points with respect to the measure of maximal entropy $\mu_f$.
\begin{proof}[Proof of Theorem~\ref{thmWeakConvPrePerPts}]
Fix an arbitrary $N \geq N(f)$ where $N(f)$ is a constant as given in Corollary~\ref{corCexists} depending only on $f$. We also fix an $f^N$-invariant Jordan curve $\mathcal{C}$ containing $\operatorname{post} f$ such that no $N$-tile in $\mathbf{D}^N(f,\mathcal{C})$ joins opposite sides of $\mathcal{C}$ as given in Corollary~\ref{corCexists}. In the proof below, we consider the cell decompositions $\mathbf{D}^i(f,\mathcal{C}), i\in\N_0,$ induced by $(f,\mathcal{C})$, and denote $d=\deg f$.
Since $\xi_n^m$ and $\widetilde\xi_n^m$ are Borel probability measures for all $m\in\N_0$ and $n\in\N$ with $m<n$, by Alaoglu's Theorem, it suffices to prove that in the weak$^*$ topology, every convergent subsequence of $\{\xi_n^{m_n} \}_{n\in\N}$ and $\{\widetilde\xi_n^{m_n} \}_{n\in\N}$ converges to $\mu_f$.
\smallskip
Proof of (\ref{eqWeakConvPrePerPtsWithWeight}):
\smallskip
Let $\{ n_i \}_{i\in\N}$ be a strictly increasing sequence with
$$
\xi_{n_i}^{m_{n_i}} \stackrel{w^*}{\longrightarrow} \mu \text{ as } i\longrightarrow +\infty,
$$
for some measure $\mu$.
\smallskip
Case 1 for (\ref{eqWeakConvPrePerPtsWithWeight}): We assume in this case that there is no constant $K\in\N$ such that for all $i\in\N$, $n_i-m_{n_i} \leq K$. Then by choosing a subsequence of $\{ n_i \}_{i\in\N}$ if necessary, we can assume that $n_i - m_{n_i}\longrightarrow +\infty$ as $i\longrightarrow +\infty$.
Here is the idea of the proof in this case. By the spirit of Lemma~\ref{lmAtLeast1} and Lemma~\ref{lmAtMost1}, there is an almost bijective correspondence between the fixed points of $f^{n-m_n}$ and the $(n-m_n)$-tiles containing such points. The correspondence is particularly nice away from $\mathcal{C}$. Thus there is almost a bijective correspondence between the preperiodic points in $S_n^{m_n}$ and the $n$-tiles containing such points. So if we can control the behavior near $\mathcal{C}$, then Theorem~\ref{thmWeakConv} applies and we finish the proof in this case. Now the control we need is provided by Lemma~\ref{lmCoverEdges}.
Now we start to implement this idea. So we fix a 0-edge $e_0 \subseteq \mathcal{C}$. We observe that for each $i\in\N$, we can pair a white $i$-tile $X_w^i\in\X_w^i$ and a black $i$-tile $X_b^i\in\X_b^i$ whose intersection $X_w^i\cap X_b^i$ is an $i$-edge contained in $f^{-i}(e_0)$. There are a total of $d^i$ such pairs and each $i$-tile is in exactly one such pair. We denote by $\mathbf{P}_i$ the collection of the unions $X_w^i\cup X_b^i$ of such pairs, i.e.,
$$
\mathbf{P}_i=\{X_w^i\cup X_b^i \,|\, X_w^i\in\X_w^i,X_b^i\in\X_b^i, X_w^i\cap X_b^i\cap f^{-i}(e_0) \in \E^i \}.
$$
We denote $\mathbf{P}'_i=\{A\in \mathbf{P}_i \,|\, A\cap\mathcal{C} = \emptyset\}$.
By Lemma~\ref{lmCoverEdges}, there exist $1 \leq L < d$ and $C>0$ such that for each $i\in\N$ there exists a collection $M$ of $i$-tiles with $\card M\leq CL^i$ such that $\mathcal{C}$ is contained in the interior of the set $\bigcup\limits_{X\in M} X$. Note that $L$ and $C$ are constants independent of $i$. Observe that for each $A\in \mathbf{P}_i$ that does not contain any $i$-tile in the collection $M$, we have $A \cap X \subseteq \partial \Big( \bigcup\limits_{X\in M} X\Big)$ for each $X\in M$, so $A \cap \inte \Big( \bigcup\limits_{X\in M} X \Big)= \emptyset$. Since the number of distinct $A\in \mathbf{P}_i$ that contains an $i$-tile in $M$ is bounded above by $CL^i$, we get
\begin{equation} \label{eqCardP'_i}
\card(\mathbf{P}'_i) \geq d^i -CL^i.
\end{equation}
Note that for each $i\in\N$ and each $A\in\mathbf{P}'_i$, either $A\subseteq X_w^0$ or $A\subseteq X_b^0$ where $X_w^0$ (resp.\ $X_b^0$) is the white (resp.\ black) $0$-tile for $(f,\mathcal{C})$. So by Proposition~\ref{propCellDecomp}(i) and Brouwer's Fixed Point Theorem, there is a map $\tau\:\mathbf{P}'_i\rightarrow P_{1,f^i}$ from $\mathbf{P}'_i$ to the set of fixed points of $f^i$ such that $\tau(A)\in A$. Note that if a fixed point $x$ of $f^i$ has weight $\deg_{f^i}(x)>1$, then $x$ has to be contained in $\operatorname{post} f \subseteq \mathcal{C}$. Thus $\deg_{f^i}(\tau(A))=1$ for all $A\in\mathbf{P}'_i$.
If for some $A\in\mathbf{P}'_i$, the point $\tau(A)$ were on the boundaries of the two $i$-tiles whose union is $A$, then $\tau(A)$ would have to be contained in $\mathcal{C}$ since the boundaries are mapped into $\mathcal{C}$ under $f^i$. Thus for each $A\in\mathbf{P}'_i$, the point $\tau(A)$ is contained in the interior of one of the two $i$-tiles whose union is $A$. Hence $\tau$ is injective. Moreover,
\begin{equation} \label{eqDeg=1AwayFromC}
\deg_{f^{i+j}}(x) = 1 \text{ for each } j\in\N_0 \text{ and each } x \in \bigcup\limits_{A\in\mathbf{P}'_i} f^{-j}(\tau(A)).
\end{equation}
For each $i\in\N$, we choose a map $\beta_{n_i} \: \X_w^{n_i} \rightarrow S^2$ by letting $\beta_{n_i}(X)$ be the unique point in $f^{-m_{n_i}}(\tau(A))\cap B$ where $B\in\mathbf{P}_{n_i}$ with $X\subseteq B$, if there exists $A\in \mathbf{P}'_{n_i-m_{n_i}}$ with $f^{m_{n_i}}(X) \subseteq A$; and by letting $\beta_{n_i}(X)$ be an arbitrary point in $X$ if there exists no $A\in \mathbf{P}'_{n_i-m_{n_i}}$ with $f^{m_{n_i}}(X) \subseteq A$.
We fix a visual metric $d$ for $f$ with an expansion factor $\Lambda >1$. Note that $\Lambda$ can be chosen to depend only on $f$ and $d$. Then $\diam_d(A) < c \Lambda^{-i}$ for each $i\in\N$, where $c\geq 1$ is a constant depending only on $f$, $d$, and $\mathcal{C}$ (see Lemma~\ref{lmBMCellSizeBounds}(ii)). Define $\alpha_n=c\Lambda^{-n}$ for each $n\in\N$. Thus $\alpha_{n_i}$ and $\beta_{n_i}$ satisfy the hypothesis in Theorem~\ref{thmWeakConv}. Define $\mu_{n_i}$ as in Theorem~\ref{thmWeakConv}. Then
\begin{equation} \label{eqPfThmWeakConvPrePerPts1}
\mu_{n_i} \stackrel{w^*}{\longrightarrow} \mu_f \text{ as } i\longrightarrow +\infty,
\end{equation}
by Theorem~\ref{thmWeakConv}.
We claim that the total variation $\Norm{\mu_{n_i} - \xi_{n_i}^{m_{n_i}}}$ of $\mu_{n_i} - \xi_{n_i}^{m_{n_i}}$ converges to $0$ as $i \longrightarrow +\infty$.
Assuming the claim, then by (\ref{eqPfThmWeakConvPrePerPts1}), we can conclude that (\ref{eqWeakConvPrePerPtsWithWeight}) holds in this case.
To prove the claim, by Corollary~\ref{corNoPrePeriodicPts}, we observe that for each $i\in\N$,
\begin{align} \label{eqPfThmWeakConvPrePerPts2}
\Norm{\mu_{n_i}-\xi_{n_i}^{m_{n_i}}} \leq & \norm{\mu_{n_i} - \frac{1}{d^{n_i-m_{n_i}}} \sum\limits_{A\in\mathbf{P}'_{n_i-m_{n_i}}} \frac{1}{d^{m_{n_i}}} \sum\limits_{q\in f^{-m_{n_i}}(\tau(A))} \delta_q } \notag \\
+ & \norm{\(\frac{1}{d^{n_i}} - \frac{1}{d^{n_i} + d^{m_{n_i}}} \) \sum\limits_{A\in\mathbf{P}'_{n_i-m_{n_i}}} \sum\limits_{q\in f^{-m_{n_i}}(\tau(A))} \delta_q } \\
+ & \norm{\frac{1}{d^{n_i-m_{n_i}} + 1} \sum\limits_{A\in\mathbf{P}'_{n_i-m_{n_i}}} \frac{1}{d^{m_{n_i}}} \sum\limits_{q\in f^{-m_{n_i}}(\tau(A))} \delta_q - \xi_{n_i}^{m_{n_i}} }. \notag
\end{align}
In the first term on the right-hand side of (\ref{eqPfThmWeakConvPrePerPts2}), each $\delta_q$ in the summations cancels with the corresponding term in the definition of $\mu_{n_i}$. So the first term on the right-hand side of (\ref{eqPfThmWeakConvPrePerPts2}) is equal to the difference of the total variations of the two measures, which by (\ref{eqCardP'_i}), is
$$
\leq 1- \frac{(d^{n_i-m_{n_i}}-CL^{n_i-m_{n_i}})d^{m_{n_i}}}{d^{n_i}} = C \(\frac{L}{d} \)^{n_i-m_{n_i}}.
$$
In the second term on the right-hand side of (\ref{eqPfThmWeakConvPrePerPts2}), the total number of terms in the summations is bounded above by $d^{n_i}$. So the second term on the right-hand-side of (\ref{eqPfThmWeakConvPrePerPts2}) is
$$
\leq \Abs{\frac{1}{d^{n_i}} - \frac{1}{d^{n_i} + d^{m_{n_i}}} } d^{n_i}.
$$
In the third term on the right-hand side of (\ref{eqPfThmWeakConvPrePerPts2}), by (\ref{eqDeg=1AwayFromC}), $\deg_{f^{n_i}}(q) =1$ for each $A\in\mathbf{P}'_{n_i-m_{n_i}}$ and each $q\in f^{-m_{n_i}}(\tau(A))$. So by (\ref{eqDistrPrePerPts}) and Corollary~\ref{corNoPrePeriodicPts}, each $\delta_q$ in the summations cancels with the corresponding $\delta_q$ in $\xi_{n_i}^{m_{n_i}}$. So the third term on the right-hand-side of (\ref{eqPfThmWeakConvPrePerPts2}) is equal to the difference of the total variations of the two measures, which by (\ref{eqCardP'_i}) and Corollary~\ref{corNoPrePeriodicPts}, is
$$
\leq 1- \frac{(d^{n_i-m_{n_i}}-CL^{n_i-m_{n_i}})d^{m_{n_i}} }{(d^{n_i-m_{n_i}} + 1)d^{m_{n_i}}} = \frac{1+CL^{n_i-m_{n_i}}}{d^{n_i-m_{n_i}} + 1}.
$$
Since $n_i - m_{n_i}\longrightarrow +\infty$ as $i\longrightarrow +\infty$, each term on the right-hand-side of (\ref{eqPfThmWeakConvPrePerPts2}) converges to $0$ as $i \longrightarrow +\infty$. So
$$
\Norm{\mu_{n_i} - \xi_{n_i}^{m_{n_i}}} \longrightarrow 0 \text{ as } i\longrightarrow +\infty
$$
as claimed.
\smallskip
Case 2 for (\ref{eqWeakConvPrePerPtsWithWeight}): We assume in this case that there is a constant $K\in\N$ such that for all $i\in\N$, $n_i-m_{n_i} \leq K$. Then by choosing a subsequence of $\{ n_i \}_{i\in\N}$ if necessary, we can assume that there exists some constant $l\in[0,K]$ such that for all $i\in\N$, $n_i-m_{n_i} = l$. Note that in this case, $m_{n_i} \longrightarrow +\infty$ as $i \longrightarrow +\infty$.
Then by Corollary~\ref{corNoPrePeriodicPts} and Theorem~\ref{thmNoFixedPts},
\begin{align*}
\xi_{n_i}^{m_{n_i}} & = \frac{1}{d^{m_{n_i}}(d^l +1 )} \sum\limits_{x\in S_{n_i}^{m_{n_i}}} \deg_{f^{n_i}} (x) \delta_x \\
& = \frac{1}{d^l+1} \sum\limits_{y=f^l(y)} \deg_{f^l}(y) \bigg( \frac{1}{d^{m_{n_i}}} \sum\limits_{x\in f^{-m_{n_i}}(y)} \deg_{f^{m_{n_i}}} (x) \delta_x \bigg).
\end{align*}
By Theorem~\ref{thmWeakConvPreImg}, for each $y\in S^2$,
$$
\frac{1}{d^{m_{n_i}}} \sum\limits_{x\in f^{-m_{n_i}}(y)} \deg_{f^{m_{n_i}}} (x) \delta_x \stackrel{w^*}{\longrightarrow} \mu_f \text{ as } i\longrightarrow +\infty.
$$
So each term in the sequence $\{ \xi_{n_i}^{m_{n_i}} \}_{i\in\N}$ is a convex combination of the corresponding terms in sequences of measures, each of which converges in the weak$^*$ topology to $\mu_f$. Hence by Lemma~\ref{lmConvexCombConv}, $\{ \xi_{n_i}^{m_{n_i}} \}_{i\in\N}$ also converges to $\mu_f$ in the weak$^*$ topology. It then follows that $\mu=\mu_f$. Thus (\ref{eqWeakConvPrePerPtsWithWeight}) follows in this case.
\smallskip
Proof of (\ref{eqWeakConvPrePerPtsWoWeight}):
\smallskip
Let $\{ n_i \}_{i\in\N}$ be a strictly increasing sequence with
$$
\widetilde\xi_{n_i}^{m_{n_i}} \stackrel{w^*}{\longrightarrow} \widetilde\mu \text{ as } i\longrightarrow +\infty,
$$
for some measure $\widetilde\mu$.
\smallskip
Case 1 for (\ref{eqWeakConvPrePerPtsWoWeight}): We assume in this case that there is no constant $K\in\N$ such that for all $i\in\N$, $n_i-m_{n_i} \leq K$. Then by choosing a subsequence of $\{ n_i \}_{i\in\N}$ if necessary, we can assume that $n_i - m_{n_i}\longrightarrow +\infty$ as $i\longrightarrow +\infty$.
The idea of the proof in this case is similar to that of the proof of Case 1 for (\ref{eqWeakConvPrePerPtsWithWeight}).
We use the same notation as in the proof of Case 1 for (\ref{eqWeakConvPrePerPtsWithWeight}). Then (\ref{eqWeakConvPrePerPtsWoWeight}) follows in this case if we can prove that $\Norm{\mu_{n_i} - \widetilde\xi_{n_i}^{m_{n_i}}}$ converges to $0$ as $i \longrightarrow +\infty$.
As before, we observe that
\begin{align} \label{eqPfThmWeakConvPrePerPts3}
\Norm{\mu_{n_i}-\widetilde\xi_{n_i}^{m_{n_i}}} \leq & \norm{\mu_{n_i} - \frac{1}{d^{n_i-m_{n_i}}} \sum\limits_{A\in\mathbf{P}'_{n_i-m_{n_i}}} \frac{1}{d^{m_{n_i}}} \sum\limits_{q\in f^{-m_{n_i}}(\tau(A))} \delta_q } \notag \\
+ & \norm{\(\frac{1}{d^{n_i}} - \frac{1}{\widetilde s_{n_i}^{m_{n_i}}} \) \sum\limits_{A\in\mathbf{P}'_{n_i-m_{n_i}}} \sum\limits_{q\in f^{-m_{n_i}}(\tau(A))} \delta_q } \\
+ & \norm{\frac{1}{\widetilde s_{n_i}^{m_{n_i}}} \sum\limits_{A\in\mathbf{P}'_{n_i-m_{n_i}}} \sum\limits_{q\in f^{-m_{n_i}}(\tau(A))} \delta_q - \widetilde\xi_{n_i}^{m_{n_i}} }. \notag
\end{align}
As the first term on the right-hand side of (\ref{eqPfThmWeakConvPrePerPts2}) discussed before, the first term on the right-hand-side of (\ref{eqPfThmWeakConvPrePerPts3}) is
$$
\leq 1 - \frac{(d^{n_i-m_{n_i}}-CL^{n_i-m_{n_i}})d^{m_{n_i}}}{d^{n_i}} = C \(\frac{L}{d} \)^{n_i-m_{n_i}}.
$$
In the second term on the right-hand side of (\ref{eqPfThmWeakConvPrePerPts3}), the total number of terms in the summations is bounded above by $d^{n_i}$. By (\ref{eqCardP'_i}), (\ref{eqDeg=1AwayFromC}), and Corollary~\ref{corNoPrePeriodicPts}, we have
\begin{align} \label{eqPfThmWeakConvPrePerPts4}
& d^{m_{n_i}}(d^{n_i-m_{n_i}} +1) = s_{n_i}^{m_{n_i}} \geq \widetilde s_{n_i}^{m_{n_i}} \\
\geq & d^{m_{n_i}} \card(\mathbf{P}'_{n_i-m_{n_i}}) \geq d^{m_{n_i}}(d^{n_i-m_{n_i}}-CL^{n_i-m_{n_i}}). \notag
\end{align}
So the second term on the right-hand-side of (\ref{eqPfThmWeakConvPrePerPts3}) is
$$
\leq \Abs{\frac{1}{d^{n_i}} - \frac{1}{\widetilde s_{n_i}^{m_{n_i}}} } d^{n_i} = \Abs{1-\frac{d^{n_i}}{\widetilde s_{n_i}^{m_{n_i}}}} \leq \max\Big\{ \frac{1}{d^{n_i-m_{n_i}}}, \frac{CL^{n_i-m_{n_i}}}{d^{n_i-m_{n_i}}} \Big\}.
$$
In the third term on the right-hand side of (\ref{eqPfThmWeakConvPrePerPts3}), by (\ref{eqDeg=1AwayFromC}), $\deg_{f^{n_i}}(q) =1$ for each $A\in\mathbf{P}'_{n_i-m_{n_i}}$ and each $q\in f^{-m_{n_i}}(\tau(A))$. So by (\ref{eqDistrPrePerPts}), each $\delta_q$ in the summations cancels with the corresponding $\delta_q$ in $\widetilde\xi_{n_i}^{m_{n_i}}$. So the third term on the right-hand-side of (\ref{eqPfThmWeakConvPrePerPts3}) is equal to the difference of the total variations of the two measures, which by (\ref{eqPfThmWeakConvPrePerPts4}) and (\ref{eqCardP'_i}), for $n_i-m_{n_i}$ large enough, is
$$
\leq \frac{d^{n_i} + d^{m_{n_i}} -(d^{n_i - m_{n_i}}-CL^{n_i-m_{n_i}})d^{m_{n_i}} }{\widetilde s_{n_i}^{m_{n_i}}} \leq \frac{1+CL^{n_i-m_{n_i}}}{d^{n_i-m_{n_i}} -CL^{n_i-m_{n_i}}}.
$$
Since $n_i - m_{n_i}\longrightarrow +\infty$ as $i\longrightarrow +\infty$, each term on the right-hand-side of (\ref{eqPfThmWeakConvPrePerPts3}) converges to $0$ as $i\longrightarrow+\infty$. So we can conclude that
$$
\Norm{\mu_{n_i} - \widetilde \xi_{n_i}^{m_{n_i}}} \longrightarrow 0 \text{ as } i\longrightarrow +\infty.
$$
So $\widetilde\mu=\mu_f$. Thus (\ref{eqWeakConvPrePerPtsWoWeight}) follows in this case.
\smallskip
Case 2 for (\ref{eqWeakConvPrePerPtsWoWeight}): We assume in this case that there is a constant $K\in\N$ such that for all $i\in\N$, $n_i-m_{n_i} \leq K$. Then by choosing a subsequence of $\{ n_i \}_{i\in\N}$ if necessary, we can assume that there exists some constant $l\in[0,K]$ such that for all $i\in\N$, $n_i-m_{n_i} = l$. Note that in this case, $m_{n_i} \longrightarrow +\infty$ as $i \longrightarrow +\infty$.
Then for each $i\in\N$, we have
$$
\widetilde\xi_{n_i}^{m_{n_i}} = \frac{1}{\widetilde s_{n_i}^{m_{n_i}}} \sum\limits_{x\in S_{n_i}^{m_{n_i}}} \delta_x = \frac{1}{\widetilde s_{n_i}^{m_{n_i}}} \sum\limits_{y=f^l(y)} Z_{m_{n_i},y} \bigg( \frac{1}{Z_{m_{n_i},y}} \sum\limits_{x\in f^{-m_{n_i}}(y)} \delta_x \bigg),
$$
where $Z_{m,y} = \card \( f^{-m}(y) \)$ for each $y\in S^2$ and each $m\in\N_0$. Note that for each $i\in\N$, we have
$$
\widetilde s_{n_i}^{m_{n_i}} = \sum\limits_{y=f^l(y)} Z_{m_{n_i},y}.
$$
Denote, for each $i\in\N$ and each $y\in S^2$, the Borel probability measure $\mu_{i,y}= \frac{1}{Z_{m_{n_i},y}} \sum\limits_{x\in f^{-m_{n_i}}(y)} \delta_x$. Then by Theorem~\ref{thmWeakConvPreImg}, we have
$$
\mu_{i,y} \stackrel{w^*}{\longrightarrow} \mu_f \text{ as } i\longrightarrow +\infty.
$$
So each term in $\{ \widetilde \xi_{n_i}^{m_{n_i}} \}_{i\in\N}$ is a convex combination of the corresponding terms in sequences of measures, each of which converges in the weak$^*$ topology to $\mu_f$. Hence by Lemma~\ref{lmConvexCombConv}, $\{ \widetilde \xi_{n_i}^{m_{n_i}} \}_{i\in\N}$ also converges to $\mu_f$ in the weak$^*$ topology. It then follows that $\widetilde\mu=\mu_f$. Thus (\ref{eqWeakConvPrePerPtsWoWeight}) follows in this case.
\end{proof}
The proof of Theorem~\ref{thmWeakConvPrePerPts} also gives us the following corollary.
\begin{cor} \label{corAsympRatioNoPrePerPts}
Let $f$ be an expanding Thurston map. If $\{m_n\}_{n\in\N}$ is a sequence in $\N_0$ such that $m_n <n$ for each $n\in\N$ and $\lim\limits_{n\to+\infty} n-m_n = +\infty$, then
\begin{equation} \label{eqAsympRatioNoPrePerPts}
\lim\limits_{n\to+\infty} \frac{\widetilde s_n^{m_n}}{s_n^{m_n}} = 1.
\end{equation}
\end{cor}
\begin{proof}
By the proof of Theorem~\ref{thmWeakConvPrePerPts}, especially (\ref{eqPfThmWeakConvPrePerPts4}), we get that for each $n\in\N$,
\begin{equation}
\frac{d^{n-m_n}-CL^{n-m_n}}{d^{n-m_n}+1} \leq \frac{\widetilde s_n^{m_n}}{s_n^{m_n}} \leq 1,
\end{equation}
where $d=\deg f$.
Then (\ref{eqAsympRatioNoPrePerPts}) follows from the fact that $1\leq L <d$ and the condition that $\lim\limits_{n\to+\infty} n-m_n = +\infty$.
\end{proof}
By (\ref{eqTopEntropy}), Theorem~\ref{thmNoFixedPts}, and Corollary~\ref{corAsympRatioNoPrePerPts} with $m_n=0$ for each $n\in\N$, we get the following corollary, which is an analog of the corresponding result for expansive homeomorphisms on compact metric spaces with the \emph{specification property} (see, for example, \cite[Theorem~18.5.5]{KH95}).
\begin{cor} \label{corAsympRatioNoPerPts}
Let $f$ be an expanding Thurston map. Then for each constant $c\in (0,1)$, there exists a constant $N\in\N$ such that for each $n \geq N$,
\begin{align*}
c e^{n h_{\operatorname{top}}(f)} = c (\deg f)^n & < \card \{x\in S^2 \,|\, f^n(x)=x \} \\
& \leq \sum\limits_{x=f^n(x)}\deg_{f^n}(x) = (\deg f)^n +1 < \frac{1}{c} e^{n h_{\operatorname{top}}(f)}.
\end{align*}
In particular,
$$
\lim\limits_{n\to +\infty}\frac{\card \{x\in S^2 \,|\, f^n(x)=x \}}{\exp\(n h_{\operatorname{top}}(f)\)} = \lim\limits_{n\to +\infty}\frac{\card \{x\in S^2 \,|\, f^n(x)=x \}}{(\deg f)^n } = 1.
$$
\end{cor}
Finally, we get the equidistribution of the periodic points with respect to the measure of maximal entropy $\mu_f$ as an immediate corollary.
\begin{proof}[Proof of Corollary~\ref{corWeakConvPerPts}]
We get (\ref{eqWeakConvPerPts1}) and (\ref{eqWeakConvPerPts2}) from Theorem~\ref{thmWeakConvPrePerPts} with $m_n=0$ for all $n\in\N$. Then (\ref{eqWeakConvPerPts3}) follows from (\ref{eqWeakConvPerPts2}) and Corollary~\ref{corAsympRatioNoPerPts}.
\end{proof}
\section{Expanding Thurston maps as factors of the left-shift} \label{sctFactor}
M.~Bonk and D.~Meyer \cite{BM10} proved that for an expanding Thurs\-ton map $f$, the topological dynamical system $(S^2,f)$ is a factor of a certain classical topological dynamical system, namely, the left-shift on the one-sided infinite sequences of $\deg f$ symbols. The goal of this section is to generalize this result to the category of measure-preserving dynamical systems. The invariant measure for each measure-preserving dynamical system considered in this section is going to be the unique measure of maximal entropy of the corresponding system.
Let $X$ and $\widetilde{X}$ be topological spaces, and $f\:X\rightarrow X$ and $\widetilde{f}\:\widetilde{X}\rightarrow\widetilde{X}$ be continuous maps. We say that the topological dynamical system $(X,f)$ is a \defn{factor of the topological dynamical system} $(\widetilde{X},\widetilde{f})$ if there is a surjective continuous map $\varphi\:\widetilde X\rightarrow X$ such that $\varphi\circ\widetilde{f}=f\circ\varphi$. For measure-preserving dynamical systems $(X,g,\mu)$ and $(\widetilde{X},\widetilde{g},\widetilde{\mu})$ where $X$ and $\widetilde{X}$ are measure spaces, $g\:X\rightarrow X$ and $\widetilde{g}\:\widetilde{X}\rightarrow\widetilde{X}$ measurable maps, and $\mu\in \mathcal{M}(X,g)$ and $\widetilde{\mu}\in \mathcal{M}(\widetilde{X},\widetilde{g})$, we say that the measure-preserving dynamical system $(\widetilde{X},\widetilde{g},\widetilde{\mu})$ is a \defn{factor of the measure-preserving dynamical system} $(X,g,\mu)$ if there is a measurable map $\varphi\:\widetilde{X}\rightarrow X$ such that $\varphi\circ\widetilde{g}=g\circ\varphi$ and $\varphi_*\widetilde{\mu}=\mu$. Thus we get the following commutative diagram:
\begin{equation*}
\xymatrix{
\widetilde{X} \ar[r]^{\widetilde{f}} \ar@{->}[d]_\varphi & \widetilde{X} \ar@{->}[d]^\varphi \\
X \ar[r]^{f} & X
}
\end{equation*}
We recall a classical example of symbolic dynamical systems, namely $(J_k^\omega,\Sigma)$, where the \emph{alphabet} $J_k=\{0,1,\dots,k-1\}$ for some $k\in\N$, the \emph{set of infinite words} $J_k^\omega=\prod\limits_{i=1}^{+\infty} J_k$, and $\Sigma$ is the left-shift operator with
$$
\Sigma(i_1,i_2,\dots)=(i_2,i_3,\dots)
$$
for each $(i_1,i_2,\dots)\in J_k^\omega$. We equip $J_k^\omega$ with a metric $d$ such that the distance between two distinct infinite words $(i_1,i_2,\dots)$ and $(j_1,j_2,\dots)$ is $\frac{1}{m}$, where $m=\min\{n\in\N\,|\,i_n\neq j_n\}$.
Define the \emph{set of words of length $n$} as $J_k^n = \prod_{i=1}^n J_k$, for $n\in\N$ and $J_k^0=\{\emptyset\}$ where $\emptyset$ is considered as a word of length $0$. Denote the \emph{set of finite words} by $J_k^*=\bigcup\limits_{n=0}^{+\infty} J_k^n$. Then the left-shift operator $\Sigma$ is defined on $J_k^*\setminus J_k^0$ naturally by
$$
\Sigma(i_1,i_2,\dots,i_n)=(i_2,i_3,\dots,i_n).
$$
It is well-known that the dynamical system $(J_k^\omega,\Sigma)$ has a unique measure of maximal entropy $\mu_\Sigma$, which is characterized by the property that
$$
\mu_\Sigma \(C(j_1,j_2,\dots,j_n)\) = k^{-n},
$$
for $n\in\N$ and $j_1,j_2,\dots,j_n \in J_k$, where
\begin{equation} \label{eqCylinder}
C(j_1,j_2,\dots,j_n) = \{ (i_1,i_2,\dots)\in J_k^\omega \,|\, i_1=j_1,i_2=j_2,\dots,i_n=j_n \}
\end{equation}
is the \defn{cylinder set} determined by $j_1,j_2,\dots,j_n$ (see for example, \cite[Section~4.4]{KH95}).
We will prove that for each expanding Thurston map $f$ with $\deg f=k$ and its measure of maximal entropy $\mu_f$, the measure-preserving dynamical system $(S^2,f,\mu_f)$ is a factor of the system $(J_k^\omega,\Sigma,\mu_\Sigma)$.
We now review a construction from \cite{BM10} for the convenience of the reader.
Let $f\:S^2\rightarrow S^2$ be an expanding Thurston map, and $\mathcal{C}\subseteq S^2$ a Jordan curve with $\operatorname{post} f\subseteq \mathcal{C}$. Consider the cell decompositions induced by the pair $(f,\mathcal{C})$. Let $k=\deg f$. Fix an arbitrary point $p\in \inte X_w^0$. Let $q_1,q_2,\dots,q_k$ be the distinct points in $f^{-1}(p)$. For $i=1,\dots,k$, we pick a continuous path $\alpha_i\:[0,1] \rightarrow S^2\setminus \operatorname{post} f$ with $\alpha_i(0)=p$ and $\alpha_i(1)=q_i$.
We construct $\psi \:J_k^* \rightarrow S^2$ inductively such that $\psi(I)\in f^{-n}(p)$, for each $n\in\N_0$ and $I\in J_k^n$, in the following way:
Define $\psi(\emptyset)=p$, and $\psi((i))=q_i$ for each $(i)\in J_k^1$. Suppose that $\psi$ has been defined for all $I\in \bigcup\limits_{j=0}^n J_k^j$, where $n\in\N$. Now for each $(i_1,i_2,\dots,i_{n+1})\in J_k^{n+1}$, the point $\psi((i_1,i_2,\dots,i_{n}))\in f^{-n}(p)$ has already been defined. Since $f^n(\psi((i_1,i_2,\dots,i_{n})))=p$ and $f^n\:S^2\setminus f^{-n}(\operatorname{post} f) \rightarrow S^2\setminus \operatorname{post} f$ is a covering map, the path $\alpha_{i_{n+1}}$ has a unique lift $\widetilde{\alpha}_{i_{n+1}}\:[0,1]\rightarrow S^2$ with $\widetilde{\alpha}_{i_{n+1}} (0) = \psi((i_1,i_2,\dots,i_n))$ and $f^n\circ \widetilde{\alpha}_{i_{n+1}} = \alpha_{i_{n+1}}$. We now define $\psi((i_1,i_2,\dots,i_{n+1}))= \widetilde{\alpha}_{i_{n+1}}(1)$. Note that then
\begin{align*}
&f^{n+1}(\psi((i_1,i_2,\dots,i_{n+1})))\\
= &f^{n+1}( \widetilde{\alpha}_{i_{n+1}}(1)) = f( \alpha_{i_{n+1}}(1)) = f(q_{i_{n+1}}) =p.
\end{align*}
Hence $ \psi((i_1,i_2,\dots,i_{n+1})) \in f^{-(n+1)}(p)$. This completes the inductive construction of $\psi$.
Note that $\psi\: J_k^* \rightarrow S^2$ induces a map $\widetilde\psi \: J_k^* \rightarrow \bigcup\limits_{n=0}^{+\infty} \X_w^n$ by mapping each $(i_1,i_2,\dots,i_n)\in J_k^n$ to the unique white $n$-tile $X_w^n \in \X_w^n$ containing $\psi((i_1,i_2,\dots,i_n)) \in f^{-n}(p)$.
By the proof of Theorem~1.6 in Chapter 9 of \cite{BM10}, for each $n\in\N$, $\psi |_{J_k^n}\:J_k^n \rightarrow f^{-n}(p)$ is a bijection. Hence $\widetilde\psi|_{J^n_k} \: J_k^n \rightarrow \X_w^n$ for $n\in\N_0$, and $\widetilde\psi\: J_k^* \rightarrow \bigcup\limits_{n=0}^{+\infty} \X_w^n$ are also bijections. Moreover, by the proof of Theorem~1.6 in \cite{BM10}, we have that for each $(i_1,i_2,\dots)\in J_k^\omega$, $\{\psi((i_1,i_2,\dots,i_n))\}_{n\in\N}$ is a Cauchy sequence in $(S^2,d)$, for each visual metric $d$ for $f$. So as shown in the proof of Theorem~1.6 in \cite{BM10}, the map $\varphi \:J_k^\omega \rightarrow S^2$ defined by
\begin{equation}
\varphi ((i_1,i_2,\dots)) = \lim\limits_{n\to+\infty} \psi((i_1,i_2,\dots,i_n))
\end{equation}
satisfies
\begin{enumerate}
\smallskip
\item $\varphi$ is continuous,
\smallskip
\item $f \circ \varphi = \varphi \circ \Sigma$,
\smallskip
\item $\varphi \:J_k^\omega \rightarrow S^2$ is surjective.
\end{enumerate}
So we can now reformulate Theorem~1.6 from \cite{BM10} in the following way.
\begin{theorem}[M.~Bonk \& D.~Meyer 2010] \label{thmBMfactor}
Let $f\:S^2\rightarrow S^2$ be an expanding Thurston map with $\deg f = k$. Then $(S^2,f)$ is a factor of the topological dynamical system $(J_k^{\omega},\Sigma)$. More precisely, the surjective continuous map $\varphi\: J_k^\omega \rightarrow S^2$ defined above satisfies $f\circ \varphi =\varphi \circ \Sigma$.
\end{theorem}
We will strengthen Theorem~\ref{thmBMfactor} in the following theorem.
\begin{theorem} \label{thmLfactor}
Let $f\:S^2\rightarrow S^2$ be an expanding Thurston map with $\deg f = k$. Then $(S^2,f,\mu_f)$ is a factor of the measure-preserving dynamical system $(J_k^{\omega},\Sigma,\eta_\Sigma)$, where $\mu_f$ and $\eta_\Sigma$ are the unique measures of maximal entropy of $(S^2,f)$ and $(J_k^\omega,\Sigma)$, respectively. More precisely, the surjective continuous map $\varphi\: J_k^\omega \rightarrow S^2$ defined above satisfies $f\circ \varphi =\varphi \circ \Sigma$ and $\varphi_*\eta_\Sigma=\mu_f$.
\end{theorem}
\begin{proof}
Let $\mathcal{C} \subseteq S^2$ be a Jordan curve containing $\operatorname{post} f$. Let $d$ be a visual metric on $S^2$ for $f$ with an expansion factor $\Lambda>1$. Note that $\Lambda$ can be chosen to depend only on $f,d$, and $\mathcal{C}$. Consider the cell decompositions induced by $(f,\mathcal{C})$.
By Theorem~\ref{thmBMfactor}, it suffices to prove that $\varphi_*\eta_\Sigma=\mu_f$.
For each $n \in\N$, we fix a function $\widetilde\beta_n\: J_k^n\rightarrow J_k^\omega$ which maps each $(i_1,i_2,\dots,i_n)\in J_k^n$ to $(i_1,i_2,\dots,i_n,i_{n+1},\dots)\in J_k^\omega$, for some arbitrarily chosen $i_{n+1},i_{n+2},\dots \in J_k$ depending on $i_1,i_2,\dots, i_n$. In other words, $\widetilde\beta_n$ extends a finite word of length $n$ to an arbitrary infinite word.
Define $\beta_n = \varphi \circ \widetilde\beta_n \circ \widetilde\psi^{-1}$, for each $n\in \N$, where $\widetilde\psi$ is defined earlier in this section.
\smallskip
We claim that the maps $\beta_n \:\X_w^n \rightarrow S^2$ with $n\in\N$ satisfy the hypothesis for $\beta_n$ in Theorem~\ref{thmWeakConv}, namely,
$$
\max \{ d(\beta_n(X_w^n), X_w^n) \,|\, X_w^n \in \X_w^n \} \longrightarrow 0 \text{ as } n\longrightarrow +\infty.
$$
Indeed, by the construction of $\varphi,\widetilde\beta_n$, $\psi$ and $\widetilde\psi$ above, we have that $\beta_n$ maps a white $n$-tile $X_w^n$ to the limit of a Cauchy sequence
\begin{equation*}
\(\psi((j_1,j_2,\dots,j_m)) \)_{m\in\N}
\end{equation*}
such that $\psi((j_1,j_2,\dots,j_n))\in X_w^n$. Since for each $m\in\N$, the points $\psi((j_1,j_2,\dots,j_m))$ and $\psi((j_1,j_2,\dots,j_{m+1}))$ are joined by a lift of one of the paths $\alpha_1,\alpha_2,\dots,\alpha_k$ (defined above) by $f^m$, by Lemma~8.11 in \cite{BM10}, we have that
$$
d\(\psi((j_1,j_2,\dots,j_m)),\psi((j_1,j_2,\dots,j_{m+1}))\) \leq C \Lambda^{-m},
$$
for all $m\in\N$, where $C>0$ and $\Lambda >1$ are constants depending only on $f,\mathcal{C}$, and $d$, in particular, independent of $m$ and $(j_1,j_2,\dots)\in J_k^\omega$. So $d(\beta_n(X^n_w),X^n_w) \leq C\frac{\Lambda^{-n}}{1-\Lambda^{-1}}$ for each $n\in\N$ and each $X^n_w\in\X^n_w$. The above claim follows.
\smallskip
For $i\in\N$, define
$$
\eta_i= \frac{1}{k^i} \sum\limits_{I\in J_k^i} \delta_{\widetilde\beta_i(I)}.
$$
Observe that for all $n\in\N$ and $m\in\N$ with $m\geq n$, and each $(i_1,i_2,\dots,i_n)\in J_k^n$, we have
\begin{equation*}
\eta_m(C(i_1,i_2,\dots,i_n)) = \mu_\Sigma(C(i_1,i_2,\dots,i_n)),
\end{equation*}
where $C(i_1,i_2,\dots,i_n)$ is defined in (\ref{eqCylinder}). So by the uniform continuity of each continuous function on $J_k^\omega$, it is easy to see that
\begin{equation} \label{eqConvLS}
\eta_i \stackrel{w^*}{\longrightarrow} \eta_\Sigma \text{ as } i\longrightarrow +\infty.
\end{equation}
Note that since $\widetilde\psi|_{J^n_k} \: J_k^n \rightarrow \X_w^n$ is a bijection for each $n\in\N_0$, we have for each $i\in\N$,
\begin{equation*}
\varphi_* \eta_i
= \frac{1}{k^i} \sum\limits_{I\in J_k^i} \delta_{\varphi \circ\widetilde\beta_i(I)}
= \frac{1}{k^i} \sum\limits_{X^i\in \X_w^i} \delta_{\varphi \circ \widetilde\beta_i \circ \widetilde\psi^{-1}(X^i)}
= \frac{1}{k^i} \sum\limits_{X^i\in \X_w^i} \delta_{\beta_i(X^i)}.
\end{equation*}
Hence, by Theorem~\ref{thmWeakConv},
\begin{equation} \label{eqConvPushForwd}
\varphi_*\eta_i \stackrel{w^*}{\longrightarrow} \mu_f \text{ as } i\longrightarrow +\infty.
\end{equation}
Therefore, by (\ref{eqConvLS}), (\ref{eqConvPushForwd}), and Lemma~\ref{lmPushforwardConv}, we can conclude that $\varphi_*\eta_\Sigma=\mu_f$.
\end{proof}
\section{A random iteration algorithm for producing the measure of maximal entropy} \label{sctIteration}
In this section, we follow the idea of \cite{HT03} to prove that for each $p\in S^2$, the measure of maximal entropy $\mu_f$ of an expanding Thurston map $f$ is almost surely the limit of
$$
\frac1n \sum\limits_{i=0}^{n-1} \delta_{q_i}
$$
as $n \longrightarrow +\infty$ in the weak* topology, where $q_0=p$, and $q_i$ is one of the points $x$ in $f^{-1}(q_{i-1})$, chosen with probability $\frac{\deg_f(x)}{\deg f}$, for each $i\in\N$.
To give a more precise formulation, we will use the language of Markov process from the probability theory (see, for example, \cite{Du10} for an introduction).
\smallskip
Let $(X,d)$ be a compact metric space. Equip the space $\mathcal{P}(X)$ of probability measures with the weak$^*$ topology. A continuous map $X\rightarrow \mathcal{P}(X)$ assigning to each $x\in X$ a measure $\mu_x$ defines a \defn{random walk} on $X$. We define the corresponding \defn{Markov operator} $Q\:C(X)\rightarrow C(X)$ by
\begin{equation}
Q\phi(x)=\int \! \phi(y) \, \mathrm{d} \mu_x(y).
\end{equation}
Let $Q^*$ be the adjoint operator of $Q$, i.e., for each $\phi \in C(X)$ and $\rho\in\mathcal{P}(X)$,
\begin{equation}
\int \! Q\phi \,\mathrm{d} \rho = \int \! \phi \, \mathrm{d} (Q^*\rho) .
\end{equation}
Consider a stochastic process $(\Omega,\mathcal{F},P)$, where
\begin{enumerate}
\smallskip
\item $\Omega = \{(\omega_0,\omega_1,\dots) \,|\, \omega_i\in X, i\in\N_0 \} =\prod\limits_{i=0}^{+\infty} X$, equipped with the product topology,
\smallskip
\item $\mathcal{F}$ is the Borel $\sigma$-algebra on $\Omega$,
\smallskip
\item $P\in\mathcal{P}(\Omega)$.
\end{enumerate}
This process is a \defn{Markov process with transition probabilities $\{\mu_x\}_{x\in X}$} if
\begin{equation}
P\{\omega_{n+1} \in A \,|\, \omega_0=z_0,\omega_1=z_1, \dots, \omega_n=z_n \} = \mu_{z_n}(A)
\end{equation}
for all $n\in\N_0$, Borel subsets $A\subseteq X$, and $z_0,z_1,\dots,z_n\in X$.
The transition probabilities $\{\mu_x\}_{x\in X}$ are determined by the operator $Q$ and so we can speak of a \defn{Markov process determined by $Q$}.
\smallskip
Let $f\:S^2\rightarrow S^2$ be an expanding Thurston map with $\deg f = k$. The continuous map $S^2 \rightarrow \mathcal{P}(S^2)$ assigning to each $x\in S^2$ the probability measure
\begin{equation}
\mu_x= \frac1k\sum\limits_{y\in f^{-1}(x)} \deg_f(y) \delta_y
\end{equation}
induces the Markov operator $Q\:C(S^2)\rightarrow C(S^2)$ which satisfies
\begin{equation} \label{eqQ}
Q\phi(x)=\frac1k\sum\limits_{y\in f^{-1}(x)} \deg_f(y) \phi(y)
\end{equation}
for all $\phi\in C(S^2)$ and $x\in S^2$. To show $Q$ is well-defined, we need to prove $Q\phi(x)$ is continuous in $x\in S^2$ for each $\phi\in C(S^2)$. Indeed, by fixing an arbitrary Jordan curve $\mathcal{C}\subseteq S^2$ containing $\operatorname{post} f$, we know for each $x$ in the white $0$-tile $X^0_w$,
$$
Q\phi(x)=\frac1k\sum\limits_{X\in X^1_w}\phi(y_X),
$$
where $y_X$ is the unique point contained in the white $1$-tile $X$ with the property that $f(y_X)=x$. If we move $x$ around continuously within $X^0_w$, then each $y_X$ moves around continuously within $X$. Thus $Q\phi(x)$ restricted to $X^0_w$ is continuous in $x$. Similarly, $Q\phi(x)$ restricted to $X^0_b$ is also continuous. Hence $Q\phi(x)$ is continuous in $x\in S^2$.
Fix an arbitrary $z\in S^2$. Then there exists a unique Markov process $(\Omega,\mathcal{F},P_z)$ determined by $Q$ with
\begin{enumerate}
\smallskip
\item $\Omega = \prod\limits_{i=0}^{+\infty} S^2$, equipped with the product topology,
\smallskip
\item $\mathcal{F}$ being the Borel $\sigma$-algebra on $\Omega$,
\smallskip
\item $P_z$ being a Borel probability measure on $\Omega$ satisfying
\begin{equation*}
P_z\{\omega_{n+1} \in A \,|\, \omega_0=z,\omega_1=z_1, \dots, \omega_n=z_n \} = \mu_{\omega_n}(A)
\end{equation*}
for all $n\in\N$, Borel subset $A\subseteq S^2$, and $z_1,z_2,\dots,z_n \in S^2$.
\end{enumerate}
The existence and uniqueness of $P_z$ follows from \cite[Theorem~1.4.2]{Lo77}. Since the Markov process $(\Omega,\mathcal{F},P_z)$ is determined by $f$ as well, we will also call $(\Omega,\mathcal{F},P_z)$ the \defn{Markov process determined by $f$}.
Now we can formulate our main theorem for this section.
\begin{theorem} \label{thmRandomIntConv}
Let $f\:S^2\rightarrow S^2$ be an expanding Thurston map with its measure of maximal entropy $\mu_f$. Let $(\Omega,\mathcal{F},P_z)$ be the Markov process determined by $f$. Then for each $z\in S^2$, we have that $P_z$-almost surely,
\begin{equation}
\frac1n\sum\limits_{j=0}^{n-1} \delta_{\omega_j} \stackrel{w^*}{\longrightarrow} \mu_f \text{ as } n\longrightarrow +\infty.
\end{equation}
\end{theorem}
In other words, if we fix a point $z\in S^2$ and set it as the first point in an infinite sequence, and choose each of the following points randomly according to the Markov process determined by $f$, then $P_z$-almost surely, the probability measure equally distributed on the first $n$ points in the sequence converges in the weak$^*$ topology to $\mu_f$ as $n\longrightarrow +\infty$.
In order to prove Theorem~\ref{thmRandomIntConv}, we need a theorem by H.~Furstenberg and Y.~Kifer from \cite{FK83}.
\begin{theorem}[H.~Furstenberg \& Y.~Kifer 1983] \label{thmFK83}
Let $\Omega=\{\omega_n \in X\,|\, n\in\N_0\}$ be the Markov process determined by the operator $Q$. Assume that there exists a unique Borel probability measure $\mu$ that is invariant under the adjoint operator $Q^*$ on $\mathcal{P}(X)$. Then for each $\omega_0 \in X$, we have that $P_{\omega_0}$-almost surely,
\begin{equation}
\frac1n\sum\limits_{j=0}^{n-1} \delta_{\omega_j} \stackrel{w^*}{\longrightarrow} \mu \text{ as } n\longrightarrow +\infty.
\end{equation}
\end{theorem}
Theorem~\ref{thmRandomIntConv} follows immediately from Theorem~\ref{thmFK83} and the following lemma.
\begin{lemma} \label{lmQInvMeasure}
Let $f\:S^2\rightarrow S^2$ be an expanding Thurston map. Then the unique measure of maximal entropy $\mu_f$ for $f$ is the only measure that is invariant under the adjoint operator $Q^*\:\mathcal{P}(S^2)\rightarrow\mathcal{P}(S^2)$ of $Q\:C(S^2)\rightarrow C(S^2)$, where $Q$ is defined in (\ref{eqQ}).
\end{lemma}
\begin{proof}
Let $k=\deg f$. Fix a Jordan curve $\mathcal{C}\subseteq S^2$ with $\operatorname{post} f \subseteq \mathcal{C}$. Let $d$ be a visual metric on $S^2$ for $f$ with an expansion factor $\Lambda>1$. Note that $\Lambda$ can be chosen to depend only on $f$ and $d$. Consider the cell decompositions induced by $(f,\mathcal{C})$.
Recall $\nu_n$ defined in (\ref{eqDistrPreImg}) for a fixed $p\in S^2$. Observe that $Q^*\nu_n=\nu_{n+1}$ for all $n\in\N_0$. By Theorem~\ref{thmWeakConvPreImg},
\begin{align*}
\int \! \varphi \, \mathrm{d} (Q^*\mu_f) & = \int \! Q \varphi \, \mathrm{d} \mu_f = \lim\limits_{n\to+\infty} \int \! Q\varphi \, \mathrm{d} \nu_n = \lim\limits_{n\to+\infty} \int \! \varphi \, \mathrm{d} Q^* \nu_n \\
&= \lim\limits_{n\to+\infty} \int \! \varphi \, \mathrm{d} \nu_{n+1} = \int \! \varphi \, \mathrm{d} \mu_f .
\end{align*}
Thus $Q^*\mu_f=\mu_f$, and so $\mu_f$ is indeed invariant under $Q^*$.
By (\ref{eqQ}), for each $x\in S^2$, each $n\in\N$ and each $\varphi\in C(S^2)$, we have
\begin{equation} \label{eqQnPhi}
Q^n\varphi(x)= \frac{1}{k^n}\sum\limits_{y\in f^{-n}(x)} \deg_{f^n}(y) \varphi(y).
\end{equation}
So by Theorem~\ref{thmWeakConvPreImg}, we get
\begin{equation} \label{eqQConvPtw}
Q^n\varphi(x) -\int \! \varphi \, \mathrm{d} \mu_f \longrightarrow \ 0 \text{ as } n\longrightarrow +\infty.
\end{equation}
\smallskip
We claim that the convergence in (\ref{eqQConvPtw}) is uniform in $x$.
To prove the claim, we first assume that $x$ is in the (closed) white $0$-tile $X_w^0$. If we move $x$ around continuously within $X_w^0$, then each preimage of $x$ under $f^n$ moves around continuously within one of the white $n$-tiles $X_w^n\in\X_w^n$, for each $n\in\N$. By Lemma~\ref{lmBMCellSizeBounds}, there exists a constant $C\geq 1$ depending only on $f,\mathcal{C}$, and $d$ such that $\diam_d(X^n_w) \leq C\Lambda^{-n}$ for each $n\in\N$ and each $X_w^n\in\X_w^n$. Then by the uniform continuity of $\varphi$ and (\ref{eqQnPhi}), we have that $Q^n\varphi(x)$ converges uniformly to $\int \! \varphi \, \mathrm{d} \mu_f$ over $X_w^0$ as $n \longrightarrow+\infty$. Similarly, we have that the convergence in (\ref{eqQConvPtw}) is uniform over the black $0$-tile $X_b^0$. Hence, the convergence in (\ref{eqQConvPtw}) is uniform over $S^2$. The claim is proved.
\smallskip
Suppose that $\mu\in\mathcal{P}(S^2)$ satisfies $Q^*\mu = \mu$. Then for each $\varphi\in C(S^2)$, by the claim above, we have
\begin{align*}
\int \! \varphi(x) \, \mathrm{d} \mu(x) = & \lim\limits_{n\to+\infty} \int \! \varphi(x) \, \mathrm{d} (Q^*)^n\mu(x) \\
= & \lim\limits_{n\to+\infty} \int \! Q^n\varphi(x) \, \mathrm{d} \mu(x)\\
= & \int \! \varphi(x) \, \mathrm{d} \mu_f(x).
\end{align*}
Hence $\mu=\mu_f$.
\end{proof}
As a special case of \cite[Theorem~3.4.11]{HP09}, the next corollary follows immediately from the uniform convergence in (\ref{eqQConvPtw}):
\begin{cor} \label{corConvPullbackMeasure}
Let $f\:S^2\rightarrow S^2$ be an expanding Thurston map with its measure of maximal entropy $\mu_f$. Then for each Borel probability measure $\mu$ on $S^2$, we have
\begin{equation} \label{eqConvPullbackMeasure}
\(Q^*\)^n \mu \stackrel{w^*}{\longrightarrow} \mu_f \text{ as } n\longrightarrow +\infty,
\end{equation}
where $Q^*\:\mathcal{P}(S^2)\rightarrow\mathcal{P}(S^2)$ is the adjoint operator of $Q\:C(S^2)\rightarrow C(S^2)$ defined in (\ref{eqQ}).
\end{cor}
\begin{proof}
By the claim proved in the proof of Lemma~\ref{lmQInvMeasure}, the convergence in (\ref{eqQConvPtw}) is uniform in $x$ for each $\varphi\in C(S^2)$. Thus by integrating (\ref{eqQConvPtw}) over $S^2$ with respect to $\mu$, we get
\begin{equation*}
\int \! Q^n\varphi \, \mathrm{d} \mu \longrightarrow \ \int \! \varphi \, \mathrm{d} \mu_f \text{ as } n\longrightarrow +\infty,
\end{equation*}
from which (\ref{eqConvPullbackMeasure}) follows.
\end{proof}
\begin{rem}
The operator $Q$ as defined in (\ref{eqQ}) is actually the Ruelle operator for an expanding Thurston map, in the special case when the potential is identically $0$. For some background of the Ruelle operator and the thermodynamical formalism, see, for example, \cite{Ru89, PU10}, and in the context of expanding Thurston maps, see \cite{Li13}. More generally, we prove in \cite{Li13} that for each expanding Thurston map and each H\"{o}lder continuous (with respect to any visual metric) potential $\phi$, there exists a unique equilibrium state, which is exact, and in particular, mixing. As a generalization of the measure of maximal entropy, an equilibrium state is an invariant probability measure that maximizes the pressure, which in turn is a generalization of the topological entropy. Moreover, we prove in \cite{Li13} that the equilibrium state is the unique probability measure invariant under the adjoint of the Ruelle operator $\mathcal{R}_{\widetilde\phi}$ corresponding to the H\"{o}lder continuous potential $\widetilde\phi$ determined by $\phi$. In the case when $\phi=0$, we have $\widetilde\phi=0$. Thus Lemma~\ref{lmQInvMeasure} follows from the more general result in \cite{Li13}. The direct proof of Lemma~\ref{lmQInvMeasure} we included above is much simpler though.
\end{rem}
| {
"redpajama_set_name": "RedPajamaArXiv"
} | 5,484 |
\section{Introduction and summary}
Recently, it has become clear that the problem of moduli stabilization may
find its resolution in the context of flux compactifications (see
{\it e.g.} \cite{reva, revb, revc} for recent reviews). In most recent models
(starting with \cite{kklt}) a crucial role is played by nonperturbative effects
which can generate a superpotential for the K\"{a}hler moduli.
Within the context of M-theory compactifications on Calabi-Yau fourfolds,
as was first noted in \cite{w}, the
nonperturbative effects arise from fivebrane instantons wrapping internal
divisors. In a dual IIB picture
this setup is equivalent to compactifications on Calabi-Yau threefolds, with
instantons arising from D3-branes wrapping internal divisors.
In \cite{w} Witten showed that in the absence of flux
a necessary condition for the generation of a superpotential
is that the divisor which the fivebrane wraps possesses a certain
topological property: its arithmetic genus must be equal to one.
When there are exactly two fermion zeromodes (corresponding to rigid isolated cycles)
a superpotential {is} indeed generated. If more zeromodes are present, cancellations
may occur. The lift of the arithmetic genus criterion to F-theory in general and
IIB orientifolds in particular, was given by Robbins and Sethi in \cite{rs}.
Recently attention has been drawn to the possibility that
the arithmetic genus criterion may be violated in the presence of
flux \cite{ktt, saul, kall} (a discussion of the effects of flux
was already presented in \cite{rs}). The authors of \cite{kall} defined a flux-dependent generalization of the
arithmetic genus, $\chi_F$,
to be discussed in more detail in the following. $\chi_F$ is not, strictly-speaking, an index: it
cannot be defined as the dimension of the kernel minus the dimension of the cokernel of some operator.
At present it is not clear what the arithmetic genus criterion should be replaced
by in the presence of fluxes.
In particular, it is not clear whether the arithmetic genus criterion
should simply be replaced by the condition $\chi_F=1$ or not.
Moreover, it is conceivable that
instantons with four or more fermionic
zero-modes contribute to the superpotential\footnote{Instantons with more
than two zeromodes are known to contribute to higher-derivative and/or
multi-fermion couplings \cite{bw}. Here we examine whether such instantons
can contribute to the {\it superpotential}.},
as there exist higher-order fermionic terms in the
worldvolume action of the fivebrane
which may be used in order to soak up the extra zero modes.
Clarifying these
issues is crucial for
realistic model-building.
The computation of M-theory instantons goes back to the work of Becker et al \cite{bbs}.
These techniques were further elaborated by Harvey and Moore \cite{hm} in the context
of $G_2$ compactifications.
The subject of fivebrane instantons in M-theory has largely remained
unexplored, mainly due to the exotic nature of the fivebrane worldvolume theory.
Instanton effects in heterotic M-theory have been considered in \cite{ovru, lima, angu, buch}.
Further progress beyond the computation of instantons with
two zeromodes has been hindered by the lack of knowledge of the theta-expansions of the
supervielbein and $C$-field in eleven-dimensional superspace.
Recently there have been technical advances in this direction reported in \cite{t}, which applies
the normal-coordinates approach \cite{norcor} to the case of eleven-dimensional
superspace.
Using this method, the expression for linear backgrounds
was derived to all orders in $\theta$, i.e. up to and including terms
of order $\theta^{32}$. This
constitutes significant progress, taking into account the fact that
previously this expansion
was known explicitly only to order $\theta^2$ \cite{nicolai}. Results exact in the background fields
were also presented up to and including terms of order $\theta^5$.
It is the purpose of this paper to perform
an explicit computation in the case of fivebrane instantons with four
fermion zeromodes, in the context of M-theory compactifications on Calabi-Yau fourfolds
in the presence of (normal) flux. We find that no superpotential is generated in
this case. Therefore, our result does not rule out the possibility that in the presence of flux
the arithmetic genus criterion should be replaced by the condition $\chi_F=1$.
As this is a somewhat technical paper, in the following subsections of the introduction
we have tried to put it in context and to summarize
in a self-contained way the strategy and the result of the computation.
\subsection{Review of the arithmetic genus criterion }
In \cite{w} Witten argued that M-theory compactifications on Calabi-Yau fourfolds may generate
a nonzero superpotential in three dimensions
through fivebrane instantons wrapping divisors of arithmetic genus one.
We will now review his argument:
consider a supersymmetric M-theory background of the form
$\mathbb{R}^{1,2}\times X$, where $X$ is a Calabi-Yau fourfold\footnote{Eventually
we will work in Euclideanized eleven-dimensional space.}. Provided a certain
topological condition is satisfied, this is a consistent M-theory background \cite{wittflux,wittseth}.
Compactification
on $X$ results in an ${\cal N}=2$ theory in three dimensions (four real supercharges).
This theory is very similar to a supersymmetric ${\cal N}=1$ theory in four dimensions,
and we may think of it (although this is not necessary)
as a dimensional reduction from four to three dimensions. Similarly to the case in four dimensions,
the kinetic terms are obtained by integration over the whole superspace, whereas the Yukawa couplings and the
mass terms are obtained by integrating over half the superspace (F-terms). Crucially,
powerful nonrenormalization theorems prevent radiative corrections to the F-terms.
Let us now describe the structure of the so-called `linear multiplets',
which play a distinguished role in the discussion of \cite{w} and in the following:
the bosonic part of a linear multiplet in four dimensions
consists of a second-rank antisymmetric tensor
and a real scalar. The fact that the antisymmetric
tensor is dual in four dimensions to a scalar, can be promoted at the
level of superfields to a duality between linear and chiral supermultiplets.
Upon reduction to three dimensions the chiral multiplets
give rise to chiral multiplets, whereas the linear multiplets become vector multiplets.
In analogy to the situation in four dimensions, a
vector in three dimensions is dual to a scalar {\it provided there is no Chern-Simons term}
arising from the compactification on the fourfold.
In absence of fluxes there is indeed no Chern-Simons term which could
obstruct the dualization,
but this is generally no longer the case in the presence of fluxes \cite{haaca, haacb}.
To be more explicit:
upon compactification of M-theory on a Calabi-Yau fourfold, one obtains $b_2$ vectors
from the threeform gauge field
\begin{align}
C=\sum_{I=1}^{b_2}A^I(x)\wedge \omega_I+\dots ~,
\end{align}
where $x$ is a (three-dimensional) spacetime coordinate and $\{\omega_I, ~I=1,\dots b_2\}$ is a basis of
$H^{2}(X,\mathbb{R})$, which of course coincides with $H^{1,1}(X,\mathbb{R})$ for a Calabi-Yau fourfold.
In the absence of a
Chern-Simons term in
three dimensions the $A^I$s can be dualized to $b_2$ scalars, which we will call
the `dual scalars' $\phi^I_D$, $d\phi^I_D=\star dA^I$. Note that perturbatively there are
Peccei-Quinn symmetries whereby the dual scalars are shifted by constants; as we will see in the following,
these continuous symmetries
can be broken by instantons to discrete subgroups thereof.
In addition to the $\phi_D^I$s there are $b_2$ scalars, $\phi^I$, from the deformations of the
K\"{a}hler form $J$,
\begin{align}
J=\sum_{I=1}^{b_2}\phi^I(x)\omega_I
~.
\label{jexp}
\end{align}
After dualization, the bosonic fields of each vector multiplet in three
dimensions (these are the `descendants' of the
linear multiplets in four dimensions) consist of a pair of real scalars ($\phi^I$, $\phi^I_D$). The superpotential
$W$ depends holomorphically on $\phi^I+i\phi^I_D$.
Following \cite{w}, we note that all terms in the superpotential depend on the vector multiplets.
Indeed if there were any terms in the superpotential which did not depend on the vector multiplets, they
could be computed by scaling up the metric of $X$ (since such terms would be independent of the
K\"{a}hler class, which belongs to the vector multiplets). But in the limit where the metric is scaled up,
M-theory reduces to supergravity and $\mathbb{R}^{1,2}\times X$ becomes an exact solution --
showing that there is no superpotential in this case.
To look for instantons which may generate a superpotential, we note that the threeform gauge field
is (magnetically) sourced by the fivebrane. Hence, a relevant instanton in three dimensions
is seen from the eleven-dimensional point-of-view
as a fivebrane wrapping a six-cycle $\Sigma$ in the Calabi-Yau fourfold. In order
for the instanton to preserve half the supersymmetry (so that it may
generate an F-term), the cycle $\Sigma$ must be
a holomorphic divisor. This fact is re-derived in detail in section \ref{supersymmetriccycles},
in the presence of normal flux.
As can be verified explicitly,
the contribution of the instanton includes the classical factor
\begin{align}
\int d^2\theta_0 ~e^{-(\mathrm{Vol}_{\Sigma}+i\phi_D)}~,
\label{gras}
\end{align}
where $\mathrm{Vol}_{\Sigma}$ is the volume (in units of
the eleven-dimensional Planck length $l_P$) of the six-cycle the fivebrane is wrapping, and $\phi_D$ is
the linear combination of dual scalars which constitutes the superpartner of $\mathrm{Vol}_{\Sigma}$. I.e.
the scalars ($\mathrm{Vol}_{\Sigma}$, $\phi_D$) form the real and imaginary parts of a chiral superfield,
as is expected
from the holomorphic property of the superpotential (which is, in its turn, a consequence of supersymmetry).
For the generation of a superpotential, the
fermionic terms in the fivebrane action should conspire so as to soak up all but two of the
fermion zeromodes. The Grassmann integration in (\ref{gras}) above is the integration over the
remaining fermionic zeromodes. As was then argued in \cite{w}, apart from the classical factor above,
the superpotential should be independent of the K\"{a}hler class. This is because the
dependence on $\phi_D$ is fixed by the magnetic charge of the instanton, and so the dependence on
$\mathrm{Vol}_{\Sigma}$ is in its turn fixed by holomorphy.
Apart from the classical factor above,
the steepest-slope approximation of the path integral around the fivebrane instanton includes a
one-loop determinant, which is independent of the K\"{a}hler class but depends
holomorphically on the complex structure moduli. The one-loop result
is in fact exact, as higher loops do not contribute to the superpotential. This can be seen as follows:
higher loops would be proportional to positive powers of $l_P$ and would therefore
scale as inverse powers of the volume; but, as already mentioned, apart from the classical
factor the superpotential cannot depend on the K\"{a}hler class.
A necessary criterion for a divisor $\Sigma$ to contribute to the superpotential is that
its arithmetic genus $\chi$,
\begin{align}
\chi=\sum_{p=0}^3(-1)^p h^{p,0}(\Sigma)~,
\end{align}
is equal to one. This was arrived at in \cite{w} by the following line of arguments:
first note that, in the limit where $\Sigma$ is scaled up, the $U(1)$
rotations along the normal direction to $\Sigma$ inside the fourfold become an exact symmetry
(dubbed `$W$-symmetry' in \cite{w}) of M-theory. On the other hand, in the absence of fluxes
the worldvolume theory of the fivebrane has a one-loop $W$-anomaly equal to $\chi$. It must then
be that the exponential in (\ref{gras}) has $W$-charge equal to $-\chi$.\footnote{Note that
Witten's paper \cite{w} was written before the cancellation of the normal-bundle anomaly of the
fivebrane was properly understood in \cite{hmm}. It would be interesting to
derive this result directly using the techniques of \cite{hmm}.}
Moreover, it is straightforward to see that
the fermionic zeromode measure carries $W$-charge equal to one. It follows that
a necessary condition for the generation of a superpotential is $\chi=1$;
this is the arithmetic genus criterion.
\subsection{Caveats to the arithmetic genus criterion}
\label{caveats}
As already anticipated in \cite{w}, the arithmetic genus criterion may
be violated in cases where the assumption of $W$-symmetry fails. This can occur if there
are couplings of the fermions to normal
derivatives of the background fields (i.e. normal to the divisor $\Sigma$ inside $X$).
Indeed, in the presence of flux such couplings
are present already in the `minimal' quadratic-fermion action $\theta\slsh{\cal D}\theta$,
where $\slsh{\cal D}$ is a flux-dependent Dirac operator which we will define more precisely in the following.
Even in the absence of flux,
$W$-violating couplings will generally be present at higher orders in the fermions, they will
however be suppressed in the large-volume limit.
A further complication is the following:
in the presence of flux, there is a Chern-Simons term in the three-dimensional
low-energy supergravity,
\begin{align}
T_{IJ}d\phi^I\wedge A^J~,
\label{cs}
\end{align}
which will a priori obstruct the straightforward
dualization of the vectors $A^I$ to scalars $\phi_D^I$ \cite{haaca,haacb}.
One may therefore worry about the fate of holomorphy, on which the derivation of the
arithmetic genus criterion relied. (Recall that the holomorphic property of the superpotential allowed us to take
the large-volume limit in which the $W$-symmetry becomes exact). The object $T_{IJ}$ which enters
the Chern-Simons term above is a constant symmetric matrix given by
\begin{align}
T_{IJ}&:=\frac{\partial^2 T }{\partial\phi^I\partial\phi^J}=\int_X F\wedge\omega_I\wedge\omega_J\nonumber\\
T&:=\frac{1}{2}\int_X F\wedge J\wedge J=\frac{1}{2}T_{IJ}\phi^I\phi^J~,
\label{tdef}
\end{align}
where $F$ is the internal component of the fourform flux. Its quantization condition is equivalent to
the expansion
\begin{align}
F=\sum_{a=1}^{b_4}n^a\omega_a+\sum_{I=1}^{b_2}dA^I\wedge\omega_I~,
\end{align}
where $\{ \omega_a, ~a=1\dots b_4\}$ is a basis of $H^4(X,\mathbb{Z})$, and
the $n^a$s are integers.
An additional effect of the flux is the gauging of the Peccei-Quinn isometries.
The gauging is completely determined by the constant matrix $T_{IJ}$.
Contrary perhaps to the na\"{\i}ve expectation, the dualization
of vectors to scalars can proceed more-or-less straightforwardly also
in the case with fluxes. Let us assume for simplicity that we work in a basis
of $H^{2}(X,\mathbb{R})$ such that $T_{IJ}$ is diagonal, and for the moment let's assume that the
complex structure moduli are frozen. It then
follows from the work of \cite{bhs} (which is based on general results on three-dimensional
gauged supergravities \cite{whs}) that {\it (i)} the isometries $\phi_D^I\rightarrow \phi_D^I+\mathrm{constant}$
corresponding to zero eigenvalues of $T_{IJ}$ are {\it not} gauged and {\it (ii)} if
$\phi_D^I\rightarrow \phi_D^I+\mathrm{constant}$ is an isometry which {\it does} get gauged, the superpotential
cannot depend on $\phi_D$ (nor can it depend on the K\"{a}hler modulus $\phi^I$, by holomorphy).\footnote{On
the other hand, if there are additional fields
which are charged under the gauge potential, this conclusion may be relaxed \cite{haack}.
We thank M. Haack for pointing this out.
In the present context, such phenomena may arise presumably in the presence of M2 branes
\cite{gano} and will not
be examined here. }
This picture is consistent with the conclusions of \cite{poortomasiello} who find (in the context of
IIA string theory) that those isometries which are gauged by the flux are protected from quantum corrections.
\subsection{The results of the present paper}
In the presence of fluxes, the scalar potential of the low-energy three-dimensional supergravity is still
given in terms of the holomorphic superpotential $W$, but in addition will also generally depend on $T$.
On the other hand the fermion bilinears
\begin{align}
\chi^I\chi^JD_ID_JW+\mathrm{c.c.}~,
\label{fbils}
\end{align}
where $D_I$ is a K\"{a}hler-covariant derivative,
solely depend on the holomorphic superpotential, $W$, even in the presence of fluxes \cite{whs}. (
Fermion mass terms of the form $\bar{\chi}^I\chi^J M_{IJ}$ do depend on $T$, as we will see in
section \ref{gravitinokkreduction}). Hence, a straightforward
way to obtain instanton corrections to the superpotential is to compute the coupling (\ref{fbils}).
For the purpose of examining the possible generation of a superpotential by instanton effects,
it follows from the discussion in section \ref{caveats} that
we only need examine whether the coupling (\ref{fbils}) is generated for fermions
$\chi^I$ which correspond to zero eigenvalues of $T_{IJ}$ (we may consider a
basis where $T_{IJ}$ is diagonal, for simplicity).
Hence, we may assume that the K\"{a}hler moduli corresponding
to nonzero eigenvalues of $T_{IJ}$ are frozen to zero\footnote{Examples of fourfolds for which there are
choices of fourform flux such that $T_{IJ}$ vanishes identically, were examined in \cite{mayra}.}.
In other words we can assume, as follows
from (\ref{jexp},\ref{tdef}),
that we are in the region of the K\"{a}hler moduli space where:
\begin{align}
\int_{X}F\wedge J\wedge\omega_I =0; ~~~~~I=1\dots b_2~.
\label{wderiv}
\end{align}
If no such region exists, {\it i.e.} if $T_{IJ}$ has no zero eigenvalues, all isometries are gauged
and there can be no superpotential dependence on the K\"{a}hler moduli:
the superpotential is protected against instanton contributions. Moreover, condition (\ref{wderiv})
implies that
\begin{align}
\omega_I\lrcorner F=0~,
\label{dkn}
\end{align}
for all $\omega_I$s corresponding to zero eigenvalues of $T_{IJ}$.
This observation simplifies somewhat the rather tedious computational task of this paper.
In particular, we may assume we are in the region of the K\"{a}hler moduli space where the
flux is primitive: $J\lrcorner F=0$. Furthermore, for the purposes of the present
computation we may assume that the complex structure moduli are frozen to values
such that the internal fourform flux is of type (2,2). These are exactly the
conditions which ensure that
{\it the flux is compatible with supersymmetry},
as we will see in detail in section \ref{mtheoryonfourfolds}.
Despite the fact that certain conceptual subtleties remain, there are clear rules for instanton
computations in M-theory first put forward in \cite{bbs} and
subsequently elucidated in \cite{hm}. We will schematically describe the procedure here, relegating
the details to the main body of the paper. In order to compute the instanton
contribution to the coupling (\ref{fbils}), one first decomposes the eleven-dimensional gravitino
in terms of three-dimensional fermions $\chi^I$,
\begin{align}
\Psi_m=\chi^I\otimes\Omega_{I,m}\xi~,
\label{kkgravit}
\end{align}
where $\xi$ is the covariantly constant spinor of the Calabi-Yau fourfold\footnote{In the
presence of flux, the internal space becomes a warped Calabi-Yau. As we will see, however, the
effect of the warp factor can be ignored at leading order in the large-volume expansion.}
and $\Omega_I$ is a one-form on $X$ valued in the Clifford algebra $Cl(TX)$. Next, from the
fivebrane action one reads off the coupling of the eleven-dimensional
gravitino to the fivebrane worldvolume fermion $\theta$, schematically:
\begin{align}
V=\sum_{n} c_n\Psi\theta^{2n+1} ~,
\end{align}
for some, possibly flux-dependent, `coefficients' $c_n$.
The coupling $V$ is the `gravitino vertex operator'. Finally, to read off the
coefficient $D_ID_JW$ in (\ref{fbils}) one evaluates the correlator $\langle VV\rangle$
in the worldvolume theory of the fivebrane.
Note that the worldvolume fermions are valued in the normal bundle to the fivebrane, which is
the sum of $T\mathbb{R}^3$ (after passing to Euclidean signature)
and the normal bundle to the divisor inside the fourfold.
Thus, each worldvolume
fermion should be thought of as tensored with a two-component spinor of $Spin(3)$.
The main result of the present paper is that {\it instantons with exactly four fermionic zeromodes
do not contribute to the superpotential.} In deriving this result we have made the
simplifying assumption that both the curvature of the worldvolume self-dual tensor
as well as the pull-back of the threeform flux onto the
worldvolume vanish. This is what we call the condition of `normal flux'.
One major technical difficulty with the present computation is the explicit expansion of the
fivebrane action in terms of the worldvolume fermion, the so-called
`theta-expansion'. This, in its turn,
stems from the theta-expansion of the eleven-dimensional background superfields
on which the fivebrane action depends. Until recently, this expansion had only been fully worked
out to quadratic order in the fermions. The present computation is now possible
thanks to the recent results of \cite{t} in which, among other things, the
theta-expansion of the eleven-dimensional superfields was computed explicitly
to fifth order in the fermions.
We should at this point elaborate on what we mean by `the fivebrane action'. The fivebrane dynamics
was given in terms of covariant field equations in \cite{howea, howeb}. For the application
we are interested in, however, one needs to work with an action. As is well known, the
worldvolume theory of the fivebrane contains a self dual antisymmetric tensor which
renders the formulation of an action problematic. A covariant supersymmetric action
for the fivebrane can be constructed
with the help of an auxiliary scalar \cite{pst}. Alternatively, the auxiliary field can
be eliminated at the expense of explicitly breaking Lorentz invariance \cite{schw}. The equivalence of all
different formulations was shown in \cite{equi}. Here we will
use the covariant action of \cite{pst}.
An important cautionary remark is in order.
In \cite{wfive} Witten pointed out that a useful way to define the action of a
self-dual field is in terms of
a Chern-Simons theory in one dimension higher. This definition, for spacetime
dimensions higher than two, involves a suitable
generalization of the notion of spin structure -- on
a choice of which the self-dual action depends. These issues have been recently
clarified by Belov and Moore \cite{beloa, belob}. Unfortunately, the action of \cite{pst} does not take
these topological aspects into account; it is however at present our only available
(covariant) { supersymmetric action} for the fivebrane.
\subsection{Outline}
We now give a detailed plan of the rest of the paper. Section \ref{thetaexpansions}
relies on \cite{t} treating
the theta-expansion of the various superfields of the eleven-dimensional background,
with the aim of applying it to the worldvolume theory of the fivebrane. The theta
expansion of the sixform potential was not considered in \cite{t}, and this is addressed in
section \ref{thetaexpansions1}. The worldvolume theory of the fivebrane
is considered in section \ref{pst} in the framework of the covariant action of \cite{pst}.
Eventually we make the simplifying assumption that the flux is `normal', {\it i.e.}
that both the field-strength of the worldvolume antisymmetric tensor and the pull-back of the background
threeform flux onto
the fivebrane worldvolume, vanish.
The main result of this section is the form of the gravitino vertex operator
in the case of normal flux, equation (\ref{grv}).
Section \ref{mtheoryonfourfolds}
considers M-theory backgrounds of the form of a warp product
$\mathbb{R}^{1,2}\times_{w}X$, where $X$ is a Calabi-Yau fourfold.
(Eventually we Wick-rotate to Euclidean signature and take the large-volume limit
in which the warp factor becomes trivial).
Requiring
${\cal N}=2$ supersymmetry in three dimensions (four real supercharges) implies certain
restrictions on the fourform flux, equation (\ref{gform}).
Next we consider fivebrane instantons such that the worldvolume
wraps a six-cycle ${\Sigma} \subset X$ and we assume that $X$ can be
thought of as the total space
of the normal bundle of ${\Sigma}$ inside $X$. As discussed in the introduction,
this approximation becomes
more accurate as the size of ${\Sigma}$ is scaled up.
Imposing the normal-flux condition,
the form of the background flux simplifies further, equations
(\ref{fffn}, \ref{nfff}).
In section \ref{supersymmetriccycles}
we show that, in the case of normal flux, demanding that the instanton
preserve one-half the supersymmetry of the background
implies that ${\Sigma}$ is an
(anti)holomorphic cycle. Section \ref{zeromodes}
treats the worldvolume fermion zeromodes of
the flux-dependent Dirac operator,
equation (\ref{dirac}). After decomposing the background fermion
in terms of forms on the fivebrane, we
derive the explicit expression of the fermion zeromodes (\ref{zm}).
This result agrees with the analysis of \cite{saul, kall},
in the case of normal flux and
provided the warp factor is trivial. This can be consistently taken
to be the case in the large-volume limit, as explained in
section \ref{mtheoryonfourfolds}.
In section \ref{instantoncontributions} we finally come to the main subject of the paper,
the instanton contributions
to the superpotential.
Section \ref{gravitinokkreduction} discusses the Kaluza-Klein Ansatz for the
gravitino, equation (\ref{kkgr}).
Next, the Kaluza-Klein ans\"{a}tze
for the gravitino as well as for the fermion zeromodes are substituted into
the expression (\ref{grv}) for the gravitino vertex operator.
The result of the fermion zeromode integration in the case of two zeromodes
is briefly discussed in section \ref{ofrzm}.
In section \ref{frzm} it is shown that in the case
of four fermion zeromodes
the result of the zeromode integration is zero. I.e. in this case the
instanton contribution to the superpotential vanishes.
The appendices contain several useful technical details. For quick reference,
we have also included an index of our conventions and notation in
section \ref{notation/conventions}.
\section{Theta-expansions}
\label{thetaexpansions}
This section examines the theta-expansions of the various eleven-dimensional superfields. Except
for the expansion of the sixform which is given in section \ref{thetaexpansions1}, these
were treated in reference \cite{t} to which the reader is referred for
further details.
For reasons which are explained below (\ref{grv}),
for our purposes we will not need the explicit form of the $\Psi^2$ contact terms.
It also suffices to keep terms up to and including order $\theta^3$. Also note that
we are using standard superembedding notation, whereby target-space indices are underlined.
Further explanation of the notation can be found in appendix \ref{notation/conventions}.
\subsection{Vielbein and threeform }
Using the formul{\ae} in \cite{t}, to which the interested reader is referred
for further details, we find
\begin{alignat}{2}
E_{m}{}^{\underline{a}}&=e_{m}{}^{\underline{a}}
-\frac{i}{2}({\cal D}_m\theta\C^{\underline{a}}\theta)
+\frac{1}{24}({\cal D}_m\theta\,\mathfrak{G}\C^{\underline{a}}\theta)
+\frac{1}{24}(\theta{\cal R}_{\underline{n}\underline{p}}{\cal I}_m{}^{\underline{n}\underline{p}}\C^{\underline{a}}\theta)\nonumber\\
&-i(\Psi_m\C^{\underline{a}}\theta)+\frac{1}{6}(\Psi_m\mathfrak{G}\C^{\underline{a}}\theta)
+\frac{1}{6}(\Psi_{\underline{n}\underline{p}}{\cal I}_m{}^{\underline{n}\underline{p}}\C^{\underline{a}}\theta)
+{\cal O}(\Psi^2, \theta^5)~,
\label{v}
\end{alignat}
\vfill\break
where
\begin{alignat}{2}
(\mathfrak{G})_{\underline{\alpha}}{}^{\underline{\beta}}&:=\frac{1}{576}\Big\{
(\theta\C^{\underline{a}\underline{b}\underline{c}\underline{d}\underline{e}\underline{f}})_{\underline{\alpha}}(\theta\C_{\underline{e}\underline{f}})^{\underline{\beta}}
-2(\theta\C_{\underline{e}})_{\underline{\alpha}}(\theta\C^{\underline{a}\underline{b}\underline{c}\underline{d}\underline{e}})^{\underline{\beta}}
-16(\theta\C^{\underline{a}})_{\underline{\alpha}}(\theta\C^{\underline{b}\underline{c}\underline{d}})^{\underline{\beta}}\nonumber\\
&+24(\theta\C^{\underline{a}\underline{b}})_{\underline{\alpha}}(\theta\C^{\underline{c}\underline{d}})^{\underline{\beta}}
\Big\}G_{\underline{a}\underline{b}\underline{c}\underline{d}}~,
\label{fgdef}
\end{alignat}
\begin{alignat}{2}
({\cal I}_{m}{}^{\underline{e}\underline{f}})_{\underline{\alpha}}{}^{\underline{\beta}}&:=-\frac{1}{48}\Big\{
(\theta\C_{\underline{a}\underline{b}})_{\underline{\alpha}}(\theta\C_m{}^{\underline{a}\underline{b}\underline{e}\underline{f}})^{\underline{\beta}}
+4(\theta\C_{m\underline{a}})_{\underline{\alpha}}(\theta\C^{\underline{a}\underline{e}\underline{f}})^{\underline{\beta}}
-4(\theta\C_{\underline{a}\underline{b}})_{\underline{\alpha}}(\theta\C^{\underline{a}\underline{b}\underline{e}})^{\underline{\beta}}e_m{}^{\underline{f}}\nonumber\\
&+6(\theta\C_{m})_{\underline{\alpha}}(\theta\C^{\underline{e}\underline{f}})^{\underline{\beta}}
-12(\theta\C_{\underline{a}})_{\underline{\alpha}}(\theta\C^{\underline{a}\underline{e}})^{\underline{\beta}}e_m{}^{\underline{f}}
\Big\}
~.
\end{alignat}
Using (\ref{v}) we find for the Green-Schwarz metric
\begin{alignat}{2}
g_{mn}&=G_{mn}-\frac{1}{4}({\cal D}_m\theta\C^{\underline a}\theta)({\cal D}_n\theta\C_{\underline a}\theta)
-i({\cal D}_{(m}\theta\C_{n)}\theta)+\frac{1}{12}({\cal D}_{(m}\theta\mathfrak{G}\C_{n)}\theta)\nonumber\\
&+\frac{1}{12}(\theta{\cal R}_{\underline{\phantom{a}}\!\!\! p\underline{\phantom{a}}\!\!\! q}{\cal I}_{(m}{}^{\underline{\phantom{a}}\!\!\! p\underline{\phantom{a}}\!\!\! q}\C_{n)}\theta) -2i(\Psi_{(m}\C_{n)}\theta)
+\frac{1}{3}(\Psi_{(m}\mathfrak{G}\C_{n)}\theta) \nonumber\\
&+\frac{1}{3}(\Psi_{\underline{\phantom{a}}\!\!\! p\underline{\phantom{a}}\!\!\! q}{\cal I}_{(m}{}^{\underline{\phantom{a}}\!\!\! p\underline{\phantom{a}}\!\!\! q}\C_{n)}\theta)
-(\Psi_{(m}\C^{\underline a}\theta)({\cal D}_{n)}\theta\C_{\underline a}\theta)+{\cal O}(\Psi^2,\theta^5)
~.
\end{alignat}
Similarly, for the pull-back of the three-form we find
\begin{alignat}{2}
C_{mnp}&=
c_{mnp}-\frac{3i}{2}({\cal D}_{[m}\theta\C_{np]}\theta)+\frac{1}{8}({\cal D}_{[m}\theta\mathfrak{G}\C_{np]}\theta)
+\frac{1}{8}(\theta{\cal R}_{\underline{\phantom{a}}\!\!\! p\underline{\phantom{a}}\!\!\! q}{\cal I}_{[m}{}^{\underline{\phantom{a}}\!\!\! p\underline{\phantom{a}}\!\!\! q}\C_{np]}\theta)
\nonumber\\
&-\frac{3}{4}({\cal D}_{[m}\theta\C_{n}{}^{\underline a}\theta)({\cal D}_{p]}\theta\C_{\underline a}\theta)
-3i(\Psi_{[m}\C_{np]}\theta)-(\Psi_{[m}\C_{n}{}^{\underline a}\theta)({\cal D}_{p]}\theta\C_{\underline a}\theta )\nonumber\\
&-2(\Psi_{[m}\C^{\underline a}\theta)({\cal D}_{n}\theta\C_{p]}{}_{\underline a}\theta)
+\frac{1}{2}(\Psi_{[m}\mathfrak{G}\C_{np]}\theta)+\frac{1}{2}(\Psi_{\underline n\underline{\phantom{a}}\!\!\! q}{\cal I}_{[m}{}^{\underline n\underline{\phantom{a}}\!\!\! q}\C_{np]}\theta)
+{\cal O}(\Psi^2,\theta^5)~.
\end{alignat}
\subsection{Sixform}
\label{thetaexpansions1}
The $\theta$-expansion for $C_6$ was not given in \cite{t}, but the same methods
can be applied in this case. First we note that the $C_6$-field satisfies
\begin{align}
7\partial_{[\unM_1}C_{\unM_2\dots \unM_7\}}=G_{\unM_1\dots \unM_7}.
\label{bianchi}
\end{align}
Up to a gauge choice,
the following is a solution of
the Bianchi identity (\ref{bianchi}) at each order
in the $\theta$ expansion:
\begin{alignat}{2}
C^{(0)}_{\underline {\phantom{\alpha}}\!\!\!\mu_1\dots\underline {\phantom{\alpha}}\!\!\!\mu_6}=
C^{(0)}_{\underline {\phantom{\alpha}}\!\!\!\mu_1\dots\underline {\phantom{\alpha}}\!\!\!\mu_5 \underline m_1}&=\dots
C^{(0)}_{\underline {\phantom{\alpha}}\!\!\!\mu_1 \underline m_1\dots \underline m_5}=0~,\nonumber\\
7\partial_{[\underline m_1}C^{(0)}_{\underline m_2\dots \underline m_7]}&=G^{(0)}_{\underline m_1\dots \underline m_7}
\label{cexpa}
\end{alignat}
\vfill\break
and
\begin{alignat}{2}
C^{(n+1)}_{\underline {\phantom{\alpha}}\!\!\!\mu_1\dots\underline {\phantom{\alpha}}\!\!\!\mu_6}&=\frac{1}{n+7}~\theta^{\underline{\lambda}} G^{(n)}_{{\underline{\lambda}}\underline {\phantom{\alpha}}\!\!\!\mu_1\dots\underline {\phantom{\alpha}}\!\!\!\mu_6}\nonumber\\
C^{(n+1)}_{\underline {\phantom{\alpha}}\!\!\!\mu_1\dots\underline {\phantom{\alpha}}\!\!\!\mu_5 \underline m_1}&=\frac{1}{n+6}~\theta^{\underline{\lambda}} G^{(n)}_{{\underline{\lambda}}\underline {\phantom{\alpha}}\!\!\!\mu_1\dots\underline {\phantom{\alpha}}\!\!\!\mu_5 \underline m_1}\nonumber\\
C^{(n+1)}_{\underline {\phantom{\alpha}}\!\!\!\mu_1\dots\underline {\phantom{\alpha}}\!\!\!\mu_4 \underline m_1\underline m_2}&=\frac{1}{n+5}~\theta^{\underline{\lambda}} G^{(n)}_{{\underline{\lambda}}\underline {\phantom{\alpha}}\!\!\!\mu_1\dots\underline {\phantom{\alpha}}\!\!\!\mu_4 \underline m_1\underline m_2}\nonumber\\
C^{(n+1)}_{\underline {\phantom{\alpha}}\!\!\!\mu_1\underline {\phantom{\alpha}}\!\!\!\mu_2\underline {\phantom{\alpha}}\!\!\!\mu_3 \underline m_1\underline m_2 \underline m_3}&=\frac{1}{n+4}~\theta^{\underline{\lambda}} G^{(n)}_{{\underline{\lambda}}\underline {\phantom{\alpha}}\!\!\!\mu_1\underline {\phantom{\alpha}}\!\!\!\mu_2\underline {\phantom{\alpha}}\!\!\!\mu_3 \underline m_1\underline m_2 \underline m_3}\nonumber\\
C^{(n+1)}_{\underline {\phantom{\alpha}}\!\!\!\mu_1\underline {\phantom{\alpha}}\!\!\!\mu_2 \underline m_1\dots \underline m_4}&=\frac{1}{n+3}~\theta^{\underline{\lambda}} G^{(n)}_{{\underline{\lambda}}\underline {\phantom{\alpha}}\!\!\!\mu_1\underline {\phantom{\alpha}}\!\!\!\mu_2 \underline m_1\dots \underline m_4}\nonumber\\
C^{(n+1)}_{\underline {\phantom{\alpha}}\!\!\!\mu \underline m_1\dots \underline m_5}&=\frac{1}{n+2}~\theta^{\underline{\lambda}} G^{(n)}_{{\underline{\lambda}}\underline {\phantom{\alpha}}\!\!\!\mu \underline m_1\dots \underline m_5}\nonumber\\
C^{(n+1)}_{\underline m_1\dots \underline m_6}&=\frac{1}{n+1}~\theta^{\underline{\lambda}} G^{(n)}_{{\underline{\lambda}} \underline m_1\dots \underline m_6}
~, ~~~~~n\geq 0~.
\label{cexp}
\end{alignat}
Using the fact that
\begin{equation}
G_{\underline a_1\dots \underline a_5{\underline{\alpha}}_1{\underline{\alpha}}_2}=-i(\C_{\underline a_1\dots \underline a_5})_{{\underline{\alpha}}_1{\underline{\alpha}}_2}
~,
\end{equation}
we find for the right-hand sides of the equations (\ref{cexp}),
\begin{alignat}{2}
\theta^{\underline{\lambda}} G_{{\underline{\lambda}}\underline {\phantom{\alpha}}\!\!\!\mu_1\dots\underline {\phantom{\alpha}}\!\!\!\mu_6}&= 6iE_{(\underline {\phantom{\alpha}}\!\!\!\mu_1}{}^{{\underline a}\def\unA{\underline A}_1} \dots E_{\underline {\phantom{\alpha}}\!\!\!\mu_5}{}^{{\underline a}\def\unA{\underline A}_5} E_{\underline {\phantom{\alpha}}\!\!\!\mu_6)}{}^{\underline{\alpha}}
(\C_{{\underline a}\def\unA{\underline A}_1\dots {\underline a}\def\unA{\underline A}_5}\theta)_{\underline{\alpha}} \nonumber\\
\theta^{\underline{\lambda}} G_{{\underline{\lambda}}\underline {\phantom{\alpha}}\!\!\!\mu_1\dots\underline {\phantom{\alpha}}\!\!\!\mu_5 \underline m}\def\unM{\underline M}&=
-5i E_m{}^{{\underline a}\def\unA{\underline A}_1} E_{(\underline {\phantom{\alpha}}\!\!\!\mu_1}{}^{{\underline a}\def\unA{\underline A}_2} \dots E_{\underline {\phantom{\alpha}}\!\!\!\mu_4}{}^{{\underline a}\def\unA{\underline A}_5} E_{\underline {\phantom{\alpha}}\!\!\!\mu_5)}{}^{\underline{\alpha}}
(\C_{{\underline a}\def\unA{\underline A}_1\dots {\underline a}\def\unA{\underline A}_5}\theta)_{\underline{\alpha}} \nonumber\\
&~~~+i E_{\underline {\phantom{\alpha}}\!\!\!\mu_1}{}^{{\underline a}\def\unA{\underline A}_1} \dots E_{\underline {\phantom{\alpha}}\!\!\!\mu_5}{}^{{\underline a}\def\unA{\underline A}_5} E_{\underline m}\def\unM{\underline M}{}^{\underline{\alpha}}
(\C_{{\underline a}\def\unA{\underline A}_1\dots {\underline a}\def\unA{\underline A}_5}\theta)_{\underline{\alpha}}
\nonumber\\
\theta^{\underline{\lambda}} G_{{\underline{\lambda}}\underline {\phantom{\alpha}}\!\!\!\mu_1\dots\underline {\phantom{\alpha}}\!\!\!\mu_4 \underline m}\def\unM{\underline M_1\underline m}\def\unM{\underline M_2}&=
4i E_{\underline m}\def\unM{\underline M_1}{}^{{\underline a}\def\unA{\underline A}_1} E_{\underline m}\def\unM{\underline M_2}{}^{{\underline a}\def\unA{\underline A}_2} E_{(\underline {\phantom{\alpha}}\!\!\!\mu_1}{}^{{\underline a}\def\unA{\underline A}_3} E_{\underline {\phantom{\alpha}}\!\!\!\mu_2}{}^{{\underline a}\def\unA{\underline A}_4} E_{\underline {\phantom{\alpha}}\!\!\!\mu_3}{}^{{\underline a}\def\unA{\underline A}_5} E_{\underline {\phantom{\alpha}}\!\!\!\mu_4)}{}^{\underline{\alpha}}
(\C_{{\underline a}\def\unA{\underline A}_1\dots {\underline a}\def\unA{\underline A}_5}\theta)_{\underline{\alpha}} \nonumber\\
&~~~+2i E_{\underline {\phantom{\alpha}}\!\!\!\mu_1}{}^{{\underline a}\def\unA{\underline A}_1} \dots E_{\underline {\phantom{\alpha}}\!\!\!\mu_4}{}^{{\underline a}\def\unA{\underline A}_4} E_{[\underline m}\def\unM{\underline M_1}{}^{{\underline a}\def\unA{\underline A}_5} E_{\underline m}\def\unM{\underline M_2]}{}^{\underline{\alpha}}
(\C_{{\underline a}\def\unA{\underline A}_1\dots {\underline a}\def\unA{\underline A}_5}\theta)_{\underline{\alpha}}
\nonumber\\
\theta^{\underline{\lambda}} G_{{\underline{\lambda}}\underline {\phantom{\alpha}}\!\!\!\mu_1\underline {\phantom{\alpha}}\!\!\!\mu_2\underline {\phantom{\alpha}}\!\!\!\mu_3 \underline m}\def\unM{\underline M_1\underline m}\def\unM{\underline M_2 \underline m}\def\unM{\underline M_3}&=
-3i E_{\underline m}\def\unM{\underline M_1}{}^{{\underline a}\def\unA{\underline A}_1} E_{\underline m}\def\unM{\underline M_2}{}^{{\underline a}\def\unA{\underline A}_2} E_{\underline m}\def\unM{\underline M_3}{}^{{\underline a}\def\unA{\underline A}_3} E_{(\underline {\phantom{\alpha}}\!\!\!\mu_1}{}^{{\underline a}\def\unA{\underline A}_4} E_{\underline {\phantom{\alpha}}\!\!\!\mu_2}{}^{{\underline a}\def\unA{\underline A}_5} E_{\underline {\phantom{\alpha}}\!\!\!\mu_3)}{}^{\underline{\alpha}}
(\C_{{\underline a}\def\unA{\underline A}_1\dots {\underline a}\def\unA{\underline A}_5}\theta)_{\underline{\alpha}} \nonumber\\
&~~~+3i E_{\underline {\phantom{\alpha}}\!\!\!\mu_1}{}^{{\underline a}\def\unA{\underline A}_1}E_{\underline {\phantom{\alpha}}\!\!\!\mu_2}{}^{{\underline a}\def\unA{\underline A}_2} E_{\underline {\phantom{\alpha}}\!\!\!\mu_3}{}^{{\underline a}\def\unA{\underline A}_3} E_{[\underline m}\def\unM{\underline M_1}{}^{{\underline a}\def\unA{\underline A}_4}E_{\underline m}\def\unM{\underline M_2}{}^{{\underline a}\def\unA{\underline A}_5} E_{\underline m}\def\unM{\underline M_3]}{}^{\underline{\alpha}}
(\C_{{\underline a}\def\unA{\underline A}_1\dots {\underline a}\def\unA{\underline A}_5}\theta)_{\underline{\alpha}}
\nonumber\\
\theta^{\underline{\lambda}} G_{{\underline{\lambda}}\underline {\phantom{\alpha}}\!\!\!\mu_1\underline {\phantom{\alpha}}\!\!\!\mu_2 \underline m}\def\unM{\underline M_1\dots \underline m}\def\unM{\underline M_4}&=
+2i E_{\underline m}\def\unM{\underline M_1}{}^{{\underline a}\def\unA{\underline A}_1} \dots E_{\underline m}\def\unM{\underline M_4}{}^{{\underline a}\def\unA{\underline A}_4} E_{(\underline {\phantom{\alpha}}\!\!\!\mu_1}{}^{{\underline a}\def\unA{\underline A}_5} E_{\underline {\phantom{\alpha}}\!\!\!\mu_2)}{}^{\underline{\alpha}}
(\C_{{\underline a}\def\unA{\underline A}_1\dots {\underline a}\def\unA{\underline A}_5}\theta)_{\underline{\alpha}} \nonumber\\
&~~~+4i E_{\underline {\phantom{\alpha}}\!\!\!\mu_1}{}^{{\underline a}\def\unA{\underline A}_1}E_{\underline {\phantom{\alpha}}\!\!\!\mu_2}{}^{{\underline a}\def\unA{\underline A}_2} E_{[\underline m}\def\unM{\underline M_1}{}^{{\underline a}\def\unA{\underline A}_3}E_{\underline m}\def\unM{\underline M_2}{}^{{\underline a}\def\unA{\underline A}_4}E_{\underline m}\def\unM{\underline M_3}{}^{{\underline a}\def\unA{\underline A}_5} E_{\underline m}\def\unM{\underline M_4]}{}^{\underline{\alpha}}
(\C_{{\underline a}\def\unA{\underline A}_1\dots {\underline a}\def\unA{\underline A}_5}\theta)_{\underline{\alpha}}
\nonumber\\
\theta^{\underline{\lambda}} G_{{\underline{\lambda}}\underline {\phantom{\alpha}}\!\!\!\mu \underline m}\def\unM{\underline M_1\dots \underline m}\def\unM{\underline M_5}&=
-i E_{\underline m}\def\unM{\underline M_1}{}^{{\underline a}\def\unA{\underline A}_1} \dots E_{\underline m}\def\unM{\underline M_5}{}^{{\underline a}\def\unA{\underline A}_5} E_{\underline {\phantom{\alpha}}\!\!\!\mu}{}^{\underline{\alpha}}
(\C_{{\underline a}\def\unA{\underline A}_1\dots {\underline a}\def\unA{\underline A}_5}\theta)_{\underline{\alpha}} \nonumber\\
&~~~+5i E_{\underline {\phantom{\alpha}}\!\!\!\mu}{}^{{\underline a}\def\unA{\underline A}_1} E_{[\underline m}\def\unM{\underline M_1}{}^{{\underline a}\def\unA{\underline A}_2}\dots E_{\underline m}\def\unM{\underline M_4}{}^{{\underline a}\def\unA{\underline A}_5} E_{\underline m}\def\unM{\underline M_5]}{}^{\underline{\alpha}}
(\C_{{\underline a}\def\unA{\underline A}_1\dots {\underline a}\def\unA{\underline A}_5}\theta)_{\underline{\alpha}} \nonumber\\
\theta^{\underline{\lambda}} G_{{\underline{\lambda}} \underline m}\def\unM{\underline M_1\dots \underline m}\def\unM{\underline M_6}&= 6i E_{[\underline m}\def\unM{\underline M_1}{}^{{\underline a}\def\unA{\underline A}_1}\dots E_{\underline m}\def\unM{\underline M_5}{}^{{\underline a}\def\unA{\underline A}_5} E_{\underline m}\def\unM{\underline M_6]}{}^{\underline{\alpha}}
(\C_{{\underline a}\def\unA{\underline A}_1\dots {\underline a}\def\unA{\underline A}_5}\theta)_{\underline{\alpha}}
~.
\label{cexpr}
\end{alignat}
In the following we will only need the part $\Delta C_6$ of $C_6$ which is linear in
the gravitino. Plugging the expressions for the vielbein components
given in \cite{t} into (\ref{cexpr}) we obtain
\begin{alignat}{2}
\Delta C_{m_1\dots m_6}=
&-6i(\Psi_{[m_1}\C_{m_2\dots m_6]}\theta)
+10(\Psi_{[m_1}\C^{\underline a}\theta)({\cal D}_{m_2}\theta\C_{m_3\dots m_6]\underline a}\theta)\nonumber\\
&+(\Psi_{[m_1}\mathfrak{G}\C_{m_2\dots m_6]}\theta)
+(\Psi_{\underline{\phantom{a}}\!\!\! p\underline{\phantom{a}}\!\!\! q}{\cal I}_{[m_1}{}^{\underline{\phantom{a}}\!\!\! p\underline{\phantom{a}}\!\!\! q}\C_{m_2\dots m_6]}\theta)\nonumber\\
&-5(\Psi_{[m_1}\C_{m_2\dots m_5 \underline a}\theta)({\cal D}_{m_6]}\theta\C^{\underline a}\theta)
+{\cal O}(\Psi^2,\theta^5)~.
\label{dc6}
\end{alignat}
\section{Fivebrane action}
\label{pst}
We are now ready to consider the application of the theta-expansion
discussed in the previous section to the case of the fivebrane
worldvolume action.
As already mentioned in the introduction, we will adopt the covariant framework of \cite{pst} to which
the reader is referred for more details. The main result of this
section is the gravitino vertex operator, equation (\ref{grv}) below.
To improve the presentation, we have relegated
the details of the derivation to appendix \ref{pstapp}.
The fivebrane action is of the form
\begin{alignat}{2}
S=S_1+S_2+S_3~,
\end{alignat}
where
\begin{alignat}{2}
S_1&:=T_{M5}\int_{{\Sigma}} d^6x\sqrt{-det(g_{mn}+i\widetilde{H}_{mn} ) }\nonumber\\
S_2&:=T_{M5}\int_{{\Sigma}} d^6x\sqrt{-g}~ \frac{1}{4}{\widetilde{H}}_{mn} {H}^{mn}\nonumber\\
S_3&:=T_{M5}\int_{{\Sigma}} \Big(C_6+\frac{1}{2}F_3\wedge C_3 \Big)~
\label{action}
\end{alignat}
and $T_{M5}\sim l_P^{-6}$ is the fivebrane tension.
Moreover, we have made the following definitions
\begin{alignat}{2}
H_{mnp}&:=F_{mnp}-C_{mnp}\nonumber\\
H_{mn}&:=H_{mnp}v^p\nonumber\\
\widetilde{H}^{mn}&:=\frac{1}{6\sqrt{-g}}\epsilon^{mnpqrs}v_pH_{qrs}\nonumber\\
v_p&:=\frac{\partial_p a}{\sqrt{-g^{mn}\partial_ma\partial_n a}}~,
\end{alignat}
where $F_{mnp}$ is
the field-strength of the world-volume chiral two-form and $a$ is an auxiliary
world-volume scalar. It follows from the above definitions that
\begin{alignat}{2}
det(\delta_{m}{}^{n}+i\widetilde{H}_{m}{}^{n} )=
1+\frac{1}{2}tr\widetilde{H}^2+\frac{1}{8}(tr\widetilde{H}^2)^2-\frac{1}{4}tr\widetilde{H}^4~.
\end{alignat}
\subsection{The gravitino vertex operator}
\label{grvsec}
In the case of normal flux, {\em i.e.} when the world-volume two-form tensor is flat
($F_{mnp}=0$) and
the pull-back of the three-form potential onto the fivebrane vanishes
($c_{mnp}=0$), the expression for the gravitino
vertex operator simplifies considerably. Skipping the details
of the derivation, which can be found in appendix \ref{pstapp}, the
final result reads:
\begin{center}
\fbox{\parbox{14.5cm}{
\begin{alignat}{2}
V= T_{M5}\int_{{\Sigma}}d^6x\sqrt{-G}~
\Big\{
2(\Psi_m\C^m\theta)+i(\Psi_{\underline m} V^{(2)\underline m})
&+\frac{i}{3}(\Psi_m\mathfrak{G}\C^m\theta)
\nonumber\\
&+\frac{i}{3}(\Psi_{\underline{\phantom{a}}\!\!\! p\underline{\phantom{a}}\!\!\! q}{\cal I}_m{}^{\underline{\phantom{a}}\!\!\! p\underline{\phantom{a}}\!\!\! q}\C^m\theta)
+{\cal O}(\Psi^2, \theta^5)
\Big\}
~.\nonumber
\end{alignat}
}}
\end{center}
\begin{align}\label{grv}\end{align}
We can now see why the $\Psi^2$ contact-terms can be neglected. As is easy to verify,
$\Psi^2$ terms first appear in the $\theta$-expansion at order $\theta^4$.
Consequently, a single vertex-operator insertion $V_{\Psi^2}$ is needed
to saturate the four fermion zeromodes --which is the
case examined here. A single insertion, however, is proportional
to $T_{M5}$ and is of order ${\cal O}(l_P^6)$ relative to two
vertex-operator insertions: the latter give a contribution
proportional to $T_{M5}^2$. Clearly, this analysis is valid
provided the `radius' of the six-cycle is much larger than the Planck length,
${\rm Vol}_{\Sigma}\gg l^6_P$.
As was shown in the case of the M-theory membrane \cite{hklt} and is
also expected in the case of the fivebrane \cite{dk}, the first higher-order
correction to the world-volume action occurs at order $l_P^4$. Hence it would be
inconsistent to include contact terms without considering the higher-order
derivative corrections to the world-volume action. Moreover, at order
$l_P^6$ (eight derivatives) there are higher-order curvature corrections to the background supergravity
action\footnote{The eleven-dimensional supergravity
admits a supersymmetric deformation at order $l_P^3$ (five derivatives) \cite{ttt}. On a
topologically-nontrivial spacetime $M$ such that $p_1(M)\neq 0$,
this deformation can be removed by a $C$-field redefinition, at the cost of shifting the
quantization condition of the fourform field-strength.} which,
as was explained in \cite{t}, modify the $\theta$-expansion of all superfields.
\subsection{Quadratic fermion terms}
It follows from the preceding sections that
in a bosonic background ($\Psi_{\underline m}^{\underline{\alpha}}=0$) the part of the Lagrangian quadratic in $\theta$
(this is the analogue of equations (38), (39) of \cite{tt}) is given by
\begin{alignat}{2}
{\cal L}^{(quad)}&= \frac{i}{2}\sqrt{det(A_i{}^j)}(A^{-1})^{(mn)}(\theta\C_{m}{\cal D}_n\theta)\nonumber\\
&-\frac{\epsilon^{lpqrs}{}_m}{6\sqrt{-G}}\sqrt{det(A_i{}^j)}(A^{-1})^{[mn]}
(\theta\C_{(n}{\cal D}_{l)}\theta) a_p (F_{qrs}-c_{qrs})\nonumber\\
&-\frac{\epsilon^{klpqrs}}{24\sqrt{-G}}\sqrt{det(A_i{}^j)}(A^{-1})_{kl}
a_p \nonumber\\
&~~~~~~~~~~~~~~~~~\times\Big\{
(F_{qrs}-c_{qrs})\Big[ a^ma^n(\theta\C_m{\cal D}_n\theta) +(\theta\C^{m}{\cal D}_m\theta)
\Big] +3(\theta\C_{qr}{\cal D}_s\theta) \Big\} \nonumber\\
&-\frac{i\epsilon^{klpqrs}}{24\sqrt{-G}}
a_ka^m(F_{lpq}-c_{lpq})\nonumber\\
&~~~~~~~~~~~~~~~~~\times\Big\{(F_{rst}-c_{rst})
\Big[a^ta^n (\theta\C_n{\cal D}_m\theta)+\frac{1}{2}(\theta\C^t{\cal D}_m\theta) \Big]
+\frac{1}{2}(\theta\C_{rs}{\cal D}_m\theta)
\Big\} \nonumber\\
&-\frac{i\epsilon^{klpqrs}}{48\sqrt{-G}}
a_ka^n (F_{lpq}-c_{lpq}) (F_{rs}{}^{t}-c_{rs}{}^{t})
(\theta\C_n{\cal D}_t\theta)\nonumber\\
&-\frac{i\epsilon^{klpqrs}}{2\times 5!\sqrt{-G}}
\Big\{ 15 a^ta_k(F_{lpt}-c_{lpt})(\theta\C_{qr}{\cal D}_s\theta)
-10 a^ta_k(F_{lpq}-c_{lpq})(\theta\C_{rt}{\cal D}_s\theta)\nonumber\\
&~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- 5F_{klp}(\theta\C_{qr}{\cal D}_s\theta) -(\theta\C_{klpqr}{\cal D}_s\theta)
\Big\}
~.
\end{alignat}
Note that ${\cal L}^{(quad)}$ is related to $V_{~~~\underline{\alpha}}^{(1)\underline m}$ in a simple way.
\subsection*{Normal flux}
In this case the part of the Lagrangian quadratic in the fermions
simplifies to
\begin{alignat}{2}
{\cal L}^{(quad)}&= \frac{i}{2}\Big\{ (\theta\C^{m}{\cal D}_m\theta)
+\frac{\epsilon^{klpqrs}}{ 5!\sqrt{-G}}
(\theta\C_{klpqr}{\cal D}_s\theta) \Big\}
~.
\end{alignat}
After Wick-rotating we obtain
\begin{alignat}{2}
{\cal L}^{(quad)}&= -(\theta\C^{m}{\cal D}_m\theta)
~,
\label{ityu}
\end{alignat}
where we have taken (\ref{gammahodge}) into account, and
we have noted that after gauge-fixing the physical fermion modes satisfy $P^+\theta=\theta$.
\section{Supersymmetric cycles}
This section is devoted to the analysis of the conditions for a
supersymmetric six-cycle, and the derivation
of the worldvolume fermionic zeromodes in the presence of (normal) flux.
\subsection{M-theory on fourfolds}
\label{mtheoryonfourfolds}
We start by reviewing M-theory on
a Calabi-Yau fourfold with flux. Let the eleven-dimensional metric be of the form
\begin{align}
ds^2=\Delta^{-1}ds_3^2+\Delta^{1/2}ds^2_8~,
\end{align}
where $ds_3^2$ is the metric of three-dimensional Minkowski space,
$\Delta$ is a warp factor, and
$ds^2_8$
is the metric on $X$. Let us also decompose the eleven-dimensional Majorana-Weyl supersymmetry
parameter
$\eta$ in terms of a real anticommuting spinor $\epsilon$ along the three-dimensional
Minkowski space, and a real chiral spinor $\xi$ on $X$:
\begin{align}
\eta=\Delta^{-1/4}\epsilon\otimes\xi~.
\label{ansa}
\end{align}
As was first shown in \cite{bb}, the requirement of
${\cal N}=1$ supersymmetry in three dimensions (two real supercharges) leads
to the condition
\begin{align}
\nabla_m\xi=0~,
\end{align}
i.e. the `internal' spinor is covariantly constant with respect to the connection associated
with the metric $g_{mn}$ on $X$. Under the Ansatz (\ref{ansa}), requiring
${\cal N}=2$ supersymmetry in three dimensions implies the existence of two
real covariantly-constant spinors $\xi_{1,2}$ of the same chirality.
It follows that $X$ is a Calabi-Yau four-fold. In the following
we shall combine $\xi_{1,2}$
into a complex chiral spinor on $X$, $\xi:=\xi_1+i\xi_2$. An antiholomorphic
$(0,4)$ fourform $\Omega$ and a complex structure $J$ on $X$ can be constructed as
bilinears of $\xi$, as is discussed in detail in appendix \ref{sus}.
Moreover,
supersymmetry imposes
the following conditions on the components of the fourform field-strength:
\begin{align}
G={\rm Vol}_3\wedge d\Delta^{-3/2}+F~,
\label{gform}
\end{align}
where $F$ is a fourform on $X$ which is purely $(2,2)$ and traceless,
$J\lrcorner F=0$, with respect to the
complex structure $J$ on $X$. We have
denoted by ${\rm Vol}_3$ the volume element of the three-dimensional
Minkowski space.
Finally, the warp factor is constrained by the Bianchi identities to satisfy
\begin{align}
d\star d~{\rm log}\Delta=\frac{1}{3}F\wedge F-\frac{2}{3}(2\pi)^4\beta X_8~,
\label{x8}
\end{align}
where $\beta$ is a constant of
order $l_P^6$, and the Hodge star is with respect to the metric on $X$.
The second term on the right-hand side of the equation above is a higher-order correction
related to the fivebrane anomaly. In general there will be other corrections of the same order
which should also be taken into account. However, it can be argued that
in the large-radius approximation it is consistent
to only take the above correction into account (see \cite{pvw}, for example).
In the large-volume limit $g^{CY}=tg_{0}^{CY}+\dots$, $t\rightarrow\infty$, the two terms on the
right-hand side of (\ref{x8}) scale like $t^{-3}$ relative to the left-hand side and can be
neglected. It is therefore consistent to take the warp factor to be trivial, $\Delta=1$ \cite{beck}.
We will henceforth assume this to be the case. In particular, it follows from (\ref{gform}) that
the fourform's only nonzero components are along the Calabi-Yau fourfold.
Note that the integrated version
of equation (\ref{x8}),
\begin{align}
\int_X F\wedge F+\frac{\beta}{12} ~\chi(X)=0~,
\end{align}
is the tadpole cancellation condition.
Finally, note that the normal flux condition, together with the constraints of supersymmetry
on the fourform flux explained in section
\ref{mtheoryonfourfolds}, imply that $F$ is of the form
\begin{align}
F_{mnpq}=4\widetilde{F}_{[mnp}K_{q]}+4\widetilde{F}^*_{[mnp}K^*_{q]}~,
\label{fffn}
\end{align}
where $\widetilde{F}$ obeys
\begin{align}
{J}\lrcorner \widetilde{F}=0; ~~~~~
\iota_K\widetilde{F}=\iota_{K^*}\widetilde{F}=0~
\label{nfff}
\end{align}
and $K$ is a complex vector field normal to the
fivebrane worldvolume, see eq. (\ref{see}) below.
The above results can be extended to include more general fluxes
\cite{ms, teight}. In this case the internal
manifold generally ceases to be Calabi-Yau.
\subsection{Supersymmetric cycles}
\label{supersymmetriccycles}
Consider a bosonic superembedding of the fivebrane ($X^{\underline m}(\sigma), ~\theta^{\underline {\phantom{\alpha}}\!\!\!\mu}(\sigma)=0)$
in a bosonic background $(\Psi_{\underline m}{}^{\underline{\alpha}}=0)$, where $\sigma^m$ is
the coordinate on the fivebrane worldvolume.
The fivebrane action is invariant under superdiffeomorphisms
\begin{alignat}{2}
\delta_\zeta Z^{\unM}=\zeta^{\unA} E_{\unA}{}^{\unM}
\label{a}
\end{alignat}
such that
\begin{alignat}{2}
{\cal L}_{\zeta}E_{\unM}{}^{\unA}= -(\partial_{\unM}+\Omega_{\unM\unB}{}^{\unA})\zeta^{\unB}
-\zeta^{\unB}T_{\unB\unM}{}^{\unA} =0~.
\label{ui}
\end{alignat}
This can be seen by first noting that
\begin{alignat}{2}
{\cal L}_{\zeta}C_3=d(\iota_\zeta C_3)+\iota_\zeta G_4~.
\end{alignat}
The first term on the right-hand side pulls back to a total derivative on the fivebrane worldvolume, which
can be compensated by a gauge transformation.
The pull-back of the second term on the right-hand side vanishes
for a bosonic background at $\theta=0$, as can be seen from
(\ref{q}) below and by taking into
account that the only nonzero components of $G_4$ are $G_{\underline a\underline b\underline{\alpha}\underline{\phantom{\alpha}}\!\!\!\beta}$
and $G_{\underline a\underline b\underline c\underline d}$. Similarly, the WZ term transforms under (\ref{a}) as
\begin{alignat}{2}
\int_{W_6} {\cal L}_{\zeta}(C_6+\frac{1}{2} F_3\wedge C_3)=
\int_{W_6} {\iota}_{\zeta}(G_7+\frac{1}{2} H_3\wedge G_4)~,
\end{alignat}
where we have dropped a total derivative from the
integrand. Again, this vanishes
for a bosonic background at $\theta=0$. Finally, the Green-Schwarz
metric is manifestly invariant under (\ref{a}, \ref{ui}).
Condition (\ref{ui}) can be solved for $\zeta$, order by order in a $\theta$-expansion.
By taking the torsion constraints into account, it can be shown that
\begin{alignat}{2}
\zeta^{\underline{\alpha}}&=\eta^{\underline{\alpha}}(X)+{\cal O}(\theta^2)\nonumber\\
\zeta^{\underline a}&=i(\eta\C^{\underline a}\theta)+{\cal O}(\theta^3)~,
\label{q}
\end{alignat}
where $\eta^{\underline{\alpha}}$ is a Killing spinor,
\begin{alignat}{2}
{\cal D}_{\underline m}\eta^{\underline{\alpha}}(X)=0~.
\label{m}
\end{alignat}
Transformation (\ref{a}) corresponds to a zero mode
iff it can be compensated by a $\kappa$-transformation, i.e. iff there
exists $\kappa^{\underline{\alpha}}(\sigma)$ such that
\begin{alignat}{2}
\eta^{\underline{\alpha}}(X(\sigma))+\kappa^{\underline{\alpha}}(\sigma)=0~.
\label{p}
\end{alignat}
On the other hand $\kappa$ satisfies
$\kappa^{\underline{\phantom{\alpha}}\!\!\!\beta}\bar{\C}_{\underline{\phantom{\alpha}}\!\!\!\beta}{}^{\underline{\alpha}}=\kappa^{\underline{\alpha}}$,
where
\begin{alignat}{2}
\bar{\C}(\sigma):= \frac{1}{\sqrt{\det(\delta_r{}^s+i\widetilde{H}_r{}^s)}}
\Big\{
\frac{1}{6!}\frac{\epsilon^{m_1\dots m_6}}{\sqrt{-g}}\C_{m_1\dots m_6}
&+\frac{i}{2}\C_{mnp}\widetilde{H}^{mn}v^p\nonumber\\
-\frac{1}{16}\frac{\epsilon^{m_1\dots m_6}}{\sqrt{-g}}
\widetilde{H}_{m_1m_2}\widetilde{H}_{m_3m_4}
\C_{m_5m_6}
\Big\}~,
\label{kproj}
\end{alignat}
so that $\bar{\C}^2=1$.
Hence (\ref{p}) is equivalent to
\begin{alignat}{2}
\eta^{\underline{\phantom{\alpha}}\!\!\!\beta}(X(\sigma))(1-\bar{\C}(\sigma))_{\underline{\phantom{\alpha}}\!\!\!\beta}{}^{\underline{\alpha}}=0~,
\label{k}
\end{alignat}
with $\bar{\C}(\sigma)$ evaluated for the bosonic fivebrane superembedding
in the bosonic background.
To summarize: the `global' zero modes are given by
\begin{alignat}{2}
\theta^{\underline{\alpha}}(\sigma)=\eta^{\underline{\alpha}}(X(\sigma))~,
\end{alignat}
where $\eta$ satisfies (\ref{m}), (\ref{k}). Consequently, $\theta^{\underline{\alpha}}$ is
annihilated by ${\cal D}_m=\partial_mX^{\underline m}{\cal D}_{\underline m}$ and hence obeys the
Dirac equation on the fivebrane:
\begin{align}
\C^m{\cal D}_m\theta=0~,
\label{dirac}
\end{align}
which follows from the quadratic part of the fivebrane action
(\ref{ityu}).
I.e. `global'
zero modes give rise to zero modes on the fivebrane. The converse is not generally true.
\subsection*{Supersymmetric cycles in the case of normal flux}
For a large six-cycle ${\Sigma}$, $X$ can be approximated by the total space
of the normal bundle of ${\Sigma}$ in $X$ as in \cite{w}.
Equivalently, ${\Sigma}$ can be specified
by a complex vector field $K$ on $X$ such that
\begin{align}
ds^2(X)
=G_{mn}d\sigma^{m}\otimes d\sigma^{n}+K\otimes K^* ~,
\label{see}
\end{align}
where $G_{mn}(\sigma)$ is the metric of
${\Sigma}$, and $K^mG_{mn}=0$. We shall normalize
$K$ as in appendix \ref{sus}, $|K|^2=2$,
in which case the determinants of the metrics on $X$, ${\Sigma}$
are equal.
The kappa-symmetry
projector simplifies considerably in the case of normal flux. Passing
to the static gauge and Wick-rotating, condition (\ref{k})
can be seen to be equivalent to
\begin{align}
\Big(1-\frac{ K^mK^{*n}\epsilon_{mn}{}^{m_1\dots m_6}}{2\times 6!\sqrt{G}}\C_{m_1\dots m_6}\Big)\xi=0
~.
\label{iy}
\end{align}
Furthermore,
using the formul{\ae} in the appendix,
equation (\ref{iy}) can be rewritten as
\begin{align}
P^+\xi=\xi; ~~~~~
P^+:=\frac{1}{2}\Big(1+\frac{1}{2}
K^mK^{*n}\Gamma_{mn}\Gamma_9
\Big)
~.
\label{iyu}
\end{align}
The normal vector
$K$ is not a priori holomorphic with respect to the
complex structure of $X$. However, it is straightforward to
see from (\ref{iyu}) that
\begin{align}
J_m{}^nK_n=-iK_m~.
\end{align}
It follows that in the case of normal flux, supersymmetric cycles are antiholomorphic
cycles.
\subsection{Zero modes}
\label{zeromodes}
We are now ready to come to the analysis of the fermionic zeromodes
on the worldvolume of the fivebrane. The main result of this
section is given in (\ref{zmeqs}) below. In the process we make contact
with the earlier results of \cite{saul, kall}. The form of the Dirac operator in the
linear approximation was derived in \cite{dira}.
A note on notation: in the remainder of the paper, lower-case Latin letters
from the middle of the alphabet ($m,n,\dots$) denote
indices along $X$ (as opposed to indices along
the fivebrane worldvolume).
\subsection*{Spinors-forms correspondence on $X$}
Using formul{\ae} (\ref{fierzsu}) in appendix \ref{sus}
we can see that any chiral spinor $\lambda_+$ on $X$ can be expanded as
\begin{align}
\lambda_+=\Phi^{(0,0)}\xi+\Phi^{(2,0)}_{mn}\gamma^{mn}\xi+\Phi^{(4,0)}_{mnpq}\gamma^{mnpq}\xi ~,
\end{align}
where $\Phi^{(p,0)}$ is a $(p,0)$-form with respect to the
complex structure $J$. I.e. $\Phi^{(2,0)}$ is
in the $\bf{6}$ of $SU(4)$ and $\Phi^{(4,0)}$
is a singlet.
Similarly in the case of an antichiral spinor $\lambda_-$ we can expand
\begin{align}
\lambda_-=\Phi^{(1,0)}_{m}\gamma^{m}\xi+\Phi^{(3,0)}_{mnp}\gamma^{mnp}\xi ~,
\end{align}
where $\Phi^{(1,0)}$ is
in the $\bf{4}$ of $SU(4)$ and $\Phi^{(3,0)}$
is in the $\bar{\bf{4}}$. More succinctly, the equations above
are nothing but the equivalence
\begin{align}
S_+&\cong \Lambda^{({\rm even}, 0)}\nonumber\\
S_-&\cong \Lambda^{({\rm odd}, 0)}~,
\end{align}
which can be shown to hold in the case of a Calabi-Yau manifold.
\subsection*{Spinors-forms correspondence on the fivebrane}
We will now assume that the fivebrane wraps a supersymmetric cycle, as
described above.
Ignoring the three flat directions for simplicity,
after gauge-fixing the kappa-symmetry
the fermions on the worldvolume of the fivebrane transform as sections
of the tensor product
\begin{align}
S_+\otimes (S_+(N)\oplus S_-(N)) &\cong
\Lambda^{(0,0)}\oplus\Lambda^{(2,0)}\oplus K \oplus(K\otimes\Lambda^{(2,0)}) \nonumber\\
&\cong \Lambda^{(0,0)}\oplus \Lambda^{(2,0)} \oplus\Lambda^{(0,1)}\oplus\Lambda^{(0,3)}~,
\label{kloi}
\end{align}
where $S_{\pm}(N)$ are the positive-, negative-chirality spin bundles associated
to the normal bundle $N$ of ${\Sigma}$ in $X$,
$\Lambda^{(p,0)}$ is the bundle of
$(p,0)$-forms on ${\Sigma}$,
and $K$ is the canonical bundle of ${\Sigma}$. The first equivalence above
can be shown by taking the adjunction formula into account, and the triviality
of the canonical bundle of $X$. The second equivalence is proven by
noting that $K\otimes\Lambda^{(3-p,0)}\cong \Lambda^{(0,p)}$,
as can be seen by contracting with the
antiholomorphic $(0,4)$-form on $X$.
More explicitly, after gauge-fixing the kappa-symmetry, the physical
fermion $\theta$ on the world-volume ${\Sigma}$ can be expanded as
\begin{align}
\theta=\epsilon\otimes
P^+\sum_{p=0}^{4}\Phi^{(p,0)}_{i_1\dots i_p}\gamma^{i_1\dots i_p}\xi~,
\label{ty}
\end{align}
where $\Phi^{(p,0)}\in \Lambda^{(p,0)}$ and $\epsilon$ is a two-component
spinor in the noncompact directions. Expanding
\begin{align}
\Phi^{(p,0)}=\widehat{\Phi}^{(p,0)}+\frac{1}{p}K^*\wedge\widehat{\Psi}^{(p-1,0)}~,
\end{align}
where $\iota_K\widehat{\Phi}$, $\iota_K\widehat{\Psi}=0$,
and substituting $P^+$,
(\ref{ty}) reads
\begin{align}
\theta=\epsilon\otimes \Big( \widehat{\Phi}^{(0,0)}+
\widehat{\Phi}^{(2,0)}_{ij}\gamma^{ij}
+\widehat{\Phi}^{(1,0)}_i\gamma^i
+\widehat{\Phi}_{ijk}^{(3,0)}\gamma^{ijk}
\Big)\xi
~,
\label{koo}
\end{align}
where we have set
\begin{align}
\widehat{\Phi}^{(1,0)}_i&:=\widehat{\Psi}^{(0,0)}K^*_{i}\nonumber\\
\widehat{\Phi}_{ijk}^{(3,0)}&:=\widehat{\Psi}_{[ij}^{(2,0)}K^*_{k]}~.
\label{leg}
\end{align}
Equation (\ref{koo}) above is the explicit form of (\ref{kloi}).
\subsection*{Zero modes}
The zero modes on the fivebrane satisfy the Dirac equation (\ref{dirac})
where, after gauge-fixing
$\theta$ has positive chirality along the fivebrane world-volume, $\theta=P^+\theta$.
Having explained the spinor-form correspondence, we would now like to rewrite
the Dirac equation in terms of forms on the fivebrane. First, it would be useful to note the
following relations:
\begin{align}
(\Pi^{\parallel})_m^rF_{rnpq}\gamma^m\gamma^{npq}\theta_-&=0\nonumber\\
(\Pi^{\parallel})_m^rF_{rnpq}\gamma^m\gamma^{npq}\theta_+&=\frac{3}{4}F_{mnpq}\gamma^{mnpq}\theta_+~,
\label{topi}
\end{align}
where $\theta_{\pm}$ denotes the chirality of $\theta$ along the normal directions,
and $\Pi^{\parallel}$ is the projector onto the fivebrane worldvolume defined
in appendix \ref{kixlh}.
Since $\theta$ has positive chirality along the fivebrane world-volume, we have
$\theta_{\pm}=\frac{1}{2}(1\pm\C_9)\theta$.
It further follows that
\begin{align}
{\cal D}_m\theta^{(p,0)}=\epsilon\otimes
\left\{
\begin{array}{ll}
\nabla_m\widehat{\Phi}\xi~, & ~~~~~p=0\\
\nabla_m\widehat{\Phi}_r\ga^r\xi-\frac{1}{4}\widehat{\Phi}^rF_{rstm}\ga^{st}\xi~, & ~~~~~p=1\\
\nabla_m\widehat{\Phi}_{rs}\ga^{rs}\xi-\frac{1}{6}\widehat{\Phi}^{rn}F_{rstm}\ga^{st}{}_n\xi~, & ~~~~~p=2\\
\nabla_m\widehat{\Phi}_{rst}\ga^{rst}\xi-\frac{3}{4}\widehat{\Phi}^{rnp}F_{rstm}\ga^{st}{}_{np}\xi~, & ~~~~~p=3
\end{array}\right.
~,
\label{koptz}
\end{align}
where we have denoted $\theta^{(p,0)}:=\epsilon\otimes
\widehat{\Phi}^{(p,0)}_{i_1\dots i_p}\ga^{i_1\dots i_p}\xi$.
Plugging (\ref{koptz})
into (\ref{dirac}), we obtain
\begin{center}
\fbox{\parbox{11cm}{
\begin{align}
0&=\Big\{(\nabla^{\parallel})_m\widehat{\Phi}
+4(\nabla^{\parallel})^{ p}\widehat{\Phi}_{pm}\Big\}\gamma^m\xi\nonumber\\
0&=\Big\{(\nabla^{\parallel})_{m}\widehat{\Phi}_{n}+6(\nabla^{\parallel})^{ p}\widehat{\Phi}_{pmn}
-\frac{1}{2}F_{mn}{}^{pq}\widehat{\Phi}_{pq}\Big\}\Omega^{mnrs}\gamma_{rs}\xi^*\nonumber\\
0&=\Big\{(\nabla^{\parallel})_{m}\widehat{\Phi}_{np}\Big\}\Omega^{mnpq}\gamma_q\xi^*\nonumber\\
0&=\Big\{(\nabla^{\parallel})_{m}\widehat{\Phi}_{npq}\Big\}\Omega^{mnpq}
~,\nonumber
\end{align}
}}
\end{center}
\begin{align}\label{zmeqs}\end{align}
where $(\nabla^{\parallel})_m:= (\Pi^{\parallel})_m^n\nabla_n$, is the covariant derivative
projected along the fivebrane.
Passing to complex coordinates, the above can be seen to be equivalent to
equations (3.6-3.9) of \cite{saul}, or (3.10-3.13) of \cite{kall}.
Following the analysis of \cite{kall}, the space of solutions to
the above system of equations is spanned by harmonic forms\footnote{\label{foot}
The forms $\widehat{\Phi}_{I_p}^{(p,0)}$, $p=1,3$, have a leg in the normal bundle,
see definition (\ref{leg}). More precisely: they are in $H^{0}(\Sigma, K\otimes\Omega^{3-p})$, $p=1,3$.
Out of these, we can construct
harmonic forms in $H^{0,p}(\Sigma)\cong H^p(\Sigma, {\cal O})$, by contracting with the antiholomorphic
fourform on $X$.
This is just the statement of Serre duality.
}
$\{\widehat{\Phi}_{I_{p}}^{(p,0)}; ~p=0\dots 3\}$,
where in addition the $\widehat{\Phi}^{(2,0)}$s satisfy the constraint
\begin{align}
\mathcal{H}\Big\{F_{mnpq}\widehat{\Phi}^{np}(\Pi^{\parallel})_r^qdx^r\Big\}=0
\label{hcon}
\end{align}
and we have denoted by $\mathcal{H}$ the projector onto the space of harmonic forms.
The corresponding
fermion zero modes are of the form
\begin{align}
\theta= \sum_{p=0}^3\sum_{I_p}
\epsilon^{I_{p}}\otimes X_{I_p}\xi ~,
\label{zm}
\end{align}
where (no summation over $p$)
\begin{align}
X_{I_p}=\left\{ \begin{array}{ll}
\widehat{\Phi}_{I_{p}}^{(p,0)}\gamma_{(p)}, & ~~~p\neq2\\
\widehat{\Phi}_{I_{2}}^{(2,0)}\gamma_{(2)}+\delta\widehat{\Phi}^{(1,0)}_{I_{2}}\gamma_{(1)}
+\delta\widehat{\Phi}^{(3,0)}_{I_{2}}\gamma_{(3)} , & ~~~p=2
\end{array}\right.
;~~~
I_p=\left\{ \begin{array}{ll}
1,\dots, h^{p,0}({\Sigma}), & ~~~p\neq2\\
1,\dots,n, & ~~~p=2
\end{array}\right.~,
\end{align}
the $\widehat{\Phi}_{I_{p}}^{(p,0)}$s are harmonic
and $\{\delta\widehat{\Phi}^{(1,0)}_{I_{2}}$, $\delta\widehat{\Phi}^{(3,0)}_{I_{2}}\}$ is
a special solution of the inhomogeneous equation
\begin{align}
(\nabla^{\parallel})^+_{[m}\widehat{\Phi}_{n]}+6(\nabla^{\parallel})^{ p}\widehat{\Phi}_{pmn}
=\frac{1}{2}F_{mn}{}^{pq}\widehat{\Phi}_{I_2,pq}
~.
\label{opuio}
\end{align}
In the above, $n$ is the number of harmonic (2,0) forms on ${\Sigma}$
which in addition satisfy the constraint (\ref{hcon});
the $\epsilon^{I_{p}}$s are spinors in the $\bf{2}$ of $Spin(3)$ (after
Wick-rotating to Euclidean signature).
Note that (\ref{opuio}) implies condition (\ref{hcon}).
The authors of \cite{kall}
define a flux-dependent generalization of the arithmetic genus:
\begin{align}
\chi_F:=h^{0,0}-h^{1,0}+n-h^{3,0}~.
\label{ketal}
\end{align}
\section{Instanton contributions}
\label{instantoncontributions}
We can now proceed to the computation of the instanton contributions to the coupling
(\ref{fbils}). The main result of the paper is arrived at in this section:
instantons with four fermionic zeromodes do not contribute to the superpotential.
\subsection{Gravitino Kaluza-Klein reduction}
\label{gravitinokkreduction}
Before proceeding to integrate over the fermion zeromodes,
we will need the Kaluza-Klein ansatz for the gravitino entering the
vertex operator $V$ in (\ref{grv}).
As already discussed in the introduction, only terms
which depend on the descendants of the linear multiplets contribute to the superpotential. Hence, the
relevant part of the Kaluza-Klein ansatz for the gravitino reads
\begin{align}
\left\{
\begin{array}{l}
\Psi_{\mu}=i(\omega_I\cdot J)~\gamma_{\mu}\chi^I\otimes\xi^* +{\rm c.c.} \\
\Psi_m = \chi^I\otimes
\omega_{I,mp}\ga^p\xi^*+{\rm c.c.}~; ~~~~~I=1,\dots b_2~,
\end{array}\right.
\label{kkgr}
\end{align}
where
the $\chi^{I}$s are complex spinors in the $\bf{2}$ of $Spin(3)$,
and $\omega_I\in H^{2}(X,\mathbb{R})$.
As is straightforward
to see, the eleven-dimensional gravitino equation, $\Gamma^M{\cal D}_{[M}\Psi_{N]}=0$,
is satisfied if $\chi^{I}$ is a massless three-dimensional fermion,
\begin{align}
\slsh\nabla\chi^I=0~,
\end{align}
provided
\begin{align}
\omega_I\lrcorner F=0~.
\label{34}
\end{align}
The implications of this condition were discussed extensively in the introduction.
In this picture, $\chi^{I}$ is massless if it corresponds to a zero eigenvalue
of the matrix $T_{IJ}$ (in a diagonal basis). Alternatively this can be seen as follows.
The quadratic part of the three-dimensional action for the $\chi^I$s
comes from the dimensional reduction of the quadratic-gravitino term
in the eleven-dimensional supergravity action
\begin{align}
\int{
d^{11}x\sqrt{g_{11}} \Psi_M\C^{MNP}{\cal D}_N\Psi_P
}~.
\end{align}
Plugging the Kaluza-Klein ansatz (\ref{kkgr}) in the action above, we obtain
\begin{align}
{\rm Vol}(X)\int{
d^{3}x\sqrt{g_{3}} \Big(
D_{IJ}\bar{\chi}^I\slsh\nabla\chi^J-\frac{4}{9}T_{IJ}\bar{\chi}^I\chi^J
\Big)}~,
\label{3daction}
\end{align}
where
\begin{align}
D_{IJ}&:= \int_X \Big(
\omega_I\wedge\star\omega_J+\frac{2}{3}~\omega_I\wedge\omega_J\wedge J\wedge J
\Big)
~
\label{irw}
\end{align}
and the Hodge star is with respect to the
metric of the Calabi-Yau fourfold. In the above we have made use of the identity
\begin{align}
\star(\omega_I\wedge\omega_J\wedge J\wedge J)
=\frac{1}{2}\Big\{(\omega_I\cdot J)(\omega_J\cdot J)-2(\omega_I\cdot\omega_J)\Big\}~,
\end{align}
which can be proven with the help of
(\ref{jids}). As advertised,
massless fermions correspond to zero eigenvalues of $T_{IJ}$.
We remark that in (\ref{3daction}) there is no coupling of the form
\begin{align}
{\rm Vol}(X)\int
d^{3}x\sqrt{g_{3}}\Big(
W_{IJ}\chi^I\chi^J
+{\rm c.c.}
\Big)~.
\label{poten}
\end{align}
In the following we will investigate whether such a term is
generated by instanton contributions. In the context
of three-dimensional supersymmetric field theory the fact that such
a term can indeed be generated by instanton effects, was demonstrated in
\cite{wittenold}.
\subsection{Two zeromodes}
\label{ofrzm}
Before coming to the subject of instantons with four fermionic zeromodes in the next subsection,
we will briefly comment on the case of instantons with two zeromodes (corresponding to the fivebrane wrapping rigid,
isolated cycles). As can be seen from (\ref{zm}),
there are always two zero modes corresponding to $p=0$:
\begin{align}
\theta= \epsilon\otimes\xi~.
\label{oipi}
\end{align}
These are the
zero modes which come from the supersymmetry of the Calabi-Yau background\footnote{
In three-dimensional nomenclature the supersymmetry of the
background is ${\cal N}=2$
(equivalently: ${\cal N}=1$ in four dimensions), i.e.
four real supercharges. The instanton breaks half the
supersymmetries, as can be seen from (\ref{iyu}).
Note that $\xi$ in (\ref{oipi}) is complex
and $\epsilon$ is a
spinor in the $\bf{2}$ of $Spin(3)$. Henceforth
we are complexifying our notation for $\theta$, $\Psi_m$, $V$.
At any rate, $\theta$ must be complexified in order to pass to
Euclidean signature.}.
We would like to compute the instanton contribution of these zeromodes to the superpotential.
First, we need to define the integration over fermion zeromodes:
\begin{align}
\int d^2\epsilon~\epsilon^{\alpha}\epsilon^{\beta}:=C^{\alpha\beta}~,
\label{zint}
\end{align}
where $C$ in the equation above is the charge-conjugation matrix in three dimensions.
It follows that
\begin{align}
\int d^2\epsilon~(\chi\epsilon)(\epsilon\psi)=(\chi\psi)~,
\end{align}
for any two three-dimensional spinors $\chi$, $\psi$
in the $\bf{2}$ of $Spin(3)$. To simplify the presentation,
we are using the notation $(\chi\psi):=(\chi^{Tr}C\psi)$.
Integrating over the zeromodes using the above prescription, we find that the instanton induces a
two-fermion coupling of the form
\begin{align}
\chi^I\chi^J\int [DZ'(\sigma)]v_Iv_{J}
e^{-S_{PST}[Z(\sigma); g,C,\Psi]}+{\rm c.c.} ~,
\label{2zm}
\end{align}
where
\begin{align}
v_I&:=2i\int_{{\Sigma}}J\wedge J \wedge\omega_I
\label{vdef}
\end{align}
and the path integration above does not include the zeromodes.
In (\ref{vdef}) all the forms should be understood as pulled-back to ${\Sigma}$.
In particular the pull-back of the
almost complex structure to ${\Sigma}$ can be identified with
$\widehat{J}$, which is discussed from the point of view of the
induced $SU(3)$ structure on ${\Sigma}$ in appendix \ref{kixlh}. Note that in the formula above
the primitive part of $\omega_I$ is projected out.
We are not going to elaborate on the one-loop determinants, as this lies outside the
main focus of this paper.
The result of the
integration over the bosonic coordinates should be obtainable using
techniques similar to \cite{hm}. The
integration over the fermionic variables is proportional to the
determinant of the flux-dependent
Dirac operator $\gamma^m{\cal D}^{\parallel}_m$ (away from its kernel),
as follows from equation (\ref{ityu}).
\subsection{Four zeromodes}
\label{frzm}
In the presence of four zeromodes there are the following possibilities
which we will examine in turn: either $h^{0,0}=n=1$ (corresponding
to $\chi_F=2$) or
$h^{0,0}= h^{p,0}=1$, where $p$ is odd (corresponding
to $\chi_F=0$). Recall that $n$ is the number of harmonic (2,0) forms on ${\Sigma}$
which in addition satisfy the constraint (\ref{hcon}). As we will see, no superpotential
is generated in either case. Since $\chi_F\neq 1$ in all cases, we conclude that our
result does not rule out the possibility that in the presence of flux the arithmetic genus criterion
should be replaced by the condition $\chi_F=1$.
$~\bullet h^{0,0}=n=1$
In this case we have $\chi_F=2$. Let us substitute
the Kaluza-Klein ansatz (\ref{kkgr}) and the expression for the zeromodes,
\begin{align}
\theta=
\epsilon\otimes\xi+ \zeta\otimes\Big(
\widehat{\Phi}_{mn} \gamma^{mn}+\delta\widehat{\Phi}_{m}\gamma^{m}
+\delta\widehat{\Phi}_{mnp}\gamma^{mnp}
\Big)\xi~,
\end{align}
into equation (\ref{grv}) for the gravitino vertex operator.
Integrating over the zeromodes using (\ref{zint}) we get, up to a total
worldvolume derivative,
\begin{align}
\int d^2\epsilon ~d^2\zeta~V V =
\chi^I\chi^J v_I w_{J}~,
\label{4zm}
\end{align}
where $v_I$ was defined in (\ref{vdef}) above and
\begin{align}
w_{J}&:=
\frac{2}{9}\int_{{\Sigma}}
\widehat{\Theta}\wedge\widehat{\Phi}\wedge\omega_J
~.
\label{ji}
\end{align}
The object $\widehat{\Theta}$ is defined by
\begin{align}
\widehat{\Theta}_{mn}:=\Omega_{mnpq}F^{pq}{}_{rs}\widehat{\Phi}^{rs}
\label{thdefi}
\end{align}
and is a (0,2)-form on ${\Sigma}$. (Recall that in our conventions
$\Omega$ is antiholomorphic).
In deriving this result, we had to perform some tedious but
straightforward gamma-matrix algebra making repeated use of the formul\ae {} in the
appendices \ref{gammapp}, \ref{sus}, especially
equations (\ref{bfive}, \ref{usefids}).
Moreover we have taken into account the normal flux condition
and we have implemented (\ref{dkn}), as discussed in the introduction.
In the following we show that the right-hand side of (\ref{ji}) vanishes;
no instanton-induced superpotential is generated in this case. Before demonstrating this
fact however, let us note that the following group-theoretical reasoning can be used to gain
insight into the result (\ref{4zm}). As follows from the
form of the vertex operator, the integration over the
zeromodes receives three kinds of contributions:
\begin{align}
\chi^I\chi^J v_I\otimes \omega_J\otimes F\otimes
(\widehat{\Phi}^{(2,0)}+ \delta\widehat{\Phi}^{(1,0)}+ \delta\widehat{\Phi}^{(3,0)} )^{2\otimes_s}
~,
\label{a111}
\end{align}
coming from terms of the form $VV\propto (\Psi_m\C^m\theta)(\Psi\theta^3)F$,
\begin{align}
\chi^I\chi^J
v_I\otimes \nabla\omega_J\otimes(\widehat{\Phi}^{(2,0)}+
\delta\widehat{\Phi}^{(1,0)}+ \delta\widehat{\Phi}^{(3,0)} )^{2\otimes_s}~,
\label{a211}
\end{align}
coming from terms of the form $VV\propto (\Psi_m\C^m\theta)(\nabla\Psi\theta^3)$,
and
\begin{align}
\chi^I\chi^Jv_I\otimes \omega_J\otimes
(\widehat{\Phi}^{(2,0)}+ \delta\widehat{\Phi}^{(1,0)}+ \delta\widehat{\Phi}^{(3,0)} )
\otimes
\nabla
(\widehat{\Phi}^{(2,0)}+ \delta\widehat{\Phi}^{(1,0)}+ \delta\widehat{\Phi}^{(3,0)} )
\label{a311}~,
\end{align}
coming from terms of the form $VV\propto (\Psi_m\C^m\theta)(\Psi\theta^2\nabla\theta)$.
Contributions of the type (\ref{a111}) transform in the\footnote{
In the following we are using the Dynkin notation for $A_3$.}
$$
\Big((000)\oplus(101)\Big)\otimes (020)\otimes\Big(
(010) \oplus(100)\oplus(001) \Big)^{2\otimes_s}
$$
of $SU(4)$. There are exactly three scalars in the decomposition of the
tensor product above. These we can write explicitly as:
\begin{align}
S_1&:=\chi^I\chi^Jv_I\omega_{J, mn}\Omega^{mpij}
F_{ijqr}\widehat{\Phi}^{qr}\widehat{\Phi}_{p}{}^n\nonumber\\
S_2&:=\chi^I\chi^Jv_I(\omega_J\cdot J)
\Omega^{mpij}
F_{ijqr}\widehat{\Phi}^{qr}\widehat{\Phi}_{mp}\nonumber\\
S_3&:=\chi^I\chi^Jv_I\delta\widehat{\Phi}^i\delta\widehat{\Phi}^{jk}{}_m
\Omega_{ijkn}F^{mnpq}\omega_{J, pq}~.
\end{align}
The last one, however, vanishes by virtue of equation (\ref{34}).
Moreover, using equation (\ref{opuio}), the scalars
$S_{1,2}$ can be expressed as a linear combination of
$R_{1}, \dots R_7$ defined in equation (\ref{taro}) below:
\begin{align}
S_1&=-2R_2+4R_5-4R_6\nonumber\\
S_2&=2R_4-8R_7
~.
\end{align}
In deriving the above we have used the identity
\begin{align}
\delta\widehat{\Phi}_{qrs}\Omega^{rsmp}=-\frac{2}{3}\Omega^{ijk[m}(\Pi^+)_q{}^{p]}
\delta\widehat{\Phi}_{ijk}
~,
\end{align}
which can be proved using (\ref{bfive}). A direct computation
of the terms of the form (\ref{a111}), yields the contribution
\begin{align}
\frac{2i}{9}S_1-\frac{1}{18}S_2=-\frac{4i}{9}(R_2-2R_5+2R_6)- \frac{1}{9}(R_4-4R_7)
\label{contr1}
\end{align}
to the zeromode integral (\ref{4zm}). The linear combination above can
be written in a more elegant way by noting that
\begin{align}
iS_1-\frac{1}{4}S_2=\chi^I\chi^Jv_I
\star (\widehat{\Theta}\wedge\widehat{\Phi}\wedge\omega_J)~,
\label{525}
\end{align}
where the Hodge star is along ${\Sigma}$. In proving (\ref{525}) we have
made use of equation (\ref{b0}).
Taking into account that $\omega_I$ is a harmonic (1,1) form
and that therefore $(\omega_I\cdot J)$ is a constant\footnote{\label{ext}
A direct computation reveals that it is in fact
$\widehat{\omega}^I$ rather than
$\omega_I$, where the hat denotes
the pull-back to $\Sigma$, which appears in the various invariants
of this section. However,
using the inclusion map
$$
\iota^*:~H^{p,q}(X,\mathbb{R})\longrightarrow H^{p,q}({\Sigma},\mathbb{R})~,
$$
we can think of ${\omega}^I$ as the extension to $X$
of the harmonic form $\widehat{\omega}^I$ on $\Sigma$ \cite{gh}.
In the text, we do not make an explicit distinction
between $\omega_I$ and $\widehat{\omega}^I$. See also the next footnote.
},
it follows that
$\nabla\omega_I$ transforms in the $(201)\oplus(102)$ of $SU(4)$. Hence,
contributions of the type (\ref{a211}) transform in the
$$
\Big((201)\oplus(102)\Big)\otimes \Big(
(010) \oplus(100)\oplus(001) \Big)^{2\otimes_s}
$$
of $SU(4)$. As there are no scalars in the decomposition of the
tensor product above, we conclude that these terms vanish.
Taking into account that $\widehat{\Phi}^{(2,0)}$ is a harmonic
(2,0) form on a K\"{a}hler manifold, it follows
that $\nabla\widehat{\Phi}^{(2,0)}$ transforms in the $(110)$ of $SU(4)$.
Similarly, $\nabla\delta\widehat{\Phi}^{(1,0)}$
transforms in the
$(000)\oplus(200)\oplus(010)\oplus(101)$ of $SU(4)$.
Finally, taking into account the last of equations (\ref{zmeqs}), it
follows that $\nabla\delta\widehat{\Phi}^{(3,0)}$ transforms in the
$(010)\oplus(101)\oplus(002)$ of $SU(4)$.
Putting everything together, it follows that
contributions of the type (\ref{a311}) transform in the
\begin{align}
\Big((000)\oplus(101)\Big)\otimes \Big((010)\oplus&(100)\oplus(001) \Big)\nonumber\\
\otimes &\Big(
(110)\oplus(000)\oplus(200)
\oplus 2(010)\oplus 2(101)\oplus(002)
\Big)\nonumber
\end{align}
of $SU(4)$. There are exactly seven scalars in the decomposition of the
tensor product above:
one coming from $\nabla\widehat{\Phi}^{(2,0)}$,
three from $\nabla\delta\widehat{\Phi}^{(1,0)}$
and three from $\nabla\delta\widehat{\Phi}^{(3,0)}$.
These can be written explicitly as
\begin{align}
R_1&:=\chi^I\chi^Jv_I\nabla^m\widehat{\Phi}_{ij}\Omega^{ijpq}\delta\widehat{\Phi}_{p}
\omega_{J,qm}\nonumber\\
R_2&:=\chi^I\chi^Jv_I\nabla_m\delta\widehat{\Phi}_n\Omega^{mnij}\omega_{J,ip}
\widehat{\Phi}^{p}{}_{j}\nonumber\\
R_3&:=\chi^I\chi^Jv_I\nabla^m\delta\widehat{\Phi}_n
\Omega^{nijk}\omega_{J,km}\widehat{\Phi}_{ij}\nonumber\\
R_4&:=\chi^I\chi^Jv_I(\omega_J\cdot J)\nabla_m\delta\widehat{\Phi}_n
\Omega^{mnij}\widehat{\Phi}_{ij}\nonumber\\
R_5&:=\chi^I\chi^Jv_I\nabla^m\delta\widehat{\Phi}_{ijk}\Omega^{ijkq}
\omega_{J,qp}\widehat{\Phi}^{p}{}_{m}\nonumber\\
R_6&:=\chi^I\chi^Jv_I\nabla^m\delta\widehat{\Phi}_{ijk}\Omega^{ijkq}
\omega_{J,mp}\widehat{\Phi}^{p}{}_{q}\nonumber\\
R_7&:=\chi^I\chi^Jv_I(\omega_J\cdot J)
\nabla^m\delta\widehat{\Phi}_{ijk}\Omega^{ijkq}
\widehat{\Phi}_{qm}
~.
\label{taro}
\end{align}
A direct computation of the terms of the form (\ref{a311}), yields the contribution
\begin{align}
-4i(R_1+R_3+2R_5)
\label{contr2}
\end{align}
to the zeromode integral (\ref{4zm}).
Putting the contributions (\ref{contr1}, \ref{contr2}) together,
we arrive at equation (\ref{4zm}). Note that the
invariants $R_4, \dots R_7$ as well as the linear combinations
$R_1+2R_2$ and $R_1+R_3$, can be written as total derivatives. This can
readily be seen by
taking into account that $\Omega$ is covariantly constant while $\omega$, $\widehat{\Phi}$
are harmonic\footnote{
Note that in general the pull-back of the
Christoffel connection from the total space $X$
to the base $\Sigma$,
$(\nabla^{\parallel})_m$, {\it cannot}
be identified with the Christoffel connection $\widehat{\nabla}_m$
associated with the metric on $\Sigma$. However if $\widehat{S}$ is an
arbitrary $p$-form on $\Sigma$ whose extension to $X$ is $S$, we have
$$
(\nabla^{\parallel})_m{S}^{mm_2\dots m_p}=
\nabla_m{S}^{mm_2\dots m_p}=
(\widehat{\nabla})_m\widehat{S}^{mm_2\dots m_p}~.
$$
The first equality follows from (\ref{tbb}). The second equality follows
from $\C_{mn}^n=g^{-1/2}\partial_mg^{1/2}$
and the fact that the determinants of the metrics $X$, $\Sigma$ are equal, as can be
seen from the explicit form of the fibration (\ref{explfibr}).
}.
It follows that the total contribution can be cast in the form
$\propto R_2$+total derivative.
On the other hand, up to a total derivative, $R_2$ is proportional to
the right-hand-side of (\ref{525}), as follows from
(\ref{contr1},\ref{525}).
We are now ready to show that the right-hand side of (\ref{ji}) vanishes identically.
First note that, as follows from (\ref{hcon}) or (\ref{opuio}),
the projection of $\widehat{\Theta}$ onto the space of harmonic forms on
${\Sigma}$ vanishes: ${\cal H}\{\widehat{\Theta}\}=0$. It follows that
\begin{align}
\int_{{\Sigma}}\widehat{\Theta}\wedge\widehat{\Phi}\wedge J=0~,
\label{gnv}
\end{align}
since $\widehat{\Phi}\wedge J$ is harmonic (this can
be seen by noting that $\star\widehat{\Phi}=\widehat{\Phi}\wedge J$).
Varying this equation with respect to the K\"{a}hler structure,
$\phi^I\rightarrow \phi^I+\delta \phi^I$, we get
\begin{align}
\int_{\Sigma}\frac{\delta\widehat{\Theta}}{\delta\phi^I}\wedge\widehat{\Phi}\wedge J
+ \int_{{\Sigma}}\widehat{\Theta}\wedge\widehat{\Phi}\wedge \omega_I=0~.
\label{vrtrr}
\end{align}
Furthermore, under a K\"{a}hler-structure variation the metric transforms as
\begin{align}
\delta g_{mn}&=\sum_I \delta\phi^I\omega_{I,mp}J_n{}^p~.
\end{align}
Note that the right-hand side above is automatically symmetric in the indices $m$, $n$.
Taking the above into account together with the fact that
$S_2$ is a total worldvolume derivative
it follows that
\begin{align}
\int_{\Sigma}\frac{\delta\widehat{\Theta}}{\delta\phi^I}\wedge\widehat{\Phi}\wedge J=0~.
\label{asd}
\end{align}
In the derivation we made use of the identity
\begin{align}
\widehat{\Phi}^{mn}\Omega_{mnpq}\widehat{\Phi}^{rs}F_{rs}{}^{qt}\omega_{I,pt}=-
\widehat{\Phi}^{mn}\Omega_{mn}{}^{pq}\widehat{\Phi}_s{}^{t}F_{pq}{}^{sr}\omega_{I,rt}
~.
\end{align}
From (\ref{vrtrr}, \ref{asd}) it finally follows that the right-hand side of (\ref{ji}) vanishes, as
advertised.
No potential is generated in the remaining cases either, as we now show.
$~\bullet h^{0,0}=h^{1,0}=1$
In this case we have $\chi_F=0$.
As can be verified by direct computation,
no potential is generated in this case.
The easiest way to arrive at this result is by
the following group-theoretical argument. It follows from the
form of the vertex operator that the integration over the
zeromodes receives three kinds of contributions:
\begin{align}
\chi^I\chi^Jv_I\otimes \omega_J\otimes F\otimes\widehat{\Phi}^{(1,0)}\otimes\widehat{\Phi}^{(1,0)}~,
\label{c111}
\end{align}
coming from terms of the form $VV\propto (\Psi_m\C^m\theta)(\Psi\theta^3)F$,
\begin{align}
\chi^I\chi^Jv_I\otimes \nabla\omega_J\otimes\widehat{\Phi}^{(1,0)}\otimes\widehat{\Phi}^{(1,0)}~,
\label{c211}
\end{align}
coming from terms of the form $VV\propto (\Psi_m\C^m\theta)(\nabla\Psi\theta^3)$, and
\begin{align}
\chi^I\chi^Jv_I\otimes \omega_J\otimes\widehat{\Phi}^{(1,0)}\otimes
\nabla\widehat{\Phi}^{(1,0)}
\label{c311}~,
\end{align}
coming from terms of the form $VV\propto (\Psi_m\C^m\theta)(\Psi\theta^2\nabla\theta)$.
Contributions of the type (\ref{c111}) transform in the
$$
\Big((000)\oplus(101)\Big)\otimes (020)\otimes (100)^{2\otimes_s}
$$
of $SU(4)$. As there are no scalars in the decomposition of the
tensor product above, we conclude that these terms vanish.
Taking into account that $\omega_I$ is a harmonic (1,1) form, it follows that
$\nabla\omega_I$ transforms in the $(201)\oplus(102)$ of $SU(4)$. Hence,
contributions of the type (\ref{c211}) transform in the
$$
\Big((201)\oplus(102)\Big)\otimes (100)^{2\otimes_s}
$$
of $SU(4)$. As there are no scalars in the decomposition of the
tensor product above, we conclude that these terms vanish.
Taking into account that $\widehat{\Phi}^{(1,0)}$ is harmonic, it follows
that $\widehat{\Phi}^{(1,0)}$ transforms in the $(200)$ of $SU(4)$. Hence,
contributions of the type (\ref{c311}) transform in the
$$
\Big((000)\oplus(101)\Big)\otimes (100)\otimes (200)
$$
of $SU(4)$. As there are no scalars in the decomposition of the
tensor product above, we conclude that these terms vanish.
$\bullet$ $h^{0,0}=h^{3,0}=1$
In this case we have $\chi_F=0$. As in the previous case,
no potential is generated. This can be shown {\it e.g.} by
the same type of group-theoretical reasoning as before.
\section{Discussion}
Taking advantage of the recent
progress in explicit theta-expansions in eleven-dimensional superspace \cite{t},
we have performed a computation of the contribution of fivebrane instantons
with four fermionic zeromodes in M-theory compactifications
on Calabi-Yau fourfolds with (normal) flux. The calculus of fivebrane
instantons in M-theory is still largely unexplored, and we hope that our computation
will initiate a more extensive study of these phenomena directly in M-theory.
We have found that no superpotential is
generated in this case -- a result which is compatible
with replacing the arithmetic genus criterion
by the condition $\chi_F=1$, where $\chi_F$ is the flux-dependent
`index' of \cite{kall}. It would be interesting to reexamine this statement when the condition
of normal flux is relaxed.
It would be desirable to explore the obvious generalizations of our computation:
fivebrane instanton contributions to non-holomorphic couplings, and/or
contributions to higher-derivative and multi-fermion couplings as in \cite{bw}.
The expansions of \cite{t} can also be used to study instantons with more than four zeromodes.
So far the precise
relation between instanton calculus in M-theory \cite{bbs, hm} and the rules of D-instanton
computations in string theory put forward in \cite{gg, bill, blum}, has not been clearly spelled out.
Understanding this
relation may help clarify some of the conceptual issues associated with the M-theory calculus, see {\it e.g.}
\cite{hm}. This would be another interesting possibility for future investigation.
Last but not least, it is important to address the reservations, discussed in the introduction,
about the fivebrane action of \cite{pst}
and to incorporate the topological considerations of \cite{beloa, belob} in
a supersymmetric context\footnote{I would like to thank Greg Moore for correspondence on this point.}.
\vfill\break
\section*{Acknowledgment}
I am indebted to Greg Moore for encouragement, correspondence and
valuable comments on several previous versions of
the manuscript.
I am also grateful to
Ralph Blumenhagen,
Michael Haack, Peter Mayr and Henning Samtleben for useful discussions and correspondence.
| {
"redpajama_set_name": "RedPajamaArXiv"
} | 5,715 |
The Family Friendly Programming Forum is a coalition of over 40 advertisers, all of whom belong to the Association of National Advertisers. They seek to increase the amount of "family-friendly" programming on U.S. television.
They define family-friendly programming as:
It is relevant to today's TV viewer, has generational appeal, depicts real life and is appropriate in theme, content and language for a broad family audience. Family friendly programs also embody a responsible resolution. Family friendly programs may include movies, dramas, situation comedies and informational programs.
The FFPF supports various programs and initiatives:
Script Development Fund
Student Scholarship Program
Annual Symposium
Family Television Awards
Projects that the script-development fund has helped reach the pilot stage include:
Gilmore Girls
Life Is Wild
The 2007 reboot of Bionic Woman
Chuck
Ugly Betty
Friday Night Lights
Brothers & Sisters
Everybody Hates Chris
Notes from the Underbelly
Runaway
Commander in Chief
The New Adventures of Old Christine
Related
Complete Savages
Clubhouse
8 Simple Rules for Dating My Teenage Daughter
American Dreams
Big Time
The fund has no influence on the direction of the show further than the pilot.
External links
Official homepage
Television organizations in the United States | {
"redpajama_set_name": "RedPajamaWikipedia"
} | 9,021 |
The Z Bar Heavy Garment Rack is a great choice for any use. Its double hang rails boast tremendous capacity. The Z design is a feature that adds to the heavy duty nature of this fixture. It's not all-or-nothing in height with this rack. You can move it up or down by 3-inch intervals between 48-inches and 66-inches. You'll stand back and say "That's just right." Another plus is the adjustable levelers that are included with this rack. Just a tweak here and there and you can overcome a less than level floor. The Z Bar rack is 60-inches long and 22-inches wide. The tubing used to construct this double garment rack measures 1-1/4-inches. You won't even have to worry about how to post signage. These hang rails have been drilled to allow for signs. This fixture will serve you well as you load it down with merchandise. Just think "Z". | {
"redpajama_set_name": "RedPajamaC4"
} | 3,154 |
Q: What is com.google.android.gm? One of my logs is returning the following message:
12-07 10:42:45.201 6622-17013/? D/DownloadManager: 23 Starting {Apk-apks_1.1.017-812_releaseVersion.apk} by {10112:com.google.android.gm}
I believe com.google.android.gm represents my gmail account but not sure. Need more insights.
A: Yes that is indeed the Gmail android app. Any links clicked via the Gmail app have this as the traffic source.
You can see the ID here: https://play.google.com/store/apps/details?id=com.google.android.gm
| {
"redpajama_set_name": "RedPajamaStackExchange"
} | 742 |
{"url":"https:\/\/math.stackexchange.com\/questions\/1147446\/why-doesnt-the-author-straight-up-multiply-the-15-by-2-in-chinese-remainder-the","text":"# Why doesn't the author straight up multiply the 15 by 2 in Chinese Remainder Theorem?\n\nThis is from a Youtube video on the Chinese Remainder Theorem -https:\/\/www.youtube.com\/watch?v=ru7mWZJlRQg\n\nThe value at each column is the product of the mod of the two other columns(so moding will reduce to one value)\nThe author is currently on the step to ensure x, which is composed of a sum of congruences, is\n$\\equiv$ 2(mod 4). Once he applied modulus 4 to all the congruences, he was left with x $\\equiv$ 3(mod 4) which isn't the same as x $\\equiv$ 2(mod 4). To do this the author recommended taking an approach of converting 3 (mod 4) to 1(mod 4) then to 2 (mod 4). Why doesn't the author go straight from 3 mod 4 to 2 mod 4 by multiplying the 15 by 2? That way x $\\equiv$ 6 $\\equiv$ 2 mod(4). Is there a reason he chose this roundabout approach and not the direct approach?\n\nAs you\u2019ve observed, in the demonstration problem there really isn\u2019t any need for the two-step approach; he\u2019s recommending it with an eye towards messier problems of the same kind.\n\nIn general we have a congruence $x\\equiv a\\pmod m$, and what we actually want is $x\\equiv b\\pmod m$. With small $a,b$, and $m$ we can use trial and error to find $c$ such that $ac\\equiv b\\pmod m$ just about as easily as we can use it to find $c$ such that $ac\\equiv 1\\pmod m$. Then we can multiply $x\\equiv a\\pmod m$ by $c$ to get $cx\\equiv ac\\equiv b\\pmod m$, and $cx$ is the number that we wanted. That\u2019s what you\u2019re doing when you notice that if $a=3$ and $b=2$, we can take $c=2$ and get the desired result.\n\nIf, however, $a,b$, and $m$ are larger, as in the example that he mentions at the end of the video, trial and error can be painful. 
Fortunately, it turns out that there\u2019s a rather simple mechanical procedure, the extended Euclidean algorithm, for finding $c$ such that $ac\\equiv 1\\pmod m$ without any trial and error. When you have that, you know that $cx\\equiv 1\\pmod m$, and the final step of multiplying by $b$ to get $bcx\\equiv b\\pmod m$, is trivial.\n\nIn other words, the two-step procedure that he recommends replaces what could be a long process of trial and error with a straightforward algorithmic step, something that can be done mechanically with considerable efficiency, followed by a simple multiplication.\n\n\u2022 Thus spoke @zarathustra. \u2013\u00a0Daniel W. Farlow Feb 14 '15 at 8:42\n\u2022 @crash: No, no, no: Also sprach @zarathustra! :-) (Apparently my fingers are more accustomed to typing efficient than to typing efficiency; I\u2019m glad that someone caught it. \u2013\u00a0Brian M. Scott Feb 14 '15 at 8:52\n\u2022 I saw the opportunity and I had to run with it! :-) This is kind of off-topic, but I thought you might be interested in a question I posted just a little while ago--I thought you might find it interesting given your teaching background. It addresses some very funky calculator behavior. Possibly something to use for a class or two in the future! \u2013\u00a0Daniel W. Farlow Feb 14 '15 at 8:59\n\u2022 @crash: That is rather strange behavior; I\u2019ll have to take a closer look. Pretty pictures, though! (I confess that I never bothered to learn to use a graphing calculator. My weapon of choice when I needed more than mental arithmetic in the classroom was my trusty old HP-$11$c, which had the added virtue of being almost unborrowable: RPN was completely foreign to the overwhelming majority of students!) \u2013\u00a0Brian M. Scott Feb 14 '15 at 9:08\n\u2022 Thanks for the response! The unborrowable factor can certainly come in handy! Yes, the behavior completely confounds me simply because of just how many functions seem to be involved. 
Honestly, I haven't used a calculator myself in quite some time, but I figured it would be fun to get back to some \"elementary\" things but to spruce them up (if that made any sense--so used to dealing in the abstract I thought it would be nice to \"come back down\" and toy around with my calculator). Math...such a truly beautiful art. :-) \u2013\u00a0Daniel W. Farlow Feb 14 '15 at 9:12","date":"2019-06-20 05:45:15","metadata":"{\"extraction_info\": {\"found_math\": true, \"script_math_tex\": 0, \"script_math_asciimath\": 0, \"math_annotations\": 0, \"math_alttext\": 0, \"mathml\": 0, \"mathjax_tag\": 0, \"mathjax_inline_tex\": 1, \"mathjax_display_tex\": 0, \"mathjax_asciimath\": 0, \"img_math\": 0, \"codecogs_latex\": 0, \"wp_latex\": 0, \"mimetex.cgi\": 0, \"\/images\/math\/codecogs\": 0, \"mathtex.cgi\": 0, \"katex\": 0, \"math-container\": 0, \"wp-katex-eq\": 0, \"align\": 0, \"equation\": 0, \"x-ck12\": 0, \"texerror\": 0, \"math_score\": 0.7423936724662781, \"perplexity\": 440.2663818714851}, \"config\": {\"markdown_headings\": true, \"markdown_code\": true, \"boilerplate_config\": {\"ratio_threshold\": 0.18, \"absolute_threshold\": 10, \"end_threshold\": 15, \"enable\": true}, \"remove_buttons\": true, \"remove_image_figures\": true, \"remove_link_clusters\": true, \"table_config\": {\"min_rows\": 2, \"min_cols\": 3, \"format\": \"plain\"}, \"remove_chinese\": true, \"remove_edit_buttons\": true, \"extract_latex\": true}, \"warc_path\": \"s3:\/\/commoncrawl\/crawl-data\/CC-MAIN-2019-26\/segments\/1560627999141.54\/warc\/CC-MAIN-20190620044948-20190620070948-00477.warc.gz\"}"} | null | null |
Les Corniques sont un groupe ethnique originaire des Cornouailles, au Royaume-Uni (à ne pas confondre avec les Cornouaillais qui sont les habitants de la Cornouaille — sans « s » —, région de Bretagne). C'est un peuple celtique qui parlait la langue cornique jusqu'au XVIIIe siècle et qui s'est mis à la cultiver de nouveau depuis le XXe siècle, bien que de manière minoritaire. Selon une enquête de 2004, 35 % des habitants des Cornouailles se considèrent corniques (environ 181 000 personnes).
Présentation
L'ethnicité cornique est reconnue dans les recensements canadiens et, en 2006, 1550 Canadiens ont déclaré que leur origine ethnique était cornique. Le gouvernement britannique leur accorde le statut de minorité nationale en avril 2014. La langue cornique avait toutefois été reconnue en 2002, lors de la seconde ratification de la Charte européenne des langues régionales ou minoritaires par le Royaume-Uni.
Pour identifier un ethnique cornique, il existe une phrase en anglais : « By Tre, Pol and Pen, you shall know the Cornishmen » (« Par Tre, Pol et Pen, vous saurez les hommes corniques »). Les trois mots dans la phrase réfèrent aux préfixes corniques dans les noms qui signifient « village, étang et tête ». Les exemples de pareils noms dont Trevithick, Tremayne, Polkinghorne, Pentreath et Penrose.
Notes et références
Voir aussi
Articles connexes
Celtes
Cornouailles
Démographie du Royaume-Uni
Langues celtiques
Liens externes
Les Corniques sur le site web d'Eurominority, une organisation pour les minorités européennes
Union fédéraliste des communautés ethniques européennes
Tre Pol and Pen
Corniques
Cornouailles | {
"redpajama_set_name": "RedPajamaWikipedia"
} | 2,859 |
Дижонская горчица () — традиционная французская горчица, названная в честь города Дижона в Бургундии.
История
Дижон был центром производства горчицы в раннем Средневековье, а эксклюзивные права на производство во Франции получил в XVII веке. Впервые дижонская горчица была подана к столу короля Филиппа VI в 1336 году, но известность получила только в XVIII веке. В 1634 году производители уксуса и горчицы приняли решение объединиться в единую мануфактуру со строгими правилами: каждый производитель мог иметь только один магазин и одного подмастерья. В 1712 году был принят устав, который, в том числе, подписал Франсуа Нажон (), чей сын — Жан Нажон () в 1756 году изменил рецепт горчицы, заменив столовый уксус вержусом — кислым соком недозрелого винограда. В 1777 году в Дижоне было создано первое партнёрское предприятие по производству горчицы по оригинальной рецептуре с использованием белого вина, а в 1853 году Морис Грей () изобрёл паровую горчичную мельницу, а позже основал компанию по производству горчицы Grey, на настоящий момент . В 2008 году нидерландская компания Unilever, которой принадлежало несколько заводов по производству горчицы в Европе, закрыло подразделение . С 15 июля 2009 года производство было перенесено из Дижона в соседний город Шевиньи-Сен-Совёр, а восемьдесят процентов семян горчицы, из которых производят дижонскую горчицу, выращены в Канаде.
Состав
Основными ингредиентами дижонской горчицы являются коричневые семена горчицы и белое вино или смесь из винного уксуса, воды и соли, что имитирует вкус вержуса. Дижонская горчица имеет палево-жёлтый цвет и насыщенный вкус.
Использование в кулинарии
Дижонская горчица употребляется с мясными блюдами как самостоятельная приправа или в составе соусов. Термин «дижонская» в названии блюда обозначает, что оно или соус к нему были приготовлено с использованием дижонской горчицы.
Географическое наименование
В 1937 году для дижонской горчицы была введена сертификация подлинности происхождения. Благодаря давней истории производства горчицы Дижон считается «горчичной столицей мира».
Примечания
Дижон
Горчица | {
"redpajama_set_name": "RedPajamaWikipedia"
} | 8,003 |
<?php
namespace FeedWriter;

/**
 * Wrapper for creating ATOM feeds.
 *
 * Thin convenience subclass of Feed that pre-selects the ATOM feed
 * type; all feed-building behaviour is inherited from Feed.
 *
 * @package UniversalFeedWriter
 */
class ATOM extends Feed
{
    /**
     * Create a feed writer configured for the ATOM format.
     *
     * Delegates to the parent constructor with the Feed::ATOM format
     * constant; no additional state is set up here.
     */
    public function __construct()
    {
        parent::__construct(Feed::ATOM);
    }
}
| {
"redpajama_set_name": "RedPajamaGithub"
} | 6,599 |
Learn the correct way to say Minotte in its native language with our online pronunciation dictionary. Listen to the name in our online audio dictionary and practice saying Minotte so that you sound like a native Spanish speaker.
Description: Minotte is the name of a place in Italy.
Description: Minotte is the name of a place in Spain. | {
"redpajama_set_name": "RedPajamaC4"
} | 6,978 |
\section{Introduction}
A large amount of our knowledge of the solar extreme ultraviolet (EUV) corona comes from the observations of the first two \ion{H}{i} Lyman lines at 1215.67 \AA\ and 1025.72 \AA. The Lyman~$\alpha$ line is indeed the brightest line in this range of radiation. To infer the coronal plasma properties from observations it is necessary to get the line profiles with enough spectral resolution.
{While chromospheric profiles show a self-reversal at line centre, the coronal profile of the scattered Lyman~$\alpha$ line is close to a gaussian profile.}
From the knowledge of the line width one can derive an {equivalent} temperature {(ie. including all causes of line broadening),} and with further assumptions, the kinetic {(thermal)} temperature of the hydrogen atoms. If the coupling due to charge exchange between hydrogen atoms and ions is strong enough, then the proton temperature is equal to the hydrogen temperature.
{\cite{loaetal98,loaetal00} concluded from fast solar wind models that this coupling is strong up to 3 R$_{\sun}$, while \cite{olh94} studied slow wind models and found strong coupling up to 10 R$_{\sun}$ in some conditions. The decoupling of hydrogen atoms and protons will occur at different densities, depending on which physical assumptions are made in the models. In our case, this coupling is strong enough at densities above 10$^6$ cm$^{-3}$. This condition is met within the whole streamer \citep[see for further details][]{lll06}.}
Thus we can obtain a reliable estimate of the proton temperature from the full width at half maximum (FWHM) of the Lyman line profiles \citep[see also][]{marschetal99}.
In this paper we show that in the case of coronal streamers, radiative transfer calculations in non local thermodynamic equilibrium (NLTE) are useful to predict the properties of the Lyman~$\alpha$ and Lyman~$\beta$ lines. The temperature diagnostic from these two lines is examined. We describe the streamer model in section~\ref{bomodel} and the radiative transfer calculations in section~\ref{radtrans}. Results are discussed in section~\ref{discuss}.
\section{The coronal model}\label{bomodel}
The coronal model is obtained from a global three fluid solar wind model with $\alpha$ particles. It is described in another paper by {\cite{lll06}}. {It is a 2.5-D, axially symmetric model, where all variables depend on two spatial coordinates, but where the three components of vector quantities are retained. }In the streamer (the closed magnetic field region) no external heating is applied. A hot coronal boundary, electron heat flux and Coulomb coupling lead to a non-isothermal streamer in which all three species (namely electrons, protons, and $\alpha$'s) have the same temperature. The properties of the streamer model that we use for this study are shown in Fig.~\ref{streamer} for two heights. One has the line-of-sight (LOS) centre situated at a heliocentric distance of 1.05 R$_{\sun}$ at the equator (left column) and the other one is at a heliocentric distance of 1.88 R$_{\sun}$, slightly below the cusp which is located at $\sim 2$~R$_{\sun}$. The LOS is chosen to be 4~R$_{\sun}$ long. The top panels in Fig.~\ref{streamer} present the density variations along the LOS of the protons. The electron density is almost the same as the proton density due to the small amount of $\alpha$'s. The bottom panels of the figure show the temperature variation along the LOS.
For the present investigation we have made use of 10 different LOS for a streamer axis along the equator. All LOS are 4~R$_{\sun}$ long, but the centre of the LOS is located at an increasing distance from the sun. As one goes further away from the surface, the temperature profiles flatten and the density variations become smoother as well as less sharp.
\begin{figure}
\centering
\resizebox{\hsize}{!}{\includegraphics{5059fig1.eps}}
\caption{Streamer models at a distance of 0.05~R$_{\sun}$ (left) and 0.88 R$_{\sun}$ (right) above the solar surface. Top panels: proton density in cm$^{-3}$; bottom panels: electron temperature in K. All quantities are plotted against the location along the line-of-sight expressed in units of the solar radius R$_{\sun}$, where the origin is at the centre of the LOS.}
\label{streamer}
\end{figure}
\section{Computation of the emergent radiation}\label{radtrans}
In order to compute the radiation emitted by the corona we use a numerical code which solves the radiative transfer (RT) equations and the statistical equilibrium (SE) equations in NLTE. This code has been described in \cite{ghv,gl00,gh02}. Here we recall its most relevant features.
The SE is solved for a 10 level + continuum hydrogen atom. The RT is solved for the lines and continua in a 1D plane-parallel geometry using a Feautrier method \citep{feautrier} with variable Eddington factors. The boundary conditions for the RT equations are determined by the radiation coming from the disk. This incident radiation is allowed to freely penetrate the structure. The Lyman~$\alpha$ profile from the solar disk is taken from OSO-8 observations \citep{glva78}. For Lyman~$\beta$ to Lyman-9 they are taken from SUMER observations \citep{warrenetal98}. Thomson scattering and Rayleigh scattering are taken into account in the computation of the continuous absorption coefficients.
It is necessary to perform the calculations in NLTE. Because of the importance of the incident radiation emitted by the solar disk and the low coronal densities, LTE cannot be reached. We define the LTE departure coefficient $b_i$ of the energy level $i$ by:
\begin{equation}
b_i = \frac{N_i}{N_c} \left[ \frac{N_c}{N_i} \right] _\mathrm{LTE} . \label{ltedc}
\end{equation}
With this definition, the LTE departure coefficient is $b_c=1$ for the continuum level $c$. Here $N_i$ and $N_c$ are the populations of the bound level $i$ and continuum $c$. $\left[ \frac{N_c}{N_i} \right] _\mathrm{LTE}$ is given by the Saha-Boltzmann distribution.
Figure~\ref{bih} presents the value of the LTE departure coefficients $b_1$ of the ground level of hydrogen for our 10 LOS. It shows that the ground level is far from LTE, and that this tendency increases with height. This can be explained by the fall-off in densities, which makes the incident radiation even more predominant in the formation of the hydrogen spectrum at higher altitudes. In fact the variation of $b_1$ closely follows that of $1/N_e$ (inverse of the mean electron density) with altitude, where the mean of the density is defined as $ N_e = \int_0^L{n_e(x) \mathrm{d}x}/L$,
with $L$ the total length of the LOS. This is illustrated in Fig.~\ref{bih} where we plotted as 'plus' signs $1/N_e$ in arbitrary units normalized so that the values of $b_1$ and $1/N_e$ at 1.05 R$_{\sun}$ are identical.
{The factor of proportionality between $b_1$ and $1/N_e$ is dependent on temperature. This result is valid as long as the ionization balance of hydrogen is governed by collisional ionization and radiative recombination.}
The excited levels also are far from LTE but to a lesser extent. For instance we have $b_2/b_1 \lesssim 10^{-8}$.
\begin{figure}
\centering
\resizebox{\hsize}{!}{\includegraphics{5059fig2.eps}}
\caption{LTE departure coefficient for the ground level of hydrogen as a function of heliocentric distance (solid line). The 'plus' signs give the value of $1/N_e$ in arbitrary units (see text).}
\label{bih}
\end{figure}
Once the coupled system of SE and RT equations is solved we compute the emergent intensity in a particular line from the knowledge of the line source function, the source function for continuous absorption, and the related absorption coefficients. {The emergent intensity $I_\nu(\mu)$ at an angle $\theta$ between the normal to the surface and the LOS such that $\mu=\cos\theta$ is given by:
\begin{equation}
\label{solform}
I_\nu(\mu) = \int_{0}^{\tau_\nu}{S_\nu(t) \mathrm{e}^{-t/\mu} \ \mathrm{d}t/\mu} \ ,
\end{equation}
where $\tau_\nu$ is the optical {thickness} at frequency $\nu$.}
Of course the simplicity of Eq.~(\ref{solform}) hides the fact that {the computation of the total source function $S_\nu(\tau_\nu)$ in NLTE is a non-trivial task.} Here we use the formulation of the equivalent two-level atom. One can then express the line source function as:
\begin{equation}
\label{source}
S_\nu^l = \varepsilon^\ast B^\ast + (1-\varepsilon^\ast) \tilde{J}_\nu .
\end{equation}
In Eq.~(\ref{source}), $\varepsilon^\ast$ and $B^\ast$ account for all the processes that can affect the creation and the destruction of photons in the transition at frequency $\nu$, while $\mathbf{(1-\varepsilon^\ast)} \tilde{J_\nu}$ is the {\emph{scattering} term of the source function.} Expressions for $\varepsilon^\ast$ and $B^\ast$ are given in \cite{phdgoutte} \citep[see also][Chap.~8.1]{jeff}. Through Eq.~(\ref{source}) a non-local and non-linear coupling between the radiation and the plasma arises.
{The scattering {integral} in eq.~(\ref{source}) }is expressed as:
\begin{equation}
\label{jredistrib}
\tilde{J_\nu} = \frac{1}{\varphi_\nu} \int_0^\infty{R(\nu^\prime,\nu) J_{\nu^\prime} d\nu^\prime} .
\end{equation}
$\varphi_\nu$ is the normalized absorption profile of the line, and $R(\nu^\prime,\nu)$ is the angle-averaged frequency redistribution function. It gives the probability density that a photon absorbed at frequency $\nu^\prime$ is re-emitted at frequency $\nu$.
{We follow the treatment of \cite{hummer62} who studied four types of redistribution of radiation scattered from moving atoms. The redistribution functions with the greatest significance \citep{mihalas} are Hummer's case~II (radiation damping with coherence in the atom's rest frame), and case~III (radiation and collision damping with complete redistribution in the atom's rest frame). A Doppler effect arises when deriving expressions for the redistribution functions in the observer's frame.
The case~II redistribution function $R_{II}$ is well suited to describe the scattering of radiation in resonance lines \citep{mihalas,cv78}, and we use it for the redistribution in the Lyman lines. In the Doppler core of the line (2--3 Doppler widths from line centre) this function is close to complete redistribution (CRD). Coherency effects are more noticeable in the line wings \citep{mm73}. This means that in the solar corona the coherency effects of $R_{II}$ may be important at about 1.5~\AA\ from the line centre of the Lyman lines, and thus difficult to detect.
The case~III redistribution function $R_{III}$ is close to CRD over the entire line profile. It is the predominant redistribution mechanism in collisionally-dominated regions of the solar atmosphere.
For studies of the Lyman~$\alpha$ line profile in the corona, it is generally assumed that the redistribution function is well described in the coherent scattering approximation for an atom with two sharp energy levels (zero natural line width; \citealp[see e.g.,][]{witetal82apj,lietal98,loaetal98,cranmer98}). This is the case~I redistribution function in \citet{hummer62}. \cite{cranmer98} studied case~II and found that it makes a small difference relative to case~I which would be hardly noticed in the observations of the Lyman~$\alpha$ profile. This is due to the fact that both case~I and case~II redistribution functions are close to CRD in the Doppler core of the Lyman lines, which extend to about 1.5~\AA\ from line centre at coronal temperatures. The differences between case~I and case~II arise in the far wings of Lyman~$\alpha$ where the Thomson component of the line (chromospheric Ly$\alpha$ radiation scattered from electrons in the corona) becomes the major contributor to the line intensity. It would be interesting to see if this effect is similar for Lyman~$\beta$.}
{In this work we use partial redistribution in frequency (PRD) to compute the frequency redistribution function in Eq.~(\ref{jredistrib}) for all Lyman lines up to Ly-9, assuming isotropic scattering {in the laboratory frame}. Our redistribution function is therefore a linear combination of $R_{II}$ and $R_{III}$. The redistribution function $R_{III}$ is taken to be equal to the complete redistribution function given by the product $\varphi_\nu \varphi_{\nu^\prime}$ as in \cite{mihalas}.}
Defining the branching ratio $\gamma=\Gamma_r/(\Gamma_r+\Gamma_c)$, where $\Gamma_r$ and $\Gamma_c$ are the radiative and collisional damping constants respectively, we have {\citep{osc72}}:
\begin{equation}
R(\nu,\nu^\prime) = \gamma R_{IIA}(\nu,\nu^\prime) + (1-\gamma) \varphi_\nu \varphi_{\nu^\prime} \ .
\label{defredi}
\end{equation}
We compute $\Gamma_c$ for each {Lyman} line at each position along the LOS. It is important to take this spatial variation into account as we found that it makes a difference in the width of the emergent line profiles if it is neglected. Furthermore, since the Lyman~$\alpha$ line has extended wings, we use a frequency-dependent collisional damping coefficient \citep{yeletal81}. All other subordinate lines are treated with the {standard} CRD approximation by imposing $\gamma=0$ in eq.~(\ref{defredi}).
A comparison between CRD and PRD computations shows that CRD alone would be a bad approximation for the Lyman lines. However CRD cannot be neglected, as we find {noticeable} differences in the line widths between {the scattering with $R_{II}$ only} {(by forcing $\gamma=1$)} and PRD for Lyman~$\beta$.
\section{Results and discussion}\label{discuss}
Figure~\ref{intensites} presents the resulting integrated intensities of the first two Lyman lines as a function of distance from sun centre. The intensities are calculated by summing over the computed line profiles. The decrease of the intensity with altitude is more pronounced for Lyman~$\beta$ than for Lyman~$\alpha$, a first indication that they relate to the plasma parameters in different ways. The Lyman~$\alpha$ intensities compare well with the computed and observed values presented in \cite{vbr03}.
{We find that the variation of the Ly$\alpha$ intensity with height is not related to the variation of the electron density, while the decrease of Ly$\beta$ and H$\alpha$ (not shown on Fig.~\ref{intensites}) intensities follows the decrease of $n_e^2$ closely up to $\sim 1.5$~R$_{\sun}$, consistent with the fact that these two lines are mostly formed by collisional excitation in the inner corona. Then the fall-off in intensity is less rapid than that of the square of the electron density, owing to the growing importance of radiative excitation (see also Fig.~\ref{compo}).
In fact there is a coupling between Ly$\beta$ and H$\alpha$, which means that {an} H$\alpha$ photon can be absorbed and subsequently lead to an emission of a Ly$\beta$ photon. We obtain a nearly constant ratio between Ly$\beta$ and H$\alpha$ intensities, with $I(\mathrm{Ly}\beta)/I(\mathrm{H}\alpha) \simeq 8$. This coupling leads to lowered coherency effects, as was illustrated by \cite{hgv87}.
The coherence coefficient $\gamma$ (eq.~\ref{defredi}) is close to 1 for Ly$\alpha$ and 0.57 for Ly$\beta$ at the centre of the LOS closer to the Sun (height of 1.05~R$_{\sun}$). We have computed the parameter $\lambda = (A_{ji} / P_j) \times \gamma$ as in \cite{hgv87} -- although we use a slightly different definition for $\gamma$, where $A_{ji}$ and $P_j$ are the spontaneous emission coefficient in the $j\to i$ transition and the total depopulation rate of level $j$, respectively.
Close to the Sun, in the streamer base, our value of $\lambda$ is around 0.99 for Ly$\alpha$ and 0.3 for Ly$\beta$ (at the centre of LOS). This confirms that the coherency effects in Ly$\beta$ are less important than in Ly$\alpha$.
}
\begin{figure}
\centering
\resizebox{\hsize}{!}{\includegraphics{5059fig3.eps}}
\caption{Integrated intensities in photons s$^{-1}$ cm$^{-2}$ sr$^{-1}$ of Lyman~$\alpha$ (triangles) and Lyman~$\beta$ (squares) as a function of heliocentric distance.}
\label{intensites}
\end{figure}
Now we turn to the study of the Lyman line profiles. We fit each computed profile with a gaussian profile obtained from a non-linear least squares fit.
{We find that the Ly$\alpha$ wings are broader than our gaussian fits at all altitudes. The line centre is well reproduced by the gaussian fits at altitudes above $\sim$1.6~R$_{\sun}$.
The Ly$\beta$ line deviates from the gaussian fits as the altitude increases. The line centre is always well reproduced by the gaussian fit, but the wings get broader with height.}
From our gaussian fits we obtain the full width at half maximum (FWHM) of the computed line profiles. As we go further up in the corona, the line widths decrease. This reflects the decrease in temperature.
However, care should be taken when inferring the temperature from the line width. {As the profile is not exactly gaussian, this introduces an error in the temperature derivation \citep[for a more thorough discussion see, e.g.,][]{loaetal98}}. In this respect it seems more reliable to exploit the diagnostic possibilities of the Lyman~$\beta$ line which is closer to a gaussian {than the Lyman~$\alpha$ line}.
To infer the temperature of the neutrals from the line widths, one can relate the FWHM and the temperature $T_H$ with:
\begin{equation}
T_H = \frac{m}{2k} \left[ \mathrm{FWHM}^2 \frac{c^2}{4 \lambda^2\ln 2}- \xi^2 \right] \ .
\label{temp}
\end{equation}
We arbitrarily chose to use $\xi = 20$ km s$^{-1}$ for the non-thermal motions in the calculations of the line profiles at all altitudes. While it might not be accurate, the exact value for $\xi$ is not important for our discussion on the temperatures inferred from the Lyman lines, as we assume that they both have the same non-thermal broadening. The resulting temperatures derived from the width of the Lyman~$\alpha$ line and the Lyman~$\beta$ line are plotted in Fig.~\ref{temperatures} as a function of height of the LOS, together with the mean temperature derived from the model input and the temperature at the centre of the LOS. The mean temperature is defined as $T_\mathrm{mean} = \int_0^M{T(m)\mathrm{d}m}/M$, with $m$ the column mass along the LOS, and $M$ the total column mass of the LOS.
\begin{figure}
\centering
\resizebox{\hsize}{!}{\includegraphics{5059fig4.eps}}
\caption{Temperature derived from the width of Lyman~$\alpha$ (triangles), Lyman~$\beta$ (squares), temperature at centre of LOS (thick solid line), and mean temperature from the model input (thick dotted line), as a function of heliocentric distance.}
\label{temperatures}
\end{figure}
Inspection of Fig.~\ref{temperatures} clearly shows that the width of the Lyman~$\beta$ line is indeed a good indicator of the plasma temperature from 1.05~R$_{\sun}$ up to $\sim$2~R$_{\sun}$. Above this height, the temperature derived from the Lyman~$\beta$ FWHM is slightly lower than the mean plasma temperature $T_\mathrm{mean}$ and than the central temperature, an indication that the plasma conditions have significantly changed. Indeed, from the streamer model the magnetic cusp is located at about 2~R$_{\sun}$. Within that distance, outflow velocities are very small (a few km~s$^{-1}$). Above the cusp, the protons reach a velocity of about 100 km~s$^{-1}$ at 3~R$_{\sun}$.
Close to the Sun, the temperature derived from the Ly$\beta$ FWHM is in very good agreement with the temperature at the centre of the LOS. However, higher in the streamer, the temperature derived from the Ly$\beta$ line width is in better agreement with the mean plasma temperature than with the temperature at the centre of the LOS.
Due to the temperature and hydrogen density variations along the LOS (see Fig.~\ref{streamer}), the contribution of collisional excitation in the formation of the Ly$\beta$ line is more concentrated at the centre of the LOS when close to the Sun, and more smoothly distributed along the line of sight higher in the streamer.
We note that when neglecting CRD in frequency redistribution in eq.~(\ref{defredi}), the temperature derived from the Ly$\beta$ width \emph{exactly} matches the mean temperature of the models up to a height of 1.6~R$_{\sun}$.
It can also be seen from Fig.~\ref{temperatures} that the temperature derived from the Lyman~$\alpha$ line significantly underestimates the mean plasma temperature and the temperature at the centre of the LOS. This difference can be as much as $3.8\times10^5$~K at $r=1.39$~R$_{\sun}$ for the mean temperature, and $5.0\times10^5$~K at $r=1.58$~R$_{\sun}$ for the central temperature.
The difference in behaviour between the two Lyman lines points to the different relative contributions of radiative and collisional excitation in the formation of the two lines. From eq.~(\ref{source}) we identify the first term of the right-hand side with the collisional component and the second term with the radiative component. Figure~\ref{compo} shows their variation with altitude for the Lyman~$\alpha$ and Lyman~$\beta$ lines. The radiative component is represented with a solid line, and the collisional component with a dotted line, while triangles stand for Lyman~$\alpha$ and squares for Lyman~$\beta$.
\begin{figure}
\centering
\resizebox{\hsize}{!}{\includegraphics{5059fig5.eps}}
\caption{Relative contribution of the radiative (solid line) and collisional (dotted line) components for Lyman~$\alpha$ (triangles) and Lyman~$\beta$ (squares) as a function of heliocentric distance.}
\label{compo}
\end{figure}
This figure shows that, as expected, Lyman~$\beta$ is mostly formed by collisional excitation in the streamer. However it is interesting to note that the collisional component of Lyman~$\alpha$ is not negligible in the streamer base, contributing to nearly 10\% of the line intensity close to the Sun. We should stress here that both {the scattered and collisional} components will include contributions resulting from exchanges with other transitions (lines and continua) involving the rest of the atomic states.
The values reported in Fig.~\ref{compo} can be compared with those given by \citet{rayetal97} in the centre of a streamer observed by UVCS. These authors obtained a collisional contribution of 1.1\% for Lyman~$\alpha$ and 57\% for Lyman~$\beta$ at $\log T=6.2$, while our values at that temperature (at a heliocentric height of 1.7~R$_{\sun}$) lead to 3\% and 77\%, respectively. \\
In this paper NLTE radiative transfer calculations are performed to compute the properties of the Lyman lines of hydrogen in the solar corona. It is shown that the width of the Lyman~$\beta$ line is a better indicator of the plasma temperature than the width of the Lyman~$\alpha$ line, especially within the streamer. {It is due to the formation mechanisms of the lines, and the coupling of Ly$\beta$ with H$\alpha$. This work has been done using the approximation of isotropic frequency redistribution {in the laboratory frame}. We do not expect that the inclusion of angle-dependent redistribution functions in our calculations would change the main conclusion of this Letter, namely that the width of Lyman~$\beta$ is a better proxy for the plasma temperature in the streamer. It is known that the consideration of angular redistribution with dipole scattering has the effect of narrowing the line profile compared to isotropic redistribution, due to non-90$\degr$ scattering of photons \citep[e.g.][]{witetal82,loaetal98,cranmer98}. Therefore we believe that the discrepancy found here between temperatures derived from Ly$\alpha$ line profiles and model temperatures could be even greater by using angle-dependent redistribution functions. Furthermore, \cite{cv78} showed that the effects of angle-dependent PRD will be more important when the incident lines show substantial center-to-limb variations. This is not the case of hydrogen Lyman lines, but this is the case for, e.g., the \ion{O}{vi} lines at 1032 and 1038 \AA. We will include this effect in a future study when we include \ion{O}{vi} in our calculations.}
We also obtain new estimates of the radiative and collisional contributions of the Lyman line intensities in a non-isothermal streamer. These new values may have some importance in the derivation of element abundances. {Element abundances relative to hydrogen can be inferred independently from the ratio of the resonantly scattered (or collisional) component of a spectral line to the resonantly scattered (or collisional) component of, say, \ion{H}{i}~Ly$\beta$ \citep{witetal82,rayetal97}. Therefore the relative contributions of the two components of the Lyman lines to their total observed intensities must be known with good accuracy. Finally, }
our results can be compared with observations by the SUMER and UVCS spectrometers on SOHO.
The radiative transfer calculations can be enhanced by including other effects such as Doppler dimming to improve the modelling in regions of the corona where outflow velocities cannot be ignored. This has been presented in \cite{corsoho17}.
\begin{acknowledgements}
The authors are grateful {to the referee, Dr. P.~Heinzel, for his thorough comments that improved the clarity of this work, and} to P. Gouttebroze, S. Habbal and J.-C. Vial for their critical reading of an early version of the manuscript. Support from PPARC grant PPA/G/O/2003/00017 is acknowledged.
\end{acknowledgements}
\bibliographystyle{../../../LATEX/aa}
| {
"redpajama_set_name": "RedPajamaArXiv"
} | 7,748 |
The Master of Science in International Tourism, Hospitality and Travel Marketing & Management offers highly professional training for international graduates who wish to focus their professional careers and become managers in the tourism and hospitality industry.
Through a multidisciplinary approach, participants in this program will acquire key skills in tourism and hospitality marketing, management and communication, together with a comprehensive knowledge of the new technologies applied to the tourism, hospitality and travel business models and strategies.
Année académique : de septembre 2017 à juin 2018 + 6 mois de stage.
Sélection sur dossier suivi d'un entretien. | {
"redpajama_set_name": "RedPajamaC4"
} | 23 |
\section{Introduction}
Gravitating bodies in asymptotically flat spacetimes admit stable particle orbits in four dimensions. These provide some of the simplest experimental tests of general relativity (and basis for our everyday experience). For example, the precession of the perihelion of Mercury is famously explained by geodesic motion in the Schwarzschild geometry. When gravitational radiation is taken into account, one discovers that these orbits are in fact metastable, since they can lose energy to the gravitational field and eventually fall into the black hole. The gravity waves emitted from this inspiral process were observed by LIGO, leading to another powerful confirmation of general relativity \cite{LIGOScientific:2016aoc}. \\
\indent Given that orbits and gravity waves play such a fundamental role in asymptotically flat spacetime, it is interesting to investigate their significance in the asymptotically AdS case as well \cite{Festuccia:2008zx,Berenstein:2020vlp}. From the AdS/CFT point of view, gravitational orbits are interesting for the following simple reason. On one hand, quantum-mechanical systems with simple gravity duals are known to be maximally chaotic \cite{Maldacena:2015waa}. On the other hand, gravitational orbits present an example of dynamics which is easily reversible in time and in this sense is integrable. This paradoxical aspect of gravitational theories is similar to the fact that when chaos becomes maximal the scattering becomes purely elastic \cite{Shenker:2014cwa,Gu:2018jsv,Costa:2012cb}. Also, due to the existence of gravity waves, we might expect the orbits to eventually fall into the black hole, just like their flat space counterparts. In the quest of identifying the quantum-mechanical systems with simple gravity duals it is therefore very important to identify the salient features related to both chaos and approximate integrability of the dual gravitational dynamics. The same comment applies to flat space and de Sitter holography as well.
In this paper we explore some basic aspects of the AdS orbital dynamics, both from the bulk and boundary perspectives, extending the analysis of \cite{Festuccia:2008zx,Berenstein:2020vlp}. At the classical level, gravitational orbits are associated to a set of Regge trajectories in the dual CFT.\footnote{We use the term ``Regge trajectory'' to specify a family of states with scaling dimension $\Delta_n(J)$ labeled by spin $J$ and potentially some other quantum numbers $n$ whose dependence is analytic in $J$.} These are characterized by two quantum numbers: spin $J$ (related to the size of an orbit) and radial excitation $n$ (related to the eccentricity of an orbit). There are two basic features that distinguish $AdS_{d+1}$ orbits from their flat space counterparts: stable orbits exist in any $d \geq 3$,\footnote{Stable orbits exist below the BTZ threshold in $AdS_3$ as well \cite{Fitzpatrick:2014vua}.} and the energy and angular momentum of an orbit around a black hole grow with the orbital radius. There are two basic mechanisms that render AdS orbits unstable: emission of gravity waves and tunneling of the orbiting body into the black hole. Emission of gravity waves is very universal and it does not rely on the precise nature of the gravitating bodies (whether they are black holes or stars). In this paper we will not directly analyze gravity waves, but we will instead consider the closely related problem of computing the lifetime of an orbit due to the emission of scalar radiation in the semi-classical approximation. An important aspect of gravitational radiation is that it is ${1 \over c_T}$ suppressed and as such is not present in the large $c_T$ limit.\footnote{Here $c_T$ stands for the two-point function of the stress-energy tensor. Since we assume a simple gravity dual we also assume that the 't Hooft coupling $\lambda$, or gap in the spectrum of higher spin single trace operators \cite{Heemskerk:2009pn}, is large in this paper. 
} Tunneling, on the other hand, is present in the large $c_T$ limit as well but it requires the presence of the black hole horizon.
\indent What is the nature of the orbit states on the AdS boundary? The simplest example of this type corresponds to a binary system of light operators supported by the centrifugal potential in AdS. The boundary dual of this system is provided by a family of double-twist operators $[\mathcal{O}_L\mathcal{O}_L]_{n,J}$, where $n$ and $J$ are the same quantum numbers that appeared above, and $\mathcal{O}_L$ denotes a light operator \cite{Alday:2007mf,Fitzpatrick:2012yx,Komargodski:2012ek}. Such states are completely universal and persist as energy eigenstates at finite $c_T$ as well, even though their precise bulk nature at finite $c_T$ is not known.\footnote{Presumably, they correspond to orbiting states dressed by a specific cloud of gravitational radiation that make the whole system an energy eigenstate.}
Consider next a situation where one of the operators is heavy, $\Delta_H \sim c_T$, such that its AdS dual is a black hole. By a naive analogy we can try to associate a set of double-twist operators $[\mathcal{O}_H\mathcal{O}_L]_{n,J}$ to orbits in this case as well. And, indeed, such operators were recently observed in the light-cone bootstrap analysis of the heavy-light four point function \cite{Kulaxizi:2018dxo,Fitzpatrick:2019zqz,Karlsson:2019qfi,Kulaxizi:2019tkd,Fitzpatrick:2019efk,Karlsson:2019dbd,Li:2019zba,Li:2020dqm,Parnachev:2020fna,Fitzpatrick:2020yjb,Parnachev:2020zbr,Karlsson:2021duj,Karlsson:2021mgg}. Their presence in the spectrum, however, is puzzling from the bulk perspective. Indeed, due to tunneling we expect orbits to be meta-stable already to leading order in $c_T$. Relatedly, due to the presence of the black hole horizon in the bulk we expect the CFT spectrum to be effectively continuous and not be given by a discrete set of double-twist operators. The correct picture in this case is that orbit states are narrow resonances: they present pole singularities of the conformal partial waves $c(\Delta,J)$, see e.g. \cite{Mack:2009mi,Costa:2012cb,Caron-Huot:2017vep}, on the second sheet.\footnote{At finite $c_T$, $c(\Delta,J)$ are meromorphic functions. However, they develop a cut in the large $c_T$ limit in the presence of a heavy state dual to a black hole. We can then study the multi-sheeted structure of $c(\Delta,J)$.} More physically, they represent a superposition over a small band of energy eigenstates.
A more familiar manifestation of the same phenomenon is known as quasi-normal modes \cite{cardoso}. Quasi-normal modes are defined as normalizable solutions to bulk wave equations which are purely ingoing at the horizon. They lead to poles in the retarded thermal two-point function. At non-zero spin they are directly related to orbits as shown in \cite{Festuccia:2008zx}. As we will review, via the eigenstate thermalization hypothesis (ETH) \cite{srednicki1999approach,DAlessio:2015qtq,Lashkari:2016vgj,Delacretaz:2020nit} they become poles on the second sheet of $c(\Delta,J)$.
\indent This picture of orbit states as resonances raises an immediate question for the light-cone bootstrap program in the heavy-light regime, which aims to compute the operator product coefficients and anomalous dimensions of double-twist operators $[\mathcal{O}_H \mathcal{O}_L]_{n,J}$. If these operators are not energy eigenstates, then what is actually being computed by the bootstrap? The resolution of the puzzle comes from the fact that there is the following equivalence
\begin{equation}
\label{eq:mirage}
{1 \over 2 \pi i} \Big( {1 \over \Delta - \Delta_n(J) - i e^{- c_0(\mu) J}} - {1 \over \Delta - \Delta_n(J) + i e^{- c_0(\mu) J}}\Big) \overset{\text{PT}}{\simeq} \delta(\Delta - \Delta_n(J) ) ,
\end{equation}
where $\mu \sim {\Delta_H \over c_T}$ and $\text{PT}$ stands for perturbatively in $1/J$.\footnote{Let us emphasize that we are talking about the continuum emerging in the large $c_T$ limit. In particular, $e^{- c_0(\mu)J} \gg e^{- c_T}$, where $e^{- c_T}$ is the scale associated with the discreteness of the CFT spectrum.} Here the LHS corresponds to the spectral density with an isolated operator of dimension $\Delta_n(J)$. The RHS represents, on the other hand, a continuous spectrum containing a pair of narrow resonances whose width is nonperturbative in spin $J$ and is thus not visible in perturbation theory. This explains why \cite{Kulaxizi:2018dxo,Fitzpatrick:2019zqz,Karlsson:2019qfi,Kulaxizi:2019tkd,Fitzpatrick:2019efk,Karlsson:2019dbd,Li:2019zba,Li:2020dqm,Parnachev:2020fna,Fitzpatrick:2020yjb,Parnachev:2020zbr,Karlsson:2021duj,Karlsson:2021mgg} have observed a discrete spectrum, even though the true spectrum is continuous.
Using the ETH we derive the expansion of the correlator in terms of the quasi-normal modes (QNMs). We then notice that perturbatively in ${1 \over J}$ and leading order in $c_T \to \infty$ it becomes the ordinary OPE expansion, where the sum over QNMs becomes the sum over the double-twist operators.
In this sense the discrete spectrum of heavy-light double-twist operators is a {\it mirage} that emerges in the large spin perturbation theory. In the light-light channel large $J$ perturbation theory is captured by the multi-trace stress-energy tensor operators, schematically $T^n$.
Note that this simple analysis shows that the conclusion of \cite{Fitzpatrick:2012yx,Komargodski:2012ek} regarding the existence of a discrete family of double-twist operators is not a consequence of crossing symmetry in the presence of the continuum spectrum, or, equivalently, for $c_T$ being the largest parameter in the problem.
\indent The identification of double-twist operators with bulk orbit states is not just a conceptual point. We show that it provides a powerful tool for computing anomalous dimensions to all orders in $\mu$ at large $\Delta_L$ and large spin $J$. Indeed, as explained above, the metastable orbits are in one-to-one correspondence with long-lived quasi-normal modes with large spin. These modes are subject to the Bohr-Sommerfeld quantization rule, which is applicable when the mass of the orbiting particle, or equivalently $\Delta_L$, is large \cite{Festuccia:2008zx}. Moreover, we will see that corrections in $1/\Delta_L$ can be systematically computed by analyzing corrections to Bohr-Sommerfeld. This allows us to match known results from the light-cone bootstrap literature, and extend these results to all orders in $\mu$.\\
\indent After computing the spectrum of resonances, we analyze correlation functions in the orbit states. In particular, it is interesting to ask whether these states behave like typical high-energy states, or if there are simple measurements that can be done to distinguish them from black hole microstates. Using the light-cone bootstrap at five points, as in the recent work \cite{Antunes:2021kmm}, we show that light operators have one-point functions of order one in the orbit states. In contrast, a one-point function in a typical black hole microstate is suppressed as $\lambda,c_T\to\infty$. If the orbit states were exact energy eigenstates, this would imply a violation of ETH, since the one-point function would not be a smooth function of energy. \\
\indent This apparent violation of ETH is not necessarily a contradiction. ETH-violating states are known as many-body quantum scars, and many examples can be found in the condensed matter literature starting from \cite{bernien2017probing}, see e.g. \cite{serbyn,moudgalya2021quantum} for reviews. In our case, the orbit states are not true scars, since they eventually decay. However, we may think of them as scars perturbatively in ${1 \over J}$ and at infinite $c_T$, where the decay rate is zero. We also comment on the role of bulk higher spin symmetry in organizing the spectrum of orbit states at large $J$.
The plan of the paper is the following. In Section \ref{sec:classicalorbits}, we review properties of stable orbits around AdS-Schwarzschild black holes. In Section \ref{sec:raddecay} we analyze the decay of the orbits due to the emission of radiation. In Section \ref{sec:orbitsLCbootstrap} we consider the heavy-light four-point function and use the ETH to write the heavy-light OPE expansion in terms of the QNMs. We then discuss how it reduces to the sum over double-twist operators in perturbation theory. In Section \ref{sec:doubletwistdim} we use the Bohr-Sommerfeld formula and corrections to it to compute the anomalous dimensions of the double-twist operators. In Section \ref{sec:scars} we explore the connection between the gravitational orbits and many-body scars. We end with conclusions and a few open directions.
\section{Classical orbits}
\label{sec:classicalorbits}
In this section we analyze classical stable orbits around AdS black holes. We focus on the case of $AdS_4$, which is the minimal number of spacetime dimensions that admits stable orbits around black holes. It has an additional virtue of admitting stable orbits in the flat space regime as well. We show that classical gravitational orbits are naturally associated to the double-twist-like Regge trajectories in the boundary CFT \cite{Berenstein:2020vlp} --- we will explore this connection in great detail in the next sections.
Higher-dimensional $AdS_{d+1}$ cases are completely analogous and we also briefly discuss them in this section. Finally, we comment on orbits in the presence of higher-derivative corrections and orbits in dS.
\subsection{Review of stable orbits}
\indent We consider the Schwarzschild-AdS black hole in four dimensions. We focus on classical, stable time-like orbits in this geometry \cite{Berenstein:2020vlp}, which we review next.
The black hole metric in Schwarzschild coordinates takes the form \cite{Witten:1998zw}
\begin{align}
ds^2=-f(r)\, dt^2+\frac{dr^2}{f(r)}+r^2\, d\Omega^2, \hspace{10 mm}f(r)={r^2 \over R_{AdS}^2}+1-\frac{GM}{r},\label{schmetric}
\end{align}
where $R_{AdS}$ is the AdS radius and $M$ is proportional to the mass of the black hole. The black hole horizon is located at $f(r_s)=0$ and the AdS boundary is at $r=\infty$.
Next we consider a probe, classical body that follows a timelike geodesic in the black hole geometry. Due to the symmetries of the problem, the geodesic motion is characterized by the conserved energy $E$ and the angular momentum $L$ per unit mass. In terms of these quantities the equation of motions take the form
\begin{align}
\dot{t}&=\frac{E}{f(r)}, ~~~\dot{\phi}=\frac{L}{r^2},~~~\dot{r}^2=E^2-V(r),
\label{eq:timelike}
\end{align}
where we introduced the potential
\begin{equation}
\label{eq:potentialBH}
V(r)=f(r)\left(\frac{L^2}{r^2}+1\right) .
\end{equation}
From \eqref{eq:potentialBH} it is clear that the potential is zero at the horizon $V(r_s)=0$, and goes to infinity at the AdS boundary.
It is convenient to introduce a dimensionless parameter $\mu$
\begin{equation}
\mu \equiv {G M \over R_{AdS}} ,
\end{equation}
and measure distances and other dimensionful quantities in AdS units by setting
\begin{equation}
R_{AdS}=1 .
\end{equation}
With that in mind, the flat space limit corresponds to $\mu \ll 1$, whereas large
black holes that dominate the canonical ensemble in the dual CFT correspond to $\mu>2$ \cite{Hawking:1982dh,Witten:1998zw}.
\vspace{0.1cm}
\subsubsection{ Circular orbits}
\vspace{0.1cm}
First, we consider the simplest case of circular orbits at constant radial distance $r_0$. They are found by finding a critical point of the potential $V'(r_0)=0$. This condition can be solved as follows,
\begin{align}
\label{eq:orbitsQN}
E&=\frac{f(r_0)}{\sqrt{1-3 \mu/(2r_0)}}, ~~~~~ L=r_0^2 \frac{\sqrt{1+\mu/(2 r_0^3)}}{\sqrt{1-3 \mu/(2r_0)}} \ .
\end{align}
The circular orbits only exist outside the photon sphere\footnote{The photon sphere is the location of unstable, circular, null geodesics in the black hole geometry.} $r>3\mu/2$, as can be seen from \eqref{eq:orbitsQN} by requiring positivity of the square root in the denominator.
We will be only interested in the stable orbits in this paper\footnote{Although the unstable orbits are not directly relevant for our analysis, they lead to interesting singularities in the two-point function at finite temperature \cite{Hubeny:2006yu,Dodelson:2020lal}.}. These correspond to the minimum of the potential or, equivalently, $V''(r_0)>0$. The stability condition takes the form
\begin{equation}
\left( r - 3 \mu \right) + 8 r^3 \left({r \over \mu} - {15 \over 8} \right) > 0 .
\end{equation}
Let us note that the orbits with $r > 3 \mu $ are stable for any value of the dimensionless parameter $\mu$. In the flat space limit $\mu \to 0$, $r = 3 \mu$ becomes an inner-most stable orbit (ISCO). In the opposite limit of large black holes in AdS, $\mu \to \infty$, we get that stable orbits exist for $r > {15 \over 8} \mu$. Orbits with $3 \mu>r>{15 \over 8} \mu$ are stable depending on the precise value of $\mu$.
Let us now discuss the description of this state in terms of the CFT dual. A particle of mass $m_L$ in AdS corresponds to an operator with scaling dimension $\Delta_L \simeq m_L R_{AdS}$, where we assumed that $\Delta_L \gg 1$. This assumption effectively makes the bulk particle classical and the analysis of the present section accurate. The angular momentum $J$ in the CFT is related to $L$ as follows
\begin{equation}
J =\Delta_{L} L .
\end{equation}
Similarly, $E$ measures energy per unit mass. In this way circular orbits describe the following Regge trajectory in the dual CFT,
\begin{equation}
\Delta_{H,L}(J) &= \Delta_H + \Delta_L E {\nonumber} \\
&= \Delta_H + \Delta_L + J + \gamma(\mu , J) , ~~~ J \geq J_{\text{min}}(\mu) ,
\end{equation}
where $\Delta_H$ is related to the mass of the black hole, $\Delta_L$ is related to the mass of the probe, and $J_{\text{min}}(\mu)$ is the minimal spin for which the stable orbit exists. For $\mu \ll 1$, $J_{\text{min}}(\mu) \sim \sqrt{3}\Delta_L \mu$, whereas for $\mu \gg 1$ we have $J_{\text{min}}(\mu) \sim {225 \over 64} \sqrt{5}\Delta_L \mu^2$.
The anomalous dimension takes the form
\begin{equation}
\gamma(\mu, J) = \Delta_L \Big(E- L -1 \Big) \Big|_{L = {J \over \Delta_L}} .
\end{equation}
This formula is correct for fixed $\mu$ and $L= {J \over \Delta_L}$ and to leading order in the $\Delta_L \to \infty$ limit. In other words, the formula above has $\Delta_L^{-1}$ corrections due to the quantum fluctuations of the particle around the classical orbit which we will discuss further below.
For convenience let us write down the first few terms in the expansion at small $\mu$,
\begin{equation}
\label{eq:largespin4d}
{\gamma(\mu, J) \over \Delta_L} =-\frac{\mu }{2 \sqrt{L}} - \frac{9}{32} {\mu ^2 \over L} \left( 1 + \frac{1}{9 L}\right) - \frac{81 \mu^3}{256 L^{3/2}} \left(1+ {1 \over 9 L}\right)^2 + ... \Big|_{L = {J \over \Delta_L}} \ .
\end{equation}
Several comments are in order. First, we note that the small $\mu$ expansion is closely related to the large $L$ expansion. Moreover, the large $L$ expansion translates into the large spin $J$ expansion which naturally appears in the context of the light-cone bootstrap \cite{Kulaxizi:2018dxo,Fitzpatrick:2019zqz,Karlsson:2019qfi,Kulaxizi:2019tkd,Fitzpatrick:2019efk,Karlsson:2019dbd,Li:2019zba,Li:2020dqm,Parnachev:2020fna,Fitzpatrick:2020yjb,Parnachev:2020zbr,Karlsson:2021duj,Karlsson:2021mgg}. Finally, we observe that all terms in the expansion are sign-definite. The same pattern continues when higher orders in $\mu$ are included. It is interesting to plot the exact Regge trajectory against its large spin expansion. We present the result in Figure \ref{fig:ReggeTr}.
Let us next discuss the flat space limit of the orbits above. In dimensionless units this limit corresponds to taking $\mu \to 0$, while keeping $\Delta_L \mu \simeq G M m_L $ and $J = L \Delta_L$ fixed. In particular, this implies that $L \sim \mu$ in the flat space limit. It is convenient to take the limit at the level of \eqref{eq:orbitsQN}. In this way we get the following Regge trajectory,
\begin{equation}
\Delta_{H,L}^{\text{flat}}(J) &= \Delta_H + \Delta_L + \gamma_{\text{flat}}(\Delta_L \mu , J) , ~~~ J \geq \sqrt{3} \Delta_L \mu ,
\end{equation}
where the Regge trajectory takes the form
\begin{equation}
{\gamma_{\text{flat}}(\Delta_L \mu , J) \over \Delta_L} &= \frac{\sqrt{2}}{3} \sqrt{\frac{1}{ \sqrt{1-\frac{3 (\Delta_L \mu)^2}{J^2}}+1}+\sqrt{1-\frac{3 (\Delta_L \mu)^2}{J^2}}+3}-1 {\nonumber} \\
&\simeq -\frac{\mu ^2 \Delta _L^2}{8 J^2} -\frac{9 \mu ^4 \Delta _L^4}{128 J^4} - ... \ .
\end{equation}
Note the absence of the term $+J$ in the formula for the Regge trajectory, which would be present in AdS. This is a manifestation of the familiar fact that the binding energy of a flat space orbit decays as a function of spin.
\begin{figure}[t]
\hspace{-24pt}
\begin{tabular}{c c}
\includegraphics[scale=0.7]{fig1.pdf} & \includegraphics[scale=0.5]{fig2.pdf}\\
(a) & (b)
\end{tabular}
\caption{The exact Regge trajectory for circular orbits versus its large spin expansion approximation. We set $\mu=5$ for which $J_{\rm min} \simeq 196.83 \Delta_L $. For other values of $\mu$ the situation is very similar. (a) We plot the exact Regge trajectory against its large spin approximation given by \eqref{eq:largespin4d}. The red dashed curve corresponds to only keeping the leading $O(\mu)$ term in \eqref{eq:largespin4d}. The black dashed curve corresponds to keeping all terms in \eqref{eq:largespin4d}. The exact Regge trajectory in blue can be plotted using its parameteric representation given by \eqref{eq:orbitsQN}. (b) The relative error in approximating the exact anomalous dimension $\gamma_{\text{Exact}}$ by its large spin expansion up to order $O(\mu^3)$ as in \eqref{eq:largespin4d}, which we denote by $\gamma_{\text{LC}}$. We see that the error is of order $10 \%$ at $J= J_{\text{min}}$, and becomes less than $1 \%$ for $J \gtrsim 3 J_{\text{min}}$. }
\label{fig:ReggeTr}
\end{figure}
\vspace{0.1cm}
\subsubsection{ Non-circular orbits}
\vspace{0.1cm}
\indent We can also consider non-circular orbits, see Figure \ref{potentialfig}. In this case $r$ does not stay constant but changes between $r_b \leq r \leq r_a$. It is therefore natural to characterize non-circular orbits by eccentricity $x$, defined
as follows
\begin{equation}
\sqrt{1-x^2} \equiv r_b/r_a .
\end{equation} From the condition $V(r_a) = V(r_b)$ we get for the energy and angular momentum
\begin{equation}
\label{eq:noncircQN}
E=\sqrt{V(r_a)}, ~~~~~ L=r_a^2 \frac{y \sqrt{1+\mu/(y (1+y) r_a^3)}}{\sqrt{1-\mu(1+y+y^2)/(y(1+y)r_a)}} ,
\end{equation}
where we introduced $y = \sqrt{1-x^2}$. By setting $y=1$, or $x=0$, we reproduce the previous formula \eqref{eq:orbitsQN}.
A convenient way to think about the non-circular orbits is the following. Let us fix $\mu$ and $L$, which fixes the form of the potential. We then consider energy levels of a particle in this potential labeled by $n$.
The quantization condition takes the form
\begin{equation}
\label{eq:BSclassical}
\Delta_L \int_{r_b}^{r_a} {d r \over f(r)} \sqrt{E^2 - V(r)} = \pi n ,
\end{equation}
where we are only interested in the terms that contribute to leading order in the classical $\Delta_L \gg 1$ limit. In particular, classical non-circular orbits correspond to $n \gg 1$ such that ${n \over \Delta_L}$ is kept fixed. We will discuss various corrections to \eqref{eq:BSclassical} in Section \ref{sec:doubletwistdim}.
\begin{figure}[t]
\includegraphics[scale=.7]{potential}
\centering
\caption{For $J>J_{\text{min}}(\mu)$, the potential $V(r)$ goes to zero at the horizon $r=r_s$, behaves like $r^2$ for large $r$, and has a metastable minimum at the radial position of the circular orbit. For a given value of $E$, there are three turning points $r_a$, $r_b$, and $r_c$ where $E^2=V(r)$, with $r_a>r_b>r_c$.\label{potentialfig}}
\centering
\end{figure}
Given $L$ and $r_a$, \eqref{eq:BSclassical} fixes $n$. And conversely, given $L$ and $n$ the corresponding orbit labeled by $y$ and $r_a$ can be identified via \eqref{eq:noncircQN} and \eqref{eq:BSclassical}. Note that if we fix $L$ and increase $n$ the corresponding orbits correspond to larger and larger values of $E$. By setting in \eqref{eq:BSclassical} $E^2 = V_{\text{max}}(\mu,L)$ we find that $n$ is bounded above,
\begin{align}
n\le n_{\text{max}}(\mu,J).
\end{align}
It is not hard to check that for $\mu \ll 1$ and $L\gg \mu$ we have $V_{\text{max}}(\mu,L) \simeq \frac{4L^2}{27\mu^2}$. Correspondingly, we have ${n_{\text{max}}(\mu,J) \over \Delta_L} \sim {4 L^2 \over 27 \pi \mu^2}$.
As before, we can associate to non-circular orbits a family of Regge trajectories in the dual CFT. In terms of the quantum numbers introduced above, the dimension of the dual state takes the form
\begin{equation}
\Delta_{H,L}(n, J) = \Delta_H + \Delta_L + 2n + J + \gamma(\mu , n, J) ,~~~ J \geq J_{\text{min}}(\mu), ~~~ n\leq n_{\text{max}}(\mu,J),
\end{equation}
where as before
\begin{equation}
\label{eq:anomdimn}
\gamma(\mu, n, J) &= \Delta_L \Big(E- L -1 - {2n \over \Delta_L} \Big) \Big|_{L = {J \over \Delta_L}} {\nonumber} \\
&=\sum_{k=1}^{\infty} \mu^k \gamma_k (n, J) ,
\end{equation}
and as before $\gamma_k (n, J) \to 0$ at large $J$. We will provide more details on the explicit form of $\gamma_k (n, J)$ below.
From the formulas above it is not obvious that the term $\mu^0$ is absent in \eqref{eq:anomdimn}. It is instructive to demonstrate this explicitly and we do so in Section \ref{sec:doubletwistdim}.
\subsection{Multi-orbit states}
In the sections above we discussed the simplest case of an orbit where we consider a single orbiting body around the black hole. In the same way we could have considered multi-orbit states and analyzed their properties. These should be related to the multi-twist operators in the dual theory as in \cite{Fitzpatrick:2012yx,Komargodski:2012ek}.
In particular, we can imagine the Milky Way galaxy in the middle of AdS with the black hole Sagittarius A* of mass $\mu_{A^*}$ at its center. Its dynamics will be encoded in the complicated properties of the multi-twist QNMs in the CFT dual. These would look like multi-twist operators perturbatively at large $J$.
\subsection{General spacetime dimensions}
Let us briefly discuss the situation in general number of spacetime dimensions $d$. In this case the red shift factor in the metric for the $AdS_{d+1}$ black hole takes the form \cite{Witten:1998zw}
\begin{equation}
f_d(r)={r^2 \over R_{AdS}^2}+1-\frac{GM}{r^{d-2}}.
\end{equation}
\noindent First, for $d=2$, in $AdS_3$ the criticality of the potential $V'(r_0)$ takes the following form
\begin{equation}
r_0^4 = R_{AdS}^2 L^2 (1 - G M) .
\end{equation}
Therefore orbits exist only below the BTZ threshold $GM<1$. One can also easily check that these orbits are stable \cite{Fitzpatrick:2014vua}.
These orbits disappear in the flat space limit $R_{AdS} \to \infty$.
For $d>2$, timelike circular orbits exist around the Schwarzschild black hole in $AdS_{d+1}$ only for
\begin{equation}
r_0 > \Big( {d G M \over 2} \Big)^{{1 \over d-2}} .
\end{equation}
Their energy and angular momentum take the form
\begin{align}
\label{eq:orbitsQNd}
E&=\frac{f_d(r_0)}{\sqrt{1-dGM/(2r_0^{d-2})}}, ~~~~~ L={r_0^2 \over R_{AdS}} \frac{\sqrt{1+(d-2)GM R_{AdS}^2/(2 r_0^d)}}{\sqrt{1-d GM/(2r_0^{d-2})}}\ .
\end{align}
The stability of orbits condition $V''(r_0)>0$ takes the form
\begin{equation}
(d-2) GM R_{AdS}^2 \Big( (4-d) r_0^{d-2} - d G M \Big) + 8 r_0^{d} \Big( r_0^{d-2} - {d (d+2) \over 8} G M \Big) > 0 .
\end{equation}
Let us consider this condition in the flat space limit $R_{AdS} \to \infty$. We see that no stable orbits exist for $d \neq 4$. This is the famous fact about celestial motion in flat space. In the opposite limit $r_0^{d-2} \sim GM \gg (R_{AdS})^{d-2}$, on the other hand, stable orbits always exist and the story is very similar to the case of $AdS_4$ considered at the beginning of this section.
\subsection{Higher derivative corrections}
We can also consider higher derivative corrections to the Regge trajectories, which are small at large 't Hooft coupling $\lambda$ \cite{Camanho:2014apa,Afkhami-Jeddi:2016ntf,Afkhami-Jeddi:2017rmx,Kulaxizi:2017ixa,Costa:2017twz,Caron-Huot:2022ugt}. As a particular example, we take the case of Einstein gravity with a Gauss-Bonnet term,
\begin{align}
S=\frac{1}{16\pi G}\int d^{d+1}x\, \sqrt{-g}\left(R+\frac{d(d-1)}{l^2}+\alpha(R_{\mu\nu\lambda\delta}R^{\mu\nu\lambda \delta}-4R_{\mu\nu}R^{\mu\nu}+R^2)\right).
\end{align}
In addition to purely gravitational higher derivative terms, there can be couplings between matter and curvature such as $\phi W^2$, where $W$ is the Weyl tensor. However, for $\Delta_L\gg 1$ such terms are subleading, since the mass term $\sqrt{-g}\Delta_L^2\phi^2$ in the potential for $\phi$ dominates.\footnote{However, the coupling $\phi W^2$ induces a nontrivial one-point function \cite{Grinberg:2020fdj}.}\\
\indent In $d=3$ the Gauss-Bonnet term is topological, but in higher dimensions it is nontrivial and a spherically symmetric black hole solution exists \cite{Boulware:1985wk,Cai:2001dz}. The redshift factor is
\begin{align}
f(r)=1+\frac{r^2}{2\alpha}\left(1-\sqrt{1+4\alpha\left(\frac{ GM}{r^{d}}-\frac{1}{l^2}\right)}\right).
\end{align}
This is an asymptotically AdS spacetime, whose AdS radius is related to $l$ by
\begin{align}
l=\frac{R^2_{AdS}}{\sqrt{R_{AdS}^2-\alpha}}.
\end{align}
\indent We now take $\alpha$ to zero, holding $R_{AdS}$ fixed. We
set $R_{AdS}=1$ and $\mu=GM$. Solving for the stable orbits gives
\begin{align}
E&=E(\alpha=0)+\alpha\frac{\mu r_0^{2-d/2}(r_0^d(dr_0^2+d-4)-(d-2)\mu)}{2(r_0^d-d\mu r_0^2/2)^{3/2}}+O(\alpha^2) \\
L&=L(\alpha=0)+\alpha \frac{\mu r_0^2(2r_0^d(dr_0^2+d-2)-2\mu(dr_0^2+d-1)+d\mu^2 r_0^{2-d})}{(2r_0^d-d\mu r_0^2)^{3/2}\sqrt{2r_0^d+(d-2)\mu }}+O(\alpha^2).
\end{align}
Solving for the anomalous dimension perturbatively in $\mu$, we find
\begin{align}
\label{eq:gendgammaorbit}
\frac{\gamma(\mu,J)}{\Delta_L}&=-\frac{1+2\alpha}{2} \frac{\mu}{L^{d/2-1}}-{d^2 \over 32} \left(\frac{\mu}{L^{d/2-1}}\right)^2 \Big(1+4 \alpha + {(1+4 \alpha) (d-4) + {4 \over d} \over d L} \Big) \\
&\hspace{5 mm}-\frac{d^4}{256}\left(\frac{\mu}{L^{d/2-1}}\right)^3\Big( \left(1+{(d-2)^2 \over d^2 L} \right)^2(1+6\alpha)-\frac{32 \alpha}{d^2 L} \left( 1+\frac{(d-2) (d-1)}{d^2 L} \right) \Big) {\nonumber} \\
&\hspace{5 mm}+O(\mu^4)\big|_{L=J/\Delta_L}\notag.
\end{align}
Setting $d=3$ and $\alpha=0$ in the above formula we reproduce \eqref{eq:largespin4d}.
Note that in heterotic string theory, the Gauss-Bonnet term $\alpha$ is positive \cite{Boulware:1985wk}, so we see that for $L\gg1$ the anomalous dimensions $\gamma_i$ increase in magnitude when the Gauss-Bonnet term is present. It would be interesting to understand whether this pattern persists for more general higher derivative interactions arising from string theory.
\subsection{Orbits in $dS_4$}
It is also curious to consider stable orbits in de Sitter. These exist only in $dS_4$ and the corresponding black hole metric can be obtained by changing $R_{AdS} \to i R_{dS}$. Denoting $\mu \equiv {G M \over R_{dS}}$ the black hole solution only exists for
\begin{equation}
\mu \leq {2 \over 3 \sqrt{3}},
\end{equation}
where $\mu = {2 \over 3 \sqrt{3}}$ corresponds to the extremal Nariai solution.
Let us discuss circular orbits in this case. Using $V'(r_0)=0$ and setting $R_{dS}=1$, the conserved energy and angular momentum take the form
\begin{align}
\label{eq:orbitsQNdS}
E&=\frac{f(r_0)}{\sqrt{1-3 \mu/(2r_0)}}, ~~~~~ L=r_0^2 \frac{\sqrt{\mu/(2 r_0^3) - 1}}{\sqrt{1-3 \mu/(2r_0)}} \ .
\end{align}
From the structure of the square roots, time-like orbits only exist for
\begin{equation}
\label{eq:orbitsdS}
{3 \over 2} \mu \leq r \leq \left( {\mu \over 2} \right)^{1/3} .
\end{equation}
Moreover, imposing stability of the orbits, $V''(r_0)>0$ leads to the following constraint,
\begin{equation}
(r-3 \mu) - 8 r^3 \left({r \over \mu} - {15 \over 8}\right)>0, ~~~\mu < {4 \over 75 \sqrt{3}} .
\end{equation}
In particular, for small $\mu \ll 1$, the stability condition takes the form
\begin{align}
3\mu<r<\frac{\mu^{1/3}}{2} .
\end{align}
In sharp contrast to the AdS case, stable orbits have spin which is bounded both from below and from above. The existence of a maximal spin for Regge trajectories in dS was also discussed in \cite{Noumi:2019ohm,Lust:2019lmq}.
\section{Decay of orbits}
\label{sec:raddecay}
We now turn to the decay of the orbits. The two relevant decay processes are gravitational radiation and tunneling into the black hole. The essential difference between the two is that gravitational radiation is ${1 \over c_T}$ suppressed and therefore is suppressed in the $c_T \to \infty$ limit.
We will first compute the decay rate due to radiation, and then compare the answer with the tunneling rate. Rather than deal with gravitational perturbations directly, we will consider the simpler case of scalar radiation. This model is defined by coupling a massless scalar field $\Phi$ to the orbit,
\begin{align}
S=-\frac{1}{2}\int d^4 x\, \sqrt{-g}g^{\mu\nu}\partial_\mu \Phi\partial_\nu \Phi-\kappa \int d\tau \, (-g_{\mu\nu}\dot{x}^\mu(\tau)\dot{x}^\nu(\tau))^{1/2}\Phi.
\end{align}
We work in $d=3$ for simplicity. In higher dimensions the functional form of the decay rate will be different, but the scaling with $\kappa$ is the same as in $d=3$. Also, in this section we will strictly consider the case of large black holes $\mu\gg1$, for which WKB methods are able to capture the leading contribution to the radiation.
\subsection{Radiation from circular orbits}\label{radcircular}
We are interested in computing the flux of energy and angular momentum through the horizon from the radiation field. These take the form
\begin{align}
\frac{dE}{dt}&=-\int_{r=r_s} d^2 \Omega \, r^2 \partial_t \Phi \partial_z\Phi\label{energyexp}\\
\frac{dL}{dt}&=-\int_{r=r_s}d^2 \Omega\, r^2 \partial_\phi \Phi \partial_z \Phi\label{momentumexp}.
\end{align}
Here we have introduced the tortoise coordinate $z$ defined as follows,
\begin{equation}
z = \int_r^{\infty} {d r' \over f(r')} .\label{tortoise}
\end{equation}
In particular, $d z = -{dr \over f(r)}$. The black hole exterior corresponds to $z \in (0, \infty)$, where $z \to 0$ corresponds to the AdS boundary, and $z \to \infty$ being the black hole horizon. In this section we will consider circular orbits, leaving the general non-circular case to Appendix \ref{radnoncircular}.\\
\indent Let us first review the setup. We would like to solve the wave equation for a circular source at $r=r_0$. For $r_0\sim \mu $, we will see that the tunneling rate dominates over the radiation, so it suffices to analyze the radiation for $r_0\gg \mu$. Separating into Fourier modes,
\begin{align}
\Phi(t,z,\theta,\phi)=\sum_{J m}\int d\omega \,\frac{1}{r(z)}e^{-i\omega t}Y_{J m}(\theta,\phi)\psi_{Jm\omega}(z).\label{modeexpansion}
\end{align}
The wave equation becomes \cite{Cardoso:2002up}
\begin{align}
\psi_{Jm\omega}''(z)+(\omega^2-V(r(z)))\psi_{Jm\omega}(z)= j_{Jm}(z),
\end{align}
where
\begin{align}
j_{Jm}(z)&=\kappa \int d\tau\, dt\, d\theta\, d\phi \, r(z)e^{i\omega t-im\phi}Y_{Jm}^*(\theta,0)\delta(t-\tau)\delta(r(z)-r_0)\delta(\phi-\tau)\delta(\theta-\pi/2) \notag\\
&=\kappa r_0 Y_{J m}^*(\pi/2,0)\delta(\omega-m)\delta(r(z)-r_0).\label{sourcej}
\end{align}
In deriving this equation we have used $r_0\gg \mu\gg 1$. The potential $V$ is given by
\begin{align}
V(r)=f(r)\left(\frac{J(J+1)}{r^2}+2+\frac{\mu}{r^3}\right).\label{potentialfull}
\end{align}
This potential is applicable for massless radiation, and is therefore different from the orbital potential (\ref{eq:potentialBH}), which describes the classical motion of heavy particles.\\
\indent The general solution to the radial part of the equation is then \cite{Arfken:379118}
\begin{align}\label{gensolution}
\psi_{Jm\omega}(z)&=c_1\psi^1_{J\omega}(z)+c_2\psi^2_{J\omega}(z)\\
&\hspace{10 mm}+\frac{1}{W_{J\omega}}\int_{r(z)}^{\infty}\frac{dr'}{f(r')}\, (\psi^1_{J\omega}(z)\psi^2_{J\omega}(z'(r'))-\psi^2_{J\omega}(z)\psi^1_{J\omega}(z'(r')))j_{Jm}(z'(r'))\notag.
\end{align}
Here $\psi^1$ and $\psi^2$ are solutions to the wave equation without the source, satisfying the boundary conditions
\begin{align}
\psi^1_{J\omega}(z)&\sim e^{-i\omega z}\text{ as }z\to \infty,\\
\psi^1_{J\omega}(z)&\sim \frac{A_{J\omega}}{z}+B_{J\omega}z^{2}\text{ as }z\to 0,\label{bcspsi1}\\
\psi^2_{J\omega}(z)&\sim z^{2}\text{ as }z\to 0,\label{bcs}\\
\psi^2_{J\omega}(z)&\sim C_{J\omega}e^{i\omega z}+D_{J\omega}e^{-i\omega z} \text{ as }z\to \infty.
\end{align}
In other words, $\psi^2$ is a normalizable mode, and $\psi^1$ satisfies purely ingoing boundary conditions. The Wronskian $W$, which is independent of $z$, is given by
\begin{align}
W_{J\omega}\equiv\psi^1_{J\omega}\partial_z \psi^2_{J\omega}-\psi^2_{J\omega}\partial_z \psi^1_{J\omega}=2iC_{J\omega} \omega=3A_{J\omega}.\label{wronskian}
\end{align}
\indent We compute the integration constants $c_1$ and $c_2$ as follows. We need purely ingoing boundary conditions at the horizon, and we need $\psi\to 0$ as $z\to0$ since we are interested in a normalizable solution. Near the horizon, the term proportional to $\psi^2$ must cancel because it involves an outgoing mode. Therefore
\begin{align}
c_2&=\frac{1}{W_{J\omega}} \int_{r_s}^{\infty}\frac{dr'}{f(r')}\, \psi^1_{J\omega}(z'(r'))j_{Jm}(z'(r')) .
\end{align}
Near the boundary, the integral in (\ref{gensolution}) is zero, since the source is supported at $r'=r_0$. From (\ref{bcspsi1}) we have $\psi^1(z)\sim A/z$ for $z\sim 0$, so in order for $\psi$ to vanish at the boundary we need $c_1=0$. \\
\indent Plugging these values of $c_1$ and $c_2$ into (\ref{gensolution}), we now look at the behavior of $\psi$ near the horizon $z\to\infty$. This gives
\begin{align}
\psi_{Jm\omega }(z)&\sim \frac{\psi^1_{J\omega}(z)}{W_{J\omega}}\int_{r_s}^{\infty}\frac{dr'}{f(r')}\psi^2_{J\omega}(z'(r'))j_{Jm}(z'(r'))\notag\\
&\sim \frac{\kappa r_0Y_{Jm}^*(\pi/2,0)\psi^2_{J\omega}(z(r_0))}{2iC_{J\omega}\omega f(r_0)}\delta(\omega-m)e^{-i\omega z},\hspace{10 mm}z\to\infty\label{psinearrs},
\end{align}
where in the second line we used (\ref{sourcej}) and (\ref{wronskian}). From (\ref{bcs}), we see $\psi^2(z(r_0))$ behaves as $1/r_0^2$ as $r_0$ goes to infinity. Plugging (\ref{psinearrs}) into (\ref{modeexpansion}) and taking $r_0\to \infty$, we find
\begin{align}
\Phi(t,z,\theta,\phi)&\sim \sum_{J m}\frac{\kappa }{2iC_{Jm} mr_sr_0^3}Y_{J m}(\pi/2,0)^*Y_{J m}(\theta,\phi)e^{-im(t+z)},\hspace{10 mm}z\to \infty.
\end{align}
Using (\ref{energyexp}) and (\ref{momentumexp}), the radiated power and angular momentum is (neglecting an order one constant)
\begin{align}
\left(\frac{dL}{dt}\right)_{J m}=\left(\frac{dE}{dt}\right)_{J m}\propto\frac{\kappa^2 |Y_{J m}(\pi/2,0)|^2}{|C_{Jm}|^2r_0^6}.\label{radiatedpower}
\end{align}
The equality of the rate of energy loss and angular momentum loss is a consequence of the delta function at $\omega=m$ in the source (\ref{sourcej}). Our task is now to solve for the coefficient $C_{Jm}$.\\
\indent In the limit $J\to \infty$, we can use a WKB analysis to solve the wave equation \cite{Festuccia:2008zx}. Luckily, it turns out that the power spectrum is dominated by this regime. This is completely different from stable orbits in flat space, where the dominant frequency is equal to the orbital frequency, and higher harmonics are suppressed. At large $J$ the potential (\ref{potentialfull}) takes the form
\begin{align}
V(r)=\frac{f(r)}{r^2}J(J+1).
\end{align}
With the WKB ansatz $\psi^2=e^{J S}$, the wave equation becomes
\begin{align}
(\partial_z S)^2=\frac{J(J+1)-m^2}{J^2}+\frac{1}{r^2}-\frac{\mu }{r^3}.\label{WKBaction}
\end{align}
There is a single turning point $r_t$ outside of the horizon. The region $r<r_t$ is the classically allowed region of a particle in the potential. For $r$ outside of this turning point, the wavefunction is exponentially decaying,
\begin{align}
\psi^2_{Jm}(z)\propto \frac{1}{(\partial_z S)^{1/2}}\exp\left(J\int_{r(z)}^{\infty}\frac{dr'}{f(r')}\partial_z S\right).
\end{align}
\indent Now we need to fix the proper normalization. The WKB expansion fails near the boundary, where the potential (\ref{potentialfull}) takes the form
\begin{align}
V(r)=2r^2+J(J+1).
\end{align}
The solution to the wave equation near the boundary is therefore
\begin{align}
\psi^2_{J m}(z)=\frac{r(z)^{-1/2}J_{3/2}(\sqrt{m^2-J(J+1)}/r(z))}{(J(J+1)-m^2)^{3/4}}.
\end{align}
We have chosen the normalization so that (\ref{bcs}) is satisfied. Expanding for $J/r\gg 1$, we get
\begin{align}
\psi^2_{Jm}(z)\sim \frac{\exp(\sqrt{J(J+1)-m^2}/r(z))}{J(J+1)-m^2}.
\end{align}
This fixes the normalization of the WKB wavefunction,
\begin{align}
\psi^2_{Jm}(z)=\frac{1}{\sqrt{J}(J(J+1)-m^2)^{3/4}} \frac{1}{(\partial_z S)^{1/2}}\exp\left(J\int_{r(z)}^{\infty}\frac{dr'}{f(r')}\partial_z S\right).\label{wvfnc}
\end{align}
\indent The integral (\ref{wvfnc}) can be done to examine the behavior of the wavefunction near the turning point. Let us start with the case $J\ll \mu^2$. Then we can drop the $1/r^2$ term in (\ref{WKBaction}), since the first term in (\ref{WKBaction}) is bounded below by $1/J$. In this case the turning point is at
\begin{align}
r_t=\left(\frac{J^2 \mu }{J(J+1)-m^2}\right)^{1/3},\hspace{5 mm}J\ll \mu^2.
\end{align}
At the turning point, the exponent in (\ref{wvfnc}) becomes \begin{align}
J\int_{r_t}^{\infty}\frac{dr}{f(r)}\partial_z S=\sqrt{J(J+1)-m^2}\int_{r_t}^{\infty}\frac{dr}{r^2-\mu/r}\sqrt{1-r_t^3/r^3}.\label{exponentsmallspin}
\end{align}
There are two limits we can take in (\ref{exponentsmallspin}). The first is $r_t\to \mu^{1/3}$, or $|m|\ll J$. We get
\begin{align}
J\int_{r_t}^{\infty}\frac{dr}{f(r)}\partial_z S\sim \frac{J}{\mu^{1/3}},\hspace{10 mm}|m|\ll J\ll \mu^2.
\end{align}
The second limit is $r_t\to \infty$, or $|m|-J\ll J$. Then (\ref{exponentsmallspin}) becomes
\begin{align}
J\int_{r_t}^{\infty}\frac{dr}{f(r)}\partial_z S\sim \frac{c(J(J+1)-m^2)^{5/6}}{(J^2\mu)^{1/3}},\hspace{10 mm}|m|-J\ll J\ll \mu^2,\label{wkbsdominant}
\end{align}
where $c= \sqrt{\pi}\Gamma(4/3)/(2\Gamma(11/6))$. \\
\indent We now turn to the opposite case of large spin $J\gg \mu^2$. In this limit the turning point approaches $r=\mu$, and the exponent in (\ref{wvfnc}) is
\begin{align}
J\int_{\mu}^{\infty}\frac{dr}{r^2}\sqrt{\frac{1}{r^2}-\frac{\mu}{r^3}}=\frac{4J}{15\mu^2},\hspace{10 mm}J\gg \mu^2\label{cutoff}.
\end{align}
\indent We can now use the standard WKB connection formulas to write down the oscillating solution in the classically allowed region. But actually we just need the magnitude of $C_{Jm}$, which can be computed by evaluating the magnitude of the solution (\ref{wvfnc}) at the horizon. The dominant region is $J\ll \mu^2$. In this limit we find from (\ref{wkbsdominant})
\begin{align}
|C_{Jm}|=\frac{1}{\sqrt{J}(J(J+1)-m^2)^{3/4}}\exp\left(-\frac{2c(J(J+1)-m^2)^{5/6}}{(J^2\mu)^{1/3}}\right),
\end{align}
so the radiated power (\ref{radiatedpower}) is
\begin{align}
P_{J}=\frac{\kappa^2}{r_0^6}\sum_{m=-J}^{J}|Y_{J m}(\pi/2,0)|^2J(J(J+1)-m^2)^{3/2}\exp\left(-\frac{2c(J(J+1)-m^2)^{5/6}}{(J^2\mu)^{1/3}}\right).
\end{align}
Because of the exponential factor, the sum is dominated by $1-|m|/J\ll 1$. In this limit the spherical harmonics take the form
\begin{align}
|Y_{J m}(\pi/2,0)|^2\sim J^{1/4}\frac{\Gamma(2n+1)}{2^{2n}\Gamma(n+1)^2},
\end{align}
where we have defined $J=|m|+2n$. We finally get
\begin{align}
P_{J}&\sim \frac{\kappa^2J^3 }{r_0^6}\sum_{n=0}^{\infty}\frac{\Gamma(2n+1)}{2^{2n}\Gamma(n+1)^2}(1+4n)^{3/2}\exp\left(-\frac{2c(1+4n)^{5/6}J^{1/6}}{\mu^{1/3}}\right)\notag\\
&\sim \frac{\kappa^2J^3 }{r_0^6}\left(\frac{\mu^2}{J}\right)^{2/5}.\label{dominant}
\end{align}
\begin{figure}[t]
\includegraphics[scale=.65]{power}
\centering
\caption{The power spectrum $P_J$ grows like a power law until $J\sim \mu^2$, at which point it begins to decay exponentially.}
\centering
\end{figure}
\indent In order to compute the total power, we need to sum (\ref{dominant}) over all $J$. This sum is divergent, but there is an effective cutoff at $J=\mu^2$. Indeed, from (\ref{cutoff}) we find that for $J\gg \mu^2$
\begin{align}\label{powerlargeJ}
P_J\propto \frac{1}{|C_{Jm}|^2}\sim \exp\left(-\frac{8J}{15\mu^2}\right),\hspace{10 mm}J\gg \mu^2.
\end{align}
We can therefore approximate the sum over $J$ by only summing up to $J=\mu^2$, and the total power is
\begin{align}
\sum_{J}P_{J}&\approx \sum_{J<\mu^2} \frac{\kappa^2J^3 }{r_0^6}\left(\frac{\mu^2}{J}\right)^{2/5}\notag\\
&\sim \frac{\kappa ^2 \mu^8}{r_0^6}.\label{totalpower}
\end{align}
A similar analysis gives the radiation power for a general massive scalar field with dimension $\Delta$,
\begin{align}
\sum_{J<\mu^2}P_{J}\sim \frac{\kappa ^2 \mu^{2\Delta+2}}{r_0^{2\Delta}}.
\end{align}
At large $r_0$, the lowest dimension operators dominate the radiation spectrum. \\
\indent To summarize, the result of the analysis is a power spectrum that is peaked at very large $J\sim \mu^2$. Since $|m|$ is close to $J$, the radiation is emitted near the equatorial plane. Note that for black holes near the Hawking-Page transition, the $|m|=J$ mode is dominant over the others. This is purely synchrotronic radiation, as found numerically in \cite{Cardoso:2002up,Brito:2021qiw}. In our case we are considering black holes with $\mu\gg 1$, so the radiation is not purely synchrotronic.
\subsection{Decay of circular orbits}
Now that we have found the emitted power, we can compute the decay time. For the purposes of this section, we will only consider the case where the energy of the orbit is much smaller than the mass of the black hole. The energy of a circular orbit with $r_0\gg \mu$ and $\Delta_L\gg 1$ is $
\Delta_Lr_0^2$, so we require $r_0^2\ll M/\Delta_L$, along with $r_0\gtrsim \mu$ for stability. These two conditions can only be satisfied above the Hawking-Page transition if
\begin{align}
\label{eq:regionofM}
\frac{1}{G}<M\ll \frac{1}{\Delta_L G^2}.
\end{align}
In terms of the temperature, we have
\begin{align}
1< T\ll \frac{1}{(\Delta_L G)^{1/3}}.
\end{align}
In this intermediate range of temperatures, the final state after decay is a slowly spinning black hole whose mass is slightly larger than the original black hole, and we can use the results of Section \ref{radcircular} to analyze the decay process. \\
\indent Since the decay process is adiabatic, the orbit remains approximately circular with radius $r_0(t)$ when radiation is taken into account. The energy of the orbit is $\Delta_L r_0(t)^2$ for $r_0\gg \mu$, so the rate of energy loss is
\begin{align}
\frac{d(\Delta_Lr_0^2)}{dt}=2\Delta_L r_0\dot{r_0}.
\end{align}
Equating this to the radiated power (\ref{totalpower}) gives
\begin{align}
\dot{r}_0\sim- \kappa^2\frac{\mu^8}{\Delta_L r_0^7},
\end{align}
so it follows that the decay time is
\begin{align}\label{decaytimescalar}
t_{\text{radiation}}&\sim \frac{\Delta_L}{\kappa^2 }\left(\frac{r_0}{\mu}\right)^8.
\end{align}
This goes to infinity at large $r_0$, where the orbit approaches the asymptotically AdS region.\\
\indent Let us briefly comment on the case of gravitational radiation. Naively, the power emitted through gravity waves can be computed by replacing $\kappa$ with $\Delta_L\sqrt{1/c_T}$ \cite{Misner:1972jf}, and the corresponding decay time is
\begin{align}
t_{\text{radiation}}\sim \frac{c_T}{\Delta_L^5}\frac{J^4}{\mu^8}.\label{decaytime}
\end{align}
However, there are subtleties involved in replacing $\kappa$ with $\Delta_L\sqrt{1/c_T}$ when computing the radiation from an unstable orbit around an asymptotically flat black hole \cite{Breuer:1973kt,Chitre:1972fv,Davis:1972dm}. It would be interesting to explicitly compute the gravitational radiation spectrum and verify whether (\ref{decaytime}) holds.
\subsection{Radiation vs. tunneling}\label{radvstunn}
\indent Let us now compare the relative effects of radiation and tunneling. Since the potential barrier is finite, there is a small probability for the particle to tunnel over the barrier into the black hole horizon. This is the sole source of instability at infinite $c_T$, where gravitational radiation can be neglected. The tunneling rate is exponentially suppressed in $\Delta_L$, since the action for a point particle is proportional to $\Delta_L$. Therefore tunneling is not captured by the Bohr-Sommerfeld approximation (\ref{eq:BSclassical}). The tunneling rate is related to the imaginary part of the quasi-normal mode energy, \cite{Festuccia:2008zx}
\begin{align}
\Gamma=|\text{Im }\omega_n|=\exp\left(-2\Delta_L \int_{z_b}^{z_c} dz\, \sqrt{V(z)-E_n^2}\right).
\end{align}
Here $z_c$ is the third turning point, which is over the potential barrier (see Figure \ref{potentialfig}). \\
\indent The decay rate simplifies in the limiting case of a circular orbit that approaches the boundary. In this limit we have $E_n\sim L$, $r(z_c)\sim \mu^{1/(d-2)}$, and $r(z_b)\sim \sqrt{L}\to \infty$. The tunneling rate becomes
\begin{align}\label{tunnelinggend}
\Gamma=\exp\left(-2J \int_{\mu^{1/(d-2)}}^{\infty}\frac{dr}{rf(r)}\sqrt{1-\frac{\mu}{r^{d-2}}}\right) .
\end{align}
For large black holes $\mu\gg1$, we have $\mu^{1/(d-2)}\gg r_s$, so we can replace $f(r)$ by $r^2$ in the denominator of (\ref{tunnelinggend}). We can then perform the integral to find
\begin{align}
\Gamma=\exp\left(-\frac{J}{\mu^{2/(d-2)}}\frac{\sqrt{\pi}\Gamma\left(\frac{d}{d-2}\right)}{2\Gamma\left(\frac{d}{d-2}+\frac{1}{2}\right)}\right),\hspace{10 mm}\mu\gg 1,\frac{J}{\mu^{2/(d-2)}}\gg 1.\label{tunnlargemu}
\end{align}
For small black holes with $\mu\ll 1$, we consider the region of the integral where $r/ \mu^{1/(d-2)}$ is held fixed as $\mu\to0$. Then we can replace $f(r)$ by $1-\mu/r^{d-2}$ in the denominator of (\ref{tunnelinggend}), finding
\begin{align}\label{tunnsmallmu}
\Gamma&=\exp\left(-2J \int_{\mu^{1/(d-2)}}\frac{dr}{r\sqrt{1-\frac{\mu}{r^{d-2}}}}\right)\approx\mu^{\frac{2J}{d-2}},\hspace{10 mm}\mu\ll 1,J\gg 1.
\end{align}
In $d=3$ this reproduces the leading behavior at large $J$ of the imaginary part of the quasi-normal mode energy of a small black hole, see eq. (112) in \cite{berti}.\\
\indent We now compare the tunneling rate to the radiation rate for large black holes in $d=3$. From (\ref{tunnlargemu}), we find that the characteristic time scale for tunneling is
\begin{align}
t_{\text{tunneling}}=\exp\left(\frac{8J}{15\mu^2}\right).
\end{align}
Therefore tunneling occurs before the radiative decay time (\ref{decaytimescalar}) in $d=3$ if
\begin{align}
\exp\left(\frac{8J}{15\mu^2}\right)\ll\frac{1}{\kappa^2 \Delta_L^3}\frac{J^4}{\mu^8}.\label{inequality}
\end{align}
Replacing $\kappa$ by $\Delta_L\sqrt{1/c_T}$ as is appropriate for gravitational radiation, the equation (\ref{inequality}) requires
\begin{align}
\Delta_L\mu^2< J\ll \mu^2\log c_T.
\end{align}
For larger $r_0$, the radiation begins to dominate. In this regime the imaginary part of the quasi-normal mode energy is controlled by $1/c_T$ in the presence of gravitational radiation. It would be interesting to verify this by analyzing the poles of the retarded Green function.
\section{Orbit states and the light-cone bootstrap}
\label{sec:orbitsLCbootstrap}
In this section we consider the four-point function of scalar primary operators in a theory with a classical gravity dual. In terms of the CFT data we assume that the CFT central charge (or equivalently, the two-point function of the stress-energy tensor) is very large, $c_T \gg 1$. We also assume that the gap in the spectrum of single-trace higher spin operators is large, $\Delta_{\text{gap}} \gg 1$.
We take a pair of operators to be heavy, $\Delta_H \sim c_T$, and think of them as creating a classical black hole background as we take $c_T \to \infty$. The second pair of operators we take to be light, $\Delta_L \ll c_T$, and we will use them to probe the background created by the heavy operators. This is the setup considered recently in \cite{Kulaxizi:2018dxo,Fitzpatrick:2019zqz,Karlsson:2019qfi,Kulaxizi:2019tkd,Fitzpatrick:2019efk,Karlsson:2019dbd,Li:2019zba,Li:2020dqm,Parnachev:2020fna,Fitzpatrick:2020yjb,Parnachev:2020zbr,Karlsson:2021duj,Karlsson:2021mgg}.
Let us start by setting up some basic conventions (we closely follow the conventions of \cite{Jafferis:2017zna}). We define the four-point function as follows
\begin{equation}
\label{eq:basicdef}
G(z,\bar z) \equiv \langle \mathcal{O}_H(0) \mathcal{O}_L(z,\bar z) \mathcal{O}_L(1,1) \mathcal{O}_H(\infty) \rangle ,
\end{equation}
where all operators for simplicity are taken to be real scalars. As usual $\mathcal{O}_H(\infty) = \lim_{x_4 \to \infty} |x_4|^{\Delta_H} \mathcal{O}_H(x_4)$. In writing the formula above we used conformal symmetry to put all four operators in a two-dimensional plane, on which we define the coordinate $z = x^1 + i x^2$. As usual in Euclidean space $\bar z = z^*$, and upon Wick rotation $z$ and $\bar z$ become real and independent.
The OPE expansion in different channels takes the form
\begin{equation}
\label{eq:schannel}
s\text{-channel}: ~~~G(z,\bar z) &= (z \bar z)^{- {1 \over 2} (\Delta_H + \Delta_{L}) } \\
&\hspace{5 mm}\times \sum_{{\cal O}_{\Delta, J}} | \lambda_{H,L, {\cal O}_{\Delta, J}} |^2 g_{\Delta, J}^{\Delta_{H,L}, - \Delta_{H,L}}(z, \bar z), ~~~ |z|<1 \notag \ .
\end{equation}
\begin{equation}
\label{eq:tchannel}
t\text{-channel}: ~~~G(z,\bar z) &= \left([1-z][1- \bar z]\right)^{- \Delta_L } \\
&\hspace{5 mm}\times\sum_{ {\cal O}_{\Delta, J} } \lambda_{L,L, {\cal O}_{\Delta, J}} \lambda_{H,H, {\cal O}_{\Delta, J}} g_{\Delta, J}^{0,0}(1-z,1-\bar z) ,\quad |1-z|<1\notag \ .
\end{equation}
\begin{equation}
\label{eq:uchannel}
u\text{-channel}: ~~~G(z,\bar z) &= (z \bar z)^{{1 \over 2} (\Delta_H - \Delta_{L}) } \\
&\hspace{5 mm}\times\sum_{ {\cal O}_{\Delta, J} } | \lambda_{H,L, {\cal O}_{\Delta, J}} |^2 g_{\Delta, J}^{\Delta_{H,L}, - \Delta_{H,L}}\left({1 \over z}, {1 \over \bar z} \right),
~~~ |z|>1,\notag \
\end{equation}
where we have defined $\Delta_{H,L} = \Delta_{H} - \Delta_{L}$, and the sum is over an infinite set of primary operators labeled by their scaling dimension $\Delta$ and spin $J$.
It is also convenient to define
\begin{equation}
g(z,\bar z) \equiv (z\bar z)^{{1\over 2} \Delta_L} G(z,\bar z) \ .
\end{equation}
The correlation function is invariant under the exchange of the locations of the two light operators. This is encoded in the crossing equation in the $s$ and $u$ channels,
\begin{equation}
g(z, \bar z) = g \left({1 \over z}, {1 \over \bar z} \right) \ .
\end{equation}
In the case where the operators are charged, the corresponding formulas can be found in \cite{Jafferis:2017zna}.
The formulas above are completely general. We would like to focus on the situation where $c_T \to \infty$ with ${\Delta_H \over c_T}$ kept fixed. In this limit we think of $\Delta_H$ as creating a classical black hole background.
Let us consider the $s$- and $u$-channel OPEs. As explained in \cite{Jafferis:2017zna}, the conformal blocks simplify in this limit because descendants are suppressed by the factor
\begin{equation}
\label{eq:conditiondesc}
{(\Delta - \Delta_H)^2 \over \Delta_H } \ll 1 .
\end{equation}
At this point it is not obvious that in the limit of interest the relevant primary operators that appear in the OPE satisfy \eqref{eq:conditiondesc}, but later using the eigenstate thermalization hypothesis (ETH) we will check that this is indeed the case.
Therefore the relevant conformal blocks are simply
\begin{equation}
g_{\Delta, J}^{\Delta_{H,L}, - \Delta_{H,L}}(z, \bar z) = (z \bar z)^{{\Delta \over 2}} P_J^{(d)} \Big( {z + \bar z \over 2 \sqrt{z \bar z}} \Big) + O \Big({(\Delta - \Delta_H)^2 \over \Delta_H} \Big),
\end{equation}
where
\begin{align}
P_J^{(d)}(x) \equiv {\Gamma({d-2 \over 2}) \Gamma(J+1) \over \Gamma(J+{d \over 2}-1)} C_J^{({d \over 2}-1)}(x),
\end{align}
and $C_J^{(\alpha)}(x)$ are the standard Gegenbauer polynomials. For $d=3$ these are the usual Legendre polynomials.
\subsection{OPE and ETH}
We would next like to rewrite the expansion above using the eigenstate thermalization hypothesis (ETH) \cite{srednicki1999approach}, see \cite{DAlessio:2015qtq} for a review, which states that \cite{Lashkari:2016vgj,Delacretaz:2020nit}
\begin{equation}
\label{eq:ETH}
\langle E_H | {\cal O}_L | E_{H'} \rangle = {\cal O}_L(E_H) \delta_{H, H'} + e^{-{1\over 2} S(\bar E)} f_{{\cal O}_L}(\bar E, \omega) R_{H H'},
\end{equation}
where $\bar E = {1\over 2}(E_H + E_{H'})$, $\omega = E_{H'} - E_H$. The function ${\cal O}_L(E_H)$ is a smooth function of the energy given by the microcanonical average of ${\cal O}_L$. The matrix $R_{H H'}$ is a random matrix with zero mean and unit variance (this is not exactly correct \cite{Foini:2018sdb} but it will be sufficient for our purposes).\footnote{See also a related discussion in \cite{Dymarsky:2018ccu}.} The function $f_{{\cal O}_L}(E, \omega)$ is a smooth function of both variables closely related to the thermal two-point function.
In the formula \eqref{eq:ETH} above we used energy eigenstates of the theory on the cylinder. We would like to switch to the plane
and use the operator-state correspondence. The mapping takes the form
\begin{equation}
ds^2_{\text{cyl}} = d \tau^2 + R^2 d \Omega_{d-1}^2 = \Big( {R \over r} \Big)^2 \Big( d r^2 + r^2 d \Omega_{d-1}^2 \Big), ~~~ \tau = R \log r .
\end{equation}
Under this conformal map we have
\begin{equation}
\langle H | \mathcal{O}(\tau, \vec n) | H' \rangle_{\text{cyl}} = \Big( {r \over R} \Big)^{\Delta_\mathcal{O} } { \langle {\cal O}_{H'}(0) {\cal O}(x) {\cal O}_H^\dagger (\infty) \rangle_{{\mathbb R}^d} \over \sqrt{\langle {\cal O}_{H'}(0) {\cal O}_{H'}^\dagger (\infty) \rangle_{{\mathbb R}^d} \langle {\cal O}_{H}(0) {\cal O}_H^\dagger (\infty) \rangle_{{\mathbb R}^d}} } ,
\end{equation}
where $r = e^{{\tau \over R}}$ and $\vec x = r \vec n$. In particular, the energy on the cylinder is related to the scaling dimension by $E_H = {\Delta_H \over R}$. Via this mapping the ETH ansatz \eqref{eq:ETH} becomes a statement about the three-point functions that appear in the heavy-light OPE channel. For example, setting $H' = H$ we get
\begin{equation}
\lambda_{H,H,\mathcal{O}_{\Delta,J}} = { \langle {\cal O}_{H}(0) {\cal O}(1) {\cal O}_H^\dagger (\infty) \rangle_{{\mathbb R}^d} \over \langle {\cal O}_{H}(0) {\cal O}_{H}^\dagger (\infty) \rangle_{{\mathbb R}^d} } = R^{\Delta_\mathcal{O}} \langle H | \mathcal{O}_{\Delta,J}(0) | H \rangle_{\text{cyl}}.
\end{equation}
Using the ETH ansatz we find for the four-point function in the $s$-channel (after trivially averaging over $R_{HH'}$)
\begin{equation}
&g(z, \bar z) \equiv (z \bar z)^{{1\over 2} \Delta_L}G\left( z , \bar z \right) = R^{2\Delta_L} \Bigg[ {\cal O}_L(\Delta_H)^2 \notag\\
& + \sum_{\Delta_{H'}, J} e^{-S(\bar \Delta/R)} \left|f_J\left({\bar \Delta \over R}, {\omega \over R}\right) \right|^2 (z \bar z)^{{1\over 2} (\Delta_{H'} - \Delta_H) } P_J^{(d)} \left( z + \bar z \over 2 \sqrt{z \bar z} \right) \Bigg], \notag\\
&\bar \Delta = {\Delta_H + \Delta_{H'} \over 2}, \qquad \omega = \Delta_{H'} - \Delta_H ,
\end{equation}
where we used that descendants are suppressed by powers of $\Delta_H$. The sum is over primary operators. To simplify the formulas and avoid extra clutter we next set $R=1$.
Assuming an approximately continuous spectrum and introducing the corresponding density of states we convert the OPE sum into an integral,\footnote{In principle, we should write at this point $S_J(\Delta_H)$. However, using the symmetry of the Kerr black holes under $J \to - J$, we immediately see that $S_J(\Delta_H) - S(\Delta_H) \sim {1 \over c_T}$ for $\Delta_H \sim c_T \to \infty$ and $J \sim O(1)$. It would be interesting to understand if this is in fact true in any large $c_T$ CFT.}
\begin{equation}
\label{eq:sumtoint}
\sum_{\Delta_{H'}} \to \int d \Delta_{H'}\, e^{S(\Delta_{H'})} = \int_{-\infty}^\infty d \omega ~e^{S\left( \Delta_H + \omega \right)} .
\end{equation}
Using that $z =e^{\tau+ i \theta}$ and $\overline{z}=e^{\tau - i \theta}$, we have in the $s,u$-channels for the connected correlator
\begin{equation}
\label{eq:gcon}
g_c(z,\bar z) &\equiv g(z,\bar z) -{\cal O}_L(\Delta_H)^2 \\
& = \sum_{J = 0}^\infty \int_{-\infty}^\infty d \omega
~ e^{ S(\Delta_H + \omega) - S\left( \Delta_H + {\omega \over 2} \right) } e^{- |\tau| \omega } \left|f_J\left(\Delta_H + {\omega \over 2} , \omega \right) \right|^2 P_J^{(d)}\left( \cos \theta \right),\notag
\end{equation}
where $\tau <0$ for the $s$-channel and $\tau >0$ in the $u$-channel. In the formula above we extended the integral over $\omega $ in \eqref{eq:sumtoint} and \eqref{eq:gcon} to $(-\infty, \infty)$, which makes sense in the $\Delta_H \to \infty$ limit.
Next we would like to expand in the limit $\omega \ll \Delta_H$, as in \eqref{eq:conditiondesc}. We have
\begin{equation}
\label{eq:entropyexp}
&S(\Delta_H + \omega) - S\left( \Delta_H + {\omega \over 2} \right) = {\omega \over 2} {\partial S \over \partial \Delta_H} + {3\over 8} \omega^2 {\partial^2 S \over \partial \Delta_H^2} + \dots \ ,\notag \\
&{\partial S(\Delta_H) \over \partial \Delta_H} = \beta , \qquad
{\partial^2 S(\Delta_H) \over \partial \Delta_H^2} =- {1 \over T^2} {\partial T \over \partial E} = - {\beta^2 \over C} \ ,
\end{equation}
where $C = {\partial E \over \partial T}$ is the heat capacity.
The expansion \eqref{eq:entropyexp} can be used when the second term is much smaller than the first term,
\begin{equation}
\label{eq:expansionval}
\omega \ll {C \over \beta} \sim c_T \sim \Delta_H .
\end{equation}
Therefore the expansion is reliable as long as $\omega \ll \Delta_H$.
To summarize, the OPE expansion in the $s$- and $u$- channel takes the following form,
\begin{equation}
\label{eq:EuclidOPE}
g(z, \bar z) = \lambda_{H,H,L}^2 + \sum_{J = 0}^\infty \int_{-\infty}^\infty d \omega\, e^{\left( {\beta \over 2} - |\tau| \right)\omega } \left|f_J\left(\Delta_H , \omega \right) \right|^2 P_J^{(d)}\left( \cos \theta \right) ,
\end{equation}
where so far we used ETH to go from a sum over a continuum of heavy operators to an integral over a smooth function $\left|f_J\left(\Delta_H , \omega \right) \right|^2$.
Next let us connect the above representation to the thermal two-point function on the sphere. To this end we define the connected two-sided Wightman function
\begin{equation}
\label{eq:two-sided}
\left\langle \mathcal{O}_L\left(t - i {\beta \over 2} , \theta\right) \mathcal{O}_L(0,0) \right\rangle_\beta = \sum_{J = 0}^\infty \int_{- \infty}^\infty d \omega\, e^{- i \omega t} g_J(\beta, \omega) P_J^{(d)}(\cos \theta) .
\end{equation}
Equivalently, we can take the Euclidean formula \eqref{eq:EuclidOPE} and continue it to Lorentzian time by setting $\tau = {\beta \over 2} + i t$.
The statement of the ETH hypothesis then leads to the following identification
\begin{equation}
\label{eq:ETHrelation}
{\rm ETH}:~~~ \left|f_J\left(\Delta_H , \omega \right) \right|^2 = g_J(\beta, \omega) |_{\beta = \beta(\Delta_H)}.
\end{equation}
The KMS condition (or equivalently, invariance of \eqref{eq:EuclidOPE} under $\tau \to \beta - \tau$, see e.g. \cite{Iliesiu:2018fao}) leads to
\begin{equation}
\label{eq:KMSimage}
\text{KMS}: ~~~ g_J(\beta, \omega) =g_J(\beta,-\omega).
\end{equation}
Unitarity implies that $g_J(\beta, \omega)$ is real and non-negative for real $\omega$. This fact implies that away from the real axis it satisfies
\begin{equation}
\label{eq:complexconjugation}
\text{Unitarity}: ~~~g_J(\beta, \omega^*) = \Big( g_J(\beta, \omega) \Big)^* .
\end{equation}
In deriving the formula above we assumed that $\omega \ll \sqrt{\Delta_H}$, see \eqref{eq:conditiondesc}: first, when neglecting the contribution of the descendants; second, when expanding the entropy. Let us check that this is indeed the case.
To this end recall the universal large $\omega$ asymptotic of $g_J(\beta, \omega)$,
\begin{equation}\label{tauberian}
\lim_{\omega \to \infty} g_J(\beta, \omega) \sim \omega^{2 \Delta_L - d} e^{- {\beta \omega \over 2}} ,
\end{equation}
which should be understood in the averaged/Tauberian sense \cite{Pappadopulo:2012jk}. It follows from reproducing correctly the $\tau \to 0$ behavior, which is controlled by the unit operator in the $t$-channel,
\begin{equation}
g(e^{\tau},e^{\tau}) \sim {1 \over \tau^{2 \Delta_L}}, ~~~ \tau \to 0 .
\end{equation}
More precisely, to derive \eqref{tauberian} one computes the spin $J$ projection of the contribution of the unit operator in the $t$-channel to $g_J(\beta, \omega)$. Taking the $\tau \to 0$ limit then gives \eqref{tauberian}.
From \eqref{tauberian} we conclude that only operators with $\omega \sim {1 \over \beta} \sim O(1) \ll \sqrt{\Delta_H}$ contribute significantly to the OPE as has been assumed in the derivation above.
\subsection{Quasi-normal modes}
Quasi-normal modes are defined as poles of $g_J(\beta, \omega)$, see e.g. \cite{Horowitz:1999jd,cardoso,Turiaci:2016cvo}. From the properties above it is clear that given a pole at $\omega_0$, $g_J(\beta, \omega)$ also has poles at $- \omega_0$, $\omega_0^*$, $- \omega_0^*$. The conclusion of this discussion is that QNMs manifest themselves as singularities of the averaged OPE coefficients continued in the complex plane as a function of $\omega = \Delta_{H'} - \Delta_{H}$. Below we will focus on QNMs with ${\rm Re}[\omega_{n,J}],{\rm Im}[\omega_{n,J}]>0$ with the residue $\lambda_{n,J}$, from which we obtain all other QNMs using the KMS symmetry \eqref{eq:KMSimage} and complex conjugation \eqref{eq:complexconjugation}.
In theories with a classical gravity dual it is known that poles are in fact the only singularities of $g_J(\beta, \omega)$. We can thus try to close the integration contour in \eqref{eq:EuclidOPE} to the upper half-plane to get an alternative expansion of the correlation function in terms of the QNMs. For convergence reasons we set
\begin{equation}
\tau = \beta/2 - i t ,
\end{equation}
which is naturally related to the two-sided thermal function $\left\langle \mathcal{O}_L\left(t - i \beta/2 , \theta\right) \mathcal{O}_L(0,0) \right\rangle_\beta$.
Introducing in this way
\begin{equation}
g(t,\theta) \equiv g \Big( e^{\beta/2 - i (t-\theta) }, e^{\beta/2 - i (t+\theta)} \Big) ,
\end{equation}
we get by closing the $\omega$ integration contour in \eqref{eq:EuclidOPE} in the upper half-plane for $t>0$\footnote{For $t<0$ we can close the contour in the lower half-plane.}
\begin{equation}
\label{eq:QNM}
g(t,\theta) = \lambda_{H,H,L}^2 + 2 \pi i \sum_{n,J = 0}^\infty (\lambda_{n,J} e^{i t \omega_{n,J} } - \lambda_{n,J}^* e^{-i t \omega_{n,J}^* } ) P_J^{(d)}\left( \cos \theta \right) , ~~~ {\rm Re}[\omega_{n,J}],{\rm Im}[\omega_{n,J}]>0 ,
\end{equation}
where we dropped the contribution of the arc at infinity thanks to the exponential suppression $e^{- t {\rm Im}[\omega]}$ of the integrand. In this way we get the following QNM representation of the two-sided correlator,
\begin{equation}
\label{eq:QNMexpansion}
&\text{QNM}: ~~~g(t,\theta) = \lambda_{H,H,L}^2 - 2 \pi \sum_{n,J = 0}^\infty {\rm Im} \lambda_{n,J} e^{- |t|{\rm Im} \omega_{n,J} } \cos ( {\rm Re} \omega_{n,J} |t| ) P_J^{(d)}\left( \cos \theta \right){\nonumber} \\
&+ 2 \pi \sum_{n,J = 0}^\infty {\rm Re} \lambda_{n,J} e^{- |t|{\rm Im} \omega_{n,J} } \sin ({\rm Re} \omega_{n,J} |t| ) P_J^{(d)}\left( \cos \theta \right) , ~~~ {\rm Re}\omega_{n,J},{\rm Im}\omega_{n,J} >0 .
\end{equation}
The imaginary part ${\rm Im}\, \omega_{n,J} $ controls the decay rate in Lorentzian time. As in \cite{Jafferis:2017zna} we see that $g(t,\theta)$ is not manifestly analytic when expanded around $t=0$, which leads to an infinite set of sum rules which we do not explore in this paper.
In the present paper we assume that $\lambda_{H,H,L}=0$ to leading order in $c_T$ (or equivalently, that the thermal one-point function of the light operator is zero to leading order). In this case the hydrodynamic modes do not appear in the QNM expansion above, see \cite{Delacretaz:2020nit}.
Another useful way to think about QNMs is in terms of the conformal partial wave expansion of the four-point function \cite{Mack:2009mi,Costa:2012cb,Caron-Huot:2017vep},
\begin{equation}
G(z,\bar z) = \sum_{J=0}^\infty \int_{{d \over 2} - i \infty}^{{d \over 2} + i \infty} {d \Delta \over 2 \pi i} c(\Delta,J) F_{\Delta,J}(z, \bar z).
\end{equation}
The function $c(\Delta,J)$ is meromorphic and it contains poles at the position of operators with residues controlled by the three-point functions
\begin{equation}
{\rm Res}_{\Delta = \Delta_H'} c(\Delta,J) \sim |\lambda_{H,L,H'}|^2 .
\end{equation}
As we take the large $c_T$ limit these poles merge and form a cut. QNMs are then nothing but poles of $c(\Delta,J)$ on the second sheet!\footnote{In other words, they are somewhat analogous to resonances in S-matrix theory.}
\subsection{Relation to the light-cone bootstrap}
Let us next see how the unit operator in the $t$-channel is reproduced in more detail. It is useful first to write the precise formula in generalized free field theory. The heavy-light OPE expansion then takes the form
\begin{equation}
\label{eq:unitoperatorGFF}
&\sum_{n,J=0}^{\infty} c_{n,J} e^{-(\Delta_L+2n+J)|\tau|} P_J^{(d)}(\cos \theta) = {1 \over 2^{\Delta_L} (\cosh \tau - \cos \theta)^{\Delta_L}} , {\nonumber} \\
c_{n,J} &= \frac{\Gamma \left(\frac{d}{2}+J\right) \Gamma
\left(-\frac{d}{2}+n+\Delta_L+1\right) \Gamma
(J+n+\Delta_L)}{\Gamma (\Delta_L) \Gamma (J+1) \Gamma
(n+1) \Gamma \left(-\frac{d}{2}+\Delta_L+1\right) \Gamma
\left(\frac{d}{2}+J+n\right)} ,
\end{equation}
where $c_{n,J}$ correspond to the three-point functions of the double-twist operators with dimension $\Delta = \Delta_H + \Delta_L + 2n + J$ and spin $J$ in the limit $\Delta_H \to \infty$, see e.g.\ \cite{Li:2019zba,Li:2020dqm}. In agreement with \eqref{tauberian}, $c_{n,J} \sim n^{2 \Delta_L - d}$ at large $n$ and fixed $J$.
In an interacting theory the RHS represents the leading singularity in the light-cone limit and the LHS becomes more and more accurate at high spin (with computable corrections). In terms of the three-point functions discussed in the previous section, the result above takes the following form,
\begin{equation}
\label{eq:discretesum}
e^{{\beta \omega \over 2} } \left|f_J\left(\Delta_H , \omega \right) \right|^2 |_{\eqref{eq:unitoperatorGFF}} = \theta(\omega) \sum_{n=0}^\infty c_{n,J} \delta(\omega - \Delta_L - 2n - J) ,
\end{equation}
which is expected to become a good approximation at large spin $J \gg 1$. These are precisely the double-twist operators discussed in \cite{Kulaxizi:2018dxo,Fitzpatrick:2019zqz,Karlsson:2019qfi,Kulaxizi:2019tkd,Fitzpatrick:2019efk,Karlsson:2019dbd,Li:2019zba,Li:2020dqm,Parnachev:2020fna,Fitzpatrick:2020yjb,Parnachev:2020zbr,Karlsson:2021duj,Karlsson:2021mgg}. The KMS image of the double-twist operators obtained by $\omega \to - \omega$, see \eqref{eq:ETHrelation} and \eqref{eq:KMSimage}, produces contributions regular in the $\tau \to 0$ limit (they are captured by the double-twist operators in the $t$-channel OPE).\footnote{For the same reason double-twist operators in the $t$-channel are sensitive to the boundary condition of the wave equation imposed at the horizon in the bulk.}
It is clear, however, that in the present context such a conclusion would be too hasty. Indeed, let us imagine that instead of the discrete spectrum above we have a pair of closely separated poles that correspond to a pair of QNMs,
\begin{equation}
\label{eq:doubletwistmirage}
&\delta \Big(\omega - \Delta_L - 2n - J - \gamma(n,J) \Big) \sim {1 \over 2 \pi i} \Big( {1 \over \omega - \Delta_L - 2n - J - \gamma(n,J) - i e^{- c_0(\mu) J }} - \text{c.c.} \Big),
\end{equation}
where $\sim$ denotes equivalence in large $J$ perturbation theory.
A continuum with a pair of poles whose separation from the real axis is nonperturbatively small in spin is not distinguishable in large spin perturbation theory from the discrete sum. This is precisely what happens in our case! As we reviewed in Section \ref{radvstunn}, the QNMs acquire an imaginary part which is nonperturbative in spin due to the effect of tunneling. To conclude, the basic mechanism for reproducing the identity operator is different compared to \cite{Fitzpatrick:2012yx,Komargodski:2012ek} in the presence of the black hole horizon: in this case we have a continuum of operators (which are the black hole microstates), and ``double-twist operators'' are resonances whose imaginary part is nonperturbative in spin.
In fact, as we reviewed in Section \ref{radvstunn}, the nonperturbatively small imaginary part has the form $\exp(- c_0(\mu) J)$. The analysis in the previous works \cite{Kulaxizi:2018dxo,Fitzpatrick:2019zqz,Karlsson:2019qfi,Kulaxizi:2019tkd,Fitzpatrick:2019efk,Karlsson:2019dbd,Li:2019zba,Li:2020dqm,Parnachev:2020fna,Fitzpatrick:2020yjb,Parnachev:2020zbr,Karlsson:2021duj,Karlsson:2021mgg} is organized perturbatively in $\mu \sim {\Delta_H \over c_T}$ and to leading order in $c_T$. It is then clear from \eqref{eq:doubletwistmirage} that in this perturbative expansion, the continuum disappears and the relevant spectral density becomes discrete. Therefore we can interpret the results of \cite{Kulaxizi:2018dxo,Fitzpatrick:2019zqz,Karlsson:2019qfi,Kulaxizi:2019tkd,Fitzpatrick:2019efk,Karlsson:2019dbd,Li:2019zba,Li:2020dqm,Parnachev:2020fna,Fitzpatrick:2020yjb,Parnachev:2020zbr,Karlsson:2021duj,Karlsson:2021mgg} concerning the properties of double-twist operators $[\mathcal{O}_H, \mathcal{O}_L]$ as statements about the quasi-normal modes perturbatively in $\mu$, and correspondingly relate them to gravitational orbits studied in Section \ref{sec:classicalorbits}. We analyze this connection in detail in the next section.
In other words, if we set ${\rm Im} \omega_{n,J}=0$ and $\lambda_{n,J} = - {i \over 2 \pi} e^{- {\beta \over 2} \omega_{n,J}} c_{n,J}$ in \eqref{eq:QNMexpansion}, we get the following representation of the correlator,
\begin{equation}
\text{QNM}|_{{\rm Im} \omega_{n,J}, {\rm Re} \lambda_{n,J}=0} :~~~g(t, \theta) &= \lambda_{H,H,L}^2 + \sum_{n,J=0}^\infty c_{n,J} e^{ - \omega_{n,J} \beta/2 } \cos (\omega_{n,J} t) \ P_J^{(d)}\left( \cos \theta \right) ,
\end{equation}
which is nothing but the standard OPE representation \eqref{eq:two-sided} where we sum over a discrete family of operators! This time, however, we effectively sum over CFT resonances instead of CFT operators. This is what happens as we work in the ${1 \over J} $ perturbation theory.
It is instructive to compare the ${1 \over J}$ expansion to the small $\mu$ expansion. In the latter case one can check that ${\rm Im} \omega_{n,J} \sim \mu^{J}$ \cite{cardoso,berti} and therefore the effects related to the double-twist operators emerging as the limit of resonances should appear at high enough order in $\mu$. It would be interesting to explore this aspect in detail.
\section{Double-twist dimensions from the Bohr-Sommerfeld formula}\label{sec:doubletwistdim}
For matter propagating on a black hole geometry, the concept of a normal mode is not applicable since waves can fall into the black hole. Instead one considers quasi-normal modes with complex energy $\omega$ \cite{Horowitz:1999jd,berti}. These are solutions to the wave equation which are purely ingoing at the horizon and satisfy normalizable boundary conditions at the boundary. By writing the solution to the wave equation in terms of the retarded Green function, one finds that the quasi-normal modes determine the late time behavior of the field. The real part of $\omega$ captures the oscillatory behavior of the wave, and the imaginary part of $\omega$ encodes the decay rate. \\
\indent Various tools have been developed for computing quasi-normal modes in different regimes. For our purposes, we are interested in quasi-normal modes that correspond to orbit states. This means that we take the mass of the orbiting particle to be large, so that the wavefunction is well-localized on the orbit. Since the spin $J$ of the state is proportional to $\Delta_L$, we are considering the large spin limit as well. This limit was considered in \cite{Festuccia:2008zx}, which we will review next. A similar large spin limit was analyzed for massless QNMs in the asymptotically flat case in \cite{Schutz:1985km}.
\subsection{Quasi-normal modes and the Bohr-Sommerfeld formula}
Given a scalar primary operator $\mathcal{O}_L$ of dimension $\Delta_L$ we consider the dual scalar field $\phi$ in AdS of mass $m$. The two are related via the standard AdS/CFT dictionary \cite{Maldacena:2011ut}
\begin{equation}
\Delta_L = {d \over 2} + \nu, ~~~ \nu = \sqrt{{d^2 \over 4} + m^2},
\end{equation}
where we introduced a new parameter $\nu$. We will be interested in the semi-classical limit $\nu \gg 1$.
The bulk field $\phi$ satisfies the Laplace equation on the black hole background. Using the symmetries of the problem, we can write
\begin{equation}
\phi(t,r, \vec n) = e^{- i \omega t} Y_{J,{\bf m}}(\vec n) \psi_{\omega, J}(r) ,
\end{equation}
where $Y_{J,{\bf m}}(\vec n)$ are the spherical harmonics on $S^{d-1}$ of spin $J$.
\indent The quasi-normal modes are solutions to the radial wave equation with complex $\omega$ with the following boundary conditions
\begin{equation}
\psi_{\omega,J}(z) \simeq z^{{d \over 2} + \nu}, ~~~ z \to 0, \\
\psi_{\omega,J}(z) \sim e^{i \omega z}, ~~~ z \to \infty ,
\end{equation}
where the latter condition means that the wave is purely ingoing at the horizon. Here the tortoise coordinate is defined by $dz=-dr/f(r)$ as in (\ref{tortoise}).
The wave equation for the radial part of the field $\psi_{\omega, J}(r)$ takes the form
\begin{align}
(-\partial_z^2+ \nu^2 V(z)-\omega^2)\psi_{\omega, J}(z)=0,\label{waveeq}
\end{align}
where the potential $V(z)$ is
\begin{align}
V(z)=\frac{f(r)}{\nu^2}\left(\frac{(2J+d-2)^2-1}{4r^2}+\nu^2-\frac{1}{4}+\frac{\mu(d-1)^2}{4r^d}\right).\label{fullpotential}
\end{align}
We will be interested in solving the wave equation in the large $\nu$ limit or, equivalently, perturbatively in ${1 \over \nu}$ following \cite{Festuccia:2008zx}. Note the convenience of choosing $\nu$ as an expansion parameter (as opposed to $\Delta_L$) since the potential has a simple form $\#_0 + {\#_1 \over \nu^2}$. For the same reason it is convenient to choose a new parameterization for spin $J$
\begin{equation}
2J+d-2=2\nu k .
\end{equation}
Finally, we set
\begin{align}\label{scalinglimit}
\omega=\nu u ,
\end{align}
to get a nontrivial limit in the wave equation (\ref{waveeq}). The potential becomes
\begin{align}
V(z)=f(r)\left(\frac{k^2}{r^2}+1\right)+O(1/\nu^2).\label{potentiallimit}
\end{align}
Note that this matches the potential (\ref{eq:potentialBH}) describing the classical geodesic motion if we replace $k$ by $L$. As we will demonstrate shortly, this provides a direct link between the analysis of the present section and our discussion of the semi-classical orbits in Section \ref{sec:classicalorbits}.\\
\indent For large $\nu$, the wave equation (\ref{waveeq}) can be solved by a WKB analysis. The authors of \cite{Festuccia:2008zx} then showed how to compute the quasi-normal mode energies by analytically continuing the WKB wavefunctions in $u$. The resulting quasi-normal mode spectrum is discrete, and is quantized according to the Bohr-Sommerfeld rule. This rule becomes particularly simple when the potential has a minimum. This is the case when $k>k_{\text{min}}$, where $k_\text{min}$ is the critical momentum at which $V'(z)=V''(z)=0$ has a solution in $z$. This is what appeared as $J_{\text{min}}(\mu)$ in the analysis of the orbits in Section \ref{sec:classicalorbits}. For large black holes, we have
\begin{align}
k_\text{min}\sim \mu^{2/(d-2)},\hspace{10 mm}\mu\gg1.
\end{align}
\indent For $k<k_\text{min}$ the potential is monotonic, so the wave simply falls into the black hole, leading to an order one imaginary part of the quasi-normal mode energy. When $k>k_\text{min}$ there is a finite potential well, and the imaginary part of $\omega$ is related to the tunneling rate over the potential barrier, which is exponentially small, as we discussed in Section \ref{radvstunn}. Neglecting these nonperturbatively small effects, for $k>k_\text{min}$ we have the usual Bohr-Sommerfeld rule for a particle moving in the potential,
\begin{align}
\nu \int_{z_a}^{z_b}dz\, \sqrt{u_n^2-V(z)}=\pi\left(n+1/2\right), \hspace{10 mm}n\ge 0.
\label{leadingBS}
\end{align}
Here $z_{a}$ and $z_b$ are the turning points inside the potential well at which $u_n^2=V(z)$, see Figure \ref{potentialfig}. To leading order at large $\nu$ and ${n \over \nu}$ fixed \eqref{leadingBS} becomes the equation \eqref{eq:BSclassical} from the classical orbit section. Formula \eqref{leadingBS}, however, is more precise in that it is applicable to finite $n$ as well. Next we discuss further $1/\nu$ corrections to \eqref{leadingBS}. \\
\indent
\subsection{Corrections to the Bohr-Sommerfeld formula}\label{sec:BScorrections}
The quantization condition (\ref{leadingBS}) allows us to compute the spectrum to leading order in $1/\nu$. However, it is possible to go further. Let us now give a systematic method for computing higher $1/\nu$ corrections. To do so we need to recall some little-known facts about higher order corrections to the Bohr-Sommerfeld rule in quantum mechanics \cite{PhysRev.41.713,Bender:1977dr}. \\
\indent We consider the second order differential equation
\begin{align}
\psi''(z)=\nu^2 Q(z) \psi(z),\hspace{10 mm}Q(z)\equiv V(z)-u^2\label{schrodinger}
\end{align}
where $\nu$ is a large parameter. We assume that $Q(z)$ has a unique minimum at $z=0$, and is monotonically increasing on both sides of this minimum. Then the spectrum of allowed values of $u$ is discrete. The wavefunction has a WKB expansion
\begin{align}
\psi(z)=\exp\left(\nu \sum_{i=0}^{\infty}\frac{1}{\nu^{i}}S_i\right).
\end{align}
Plugging into (\ref{schrodinger}), one finds that the $S_i$'s satisfy the recursion relations
\begin{align}\label{recursion}
S_0'(z)&=-\sqrt{Q(z)},\\
0&=2S_0'S_i'+\sum_{j=1}^{i-1}S_j'S_{i-j}'+S_{i-1}''.\
\end{align}
By matching the wavefunction near the turning points using an Airy function analysis, it is possible to derive the quantization relation to all orders in $1/\nu$ \cite{PhysRev.41.713}. This condition is
\begin{align}
\frac{1}{2i}\oint dz\, \sum_{i=0}^{\infty}S_i'(z)=n\pi, \hspace{10 mm}n\ge 0,
\end{align}
where the contour integral is taken counterclockwise around the turning points. \\
\indent Let us explicitly write the quantization condition to first order in $1/\nu$. This is
\begin{align}
\nu\int_{z_a}^{z_b}dz\, \sqrt{-Q(z)}-\frac{1}{96 i\nu }\oint dz\, \frac{Q''(z)}{Q(z)^{3/2}}+O(1/\nu^2)=\pi(n+1/2).\label{BSfirstorder}
\end{align}
Note that the replacement of $n$ with $n+1/2$ comes from the contour integral of $S_1'= - {1 \over 4} {d \log Q(z) \over d z}$, \cite{Bender:1977dr}
\begin{equation}
{1 \over 2 i} \oint dz \Big( - {1 \over 4} {d \log Q(z) \over d z} \Big) = - {\pi \over 2},
\end{equation}
where the counterclockwise integral of $\log Q(z)$ gives $2 \times 2 \pi i$ because it encircles a pair of simple zeros.
Using the recursion relations (\ref{recursion}), corrections to any desired higher order in $\nu$ can be computed in terms of contour integrals of derivatives of $Q$.
\subsection{Perturbation theory around circular orbits}
The integrals on the left hand side of (\ref{BSfirstorder}) can be computed by perturbation theory around the minimum of the potential. Here we would like to consider a regime of $n$ in which this perturbation theory is valid, which allows us to obtain results to all orders in $\mu$ (see also \cite{Berenstein:2020vlp} for the leading order term in this expansion). Consider a nearly circular geodesic, which oscillates around the minimum of the potential with small amplitude. This means that the mode number $n$ is held fixed as $\nu\to \infty$. Then we can evaluate the integrals in perturbation theory around the minimum. We refer the reader to Appendix \ref{appendix} for the details. \\
\indent After performing the integrals, one finds the expression (\ref{finalquant}), which expresses the quantization condition for $n$ in terms of derivatives of the potential at the minimum. Now solving for $\omega=\nu u$ perturbatively in $\nu$, we have
\begin{align}
\omega &=\nu \sqrt{V}+\sqrt{\frac{V''}{2V}}(n+1/2)\label{BSfinal}\\
&\hspace{5 mm}+\frac{9(2n^2+2n+1)V''V''''V-(30n^2+30n+11)V'''^2V-18(2n+1)^2V''^3}{288\nu V^{3/2}V''^2}+O(1/\nu^2).\notag
\end{align}
In this expression, all derivatives are evaluated at the minimum of the potential. The power of this formula is that it is valid to \emph{all orders} in $\mu$, and also easy to evaluate explicitly. One simply needs to find the minimum of the potential and compute derivatives with respect to $z$.
\subsection{Example: $O(\mu^3)$ in $d=4$}
\indent As an example of how to apply this technology, we consider the case $d=4$. The formulas for general dimensions are displayed in Appendix \ref{gammagend}. To match the known results from bootstrap, we solve for the minimum of the potential (\ref{fullpotential}) to second order in $\mu$ and $1/\nu^2$,
\begin{align}
r(z_{\text{min}})&=\left(1+\frac{k^2-1}{16k^2\nu^2}\right)\sqrt{k}-\frac{\mu}{k^{3/2}} \left(\frac{2k+1}{4}-\frac{2k^3+39k^2+70k}{64k^2\nu^2}\right)\\
&\hspace{10 mm}-\frac{\mu^2}{k^{7/2}}\left(\frac{28k^2+20k+3}{32}-\frac{84k^4+820k^3+1305k^2+620k-21}{512k^2\nu^2}\right)+O(\mu^3,1/\nu^3).\notag
\end{align}
\indent We now plug this value of the minimum radius into the expression for the energy (\ref{BSfinal}). In doing so, one must be careful to use the full potential (\ref{fullpotential}) including $1/\nu^2$ corrections, and not just the leading order term. One finds
\begin{align}
\gamma_1&=-\frac{\nu^2+(6n+3)\nu+6n^2+6n+2}{2k\nu}+O(1/\nu^2),\label{gamma1} \\
\gamma_2&=-\frac{1}{8k^4\nu}\left( k(4k+1)\nu^2+k (21k+10)(2n+1)\nu\notag\right.\\
&\hspace{20 mm}\left.+35k^2+35k+4+ k (102k+90)n(n+1)\right)+O(1/\nu^2)\label{gamma2} \ .
\end{align}
Note that $n$ is held fixed as $\nu\to \infty$. As a special case of these formulas, we can take a circular orbit $n=0$ at leading order in $1/\nu$,
\begin{align}
\gamma_1&=-\frac{\nu}{2k}+O(\nu^{0}),\hspace{10 mm}\gamma_2=-\frac{\nu (4k+1)}{8k^3}+O(\nu^0).
\end{align}
Comparing these results to \eqref{eq:gendgammaorbit} with $\alpha=0$ and $d=4$, one finds that the answers match at order $\nu$.\\
\indent Let us now compare with the results of the light-cone bootstrap. The value of $\gamma_1$ was computed at finite spin in \cite{Li:2020dqm}, which found
\begin{align}
\gamma_1=-\frac{\Delta_L^2-\Delta_L+6n\Delta_L+6n^2-6n}{2(J+1)}.
\end{align}
Replacing $\Delta_L=2+\nu$ and $J=\nu k-1$, we see that this exactly matches the result (\ref{gamma1}). Note that the two results agree to all orders in $\nu$, even though our approximation is only valid to order $1/\nu$. One can check that this is a coincidence that only holds in $d=4$.\\
\indent At order $\mu^2$ and infinite spin, the anomalous dimension was computed in \cite{Kulaxizi:2018dxo,Li:2019zba},
\begin{align}
\gamma_2=-\frac{4\Delta_L^3+3(14n-1)\Delta_L^2+(102n^2-66n-1)\Delta_L+34(2n-1)n(n-1)}{8J^2}\label{gamma2him}.
\end{align}
Taking $k\to \infty$ in (\ref{gamma2}), we find that the answers again agree. Our formula $(\ref{gamma2})$ has additional finite spin corrections at order $O(1/J^3)$, which is a new prediction for the anomalous dimension $\gamma_2$ at finite spin. Note that the contribution of the final term in the numerator of (\ref{gamma2him}) is of order $1/\nu^2$, so we are not able to match it to this order in the Bohr-Sommerfeld approximation. \\
\indent To obtain another new prediction using the formula (\ref{BSfinal}), we can carry out the same process to order $\mu^3$. We omit the details since they are the same as above: one needs to solve for the minimum at order $\mu^3$, and compute derivatives of the potential to the same order. The result is
\begin{align}
\gamma_3&=-\frac{1}{16k^5\nu} \Big( (4 k+1)^2 \nu ^2 +3 (41 k^2 + 34 k + 7 ) (2 n+1) \nu \Big. \notag \\
&\Big. + 346 k^2 + 500 k + 199 + {16 \over k} + 12 ( 83 k^2+ 110 k + 35 ) n (n+1) \Big) .
\end{align}
Note that it is relatively straightforward to generalize the expansion to higher orders in the small $\mu$ expansion.
We present analogous formulas in $d$ spacetime dimensions in Appendix \ref{gammagend}.
\section{Gravitational orbits and many-body scars}
\label{sec:scars}
In this section we comment on the connection between gravitational orbits and the phenomenon of many-body scars recently discovered in condensed matter systems \cite{bernien2017probing}, see also \cite{serbyn,moudgalya2021quantum} for reviews.\footnote{We thank Daniel Jafferis and Baur Mukhametzhanov for discussions on this topic.} Many-body scars are non-thermal energy eigenstates that violate ETH.
In this section, we demonstrate that the double-twist operators that appear in the light-cone bootstrap analysis of \cite{Kulaxizi:2018dxo,Fitzpatrick:2019zqz,Karlsson:2019qfi,Kulaxizi:2019tkd,Fitzpatrick:2019efk,Karlsson:2019dbd,Li:2019zba,Li:2020dqm,Parnachev:2020fna,Fitzpatrick:2020yjb,Parnachev:2020zbr,Karlsson:2021duj,Karlsson:2021mgg} are scars, perturbatively in $1/J$.\footnote{In Appendix \ref{app:bulkhigherspin} we discuss the emergence of bulk higher spin symmetry at large spin, which naturally organizes the spectrum of the double-twist operators.} The presence of the black hole horizon (related to the nonperturbative in spin effects in the light-cone bootstrap) turns them into long-lived but eventually thermalizing states, similar to \cite{lin2020slow}. For this reason and due to the fact that their lifetime can be made arbitrarily big in the large $c_T$ limit, we can think of gravitational orbits as \emph{perturbative scars}.
\subsection{Five-point function}\label{fivepointfnc}
In this section we use the light-cone bootstrap technique to evaluate the one-point function of the light operator $\mathcal{O}_L$ in the double-twist state that corresponds to a stable orbit. We imagine that the AdS theory contains a cubic coupling $g \phi^3$ which induces a nontrivial three-point function $\langle \mathcal{O}_L \mathcal{O}_L \mathcal{O}_L \rangle \sim g$. To compute the one-point function we impose crossing on the five-point function that involves three light operators and two heavy operators.
We next consider a light-cone limit in which the two heavy operators become light-like separated. In this limit the leading contribution to the correlator comes from the factorized answer
\begin{equation}
\label{eq:limit5}
\langle \mathcal{O}_H(x_1) \mathcal{O}_L(x_2) \mathcal{O}_L(x_3) \mathcal{O}_L(x_5) \mathcal{O}_H(x_4) \rangle & \overset{x_{14}^2 \to 0}{=} \langle \mathcal{O}_H(x_1) \mathcal{O}_H(x_4) \rangle \langle \mathcal{O}_L(x_2) \mathcal{O}_L(x_3) \mathcal{O}_L(x_5) \rangle + ... {\nonumber} \\
&= {1 \over x_{14}^{2 \Delta_{H} } } {g \over (x_{23}^2 x_{25}^2 x_{35}^2)^{{\Delta_L/2}}} + ... \ ,
\end{equation}
where on the RHS we focused on the leading contribution as $x_{14}^2 \to 0$. We would like to understand how this result can be reproduced in the crossed channel $(12)$, $(34)$, where $(i j)$ stands for doing the OPE between $\mathcal{O}_i (x_i) \mathcal{O}_j (x_j)$. This setup is almost identical to the recent work \cite{Antunes:2021kmm} which we closely follow.
For simplicity we focus on the leading twist contribution only. To this end we write the following expansion in the light-cone limit,
\begin{equation}
\label{eq:leadingtwist}
\mathcal{O}_H(x_1) \mathcal{O}_L(x_2) \simeq \sum_{k} 2^J C_{H,L,k} \int_0^1 [d t]\, {\mathcal{O}_{k,J}(x_1+t x_{21}, x_{12} ) \over (x_{12}^2)^{{\Delta_H + \Delta_L - \tau_k \over 2}}} , \notag\\
[d t] = {\Gamma(\Delta_k+J) t^{{\Delta_k +J - \Delta_H + \Delta_L \over 2} - 1} (1-t)^{{\Delta_k +J + \Delta_H - \Delta_L \over 2} - 1} \over \Gamma({\Delta_k+J \over 2} + {\Delta_H - \Delta_L \over 2}) \Gamma({\Delta_k+J \over 2} - {\Delta_H - \Delta_L \over 2})} ,
\end{equation}
where the RHS is designed to correctly reproduce the leading twist $\tau = \Delta - J$ part coming
from the operator $\mathcal{O}_{k,J}(x)$ that appears in the OPE of $\mathcal{O}_H \mathcal{O}_L$.\footnote{We use the normalizations of the two- and three-point functions as in \cite{Antunes:2021kmm}.}
Applying the formula \eqref{eq:leadingtwist} twice in the (12) and (45) channels we get
\begin{equation}
&\langle \mathcal{O}_H(x_1) \mathcal{O}_L(x_2) \mathcal{O}_L(x_3) \mathcal{O}_L(x_5) \mathcal{O}_H(x_4) \rangle {\nonumber} \\
&\simeq {1 \over (x_{12}^2 x_{34}^2)^{{\Delta_L+\Delta_H \over 2}}} \Big( {x_{13}^2 \over x_{15}^2 x_{35}^2 } \Big)^{{\Delta_L \over 2}}
\Big( {x_{23}^2 \over x_{14}^2} \Big)^{{\Delta_H- \Delta_L \over 2}}\sum_{k_1,k_2, \ell} P_{k_1 k_2 \ell} {\cal G}_{k_1 k_2 \ell}(u_i) ,
\end{equation}
where $k_i$ label operators that appear in the heavy-light channels \eqref{eq:leadingtwist}, and the quantum number $0 \leq \ell \leq \min ( J_{k_1}, J_{k_2})$ labels different tensor structures in the three-point function $\langle \mathcal{O}_{k_1} \mathcal{O}_L \mathcal{O}_{k_2} \rangle$.
We have also introduced $P_{k_1 k_2 \ell}$, defined as a product of the three-point functions
\begin{equation}
P_{k_1 k_2 \ell} = C_{H,L,k_1} C_{H,L,k_2} C_{k_1, k_2, \ell} ,
\end{equation}
and $C_{k_1, k_2, \ell}$ describes the three-point function of the light operator and two orbit states $\langle \mathcal{O}_{k_1} \mathcal{O}_L \mathcal{O}_{k_2} \rangle$ that we want to compute.
The collinear blocks ${\cal G}_{k_1 k_2 \ell}(u_i)$ take the form
\begin{equation}
&{\cal G}_{k_1 k_2 \ell}(u_i)=u_1^{{\tau_1 \over 2}} u_3^{{\tau_2 \over 2}} (1-u_2)^{\ell} u_5^{{\Delta_L \over 2}} u_2^{{\Delta_L - \Delta_{H} \over 2}} \int_0^1 [d t_1] [d t_2] {\nonumber} \\
&\times {\left( 1 + t_1 (1-u_2) u_4 - u_4 \right)^{J_2 - \ell} \left( 1 + t_2 (1-u_2) u_5 - u_5 \right)^{J_1 - \ell} \over (u_4+ t_2 (1-u_4) )^{{\Delta_2-\Delta_1+ J_1 + J_2 -2 \ell + \Delta_L \over 2}} (u_5 + t_1 (1-u_5) )^{{\Delta_1-\Delta_2+ J_1 + J_2 -2 \ell + \Delta_L \over 2}} } {\nonumber} \\
&\times {1 \over (1- t_1 t_2 (1-u_2))^{{\Delta_1+\Delta_2 + J_1 + J_2 - \Delta_L \over 2}}} ,
\end{equation}
where $0 \leq \ell \leq \min (J_1,J_2)$ labels various three-point function tensor structures in the spinning correlator $\langle \mathcal{O}_{J_1} \mathcal{O}_{L} \mathcal{O}_{J_2} \rangle$, see \cite{Antunes:2021kmm} for details. In the formula above we introduced conformal cross-ratios as follows,
\begin{equation}
u_i = {x_{i,i+1}^2 x_{i+2,i+4}^2 \over x_{i,i+2}^2 x_{i+1,i+4}^2}~, ~~~ i \in \mathbb{N} ~(\text{mod} ~5) .
\end{equation}
Setting $\Delta_H = \Delta_L$ the result above reproduces the corresponding formula from \cite{Antunes:2021kmm} (upon doing the change of variables $t_i \to 1-t_i$).
To reproduce \eqref{eq:limit5} we thus get the following equation,
\begin{equation}
\label{eq:crossing5}
\sum_{k_1,k_2,J} P_{k_1 k_2 J} {\cal G}_{k_1 k_2 J}(u_i) =g (u_1 u_3)^{{\Delta_H+\Delta_L \over 2}} u_2^{-{\Delta_H \over 2}} u_5^{{\Delta_L \over 2}}.
\end{equation}
Matching the dependence on $u_1$ and $u_3$ we get $\tau_1 = \tau_2 = \Delta_H+\Delta_L $ which is simply the statement that the result is reproduced by the leading twist double-twist operators.
At this point we can take the limit $\Delta_H \to \infty$ and also introduce $J_i = \ell + j_i$. Under the integral we set $\Delta_i = \Delta_H+\Delta_L+J_i$ and rescale $t_i \to {t_i \over \Delta_H}$, after which both the limit and the integral can be trivially computed. The crossing equation \eqref{eq:crossing5} then takes the form
\begin{equation}
\sum_{\ell=0}^\infty (1-u_2)^{\ell} \sum_{j_1,j_2=0}^\infty (u_4^{-1}-1)^{j_2} (u_5^{-1}-1)^{j_1} P_{\ell, j_1, j_2} =g \left( {u_4 u_5 \over u_2} \right)^{{\Delta_L \over 2}} + ... , ~ u_2 \to 0 .
\end{equation}
Note that the dependence on $u_2$ is non-analytic around $u_2=0$ on the RHS and analytic for fixed $\ell$ on the LHS. This means that it can only be generated by the infinite spin tail.
Let us introduce
\begin{equation}
g_{\ell}(u_4, u_5) = \sum_{j_1,j_2=0}^\infty (u_4^{-1}-1)^{j_2} (u_5^{-1}-1)^{j_1} P_{\ell, j_1, j_2} .
\end{equation}
A simple way to satisfy crossing is to impose the following condition,
\begin{equation}
\lim_{\ell \to \infty} g_{\ell}(u_4, u_5) = g \left( u_4 u_5 \right)^{{\Delta_L \over 2}}{\ell^{{\Delta_L \over 2} - 1} \over \Gamma({\Delta_L \over 2})} .
\end{equation}
In this way we get
\begin{equation}
\lim_{\ell \to \infty} P_{\ell, j_1, j_2} = (-1)^{j_1 + j_2} g { \ell^{{\Delta_L \over 2} - 1} \over \Gamma({\Delta_L \over 2})} {\Gamma(j_1+{\Delta_L \over 2}) \over \Gamma(j_1+1) \Gamma({\Delta_L \over 2})}{\Gamma(j_2+{\Delta_L \over 2}) \over \Gamma(j_2+1) \Gamma({\Delta_L \over 2})}.
\end{equation}
Finally, dividing by the GFF three-point functions of the double-twist operators we get for the desired one-point function\footnote{Here we have chosen to define the heavy-light double twist operators of odd spin in a way that the corresponding GFF three-point function contains $(-1)^J$.}
\begin{equation}
C_{j_1, j_2, \ell} = {g \over \ell^{{\Delta_L \over 2}} } {\Gamma(\Delta_L) \over \Gamma({\Delta_L \over 2})^3} {\Gamma(j_1+{\Delta_L \over 2}) \Gamma(j_2+{\Delta_L \over 2}) \over \Gamma(j_1+1) \Gamma(j_2+1) } , ~~~ J_i = j_i + \ell, ~~~ \ell \gg 1 .
\end{equation}
As expected, the one-point function of the light operator in the double-twist state is different from zero to leading order in $c_T$, given a non-zero three-point function $\langle \mathcal{O}_L \mathcal{O}_L \mathcal{O}_L \rangle \sim g$. In contrast it is zero in the state which is dual to the rotating Kerr black hole with the same quantum numbers. Perturbatively in spin these two states do not mix, and thus orbit states represent eigenstates that naively violate ETH. However, the nonperturbative effect of tunneling introduces mixing between the orbit states and the black hole states. Orbits become long-lived resonances which eventually thermalize.
\subsection{Perturbative scars}\label{perturbativescars}
\indent In any chaotic theory, the matrix elements of simple operators in energy eigenstates are expected to take the ETH form (\ref{eq:ETH}). In some systems the ETH is known to hold in all energy eigenstates \cite{kim2014testing}. However, recently a class of systems has been discovered, where the ETH holds in all but a small number of eigenstates \cite{bernien2017probing}. Eigenstates in the middle of the spectrum whose matrix elements violate the ETH are called quantum scars. Typically they occupy a small subsector of the Hilbert space but lead to interesting phenomena such as revivals and lack of thermalization. Many examples of theories containing scars have been found, and we refer the reader to the reviews \cite{moudgalya2021quantum,serbyn} for a survey of the literature on the subject. Many of the models that have been found can be understood based on symmetry properties of the Hamiltonian \cite{Pakrouski:2020hym,Pakrouski:2021jon}.
\indent Although there are theories containing isolated scar states, some of the simplest cases contain towers of scars with equally spaced energy eigenstates. For example, the Hubbard model contains states with equally spaced energy of the form $(\eta^\dagger)^n|0\rangle$ \cite{PhysRevLett.63.2144}. Here $\eta^\dagger$ is the raising operator for a pseudospin $SU(2)$ symmetry, and raises the energy of the state by a fixed amount. It is possible to add an interaction that breaks the symmetry but preserves the tower of states \cite{PhysRevB.102.075132,PhysRevB.102.085140}, so that these states constitute a tower of many-body scars. In this example the pseudospin $SU(2)$ is referred to as a spectrum generating algebra, since the full tower can be generated by acting with $\eta^\dagger$ on the vacuum.
\indent Now let us turn to the case of holographic CFTs. In this paper we discussed the following interesting phenomenon. Starting from the four-point function of heavy-light operators and performing the light-cone bootstrap analysis \cite{Kulaxizi:2018dxo,Fitzpatrick:2019zqz,Karlsson:2019qfi,Kulaxizi:2019tkd,Fitzpatrick:2019efk,Karlsson:2019dbd,Li:2019zba,Li:2020dqm,Parnachev:2020fna,Fitzpatrick:2020yjb,Parnachev:2020zbr,Karlsson:2021duj,Karlsson:2021mgg}, one concludes that the spectrum contains an infinite family of double-twist states. The light-cone bootstrap analysis of the five-point function (along the lines of \cite{Antunes:2021kmm}) reveals that these double-twist states violate ETH while being in the middle of the spectrum \eqref{eq:regionofM}, and as such they look like scars. In the bulk these non-thermal states correspond to gravitational orbits around black holes.
The bulk picture, however, immediately reveals the limitation of this conclusion: the gravitational orbits are not stable due to tunneling and gravitational radiation. Therefore gravitational orbits are long-lived states that eventually thermalize. Both effects are closely related to the presence of the black hole horizon in the bulk. Its presence also indicates that the spectrum of the dual CFT is effectively continuous. How is it possible then that the light-cone bootstrap analysis reveals a set of discrete states?
The resolution of this puzzle is that the continuum of states contains an infinite set of narrow resonances whose widths are nonperturbative in ${1 \over J}$, more precisely $\exp(- c_0(\mu) J)$, see formulas (\ref{tunnsmallmu}) and (\ref{tunnlargemu}). The light-cone bootstrap large spin expansion studied in \cite{Kulaxizi:2018dxo,Fitzpatrick:2019zqz,Karlsson:2019qfi,Kulaxizi:2019tkd,Fitzpatrick:2019efk,Karlsson:2019dbd,Li:2019zba,Li:2020dqm,Parnachev:2020fna,Fitzpatrick:2020yjb,Parnachev:2020zbr,Karlsson:2021duj,Karlsson:2021mgg} misses such effects, and as we explained the set of resonances (or quasi-normal modes) becomes the set of double-twist operators that we described above. This can also be understood by recalling that at $J=\infty$ the spectrum of the CFT is effectively controlled by the bulk higher spin symmetry, and the double-twist operators form a multiplet under this symmetry, see Appendix \ref{app:bulkhigherspin}. As we go to finite spin $J$ the bulk higher spin symmetry gets broken by $\epsilon = {1 \over J}$ effects. Perturbatively in $\epsilon$, the double-twist operators persist as non-thermal energy eigenstates. Nonperturbatively in $\epsilon$ they disappear from the spectrum and become long-lived resonances.
In this sense we can say that double-twist operators (or gravitational orbits) present an example of \emph{perturbative scars:} an infinite family of long-lived states whose lifetime can be made arbitrarily big in the large $c_T$ limit and whose lifetime is nonperturbative in the symmetry breaking parameter $\epsilon$. \\
\indent One notable difference between the states we have found and conventional examples of quantum scars is that the latter have a sub-volume law scaling of the entanglement entropy. The towers of scars analyzed in the literature generally consist of quasiparticles above a low-entanglement state, which implies that the states in the tower have a smaller entanglement than a state obeying ETH. However, there are also known examples with a volume-law scaling \cite{Langlett:2021efq}. In our case, the presence of the black hole means that the entanglement entropy scales with the volume. \\
\indent The discussion so far applies to perturbation theory in $1/J$. At finite $J$, the orbits are no longer exact eigenstates, but are instead broadened into resonances. They can be expressed as a sum of exact eigenstates in a band of width $\Gamma$,
\begin{align}
|\text{orbit}\rangle=\sum_{E_i=E_{\text{orbit}}-\Gamma}^{E_{\text{orbit}}+\Gamma}c_i|E_i\rangle,\hspace{10 mm}\sum_{i=1}^N |c_i|^2=1.
\end{align}
Here we have defined $N\sim e^{S(E_{\text{orbit}})}$, and the normalization condition implies that $|c_j| \sim O(N^{-1/2})$. Let us assume that each of the eigenstates $|E_i\rangle$ obeys ETH. Then the one-point function of a light operator in the orbit state is
\begin{align}
\langle \text{orbit}|\mathcal{O}_L|\text{orbit}\rangle&= \sum_{i,j} c_j^* c_i \langle E_j | \mathcal{O}_L|E_i\rangle\notag \\
&= \sum_{i,j} e^{-S \Big({E_i + E_j \over 2} \Big)/2} c_j^* R_{j,i} c_i f_{\mathcal{O}_L} \Big({E_i + E_j \over 2}, E_i - E_j\Big) ,
\end{align}
where we used that by assumption the one-point function of $\mathcal{O}_L$ in each energy eigenstate is suppressed at large $c_T$. Let us write the pseudorandom matrix $R_{j,i} =|R_{j,i}| e^{i \phi_{j,i}}$, where hermiticity implies that $\phi_{j,i} = - \phi_{i,j}$ and $|R_{j,i}| \sim O(1)$. We can also write $c_i = |c_i| e^{i \phi_i}$. We would like to make the matrix element $O(1)$ by choosing the phases in a way that they add up instead of canceling each other. Note that there are $O(N^2)$ phases $\phi_{i,j}$, so we cannot cancel all of them since we only have $O(N)$ coefficients at our disposal.
Let us consider a simplified model where we set $f_{\mathcal{O}_L}(\bar E, \omega)=1$ in the model above. We also consider a state where $|c_i|=\frac{1}{\sqrt{N}}$ for all $i$. We first estimate the sum over $j$,
\begin{equation}
\sum_{j=1}^{N} R_{j,i} c_j^* \equiv O(1) \times e^{- i \tilde \phi_i} ,
\end{equation}
where the $O(1)$ coefficient comes as follows: the sum over $N$ random phases produces $\sqrt{N}$, which together with the fact that $|c_j| \sim O(N^{-1/2})$ gives something $O(1)$.
We next choose the phases of $c_i$ to be
\begin{equation}
\phi_i = \tilde \phi_i, ~~~ i = 1, ..., N .
\end{equation}
This produces the following estimate,
\begin{equation}
\sum_{i,j} e^{-S \Big({E_i + E_j \over 2} \Big)/2} c_j^* R_{j,i} c_i &\simeq e^{- S(E_{\text{orbit}})/2} \sum_{i,j=1}^N c_j^* R_{j,i} c_i {\nonumber} \\
&\sim e^{- S(E_{\text{orbit}})/2} \sum_{i=1}^N c_i e^{- i \tilde \phi_i} \notag\\
&\sim O(N^{1/2}) e^{- S(E_{\text{orbit}})/2}\sim O(1),
\end{equation}
where we used $N \sim e^{S(E_{\text{orbit}})}$. Therefore we conclude that the matrix elements $O(1)$ are indeed consistent with the ETH upon a proper choice of $c_i$. In this way, at finite $c_T$ and $J$ we can think of orbits as superpositions of the black hole microstates.
\indent It is instructive to contrast the orbit states with other long-lived quasi-normal modes. For instance, the large AdS black hole has a family of parity-odd gravitational quasi-normal modes with purely imaginary frequency, \cite{cardoso,berti}
\begin{align}\label{longlivedj}
\omega_J=-\frac{i}{r_s}\frac{(J-1)(J+d-1)}{d}, ~~~ r_s \gg 1.
\end{align}
The decay time of these modes is proportional to $r_s$, which is much longer than the expected thermalization time $1/r_s$ at high temperatures. Therefore these modes are long-lived resonances, just like the orbit states. However, there are several key differences between the modes (\ref{longlivedj}) and the orbits:
\begin{itemize}
\item Since $\text{Re }\omega=0$ in (\ref{longlivedj}), the overlap $\langle \omega_J(t)|\omega_J(0)\rangle$ decays exponentially in time but does not oscillate. This is in contrast to the orbit modes, for which $\omega$ has a nonzero real part, and which exhibit approximate revivals.
\item In the case of the orbits the decay rate decreases exponentially with spin, while the decay rate of the modes (\ref{longlivedj}) grows polynomially in spin. In particular, for (\ref{longlivedj}) there is no small parameter analogous to $\mu/J^{d/2-1}$ in which to expand. Therefore there is not a limit in which these states become approximate energy eigenstates for which we can identify the corresponding boundary operator.
\item The orbit quasi-normal modes come in a two-parameter family labeled by $J$ and $n$, while the modes (\ref{longlivedj}) are only parameterized by $J$. In contrast to the orbit states, they also disappear from the spectrum in the $\mu \to 0$ limit \cite{cardoso,berti}.
\end{itemize}
\section{Discussion}
In this paper we have considered classically stable orbits around AdS Schwarzschild black holes. These are long-lived states in the bulk that eventually decay due to tunneling and gravitational radiation. The tunneling rate is given by $e^{-c_0(\mu)J}$, where $J$ is the spin of the orbit. We have computed the lifetime of the orbits due to scalar radiation in the case of large black holes, $\mu \gg 1$ and large spin $J \gg \mu^2$, and found that it is $\sim {J^4 \over {\kappa^2\mu^8}}$ in $d=3$. In the analogous gravitational problem, the lifetime scales as $\sim c_T$.
The existence of such orbits in the bulk has various manifestations in the dual conformal field theory \cite{Festuccia:2008zx,Berenstein:2020vlp}. First, they correspond to quasi-normal modes in the thermal two-point function \cite{Festuccia:2008zx}
\begin{equation}
\text{Gravitational orbits} ~~~\leftrightarrow~~~ \text{Quasi-normal modes} \ . {\nonumber}
\end{equation}
The fact that they are long-lived is mapped to the fact that the imaginary part of the corresponding quasi-normal modes is very small. Second, via the ETH they appear as resonances in the heavy-light OPE, where the heavy operator is dual to the black hole, and the light operator is dual to the orbiting body. By resonances we mean poles on the second sheet of the conformal partial waves $c(\Delta,J)$.
It is then natural to consider the expansion of the heavy-light four-point function in terms of the QNMs \eqref{eq:QNMexpansion}.
After connecting orbits to QNMs and considering the QNM expansion of the heavy-light four-point correlator we noted that there is a natural expansion in which QNMs \emph{look} like energy eigenstates or primary operators. First, we take the large $c_T$ limit. Second, we consider the large $J$ expansion. In this setting, orbits become stable: tunneling is nonperturbative in $J$; gravitational radiation is ${1 \over c_T}$ suppressed. In fact this is precisely the setup studied in the light-cone bootstrap \cite{Fitzpatrick:2012yx,Komargodski:2012ek} that has been recently applied to the heavy-light correlators \cite{Kulaxizi:2018dxo,Fitzpatrick:2019zqz,Karlsson:2019qfi,Kulaxizi:2019tkd,Fitzpatrick:2019efk,Karlsson:2019dbd,Li:2019zba,Li:2020dqm,Parnachev:2020fna,Fitzpatrick:2020yjb,Parnachev:2020zbr,Karlsson:2021duj,Karlsson:2021mgg}. The orbital quasi-normal modes then become nothing but the double-twist operators\footnote{The relation between the double-twist operators and stable orbits has previously been discussed in \cite{Berenstein:2020vlp}.}
\begin{equation}
\text{Quasi-normal modes} ~~~\leftrightarrow~~~ \text{Double-twist operators} \ . {\nonumber}
\end{equation}
We then used the Bohr-Sommerfeld quantization formula and corrections thereto to compute the anomalous dimensions of the double-twist operators in the semi-classical expansion: ${1 \over \Delta_L} \to 0$, ${J \over \Delta_L}$ fixed. We found complete agreement with the results obtained using the light-cone bootstrap and we made further all-order in $\mu$ predictions to the anomalous dimensions of the double-twist operators, see e.g. Figure \ref{fig:ReggeTr}.
Finally, following \cite{Antunes:2021kmm}, we used the five-point light-cone bootstrap analysis of the heavy-light operator to compute the one-point function of a light operator in the double-twist states. As expected based on the correspondence of the double-twist states with orbits in AdS, the one-point functions are not thermal. Such a behavior was recently observed in many condensed matter systems and the corresponding phenomenon is known as many-body scars \cite{serbyn,moudgalya2021quantum}. We thus find that there is a natural connection
\begin{equation}
\text{Double-twist operators}~~~\leftrightarrow~~~ \text{Many-body scars} \ . {\nonumber}
\end{equation}
However, there is an important difference between the phenomenon of scars observed in condensed matter systems and in holography. In our case the non-thermal nature of the orbits is an artifact of working in perturbation theory in $1/J$ and to leading order in $c_T$. Including nonperturbative effects in $1/J$ or $1/c_T$ corrections turns double-twist operators into resonances, and makes the spectrum continuous as opposed to being discrete. In this sense we can call double-twist operators \emph{perturbative scars}. \\
\indent The correspondence between orbits, QNMs, double-twist operators, and scars can be succinctly summarized by the equation
\begin{align}
\label{eq:finalformula}
{1 \over 2 \pi i} \Big(\overbrace{ {1 \over \underbrace{\Delta - \Delta_n(J)}_{\text{stable orbit}} - i e^{- c_0(\mu) J}} - {1 \over \Delta - \Delta_n(J) + i \underbrace{ e^{- c_0(\mu) J}}_{\text{BH tunneling}} } }^{\text{BH microstates}}\Big) \overset{\text{PT}}{\simeq} \overbrace{\underbrace{\delta(\Delta - \Delta_n(J) )}_{\text{double-twist}}}^{\text{scar}},
\end{align}
where PT denotes that the equivalence holds in perturbation theory in $1/J$. Of course, the continuum on the LHS of \eqref{eq:finalformula} is a large $c_T$ limit effect. In particular, we always work in the regime $e^{- c_0(\mu)J} \gg e^{- c_T}$, where $e^{- c_T}$ is the scale associated with the discreteness of the CFT spectrum.
Let us comment on a few possible future directions:
\begin{itemize}
\item In this paper we have focused on the simplest case of Schwarzschild-AdS black holes, but orbits should exist in more general situations as well. It would be interesting to generalize our analysis to other gravitational solutions, such as charged, rotating, extremal, and supersymmetric black holes. On the boundary, this corresponds to considering $[\mathcal{O}_H\mathcal{O}_L]_{n,J}$, where now $\mathcal{O}_H$ is a more general heavy operator with one of the aforementioned properties. It would be interesting to use the Bohr-Sommerfeld condition to compute the spectrum of anomalous dimensions in this more general case, as well as to reproduce these results using the light-cone bootstrap.
\item Quasi-normal modes of black holes in flat space were recently connected to four-dimensional supersymmetric gauge theories \cite{Aminov:2020yma}, see also \cite{Bianchi:2021mft}. In that work an exact Bohr-Sommerfeld quantization condition was formulated and solved using the Nekrasov partition function in a particular phase of the $\Omega$-background \cite{Nekrasov:2002qd,Nekrasov:2003rj,Nekrasov:2009rc}. This connection can be also generalized to the AdS black holes \cite{AlbaWIP}. It would be very interesting to explore this connection further in the context of the conformal bootstrap and see if it can be used to ``solve'' the thermal two-point function in the black hole background.
\item It would be interesting to develop a deeper understanding of the connection between the many-body scars and gravitational orbits. In particular, it would be very interesting to see if there are other condensed matter systems which exhibit a similar phenomenon of perturbative scars. As emphasized in \cite{Berenstein:2020vlp}, the spatial curvature and the finite volume of the space on which the quantum system lives are necessary for the existence of stable orbits in the gravity dual. It would be also very interesting to understand the interplay between the lifetime of gravitational orbits and maximal chaos. In the context of holographic theories this is related to understanding the fate of gravitational orbits at finite 't Hooft coupling $\lambda$ and understanding how stringy corrections affect the lifetime of the orbits.
\item We have observed that a finite lifetime of the gravitational orbits drastically changes the structure of the heavy-light OPE. Instead of a discrete sum over the double-twist operators we get a continuum spectrum with many narrow resonances. The width of these resonances is nonperturbative in spin $J$. It would be interesting to explore nonperturbative in spin $J$ effects using the Lorentzian inversion formula, which should correctly capture them \cite{Caron-Huot:2017vep,Caron-Huot:2020adz}. It would be also interesting to see if the heavy-light four-point function bootstrap together with the ETH could provide new insights into the finite temperature bootstrap \cite{Iliesiu:2018fao,Iliesiu:2018zlz,Alday:2020eua}.
\item Existence of gravitational orbits around AdS black holes is a very robust feature of holographic theories. In particular, it would be interesting to analyze orbits when the geometry of the boundary is different from $S^{d-1}$. It is clear that the positive curvature of $S^{d-1}$ is important for having gravitational orbits, e.g. they are obviously absent for $\mathbb{R}^{d-1}$ or $T^{d-1}$. Relatedly, it would be interesting to better understand the implication of the light-cone bootstrap for CFTs on general spatial manifolds $\mathbb{R} \times \Sigma$.
\item In this work we have focused on gravitational orbits in asymptotically AdS spaces. As Earthlings well know,\footnote{This comment does not apply to flat-Earthers.} stable orbits are characteristic to the gravitational dynamics in four dimensions in asymptotically flat and de Sitter spacetimes as well. It would be very interesting to understand how the potentially very intricate structure of the orbits, e.g. the Milky Way galaxy, is realized in the dual theories and if there are simple toy models that could correctly capture the gross features of the orbital dynamics (together with the maximal chaos).
\item An interesting aspect of the heavy-light bootstrap is the role of the horizon in the dual classical geometry. The presence of the horizon makes the spectrum of the normalizable solutions to the bulk wave equation continuous (and correspondingly the spectrum of the dual CFT). Instead having a horizon-less geometry, e.g. an AdS star \cite{deBoer:2009wk,Arsiwalla:2010bt}, would produce a discrete spectrum in the heavy-light channel. Both geometries look identical close to the boundary (due to the no-hair theorem) and in a related manner they will acquire an identical contribution from the multi-trace stress energy tensor operators $T^k$ as discussed, for example, in \cite{Fitzpatrick:2019zqz}. The difference between the two geometries is captured in the light-light channel by the properties of the double-twist operators $\mathcal{O}_L \Box^n \partial^J \mathcal{O}_L$ as well as by the spectrum of the double-twist heavy-light operators $\mathcal{O}_H \Box^n \partial^J \mathcal{O}_L$. When using the Lorentzian inversion formula \cite{Caron-Huot:2017vep,Li:2019zba,Li:2020dqm} the contribution of the double-twist operators $\mathcal{O}_L \Box^n \partial^J \mathcal{O}_L$ is suppressed by ${1 \over c_T}$, but the contribution of the operators $\mathcal{O}_H \Box^n \partial^J \mathcal{O}_L$ is only suppressed by powers of $\mu$. Therefore, the difference between black holes and stars will be visible. It would be interesting to explore these effects in detail.
\item It would be very interesting to generalize our discussion to finite $c_T$. There are many places in which our discussion will have to be modified. One important effect is gravitational radiation, which contributes to the lifetime of the orbits at order $1/c_T$. More conceptually, the basic features of the black hole geometry, such as the black hole horizon or the black hole singularity \cite{Festuccia:2005pi,Festuccia:2006sa,Festuccia:2008zx}, naturally appear on the second sheet of the conformal partial wave expansion $c(\Delta,J)$. The notion of a second sheet of $c(\Delta,J)$ is a large $c_T$ effect, which is absent in a single CFT at finite $c_T$ with a discrete spectrum. Still, it should be possible to define the second sheet of $c(\Delta, J)$ at finite $c_T$ upon a proper coarse-graining procedure. Naturally it should be related to the experience of a low-energy observer in the bulk with a finite energy resolution. For example, it is natural to smear $c(\Delta,J)$ over a finite region of the $\Delta$-plane, which effectively creates a cut even at finite $c_T$, see e.g. \cite{Mukhametzhanov:2018zja}. Indeed, perturbative in $c_T$ computations effectively perform such an averaging in a region of size $1/c_T^{\#}$ since they do not resolve the $e^{-c_T}$ discreteness of the spectrum. It would be also interesting to explore the effects of other notions of averaging in higher-dimensional CFTs that have been recently discussed in the literature, see e.g. \cite{Pollack:2020gfa,Belin:2020hea,Collier:2022emf,Schlenker:2022dyo,Chandra:2022bqq}.
\end{itemize}
\section*{Acknowledgements}
We thank Alexandre Belin, Shouvik Datta, Anatoly Dymarsky, Thomas Iadecola, Alba Grassi, Daniel Jafferis, Daniel Kapec, Shota Komatsu, Baur Mukhametzhanov, Kyriakos Papadodimas, G\'abor S\'arosi, Wilke van der Schee, Steven Shenker, and Evgeny Skvortsov for helpful discussions. We thank Liam Fitzpatrick for comments on the manuscript. This project has received funding from the European Research Council (ERC) under the European Union's Horizon 2020 research and innovation programme (grant agreement number 949077).
| {
"redpajama_set_name": "RedPajamaArXiv"
} | 1,578 |
Q: Doubts about RecyclerView with CursorAdapter I read this thread about RecyclerViews and Cursor adapters and I'm trying to use the first solution. I'm confused about the implementation of OnViewHolder function. I understood that OnViewHolder could be called by (ViewHolder, int) or (ViewHolder, Cursor) but I don't know how to use it with my project.
This is my original Adapter
public class FeedAdapter extends CursorAdapter {
/*
Etiqueta de Depuración
*/
private static final String TAG = FeedAdapter.class.getSimpleName();
/**
* View holder para evitar multiples llamadas de findViewById()
*/
static class ViewHolder {
TextView titulo;
TextView descripcion;
int tituloI;
int descripcionI;
}
public FeedAdapter(Context context, Cursor c, int flags) {
super(context, c, flags);
}
public View newView(Context context, Cursor cursor, ViewGroup parent) {
LayoutInflater inflater = LayoutInflater.from(parent.getContext());
View view = inflater.inflate(R.layout.item_layout, null, false);
ViewHolder vh = new ViewHolder();
// Almacenar referencias
vh.titulo = (TextView) view.findViewById(R.id.titulo);
vh.descripcion = (TextView) view.findViewById(R.id.descripcion);
// Setear indices
vh.tituloI = cursor.getColumnIndex(ScriptDatabase.ColumnEntradas.TITULO);
vh.descripcionI = cursor.getColumnIndex(ScriptDatabase.ColumnEntradas.DESCRIPCION);
view.setTag(vh);
return view;
}
public void bindView(View view, Context context, Cursor cursor) {
final ViewHolder vh = (ViewHolder) view.getTag();
// Setear el texto al titulo
vh.titulo.setText(cursor.getString(vh.tituloI));
// Obtener acceso a la descripción y su longitud
vh.descripcion.setText(descripcion);
}
}
My new adapter with RecyclerView. OnBindViewHolder is unfinished because I don't know how set it.
public class RVAdapter extends CursorRecyclerViewAdapter<RVAdapter.ViewHolder>{
private Cursor mCursorAdapter;
public RVAdapter(Context context, Cursor cursor){
super(context,cursor);
}
public static class ViewHolder extends RecyclerView.ViewHolder {
CardView cv;
TextView titulo;
TextView descripcion;
int tituloI;
int descripcionI;
public ViewHolder (View view){
super(view);
cv = (CardView) itemView.findViewById(R.id.cv);
titulo = (TextView) itemView.findViewById(R.id.titulo);
descripcion = (TextView) itemView.findViewById(R.id.descripcion);
}
}
@Override
public ViewHolder onCreateViewHolder(ViewGroup parent, int viewType) {
View itemView = LayoutInflater.from(parent.getContext())
.inflate(R.layout.item_layout, parent, false);
ViewHolder vh = new ViewHolder(itemView);
return vh;
}
@Override
public void onBindViewHolder(ViewHolder viewHolder, int position) {
mCursorAdapter.moveToPosition(position);
final ViewHolder vh = (ViewHolder) view.getTag();
}
}
Should I use "onBindViewHolder(ViewHolder viewHolder, Cursor cursor)" in this case? How could I make it works?
EDIT 1
This is the abstract class that extends my RVAdapter
public abstract class CursorRecyclerViewAdapter<VH extends RecyclerView.ViewHolder> extends RecyclerView.Adapter<VH> {
private Context mContext;
private Cursor mCursor;
private boolean mDataValid;
private int mRowIdColumn;
private DataSetObserver mDataSetObserver;
public CursorRecyclerViewAdapter(Context context, Cursor cursor) {
mContext = context;
mCursor = cursor;
mDataValid = cursor != null;
mRowIdColumn = mDataValid ? mCursor.getColumnIndex("_id") : -1;
mDataSetObserver = new NotifyingDataSetObserver();
if (mCursor != null) {
mCursor.registerDataSetObserver(mDataSetObserver);
}
}
public Cursor getCursor() {
return mCursor;
}
@Override
public int getItemCount() {
if (mDataValid && mCursor != null) {
return mCursor.getCount();
}
return 0;
}
@Override
public long getItemId(int position) {
if (mDataValid && mCursor != null && mCursor.moveToPosition(position)) {
return mCursor.getLong(mRowIdColumn);
}
return 0;
}
@Override
public void setHasStableIds(boolean hasStableIds) {
super.setHasStableIds(true);
}
public abstract void onBindViewHolder(VH viewHolder, Cursor cursor);
@Override
public void onBindViewHolder(VH viewHolder, int position) {
if (!mDataValid) {
throw new IllegalStateException("this should only be called when the cursor is valid");
}
if (!mCursor.moveToPosition(position)) {
throw new IllegalStateException("couldn't move cursor to position " + position);
}
onBindViewHolder(viewHolder, mCursor);
}
/**
* Change the underlying cursor to a new cursor. If there is an existing cursor it will be
* closed.
*/
public void changeCursor(Cursor cursor) {
Cursor old = swapCursor(cursor);
if (old != null) {
old.close();
}
}
/**
* Swap in a new Cursor, returning the old Cursor. Unlike
* {@link #changeCursor(Cursor)}, the returned old Cursor is <em>not</em>
* closed.
*/
public Cursor swapCursor(Cursor newCursor) {
if (newCursor == mCursor) {
return null;
}
final Cursor oldCursor = mCursor;
if (oldCursor != null && mDataSetObserver != null) {
oldCursor.unregisterDataSetObserver(mDataSetObserver);
}
mCursor = newCursor;
if (mCursor != null) {
if (mDataSetObserver != null) {
mCursor.registerDataSetObserver(mDataSetObserver);
}
mRowIdColumn = newCursor.getColumnIndexOrThrow("_id");
mDataValid = true;
notifyDataSetChanged();
} else {
mRowIdColumn = -1;
mDataValid = false;
notifyDataSetChanged();
//There is no notifyDataSetInvalidated() method in RecyclerView.Adapter
}
return oldCursor;
}
private class NotifyingDataSetObserver extends DataSetObserver {
@Override
public void onChanged() {
super.onChanged();
mDataValid = true;
notifyDataSetChanged();
}
@Override
public void onInvalidated() {
super.onInvalidated();
mDataValid = false;
notifyDataSetChanged();
//There is no notifyDataSetInvalidated() method in RecyclerView.Adapter
}
}
}
A: You can use your ViewHolder and position to access your views and fields like
below:
@Override
public void onBindViewHolder(ViewHolder viewHolder, final int position) {
StructName name = names.get(position);
viewHolder.txttitle.setText(name.title);
viewHolder.rootItem.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View v) {
}
});
}
It is easier than using cursor
if that was helpful let me be aware.
A: Use Cursor as it is more memory efficient.
@Override
public void onBindViewHolder(ViewHolder viewHolder, int position) {
mCursorAdapter.moveToPosition(position);
viewHolder.titulo.setText(mCursorAdapter.getString(mCursorAdapter.getColumnIndex(ScriptDatabase.ColumnEntradas.TITULO)));
viewHolder.descripcion.setText(mCursorAdapter.getString(mCursorAdapter.getColumnIndex(ScriptDatabase.ColumnEntradas.DESCRIPCION)));
}
| {
"redpajama_set_name": "RedPajamaStackExchange"
} | 5,931 |
\section*{Inclination Corrections for Low-z SDSS Galaxies}
In order to understand the formation and evolution of galaxies it is necessary to measure the distribution of galaxy properties and how they evolve with redshift. However, most studies of galaxies look at the distributions of galaxies'
observed properties like luminosity, color, size and morphology. Because of
attenuation by dust these properties can change with galaxy inclination and
therefore what is measured is a convolution of a galaxy's intrinsic properties
and the effects of dust. This complicates attempts to understand how galaxy properties evolve with redshift as any observed change could be due to variations in dust properties or evolution of galaxies' intrinsic properties. Furthermore, comparison to theory is made more difficult because a model
of attenuation from dust as a function of inclination is needed to compare
theoretical models to observations.
For these reasons it is useful to determine inclination corrections for galaxies. This can be done for large samples in a statistical manner by searching for
correlations between a galaxy property and inclination, and then assuming that
correlation should be removed \citep{giov:94,giov:95,tully:98,mgh:03,shao:07}.
In \citet{mbbh:08} we perform such an analysis on a sample of galaxies from
the NYU-VAGC \citep{blan:03c} which are imaged in both the Sloan Digital Sky Survey \citep{york:00} and the Two Micron All Sky Survey \citep{skru:06}. This
gives us $10,340$ galaxies with eight wavebands of coverage from $u$ to $K_s$ with which to determine inclination corrections.
\begin{figure}[t]
\includegraphics[width=\textwidth] {maller_b1.ps}
\vspace{-1.6cm}
\caption{Color-magnitude diagrams for the observed (left panel) and intrinsic
(right panel) galaxies in our sample. Purple points are face-on ($b/a > 0.85$)
while orange points highly inclined ($b/a < 0.35$). Clearly, the observed
properties of face-on and edge-on galaxies differ, with edge-on galaxies
being fainter and redder. However, when inclination corrections are applied
the two show a comparable distribution in the color-magnitude diagram. Also,
the fraction of galaxies that one would consider red decreases when going
from observed to intrinsic color.}
\label{fig:cmd}
\end{figure}
When inclination corrections are applied to the sample interesting changes to the population of galaxies can be noted. Figure \ref{fig:cmd} shows the color-magnitude diagram for observed (left panel) and intrinsic (right panel) galaxy properties.
The color-magnitude diagram changes in a number of ways. Galaxies
redder than the red sequence are found in the observed color-magnitude
diagram, but not once inclination corrections have been applied. Also, the
fraction of galaxies with $g - r \ge 0.7$ in the observed color-magnitude diagram
is $46\%$. This drops to $32\%$ when going to the intrinsic color-magnitude diagram a reduction of almost one third in the number of red galaxies a significant change. Please see \citet{mbbh:08} for more detail.
\acknowledgements AHM acknowledges support from the PDAC and the College of Arts and Sciences to attend this meeting.
| {
"redpajama_set_name": "RedPajamaArXiv"
} | 3,265 |
Human Trafficking News
Disembarkation of migrants on the Syracusan coast – Capture of smugglers on the high seas
14th July 2021 12th July 2021 Tony Kingham human trafficking, maritime patrol, maritime surveillance
In the early hours of the morning of Saturday 10 July, a patrol of the Italian State Police surprised 32 migrants on the beach of Contrada Marianelli, in the province of Syracuse; a short distance away was found a small rubber dinghy with which they had reached the coast, evidently disembarked from a larger boat. Thanks to a quick exchange of information between the Syracuse Police Headquarters and the Aeronavale Operational Department of the Guardia di Finanza of Palermo, it was noted that the dinghy could be the tender of a sailing ship flying the German flag sighted and photographed the day before at 50 miles to east of the Sicilian coasts by an ATR aircraft of the Aeronavale Operational Command of the Guardia di Finanza, during a maritime patrol mission as part of the Frontex European Agency's "Themis 2021" operation.
A new air-sea search mission for the aircraft of the yellow flames was immediately arranged, which at 10.15 on the same day on Saturday identified the ship 20 miles south of Pozzallo, sailing towards Malta; the suspect vessel flew the US flag and no longer the German flag and lacked a service dinghy.
Therefore, the operation coordinated by the Operations Center of the General Command of the Guardia di Finanza in collaboration with the Command in Chief of the Naval Squad of the Navy began. A coastguard and a very fast lookout from the Aeronavale Operational Department of Palermo, already engaged in surveillance missions of the waters of Lampedusa and Pozzallo respectively, while the Navy used the Vega patrol vessel, already sailing in the Sicilian Channel, to provide a greater security framework for police intervention.
Thanks to the constant updating of the position of the target provided by the plane, at 13 the sailing ship was reached by the lookout of the yellow flames 33 miles south of Pozzallo; the officers carried out the so-called "flag investigation" to exercise the "right of visit" provided for by the United Nations Convention on the Law of the Sea, given the suspicion that the boat was devoid of nationality. Since the boat did not comply with the order to stop the motion, continuing on its route to Malta, the officers boarded and assumed control of the ship, finding the presence of two subjects of Ukrainian nationality; on board the German flag hoisted the day before was noticed and evident signs of bivouac of numerous people;
The men of the Mobile Squad and the Naval Operations Section of the Syracuse Finance Police, coordinated by the Syracuse Public Prosecutor's Office, carried out detailed investigations by the judicial police, acquiring a quantity of evidence such as to believe that the migrants traced on the coast day before they had disembarked from that sailing ship captured on the high seas. The Ukrainian subjects were therefore subjected to detention by the judicial police and the boat was seized.
← A large amount of narcotic substances was seized during a special operation by criminal investigators from the Ministry of Interior
Frontex to help authorities fight gun smuggling →
Terrorist groups must not be allowed to exploit 'fragilities' caused by global health pandemic
7th July 2020 7th July 2020 Tony Kingham
Wellesley Island US CBP Agents and the Jefferson County Sheriff's Office Seized Approximately 150 Pounds of Marijuana
Foiled: Creative drug import attempts revealed
22nd December 2022 21st December 2022 Tony Kingham | {
"redpajama_set_name": "RedPajamaCommonCrawl"
} | 4,181 |
Home » News » Public Transport Permit Mandatory for Commercial EVs To Avail FAME-II Incentives
Public Transport Permit Mandatory for Commercial EVs To Avail FAME-II Incentives
By Sneha Verma/ Updated On Fri, Mar 29th, 2019
The Faster Adoption and Manufacturing of (Hybrid) and Electric Vehicles (FAME) scheme in its second phase starting next week, has made Public transport Mandatory for commercial EVs.
From April 1, the owners of commercial electric vehicles (three wheelers and four wheelers) will be required to have a public transport permit from a government agency in order to avail the benefits FAME II scheme.
FAME II scheme which was launched in February by the Ministry of Heavy Industries and Public Enterprises (MoHIPE), is due to take effect from April 1, 2019 and will span for three years.
According to operational guidelines for delivery of demand incentives under FAME-II scheme include:
Subsidies to buyers on the purchase of vehicles along with other benefits like exemptions in roads tax, registrations fees, and parking fee.
The public transport permit required for electric three wheelers and four wheelers needs to state that the vehicle will be used only for public transport purposes and not for private use.
FAME II will support 5 lakh e-rickshaws (e-3W) having ex-factory price of up to Rs 5 lakh with an incentive of Rs 50,000 each.
It will also offer a sop of Rs 1.5 lakh each to 35,000 electric four-wheelers (e-4W) with an ex-factory price of up to Rs 15 lakh with an aim to boost clean mobility in mass transport.
It will also offer a subsidy of up to Rs. 1.5 lakh each to 55,000 commercial electric four-wheelers with an ex-factory price of up to Rs. 15 lakh.
Under FAME II, for e-2W segment, incentives are also applicable for privately-owned vehicles. Two-wheelers costing up to Rs 1.5 lakh are eligible for an incentive of up to Rs 20,000.
"In the e-3W, e-4W and e-bus segments, incentives will be applicable mainly to vehicles used for public transport or those registered for commercial purposes," it added.
The guidelines of FAME II require that the vehicle dealers ensure that the incentives are passed on to individuals using vehicles only for commercial use in case of three wheelers and four wheelers. This is because FAME II aims to encourage the use of electric vehicles as public and commercial transports.
The guidelines also say, "A dealer should ensure that only one vehicle per category per person is allowed to claim demand incentives. No individual person can purchase more than one vehicle of the same category and claim incentives under the scheme. However, there will be no restrictions for the number of vehicles to be purchased by other than individual categories of buyers."
The second phase of the FAME scheme, which has a total outlay of Rs. 10,000 crore aims to boost clean mobility in public transport. FAME II lays special focus on public and shared transports and aims to develop an ecosystem for electric vehicles in the country by incentivizing the EVs and building a robust charging infrastructure.
Tags: electric mobility industry in India, FAME II Incentives, FAME-II, Incentives, India | {
"redpajama_set_name": "RedPajamaCommonCrawl"
} | 2,673 |
Piparpati Parchrouwa (nep. पिपरपती पचरौता) – gaun wikas samiti w środkowej części Nepalu w strefie Narajani w dystrykcie Bara. Według nepalskiego spisu powszechnego z 2001 roku liczył on 607 gospodarstw domowych i 4106 mieszkańców (1910 kobiet i 2196 mężczyzn).
Przypisy
Dystrykt Bara | {
"redpajama_set_name": "RedPajamaWikipedia"
} | 9,197 |
Court rejects bid to stop Tesla felling trees for German plant
Destinations, Lifestyle, News, Travel, Trending Topics
Central banks split on role of private sector in payments – survey
FRANKFURT (Reuters) – A German court has rejected a bid by environmentalists to halt tree cutting at Tesla's proposed manufacturing site near Berlin, paving the way for the electric carmaker to proceed with plans to finalise construction of its Gigafactory.
Earlier this week, environmentalists went to court in the eastern city of Frankfurt an der Oder in an attempt to stop Tesla clearing a forest, arguing that cutting down more trees could endanger hibernating snakes. [L8N2IO1OV]
The court said on Thursday it had rejected the appeal on the grounds that Tesla had taken sufficient measures to protect the reptiles by complying with requirements to capture and resettle them in an alternative habitat.
Tesla's permission to start construction depends on a conditional approval by local authorities, who are obliged to consult environmental groups and the community.
The regional court in Frankfurt an der Oder reviewed a complaint by environmental group NABU, which argued that snakes and sand lizards could be put at risk by Tesla's plans.
Environmentalists can appeal against the decision at a higher administrative court.
(Reporting by Nadine Schimroszik, writing by Edward Taylor, editing by Emma Thomasson and Mark Potter)
The post Court rejects bid to stop Tesla felling trees for German plant first appeared on One America News Network.
© 2020 Myrtle Beach Choice. All Rights Reserved. | {
"redpajama_set_name": "RedPajamaCommonCrawl"
} | 3,055 |
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<html>
<head>
<META http-equiv="Content-Type" content="text/html; charset=UTF-8">
<meta content="Apache Forrest" name="Generator">
<meta name="Forrest-version" content="0.10-dev">
<meta name="Forrest-skin-name" content="pelt">
<title>Upgrading from an Earlier Version of Apache™ FOP</title>
<link type="text/css" href="../skin/basic.css" rel="stylesheet">
<link media="screen" type="text/css" href="../skin/screen.css" rel="stylesheet">
<link media="print" type="text/css" href="../skin/print.css" rel="stylesheet">
<link type="text/css" href="../skin/profile.css" rel="stylesheet">
<script src="../skin/getBlank.js" language="javascript" type="text/javascript"></script><script src="../skin/getMenu.js" language="javascript" type="text/javascript"></script><script src="../skin/fontsize.js" language="javascript" type="text/javascript"></script>
<link rel="shortcut icon" href="../favicon.ico">
</head>
<body onload="init()">
<script type="text/javascript">ndeSetTextSize();</script>
<div id="top">
<!--+
|header
+-->
<div class="header">
<!--+
|start group logo
+-->
<div class="grouplogo">
<a href="http://xmlgraphics.apache.org/"><img class="logoImage" alt="Apache XML Graphics" src="../images/group-logo.gif" title="Apache XML Graphics is responsible for the creation and maintenance of software for managing the conversion of XML formats to graphical output, and the creation and maintenance of related software components, based on software licensed to the Foundation"></a>
</div>
<!--+
|end group logo
+-->
<!--+
|start Project Logo
+-->
<div class="projectlogo">
<a href="http://xmlgraphics.apache.org/fop/"><img class="logoImage" alt="Apache FOP" src="../images/logo.jpg" title="Apache FOP (Formatting Objects Processor) is the world's first output independent formatter. Output formats currently supported include PDF, PCL, PS, SVG, XML (area tree representation), Print, AWT, MIF and TXT. The primary output target is PDF."></a>
</div>
<!--+
|end Project Logo
+-->
<!--+
|start Search
+-->
<div class="searchbox">
<form action="http://www.google.com/search" method="get" class="roundtopsmall">
<input value="xmlgraphics.apache.org" name="sitesearch" type="hidden"><input onFocus="getBlank (this, 'Search the site with google');" size="25" name="q" id="query" type="text" value="Search the site with google">
<input name="Search" value="Search" type="submit">
</form>
</div>
<!--+
|end search
+-->
<!--+
|start Tabs
+-->
<ul id="tabs">
<li>
<a class="unselected" href="../index.html">Home</a>
</li>
<li class="current">
<a class="selected" href="../1.0/index.html">Version 1.0</a>
</li>
<li>
<a class="unselected" href="../1.1/index.html">Version 1.1</a>
</li>
<li>
<a class="unselected" href="../trunk/index.html">FOP Trunk</a>
</li>
<li>
<a class="unselected" href="../dev/index.html">Development</a>
</li>
</ul>
<!--+
|end Tabs
+-->
</div>
</div>
<div id="main">
<div id="publishedStrip">
<!--+
|start Subtabs
+-->
<div id="level2tabs"></div>
<!--+
|end Endtabs
+-->
<script type="text/javascript"><!--
document.write("Last Published: " + document.lastModified);
// --></script>
</div>
<!--+
|breadtrail
+-->
<div class="breadtrail">
<a href="http://www.apache.org/">The Apache Software Foundation</a> > <a href="http://xmlgraphics.apache.org/">Apache XML Graphics Project</a><script src="../skin/breadcrumbs.js" language="JavaScript" type="text/javascript"></script>
</div>
<!--+
|start Menu, mainarea
+-->
<!--+
|start Menu
+-->
<div id="menu">
<div onclick="SwitchMenu('menu_selected_1.1', '../skin/')" id="menu_selected_1.1Title" class="menutitle" style="background-image: url('../skin/images/chapter_open.gif');">Apache™ FOP 1.0</div>
<div id="menu_selected_1.1" class="selectedmenuitemgroup" style="display: block;">
<div class="menuitem">
<a href="../1.0/index.html">About</a>
</div>
<div class="menuitem">
<a href="../1.0/releaseNotes_1.0.html">Release Notes</a>
</div>
<div class="menuitem">
<a href="../1.0/changes_1.0.html">Changes (1.0)</a>
</div>
<div class="menuitem">
<a href="../1.0/knownissues_overview.html">Known Issues</a>
</div>
<div class="menupage">
<div class="menupagetitle">Upgrading</div>
</div>
<div onclick="SwitchMenu('menu_1.1.6', '../skin/')" id="menu_1.1.6Title" class="menutitle">Using Apache™ FOP</div>
<div id="menu_1.1.6" class="menuitemgroup">
<div class="menuitem">
<a href="../1.0/anttask.html">Ant Task</a>
</div>
<div class="menuitem">
<a href="../1.0/compiling.html">Build</a>
</div>
<div class="menuitem">
<a href="../1.0/configuration.html">Configure</a>
</div>
<div class="menuitem">
<a href="../1.0/embedding.html">Embed</a>
</div>
<div class="menuitem">
<a href="../1.0/running.html">Run</a>
</div>
<div class="menuitem">
<a href="../1.0/servlets.html">Servlets</a>
</div>
</div>
<div onclick="SwitchMenu('menu_1.1.7', '../skin/')" id="menu_1.1.7Title" class="menutitle">Features</div>
<div id="menu_1.1.7" class="menuitemgroup">
<div class="menuitem">
<a href="../1.0/accessibility.html">Accessibility</a>
</div>
<div class="menuitem">
<a href="../1.0/events.html">Events</a>
</div>
<div class="menuitem">
<a href="../1.0/extensions.html">Extensions</a>
</div>
<div class="menuitem">
<a href="../1.0/fonts.html">Fonts</a>
</div>
<div class="menuitem">
<a href="../1.0/output.html">Output Targets</a>
</div>
<div class="menuitem">
<a href="../1.0/graphics.html">Graphics</a>
</div>
<div class="menuitem">
<a href="../1.0/hyphenation.html">Hyphenation</a>
</div>
<div class="menuitem">
<a href="../1.0/intermediate.html">Intermediate Format</a>
</div>
<div class="menuitem">
<a href="../1.0/metadata.html">Metadata</a>
</div>
<div class="menuitem">
<a href="../1.0/pdfa.html">PDF/A</a>
</div>
<div class="menuitem">
<a href="../1.0/pdfx.html">PDF/X</a>
</div>
<div class="menuitem">
<a href="../1.0/pdfencryption.html">PDF Encryption</a>
</div>
</div>
</div>
<div id="credit"></div>
<div id="roundbottom">
<img style="display: none" class="corner" height="15" width="15" alt="" src="../skin/images/rc-b-l-15-1body-2menu-3menu.png"></div>
<!--+
|alternative credits
+-->
<div id="credit2"></div>
</div>
<!--+
|end Menu
+-->
<!--+
|start content
+-->
<div id="content">
<div title="raw XML" class="xmllink">
<a class="dida" href="upgrading.xml"><img alt="XML - icon" src="../skin/images/xmldoc.gif" class="skin"><br>
XML</a>
</div>
<div title="Portable Document Format" class="pdflink">
<a class="dida" href="upgrading.pdf"><img alt="PDF -icon" src="../skin/images/pdfdoc.gif" class="skin"><br>
PDF</a>
</div>
<div class="trail">Font size:
<input value="Reset" class="resetfont" title="Reset text" onclick="ndeSetTextSize('reset'); return false;" type="button">
<input value="-a" class="smallerfont" title="Shrink text" onclick="ndeSetTextSize('decr'); return false;" type="button">
<input value="+a" class="biggerfont" title="Enlarge text" onclick="ndeSetTextSize('incr'); return false;" type="button">
</div>
<h1>Upgrading from an Earlier Version of Apache™ FOP</h1>
<div id="front-matter">
<div id="minitoc-area">
<ul class="minitoc">
<li>
<a href="#important">Important!</a>
</li>
<li>
<a href="#issues">What you need to know when you upgrade!</a>
</li>
</ul>
</div>
</div>
<a name="important"></a>
<h2 class="underlined_10">Important!</h2>
<div class="section">
<p>
If you're planning to upgrade to the latest Apache™ FOP version there are a few very important
things to consider:
</p>
<ul>
<li>
More than half of the codebase has been rewritten over the
last three years. With version 0.93 the code has reached
<strong>production level</strong>, and continues to improve with
version 0.94.
</li>
<li>
The API of FOP has changed considerably and is not
backwards-compatible with versions 0.20.5 and
0.91beta. Version 0.92 introduced the <strong>new stable
API</strong>.
</li>
<li>
Since version 0.92 some deprecated methods which were part
of the old API have been removed. If you upgrade from 0.91
beta, you will need to adjust your Java code. Similarly if
you upgrade from 0.92 and use deprecated methods.
</li>
<li>
If you are using a configuration file for version 0.20.5, you have to rebuild it in the new format. The format
of the configuration files has changed since version 0.20.5. See conf/fop.xconf for
an example configuration file. A XML Schema file can be found under
src/foschema/fop-configuration.xsd.
</li>
<li>
Beginning with version 0.94 you can skip the generation of
font metric files and remove the "font-metrics" attribute
in the font configuration. In the unlikely case that due to
a bug you still need to use font metrics files you will need
to regenerate the font metrics file if yours are from a FOP
version before 0.93.
</li>
<li>
<p>
The new code is much more strict about the interpretation of the XSL-FO 1.1 specification.
Things that worked fine in version 0.20.5 might start to produce warnings or even errors
now. FOP 0.20.5 contains many bugs which have been corrected in the new code.
</p>
<div class="note">
<div class="label">An example</div>
<div class="content">
While FOP 0.20.5 allowed you to have empty <span class="codefrag">fo:table-cell</span> elements, the new code
will complain about that (unless relaxed validation is enabled) because the specification
demands at least one block-level element (<span class="codefrag">(%block;)+</span>, see
<a class="external" href="http://www.w3.org/TR/xsl/#fo_table-cell">XSL-FO 1.1, 6.7.10</a>)
inside an <span class="codefrag">fo:table-cell</span> element.
</div>
</div>
</li>
<li>
Extensions and Renderers written for version 0.20.5 will not work with the new code! The new FOP
extension for <a class="external" href="http://barcode4j.sourceforge.net">Barcode4J</a> is available since
January 2007.
</li>
<li>
The SVG Renderer and the MIF Handler have not been resurrected, yet! They are currently non-functional
and hope for someone to step up and reimplement them.
</li>
</ul>
</div>
<a name="issues"></a>
<h2 class="underlined_10">What you need to know when you upgrade!</h2>
<div class="section">
<p>
When you use your existing FO files or XML/XSL files which work fine with FOP version
0.20.5 against this FOP version some things may not work as expected. The following
list will hopefully help you to identify and correct those problems.
</p>
<ul>
<li>
Check the <a href="../compliance.html">Compliance page</a> for the feature causing
trouble. It may contain the necessary information to understand and resolve the problem.
</li>
<li>
Not all 0.20.5 output formats are supported. PDF and Postscript should be fully supported.
See <a href="output.html">Output Targets</a> for a more complete description.
</li>
<li>
As stated above empty table cells <span class="codefrag"><fo:table-cell></fo:table-cell></span>
are not allowed by the specification. The same applies to empty <span class="codefrag">static-content</span>
and <span class="codefrag">block-container</span> elements, for example.
</li>
<li>
0.20.5 is not XSL-FO compliant with respect to sizing images (<span class="codefrag">external-graphic</span>)
or <span class="codefrag">instream-foreign-object</span>
objects. If images or SVGs are sized differently in your outputs with the new FOP version
check <a class="external" href="http://issues.apache.org/bugzilla/show_bug.cgi?id=37136">Bug 37136</a>
as it contains some hints on what to do. The file
<a class="external" href="http://svn.apache.org/viewcvs.cgi/xmlgraphics/fop/trunk/examples/fo/basic/images.fo?view=markup">
<span class="codefrag">"examples/fo/basic/images.fo"</span></a> has
a number of good examples that show the new, more correct behaviour.
</li>
<li>
The <span class="codefrag">fox:outline</span> extension is not implemented in this version anymore.
It has been superseded by the new bookmark elements from XSL-FO 1.1.
</li>
</ul>
</div>
<span class="version">
version 1298724</span>
</div>
<!--+
|end content
+-->
<div class="clearboth"> </div>
</div>
<div id="footer">
<!--+
|start bottomstrip
+-->
<div class="lastmodified">
<script type="text/javascript"><!--
document.write("Last Published: " + document.lastModified);
// --></script>
</div>
<div class="copyright">
Copyright ©
1999-2012 <a href="http://www.apache.org/licenses/">The Apache Software Foundation. Licensed under Apache License 2.0</a>
<br>
Apache, Apache FOP, the Apache feather logo, and the Apache FOP
logos are trademarks of The Apache Software Foundation. All other marks mentioned may be trademarks or registered trademarks of their respective owners.
</div>
<!--+
|end bottomstrip
+-->
</div>
</body>
</html>
| {
"redpajama_set_name": "RedPajamaGithub"
} | 4,245 |
Mahfoud Ould Lemrabott was the Head of the Supreme Court of Mauritania. He died on 11 May 2013.
References
2013 deaths
Mauritanian jurists
Year of birth missing
Place of birth missing | {
"redpajama_set_name": "RedPajamaWikipedia"
} | 3,635 |
Q: Using Java 11 HttpClient.send() working like asynchronous call and making current thread to be in WAITING state for long time. Kindly help to resolve Using java11 Http Client as below,
HttpClient httpClient = HttpClient.newBuilder().version(HttpClient.Version.HTTP_1_1).followRedirects(HttpClient.Redirect.NORMAL).connectTimeout(Duration.ofSeconds(Long.valueOf(matchMakerHttpConnectionTimeout))).build();
HttpRequest httpRequest = HttpRequest.newBuilder().POST(requestBody).uri(URI.create(baseurl)).timeout(Duration.ofSeconds(requestTimeOut))
.setHeader("Content-Type", "application/json; charset=UTF-8")
.setHeader("Authorization", "Bearer " + accessToken).build();
LOGGER.info("Base url : {} with request body : {}", baseurl, request);
HttpResponse<String> response = httpClient.send(httpRequest,BodyHandlers.ofString());
But seems after invoking httpclient.send, threads going on to WAITING state for long time and does not comes back to pool for picking other jobs which increases CPU spike as well. It should actually need to be synchronous call right? Why is it executing in an asynchronous way?
"pool-3-thread-2" #57 prio=5 os_prio=0 cpu=7204.26ms elapsed=16809.99s tid=0x000056033ed08800 nid=0x59 waiting on condition [0x00007f263fc89000]
java.lang.Thread.State: WAITING (parking)
at jdk.internal.misc.Unsafe.park(java.base@11.0.5/Native Method)
- parking to wait for <0x00000000c54cb950> (a java.util.concurrent.CompletableFuture$Signaller)
at java.util.concurrent.locks.LockSupport.park(java.base@11.0.5/LockSupport.java:194)
at java.util.concurrent.CompletableFuture$Signaller.block(java.base@11.0.5/CompletableFuture.java:1796)
at java.util.concurrent.ForkJoinPool.managedBlock(java.base@11.0.5/ForkJoinPool.java:3128)
at java.util.concurrent.CompletableFuture.waitingGet(java.base@11.0.5/CompletableFuture.java:1823)
at java.util.concurrent.CompletableFuture.get(java.base@11.0.5/CompletableFuture.java:1998)
at jdk.internal.net.http.HttpClientImpl.send(java.net.http@11.0.5/HttpClientImpl.java:541)
at jdk.internal.net.http.HttpClientFacade.send(java.net.http@11.0.5/HttpClientFacade.java:119)
at com.comcast.bo.rlcm.cmdp.helper.SampleServiceHelper.invokeAPI(SampleServiceHelper.java:273)
at com.comcast.bo.rlcm.cmdp.helper.SampleServiceHelper.lambda$invokeCmAPI$7(SampleServiceHelper.java:220)
at com.comcast.bo.rlcm.cmdp.helper.SampleServiceHelper$$Lambda$1778/0x0000000100d66440.accept(Unknown Source)
at java.util.HashMap.forEach(java.base@11.0.5/HashMap.java:1336)
at com.comcast.bo.rlcm.cmdp.helper.SampleClassA.invokeHttpClientForExternalApi(SampleService.java:201)
at com.comcast.bo.rlcm.cmdp.service.SampleService.lambda$processCm$4(SampleService.java:310)
at com.comcast.bo.rlcm.cmdp.service.SampleService$$Lambda$1302/0x0000000100c6b840.accept(Unknown Source)
at java.util.concurrent.ConcurrentHashMap.forEach(java.base@11.0.5/ConcurrentHashMap.java:1603)
at com.comcast.bo.rlcm.cmdp.service.SampleService.processCm(SampleService.java:224)
at com.comcast.bo.rlcm.cmdp.service.SampleService$$FastClassBySpringCGLIB$$f43afa24.invoke(<generated>)
at org.springframework.cglib.proxy.MethodProxy.invoke(MethodProxy.java:218)
at org.springframework.aop.framework.CglibAopProxy$DynamicAdvisedInterceptor.intercept(CglibAopProxy.java:687)
at com.comcast.bo.rlcm.cmdp.service.SampleService$$EnhancerBySpringCGLIB$$33bd9b1c.processCm(<generated>)
at com.comcast.bo.rlcm.cmdp.service.SampleService2.processCM(SampleService2.java:99)
at com.comcast.bo.rlcm.cmdp.cmjob.processor.SampleProcessor.run(SampleProcessor.java:42)
at java.util.concurrent.Executors$RunnableAdapter.call(java.base@11.0.5/Executors.java:515)
at java.util.concurrent.FutureTask.run(java.base@11.0.5/FutureTask.java:264)
at java.util.concurrent.ThreadPoolExecutor.runWorker(java.base@11.0.5/ThreadPoolExecutor.java:1128)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(java.base@11.0.5/ThreadPoolExecutor.java:628)
at java.lang.Thread.run(java.base@11.0.5/Thread.java:834)
A: As it is written in documentation of HttpClient class:
Requests can be sent either synchronously or asynchronously:
*
*send(HttpRequest, BodyHandler) blocks until the request has been sent and the response has been received.
*sendAsync(HttpRequest, BodyHandler) sends the request and receives the response asynchronously. The sendAsync method returns immediately with a CompletableFuture...
So if you need not to wait the response and not to block the current thread - use the sendAsync method.
| {
"redpajama_set_name": "RedPajamaStackExchange"
} | 7,884 |
\section{Introduction}
The field of large scale data storage has witnessed significant growth in recent years with applications such as social networks and file sharing. In Storage systems, data should be stored over multiple nodes (independent storage devices such as disks, servers or peers) and it may happen a storage node is failed or leaves the system. Thus, a reliable storage capability over individually unreliable nodes can be achieved through introducing redundancy.
There are various strategies for distributing redundancy and depending on the used method the system can tolerate a limited number of node failures. Moreover, to keep the redundancy the same as if there is no node failures, the system should have self-repairing capability. In other words, each damaged node is replaced with a new node after transferring data over the network. Reconstructing a failed node and the maintenance bandwidth are called repair problem and repair bandwidth, respectively.
Erasure codes are the most common strategy for distributing redundancy. An erasure coded system employs totally $n$ packets of the same size, $k$ of which are data packets
(the fragments of the original data file) and $n-k$ of which are parity packets (the coding information). It is worth mentioning that the process of coding can be done using MDS or non-MDS codes. In a distributed storage system, these packets are stored at $n$ different nodes over the network. MDS codes~\cite{MDS} are optimally space-efficient and the encoding process is such that having access to any $k$ nodes is adequate to recover the original data file. In these codes, each parity node increases fault tolerance. In other words, a $(n,k)$ MDS coded system can tolerate any $n-k$ node failures.
Replication, RAID 5, RAID 6~\cite{RAID}, and Reed-Solomon codes \cite{ReedSolomon} are the most popular MDS codes that have been used in storage systems.
In replication, the parity nodes and data nodes are the same. In fact, each data node has a replica which is stored in a related parity node.
RAID 5 and RAID 6 employ $n-k=1$ and $n-k=2$ parity node respectively; however, Reed-Solomon codes can be designed for any value of $(n,k)$~\cite{ReedSolomon}.
Another class of MDS codes are MDS array codes such as EVENODD~\cite{evenodd}, extended EVENODD~\cite{extendedevenodd}, Row-Diagonal Parity (RDP)~\cite{RDP}, X-code~\cite{Xcode}, P-code~\cite{Pcode}, B-code~\cite{Bcode}, and STAR code~\cite{Starcode}. These codes are based on XOR operation and have lower encoding and decoding complexity than Reed-Solomon codes.
\begin{figure*}
\centering
\epsfig{file=fig1.eps,width=0.75\linewidth,clip=}
\caption{The graphical representation of the proposed code. The recovery of original data file can be achieved by connecting to: (i) two nodes from a partition and $k-2$ different nodes selected over $k-2$ different partitions (solid-lines) and (ii) $2m~\leq~k$ parity nodes and $k-2m$ systematic nodes selected from $k$ different partitions (dashed-lines are the specific case $m=0$).}
\label{fig:1}
\end{figure*}
In~\cite{LDPC}, Low Density Parity-Check (LDPC) codes as a class of non-MDS codes are introduced. These codes aim at reducing encoding and decoding costs
computation over lossy networks; however, are not as space-efficient as MDS codes. Non-MDS codes are further investigated in several papers. As a case in point, Hafner in~\cite{WEAVER} proposes a new class of non-MDS XOR-based codes, called WEAVER codes. The WEAVER codes are vertical codes which can tolerate up to 12 node failures. In a vertical code like X-code and WEAVER code each node contains both data and parity packets. In contrast, each node in a flat-XOR code such as EVENODD, holds either data or parity packets. The authors in~\cite{combination} describe construction of two novel flat XOR-based code, called stepped combination and HD-combination codes. Also in~\cite{combination}, chain codes, a variant of chained configuration method~\cite{chain}, are investigated.
The standard MDS codes in terms of repair problem are inefficient and recreating a failed node consumes a repair bandwidth equal the entire stored data. This motivated Dimakis et al. in~\cite{RC} to propose a repair optimal MDS code, called regenerating code, to make a tradeoff between repair bandwidth and storage capacity per node. It is shown in~\cite{RC} that any point on the identified tradeoff curve can be achieved through the use of network coding. Furthermore, in~\cite{GRC} an extension of regenerating codes, dubbed generalized regenerating codes, are introduced for the case of having different download cost associated with each node. Moreover, the authors in~\cite{SRC} investigate the case in which the newcomer node can wisely select the existing node to connect to.
The repair model presented in~\cite{RC} is a functional repair. In the functional repair model the recreated packets stored at replaced node can be different with the lost packets. Contrast the functional repair with the exact repair in which each lost packet is exactly reconstructed. The exact repair for the minimum bandwidth regenerating codes is investigated in~\cite{exactMBR}. Also in~\cite{exactMSR1},~\cite{exactMSR2},~\cite{exactMSR3}, the exact repair for the minimum storage regenerating codes is addressed based upon the interference alignment concepts.
Regenerating codes outperform existing MDS erasure codes in terms of maintenance bandwidth; however, constructing a new packet requires communication with $d~\geq~k$ nodes and the minimum repair bandwidth can be achieved when $d=n-1$. In addition, the surviving nodes have to apply random linear network coding to their packets. Accordingly, many of the proposed constructions require a huge finite-field size and are not feasible for practical storage systems. The current study aims to introduce a $(n,k)=(2k,k)$ non-MDS XOR-based code which can tolerate any three node failures. Moreover, it is shown that in this code a single node failure can be repaired through access to only three nodes regardless of $k$.
The rest of paper is organized as follows: Section~\ref{sec:cons} states the construction and motivates the main idea. In section~\ref{sec:RP}, we explain the repair problem of the proposed code. Finally, sections~\ref{sec:conclusion}, concludes the paper.
\section{Construction}\label{sec:cons}
In this section we describe the construction of the proposed non-MDS code. Fig.~\ref{fig:1} shows a graphical representation for this code. This code is a class of flat XOR-codes which contains $2k$ storage nodes and each node stores one packet. The construction is such that $k$ out of $2k$ existing nodes i.e., $\{S_i\}_{i=1,\ldots,k}$, hold data fragments, called systematic nodes. The remaining $k$ nodes, i.e.,$\{P_i\}_{i=1,\ldots,k}$, are the parity nodes which store parity packets. Also it is assumed that each systematic node $(S_i)$ has a related parity node $(P_i)$ in which they stand in the same partition. Thus, with this construction, the code entails $k$ partitions.
For storing a file of size $M$ using this construction, the file is divided to $k$ fragments i.e., $d_1,d_2,d_3,\ldots,d_k$, each of size $\frac{M}{k}$. Each fragment can be a single bit or a block of bits. These fragments are stored at $k$ systematic nodes. Fig.~\ref{fig:2} illustrates a $(n,k)=(10,5)$ code corresponding to the explained construction. Referring to Fig.~\ref{fig:2}, the five data fragments, i.e., $d_1,d_2,d_3,d_4$ and $d_5$, are stored at nodes $S_1,S_2,S_3,S_4$ and $S_5$ respectively. Noting the parity packet $p_i$ to be stored in parity node $P_i$ is computed as
\begin{equation}\label{equ1}
p_i=\sum_{j=1,~j\neq~i}^{k}d_j~,
\end{equation}
For $i=1,\ldots,k$. The addition here is bit-by-bit XOR for two data packets. For instance in a (10,5) code, as can be seen in Fig.~\ref{fig:2}, parity packets $p_1=d_2+d_3+d_4+d_5$, $p_2=d_1+d_3+d_4+d_5$, $p_3=d_1+d_2+d_4+d_5$, $p_4=d_1+d_2+d_3+d_5$ and $p_5=d_1+d_2+d_3+d_4$ are stored in parity nodes $P_1,P_2,P_3,P_4$ and $P_5$ respectively. It is worth mentioning that for specific case $k=2$ this code performs similar to replication method. Also for $k=3$ the parity packets are same with the parity packets of the proposed chain code in~\cite{combination}.
\begin{figure*}
\centering
\epsfig{file=fig2.eps,width=0.8\linewidth,clip=}
\caption{The repair problem of a $(n,k)=(10,5)$ code. The lost packet $d_1$ can be repaired by the use of three packets including its related parity packet
i.e, $d_2+d_3+d_4+d_5$. Also when $d_2+d_3+d_4+d_5$ has failed $d_1$
can be reconstructed by the use of four nodes from another partitions.
}
\label{fig:2}
\end{figure*}
Now we are ready to discuss how the recovery of the original file can be achieved. It is assumed corresponding to a request to reconstructing
the original data file a Data Collector (DC) is initiated and connects to existing nodes. With this construction, DC requires to connect to at least $k$ out of existing nodes. Recall the proposed construction in this paper is non-MDS and having access to any $k$ nodes out of existing $2k$ nodes does not ensure restoring the original file. Each data collector has two possible strategies for selecting $k$ storage nodes to connect to: (i) DC can connect to both systematic node and parity node from a partition and $k-2$ different nodes selected from $k-2$ different partitions out of the $k-1$ remaining partitions (solid-lines in Fig.~\ref{fig:1} are a specific case of this scenario). When using this strategy there are
\begin{equation}\label{equ2}
\binom{k}{k-1}~\binom{k-1}{1}~2^{k-2}~=~(k)(k-1)2^{k-2}~,
\end{equation}
options for DC to choose $k$ nodes to connect to.
(ii) DC can connect to $2m~\leq~k$ parity nodes and $k-2m$ systematic nodes selected from $k$ different partitions. (dashed-lines in Fig.~\ref{fig:1} can be considered as a specific case of this scenario i.e. $m=0$). With strategy (ii), number of possible ways to choose $k$ nodes is computed as
\begin{equation}\label{equ3}
\sum_{m=0}^{\lfloor\frac{k}{2}\rfloor}~\binom{k}{2m}~=~2^{k-1}~.
\end{equation}
Thus, there totally exist $2^{k-2}(k^2-k+2)$ ways to recover the original file using $k$ nodes. Considering the two possible strategies, the proposed $(2k,k)$ code can tolerate any three node failures. Moreover, this code can tolerate up to $k-1$ node failures if these nodes are failed from $k-1$ different partitions.
As discussed, the storage per node for storing a file of size $M$ is $\frac{M}{k}$ which is equivalent with standard MDS codes and Minimum Storage Regenerating (MSR) codes.\footnote{The identified tradeoff curve in~\cite{RC} has two extremal points; one end of this curve corresponds to
the minimum storage per node and the other end corresponds to minimum bandwidth point. These two extremal points can
be achieved by the use of Minimum Storage Regenerating (MSR) and Minimum Bandwidth Regenerating (MBR) codes,
respectively.} However, for $2k\frac{M}{k}=2M$ total storage, MSR and standard MDS codes offer higher reliability. Recall to keep the reliability same across time, each failed node should be repaired. In the naive method that can be used for any MDS code, a single node repair can be done after transferring the whole data file over the network (the repair bandwidth is equal to $M$). Regenerating codes can reduce the repair bandwidth if we allow the new node to connect to $d~>~k$ nodes. Our goal is to reduce the repair bandwidth compared to the naive method when the new node connects to $d~<~k$ nodes. The following section aims at addressing the repair model of the suggested code.
\section{Repair problem}\label{sec:RP}
Note that when a node is failed or leaves the system a new node is initiated, attempting to connect to existing
nodes to reconstruct the failed node. During the course of repairing a damaged node, we face two scenarios: (i) The parity or systematic node which has common partition with the failed node (related node) is active and (ii) The related node has failed. In the case of existence of the related node, the failed node can be reconstructed by communicating with only three nodes i.e., the related node and both parity node and systematic node from another active partition. In fact, there are $k-1$ ways to repair a failed node through downloading from only three nodes. For example, referring to Fig.~\ref{fig:2}, we assume that the systematic node $S_1$ which holds data fragment $d_1$ is failed. When parity node $P_1$ which stores parity packet $d_2+d_3+d_4+d_5$ is active, the new node can restore $d_1$ through downloading three packets in $k-1=4$ ways as
\begin{eqnarray*}
(d_2+d_3+d_4+d_5)+(d_2)+(d_1+d_3+d_4+d_5)\nonumber\\
(d_2+d_3+d_4+d_5)+(d_3)+(d_1+d_2+d_4+d_5)\nonumber\\
(d_2+d_3+d_4+d_5)+(d_4)+(d_1+d_2+d_3+d_5)\nonumber\\
(d_2+d_3+d_4+d_5)+(d_5)+(d_1+d_2+d_3+d_4)
\end{eqnarray*}
Thus, in scenario (i), three nodes are involved during the course of downloading for reconstructing a new node. This leads to have a repair bandwidth equal to $3\frac{M}{k}$.
As discussed earlier, in MSR codes the new node should connect $d~\geq~k$ nodes to ensure reconstructing a failed node. In these codes the repair bandwidth is computed as $\frac{Md}{k(d-k+1)}$ which is a decreasing function with respect to $d$~\cite{RC} and, hence, when new node connects to the minimum possible nodes i.e., $k$ nodes, the repair bandwidth takes its maximum value i.e., $M$. For instance, in a $(n,k)=(10,5)$ MSR code, the repair bandwidth $\frac{3M}{5}$ can be achieved if new node connects to $d=6$ nodes which is greater than $d=3$ nodes in the proposed scheme.
For the case of scenario (ii), we can consider two different strategies. In the first strategy, dubbed strategy A, first the parity node is repaired and then used to reconstruct the related systematic node. For recreating the parity node without using the related systematic node, the new node should connect to $2m$ parity nodes and $k-1-2m$ systematic nodes over $k-1$ different partitions and there are
\begin{equation}\label{equ4}
\sum_{m=0}^{\lfloor\frac{k-1}{2}\rfloor}~\binom{k-1}{2m}~=~2^{k-2}~,
\end{equation}
ways to choose these nodes. For instance, referring Fig.~\ref{fig:2}, in a $(10,5)$ code there are $2^{(5-2)}=8$ ways in which the new node can use 0,2 or 4 parity nodes to repair parity node $P_1$ which stores $d_2+d_3+d_4+d_5$ without the use of node $S_1$. These eight ways are as
\begin{eqnarray*}
(d_2)+(d_3)+(d_4)+(d_5)~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\nonumber\\
(d_1+d_3+d_4+d_5)+(d_1+d_2+d_4+d_5)+(d_4)+(d_5)\nonumber\\
(d_1+d_3+d_4+d_5)+(d_1+d_2+d_3+d_5)+(d_3)+(d_5)\nonumber\\
(d_1+d_3+d_4+d_5)+(d_1+d_2+d_3+d_4)+(d_3)+(d_4)\nonumber\\
(d_1+d_2+d_4+d_5)+(d_1+d_2+d_3+d_5)+(d_2)+(d_5)\nonumber\\
(d_1+d_2+d_4+d_5)+(d_1+d_2+d_3+d_4)+(d_2)+(d_4)\nonumber\\
(d_1+d_2+d_3+d_5)+(d_1+d_2+d_3+d_4)+(d_2)+(d_3)\nonumber\\
(d_1+d_3+d_4+d_5)+(d_1+d_2+d_4+d_5)+~~~~~~~~~~~~~~\nonumber\\
\!\!\!(d_1+d_2+d_3+d_5)+(d_1+d_2+d_3+d_4)~~~~~~~~~~~~~~~~~
\end{eqnarray*}
In the second strategy, called strategy B, first the failed systematic node is repaired and then involved in the reconstruction of the related parity node. When strategy B is employed, the new node requires to communicate with $2m+1$ parity nodes and $k-2m-2$ systematic nodes over $k-1$ different partitions and the number of possible ways to select these surviving nodes are computed as
\begin{equation}\label{equ5}
\sum_{m=1}^{\lceil\frac{k-1}{2}\rceil}~\binom{k-1}{2m-1}~=~2^{k-2}~.
\end{equation}
For example, as can be seen in Fig.~\ref{fig:2}, there exist $2^{(5-2)}=8$ options for the new node to choose one or three parity nodes for the repair of systematic node $S_1$ which holds $d_1$ when $P_1$ can not be involved. These options are as
\begin{eqnarray*}
(d_1+d_3+d_4+d_5)+(d_3)+(d_4)+(d_5)~~~~~~\nonumber\\
(d_1+d_2+d_4+d_5)+(d_2)+(d_4)+(d_5)~~~~~~\nonumber\\
(d_1+d_2+d_3+d_5)+(d_2)+(d_3)+(d_5)~~~~~~\nonumber\\
(d_1+d_2+d_3+d_4)+(d_2)+(d_3)+(d_4)~~~~~~\nonumber\\
(d_1+d_3+d_4+d_5)+(d_1+d_2+d_4+d_5)+\nonumber\\
(d_1+d_2+d_3+d_5)+(d_5)~~~~~~~~~~~~~~~~~~~~~~~\nonumber\\
(d_1+d_3+d_4+d_5)+(d_1+d_2+d_4+d_5)+\nonumber\\
(d_1+d_2+d_3+d_4)+(d_4)~~~~~~~~~~~~~~~~~~~~~~~\nonumber\\
(d_1+d_3+d_4+d_5)+(d_1+d_2+d_3+d_5)+\nonumber\\
(d_1+d_2+d_3+d_4)+(d_3)~~~~~~~~~~~~~~~~~~~~~~~\nonumber\\
(d_1+d_2+d_4+d_5)+(d_1+d_2+d_3+d_5)+\nonumber\\
(d_1+d_2+d_3+d_4)+(d_2)~~~~~~~~~~~~~~~~~~~~~~~
\end{eqnarray*}
In both strategies $k-1$ nodes are involved during the course of downloading for creating a new node. As discussed, then this node accompanying two other nodes is used to reconstruct its related node. Therefore, a repair bandwidth of size $\frac{(k-1)M}{k}+\frac{3M}{k}=\frac{(k+2)M}{k}$ is consumed to repair two failed nodes from a partition. In fact, the average repair bandwidth for reconstructing each node is $\frac{(k+2)M}{2k}$.
As an example, in $(10,5)$ code, two failed packets $d_1$ and $d_2+d_3+d_4+d_5$ are reconstructed after downloading from totally 7 nodes (i.e., 3.5 nodes for each packet), and consuming a repair bandwidth of size $\frac{7M}{5}$ (i.e., $\frac{3.5M}{5}$ for each packet). Recall in a (10,5) MSR code a new node is allowed to contact to at least five nodes which leads to a repair bandwidth of size $M$. It is worth mentioning that, for two specific cases $k=2$ and $k=3$ the reconstruction of a lost packet through communicating with $k-1$ nodes (strategy A and B) is more efficient than three nodes because in these cases $\frac{(k-1)M}{k}$ is smaller than $\frac{3M}{k}$.
Note the total storage for a file of size $M$ regardless of $k$ is $2M$ and we can reduce repair bandwidth having increase in $k$. Therefore, for a given total storage the suggested scheme can establish a tradeoff between the repair bandwidth and the number of storage nodes. Moreover, the number of nodes which are involved during the repair of a single node failure regardless of $k$ is three.
\section{Conclusion}\label{sec:conclusion}
This paper aims at introducing a non-MDS scheme which is applicable in storage systems. Our proposed code which entails $k$ partitions, each one consisting two related systematic and parity nodes, can tolerate any three node failures. Also it can tolerate any $k-1$ node failures if at most two of them being from a common partition. Moreover, each single node failure can be repaired through access to three nodes. The suggested code has a simplicity of implementation in such that each node stores only one packet and the recovery of the original data file and the reconstruction of a lost packet can be achieved by XORing the stored packets.
\bibliographystyle{IEEEtran}
| {
"redpajama_set_name": "RedPajamaArXiv"
} | 8,689 |
OpenSAND, formerly known as Platine is a satellite emulation testbed initially developed during the SATIP6 project. Secondly, the development has been continued in the frame of the SATSIX project. SATIP6 and SATSIX are both European IST projects. Finally, several CNES R&T studies have contributed to the enhancement of the testbed together with internal Thales Alenia Space R&D effort.
The OpenSAND project is now released under free licenses and the emulation testbed is mainly maintained by the collaboration work of Viveris Technologies, Thales Alenia Space and CNES.
OpenSAND emulates the main features of a SATCOM system, and has been inspired from the DVB-S2/RCS2 standards.
A Satcom System overview is provided to better understand OpenSAND's features.
This section regroups the F.A.Q. and the Troubleshooting. | {
"redpajama_set_name": "RedPajamaC4"
} | 2,348 |
Q: Can I add additional background attribute to subclass? I have a CSS file that has code looking like this...
.button{
width: 700px;
height: 100px;
text-align: center;
display: inline-block;
font-size: 50px;
font-family: Gauge;
color: white;
border: rgba(255,255,255,0.3) 11px solid;
box-shadow: 0 5px 10px white;
text-shadow: 0 0 10px #000;
margin: 15px 15px 15px 15px;
transition: 0.4s
}
button.oneshot{
background: linear-gradient(rgba(0,0,0,1),rgba(0,0,0,0.4),rgba(0,0,0,1)) no-repeat center center fixed,
url("oneshot.png") center 60%;
button.lisatp{
background: linear-gradient(rgba(0,0,0,1),rgba(0,0,0,0.4),rgba(0,0,0,1)) no-repeat center center fixed,
url("lisa the painful.jpg") 45% 60%;
}
...
...
As you can see, there is a line that is repeated in the subclasses oneshot and lisatp:
linear-gradient(rgba(0,0,0,1),rgba(0,0,0,0.4),rgba(0,0,0,1)) no-repeat center center fixed
However, since each subclass also has an image as the background as well, I can't find a way to place the repeated line in .button.
Is it possible to somehow simplify this even more, or is this as simple as it is going to get?
A: Use CSS variable to simplify this then you will be able to easily change the gradient for all the element of each one individually:
.button{
width: 700px;
height: 100px;
text-align: center;
display: inline-block;
font-size: 50px;
font-family: Gauge;
color: white;
border: rgba(255,255,255,0.3) 11px solid;
box-shadow: 0 5px 10px white;
text-shadow: 0 0 10px #000;
margin: 15px 15px 15px 15px;
transition: 0.4s;
--grad:linear-gradient(rgba(0,0,0,1),rgba(0,0,0,0.4),rgba(0,0,0,1)) no-repeat center center fixed;
background:var(--grad);
}
.button.oneshot{
background:var(--grad),url("https://lorempixel.com/400/200/") center 60%;
}
.button.lisatp{
background: var(--grad),url("https://lorempixel.com/300/200/") 45% 60%;
}
.button.new-grad{
--grad:linear-gradient(rgba(0,190,0,1),rgba(0,190,0,0.4),rgba(0,180,0,1)) no-repeat center center fixed;
background: var(--grad),url("https://lorempixel.com/300/200/") 45% 60%;
}
<span class="button"></span>
<span class="button oneshot"></span>
<span class="button lisatp"></span>
<span class="button new-grad"></span>
| {
"redpajama_set_name": "RedPajamaStackExchange"
} | 7,402 |
\section{INTRODUCTION}
Parity nonconservation (PNC) in atoms arises from the
electroweak interaction between the electrons and nucleons, primarily
due to exchange of the neutral gauge boson, $Z_0$. The dominant
contribution in heavy atoms comes from the coupling of the axial
electronic current to the vector nucleon current. Because the vector
currents are conserved, atomic PNC essentially measures the electroweak
coupling to the elementary quarks, bypassing many of the difficulties
of hadronic physics. Thus in principle atomic experiments can measure
certain key electroweak parameters quite accurately, and also help
probe for new physics beyond the presently successful Standard Model of
the electroweak interactions.
In fact, there remains much to be learned about the Standard
Model, including the masses of the top quark and the predicted
Higgs boson(s), and whether there are additional generations of quarks
and leptons. In addition the Standard Model faces the well-known gauge
hierarchy problem, and it is certainly possible that electroweak
measurements may reveal something totally new, such as technicolor or
supersymmetric particles. Accurate measurements of PNC in atomic
cesium already play an important role in addressing such questions.
Two major issues affect the interpretation of atomic experiments and
will become more crucial as experimental accuracy improves, namely the
small but not negligible effects of nuclear size and
structure,\cite{fpw} and the reliability of the atomic theory of heavy
atoms\cite{bjs,dfs}. Atomic theory, the source of the largest
uncertainty, has received a great deal of attention leading to
increasingly precise calculations of PNC for a number of elements.
Cesium, in particular, is now believed to be understood at the 1\%
level.
To advance further may require canceling out all uncertainties of the
atomic theory by comparing PNC measurements on different isotopes of
the same element. Such experiments in fact have been
proposed\cite{dfk} using strings of isotopes of such elements as
Cs, Dy, and Pb.
As we discussed in a previous note,\cite{fpw} hereafter
referred to as I, it then becomes important to find the level at which
nuclear structure interferes with interpreting atomic PNC purely in
terms of particle theory. The wave function of the atomic electrons
varies over the dimensions of the nucleus, causing the net electroweak
interaction with the nucleons to depend on the spatial distribution of
both the protons and neutrons. As demonstrated in I, the PNC
observable is (for $\sin^2\theta_W\approx 1/4$ and $R_n\approx R_p$)
very roughly proportional to
\begin{equation}
1-{\textstyle {3\over 70}}
(Z\alpha)^2\big[1+5\,R_n^2/R_p^2 \big] + \cdots , \eqnum{1.1}
\end{equation}
where $R_n$ and $R_p$ are the equivalent rms radii for the nuclear
distribution of neutrons and protons. The proton, or rather the
nuclear {\it charge}, distribution is well known from electric probes:
electron and muon scattering, optical isotope shifts, muonic atoms,
etc. The extraction of the neutron distribution, however, is quite
model-dependent and difficult to determine to the same high accuracy.
On the one hand, the neutron distribution is needed in order to extract
the weak parameters in heavy atom experiments. On the other hand, to
the extent the weak parameters are known, the experiments provide a
method of {\it measuring} the changes in the neutron distribution,
primarily the rms radius. Thus atomic experiments on isotopes of heavy
atoms may provide a unique opportunity to test nuclear model
calculations.
In this paper we explore the nuclear structure issues
extensively. We have utilized several recent detailed nuclear structure
calculations from various authors, in order to quantitatively estimate
the nuclear model-dependent corrections to atomic PNC in the isotopes
of $_{82}$Pb, an element of interest experimentally. Our major
conclusions are:
(1) For single isotope measurements on $^{208}$Pb, $(Z\alpha)$ is
sufficiently large that the effects of nuclear structure on atomic PNC
cannot be neglected. The {\it uncertainties} due to neutron
distributions appear to be less significant for the extraction of
electroweak parameters than the current uncertainties due to atomic
structure. However, unless the {\it change} in neutron distributions
along an isotopic chain can be better predicted (or independently
measured, e.g. via parity violating e$^-$ scattering), PNC ratios of
$_{82}$Pb isotopes will not be able to provide an extraction of the
weak mixing angle to better than a one percent level.
(2) In lighter nuclei (including the important case of $_{55}$Cs),
$Z\alpha$ is sufficiently small that uncertainties in nuclear structure
can probably be safely ignored compared to current uncertainties in
atomic theory, when using a single isotope. Calculations for the
non-magic, odd-$Z$, Cs nuclei pose additional difficulties, and further
investigation is still required to determine how accurately the
Standard Model could be tested when using ratios of isotopes.
(3) Atomic PNC experiments provide perhaps one of the cleanest
opportunities yet available to study the nuclear neutron skin. The
situation is similar to atomic isotope shifts which have provided
precise measurements of the ratios of changes in the nuclear rms charge
radius in strings of isotopes. Here we have a weak probe of the
neutron distribution which is free of the gross uncertainties
associated with strongly interacting probes. At the level of precision
that the Standard Model is known, this yields another testing ground
for nuclear models.
We note that alternative weak probes, such as parity-violating
intermediate-energy $e^-$-nucleus cross section asymmetries, are also
sensitive to the neutron distributions. In combination with atomic PNC,
these may help simplify the separation of nuclear structure effects
from electroweak radiative corrections.
The paper is organized as follows: In section II we sketch briefly the
relevant parts of electroweak theory. In section III we review the
simple analytical model we presented in reference (1); this provides a
convenient framework for discussing the effects of nuclear structure on
atomic PNC in terms of the rms radii of the proton and neutron
distributions in the nucleus. In section IV we discuss the electroweak
nucleon form factors. The intrinsic electroweak structure does begin
to contribute at the level we are interested in, but the {\it
uncertainties} in this structure (due to strangeness admixtures, etc.)
should have a negligible effect on the total PNC amplitude of the
nucleus. In section V, we examine the key ingredients of currently
available theoretical models for heavy nuclei, including both
non-relativistic and relativistic formalisms. We consider the
reliability of these models, and discuss the need for calculations
which include correlations beyond the Hartree-Fock level. We also
discuss alternative experimental means to measure the desired neutron
distributions. In section VI we discuss the relevant Standard Model
parameters, and the accuracy desired in their extraction from atomic
parity violation. We derive the propagation of error from nuclear
model uncertainties to electroweak parameters, focusing on isotopes of
lead and (to a lesser extent) cesium. These two elements are of
current experimental interest, and are representative of the very heavy
and moderately heavy regions of the periodic table. In section VII we
discuss our numerical results, using various existing Hartree-Fock
nuclear calculations, summarize, and discuss the conclusions from the
previous sections.
\section{ PNC IN THE STANDARD MODEL}
\subsection{ Theoretical Considerations}
Because the $Z$-boson is massive (91.16$\pm$.03 GeV), the
quark-electron interaction due to $Z$-exchange may be taken to be of
zero range compared to atomic or nuclear dimensions. What we observe
in atoms is the electron interaction with {\it nucleons}, not
individual quarks. Nucleons are, of course, composite structures
consisting each of three quarks net, but also $\bar q q$ pairs as well
as gluons. We make the assumption here, which we justify in Sec. IV,
that we can neglect the internal nucleon structure and simply add the
point coupling of the 3 quarks to obtain the net nucleon weak
coupling. The PNC part of the nucleon-electron interaction can be
written in terms of axial and vector currents
\begin{equation}
H_{PNC}=V_N\times A_e+A_N\times V_e\,. \eqnum{2.1}
\end{equation}
If, in addition to neglecting internal nucleon structure, we treat the
nucleons nonrelativistically (a very good approximation), we have
\FL
\begin{eqnarray}
H_{PNC}={G_F\over
\sqrt{2}}\ \sum_{eB}\ \biggl[&& C_{1B}\int\psi^\dagger_B\psi_B
\psi^\dagger_e \gamma^5\psi_e d^3\!r \nonumber\\
&&\ +\ C_{2B}\int\psi^\dagger_B\vec \sigma_B\psi_B\cdot\psi^\dagger_e
\vec \alpha\psi_e d^3\!r\biggr]\,,\nonumber\\
&& \eqnum{2.2a}
\end{eqnarray}
where $B$ stands for $n$ (neutron) or $p$ (proton) and
\begin{eqnarray}
C_{1p}\ =&&\ {\textstyle {1\over 2}}(1-4 \sin^2\theta_W),\nonumber\\
C_{2p}\ =&&\ {\textstyle {1\over 2}}
(1-4 \sin^2\theta_W)g_{\scriptstyle A},
\nonumber\\
C_{1n}\ =&&\ -{\textstyle {1\over 2}},\eqnum{2.2b}\\
C_{2n}\ =&&\ -{\textstyle {1\over 2}}
(1-4 \sin^2\theta_W)g_{\scriptstyle A}.
\nonumber
\end{eqnarray}
These expressions assume tree-level Standard Model couplings. In section VI,
we discuss the important effects of radiative loop corrections.
The first term in eqn. (2.2a) grows coherently with nucleon numbers $N$
and $Z$. The second term, together with the anapole
moment\cite{nmw} term (which also depends upon $\vec
\sigma_B\cdot\vec \alpha$), amounts to at most a few percent of the
first term in heavy atoms, and furthermore sums to zero when all hfs
sublevels are combined, since all directions of $\vec \sigma_B$ are
then weighted equally. Thus in this paper we will consider the first
term only. The effective interaction is
\FL
\begin{eqnarray}
H_{PNC,1}={G_F\over 2\sqrt{2}}\int&& \left[-N\rho_n(\vec r\,)+
Z(1-4\sin^2\theta_W)\rho_p(\vec r\,)\right]\nonumber\\
&&\quad \times\ \psi^\dagger_e \gamma^5\psi_e\, d^3\!r\,,\eqnum{2.3}
\end{eqnarray}
where here the $\rho_n$ and $\rho_p$ are normalized to unity. The
neutron and proton densities include a folding with the weak form
factors (see Sec. IV).
We need the spatial variation of $\psi^\dagger_e\gamma^5\psi_e$ over
the nucleus, its normalization, and its dependence on nuclear
structure. PNC effects are dominated by $s_{1/2}$-electrons
($\kappa=-1$) coupled to $p_{1/2}$-electrons ($\kappa=+1$). We define
\begin{equation}
\rho_5(r)\equiv \psi^\dagger_p(\vec r\,)
\gamma^5\psi_s(\vec r\,)\,, \eqnum{2.4}
\end{equation}
which turns out to depend only on the magnitude of $\vec r$.
$\rho_5(r)$ can be factored conveniently as follows:
\begin{equation}
\rho_5(r) =C(Z)\,{\cal N}(Z,R)\,f(r)\,, \eqnum{2.5}
\end{equation}
where $C(Z)$ contains all atomic structure effects for a point
nucleus including many-body correlations; ${\cal N} \equiv
\psi^\dagger_p(0) \gamma^5\psi_s(0)$ is the normalization factor for a
single electron; $f(r)$ contains the spatial variation and is
normalized to $f(0)=1.$
Because the electric potential is very strong
near the nucleus, we can safely neglect atomic binding energies in
$f(r)$. In Pb, for example, the potential at the nuclear surface is
about 15 MeV compared with valence electron binding energies of a few
eV. In addition, to a very good approximation,\cite{fpw}
\begin{equation}
{\cal N}= R^{-\gamma}\,, \eqnum{2.6}
\end{equation}
where $\gamma=2\Big[1-\sqrt{1-(Z\alpha)^2}\Big]$ and
$R$, often called the {\it equivalent charge radius,} is given by
\begin{equation}
R=\left[{\textstyle {5\over 3}}
<\! r^2\!>_{charge}\right]^{1/2}. \eqnum{2.7}
\end{equation}
We are not interested in the absolute value of ${\cal N}$, but only its
variation with nuclear structure. Observable PNC effects are
proportional to the matrix element between two atom\-ic states $i$ and
$j$
\FL
\begin{eqnarray}
<\! i|H_{PNC,1}|j\!>=&&
{G_F\over 2\sqrt{2}}\,C_{ij}(Z)\,{\cal N}\nonumber\\
&&\ \times\ \big[-N\,q_n+Z(1-4\,\sin^2\theta_W)q_p\big]\,.\nonumber\\
&&\eqnum{2.8}
\end{eqnarray}
As mentioned above, this is modified by radiative
corrections which we discuss in some detail in section VI.
Effects of {\it nuclear} structure on PNC are contained in ${\cal N}$
and the two quantities
\begin{eqnarray}
q_n\ &=&\ \int \rho_n(r)\,f(r)\,d^3\!r\,, \eqnum{2.9\,a}\\
q_p\ &=&\ \int \rho_p(r)\,f(r)\,d^3\!r\,. \eqnum{2.9\,b}
\end{eqnarray}
We note that $1-4\,\sin^2\theta_W$ is a small number; from high energy
experiments, $\sin^2\theta_W=0.230\pm .004.$ The value of
$\sin^2\theta_W$ can also be deduced from atomic experiments with an
accuracy that will be limited in part by nuclear structure effects, as
we discuss in sections VI and VII.
The proton (charge) nuclear form factors needed for $q_p$ and ${\cal
N}$ are generally well known from measurements of the charge
distribution of nuclei close to the stable valley and many unstable
nuclides as well. Neutron nuclear form factors are needed for $q_n$,
and are not well-determined experimentally, and statements about them
are quite model-dependent. Neutron and proton distributions are often
taken to be proportional to each other, scaled by $N$ and $Z.$ However,
neutron-rich nuclei have larger neutron distributions than the protons
and the reverse is true for proton rich nuclei. In an isotopic
sequence, the $A^{1/3}$ law is not followed for either the charge or
the neutron distributions separately.
\subsection{Experimental Considerations}
As first pointed out by Bouchiat and Bouchiat,\cite{bou} the effect of
$H_{PNC}$ in neutral atoms grows rapidly with atomic number $Z$,
approximately as $Z^3$. Thus experimental interest has concentrated on
heavy atoms, namely $_{55}$Cs, $_{81}$Tl, $_{82}$Pb, and $_{83}$Bi.
(For some reviews, see reference \cite{rev}.) The measured quantity in
all experiments is the electric dipole amplitude ${\cal E}_{PNC}$
between two electronic states which, in the absence of $H_{PNC}$, would
have the same parity and hence would have no electric dipole amplitude
connecting them. Denoting the initial and final states by $i$ and $f$,
we can write:
\FL
\begin{eqnarray}
{\cal E}_{PNC}=\sum_n&&\biggl[
{<f\vert \hat {\bf E}_1\vert n> <n\vert H_{PNC}\vert i>\over
W_i-W_n}\ \nonumber\\
&&\quad\quad +\ {<f\vert H_{PNC}\vert n>
<n\vert\hat {\bf E}_1\vert i>\over
W_f-W_n}\biggr], \eqnum{2.10}
\end{eqnarray}
where the first and second terms give the mixing due to $H_{PNC}$ of
opposite parity states into the initial and final states respectively.
$W$ is the energy of the atomic states, and $\hat {\bf E}_1 \equiv
-\sum_j e{\bf r}_j$ is the electric dipole charge operator. The
magnitude of ${\cal E}_{PNC}$ is of order $10^{-9} ea_0$ for the
heaviest atoms of interest.
Two experimental techniques have evolved for measuring ${\cal
E}_{PNC}$. One involves applying an external static electric field
which, like $H_{PNC}$, mixes in opposite parity states and creates an
electric dipole amplitude between the states $i$ and $f$. The
interference between this Stark amplitude and ${\cal E}_{PNC}$ leads to
a parity-violating signature in the optical transition from $i$ to $f$
in which the sign of the interference term reverses with the sense of
circular polarization of the incident light, and with other vectors
specifying the handedness of the experimental arrangement. The other
technique uses no external fields, but instead exploits the
interference between ${\cal E}_{PNC}$ and the magnetic dipole ($M1$)
amplitude between the same two states. This interference causes
parity-violating optical rotation, i.e. a rotation of the plane of
polarized light passing through the atomic vapor at wavelengths near
the magnetic dipole absorption line.
The Stark interference technique has been used in the all-important
measurement of PNC in Cs on the highly forbidden $6S_{1\over 2}$ -
$7S_{1\over 2}$ $M1$ absorption line at 532 nm, and in the measurement
on the $6P_{1\over 2}$ - $7P_{1\over 2}$ $M1$ transition in Tl. The
optical rotation technique has been applied to the allowed $M1$
absorption lines at 876 nm and 648 nm in Bi, and to the similar 1278 nm
and 1283 nm lines in Tl and Pb respectively; all of which involve
transitions among low-lying configurations of $6p$ electrons, for
example $6P_{1\over 2}$ - $6P_{3\over 2}$ in Tl. Both techniques have
reached the 1 percent level of accuracy.
Among the elements studied thus far, Cs and Pb are the most likely
candidates for comparing different isotopes. It may be possible in the
case of Cs to use optical atom traps to carry out measurements on a
long string of radioactive isotopes. Measurements on Pb will probably
be restricted to stable or long-lived isotopes. In either case,
achieving the level of accuracy discussed in this paper (a few percent
down to 1 percent in the isotopic {\it difference}), although possible
in principle, will be a challenging task in the next generation of
atomic PNC experiments.
\section{ A SIMPLE MODEL FOR THE NUCLEAR FORM FACTORS}
Given proton and neutron distribution functions, there is no difficulty
in calculating $q_p,\;q_n$ and the variation in ${\cal N}$. In I, we
used a simple model to estimate the importance of nuclear structure on
PNC observables. We review those results here.
Consider a uniform nuclear charge distribution of
radius $R$. This charge produces an electric potential
\begin{equation}
V_c(r)=Ze^2\Bigg\{
{(-3+r^2/R^2)/2R\,,\qquad r<R\,,
\atop -1/r\,,\;\;
\quad\qquad\qquad\qquad r>R\,.} \eqnum{3.1}
\end{equation}
A power series for the Dirac wave function {\it inside} the nucleus
yields
\begin{eqnarray}
f(r)=1-{\textstyle {1\over 2}} (Z\alpha)^2&&\Bigl[
(r/R)^2 -{\textstyle {1\over 5}} (r/R)^4 +
{\textstyle {1\over 75}} (r/R)^6\Bigr]\nonumber\\
&& + \ {\cal O} (Z\alpha)^4 \,. \eqnum{3.2}
\end{eqnarray}
Again for the sake of simplicity here, we assume that, as for a uniform
distribution (for either $n$ or $p$), $<r^4>={\textstyle {3\over 7}} R^4$ and
$<r^6>={\textstyle {3\over 9}} R^6$
where $R^2\equiv {\textstyle {5\over3}} <r^2>$.
{}From (2.9a) we find (neglecting here any differences
between charge and proton radii)
\begin{equation}
q_p=1-0.260 (Z\alpha)^2 +
{\cal O} (Z\alpha)^4\,, \eqnum{3.3\,a}
\end{equation}
which is insensitive to nuclear structure {\it to this order.}
{}From (2.9b), we find
\FL
\begin{eqnarray}
q_n&=&1-(Z\alpha)^2\left({3\over 10}
{R_n^2\over R_p^2}-{3\over 70}
{R_n^4\over R_p^4}+{1\over 450}
{R_n^6\over R_p^6}\right)+
{\cal O} (Z\alpha)^4\nonumber\\
&\approx& 1- (Z\alpha)^2
\left(0.038+0.221{R^2_n\over R^2_p}\right)
+{\cal O} (Z\alpha)^4 \,, \eqnum{3.3\,b}
\end{eqnarray}
which does depend on the neutron form factor. Here we have introduced
equivalent neutron and proton radii of the form (2.7); the second form in
(3.3b) assumes that $(R_n/R_p)^2-1$ is small.
In this section, we have made rough approximations in order to
illustrate the sensitivity of the results to moments of the neutron and
proton distributions. For comparison with experiment, a more detailed
analysis is necessary, using actual solutions of the Dirac equation for
realistic charge distributions and the best available theoretical
neutron distributions. This is done in Secs. VI and VII.
\section{INTRINSIC NUCLEON STRUCTURE EFFECTS}
The usual treatment of atomic PNC begins with an effective Hamiltonian
for the parity violating electron-nucleus interaction, as in equation
(2.2a), which involves normalized proton and neutron distributions
\begin{eqnarray}
Z \rho_p(\vec r\,)\ =& &\ \sum_p \langle \psi_p^\dagger (\vec r\,)
\psi_p(\vec r\,)\rangle,
\eqnum{4.1a}\\
N \rho_n(\vec r\,)\ =& &\ \sum_n \langle \psi_n^\dagger (\vec r\,)
\psi_n(\vec r\,)\rangle,\eqnum{4.1b}
\end{eqnarray}
where $\psi_N^{(\dagger)}$ is a destruction (creation) operator for
nucleons, and the matrix elements are between nuclear ground states.
However, these formulae implicitly assume point-like nature for
nucleons, and thus the usual analysis makes no distinction between
weak, electromagnetic, or point nucleon distributions, aside from
overall charges.
Of course, nucleons do have an internal structure, and this must be
properly folded into the above distributions. The internal weak
structure is related to, but different from, the electromagnetic
structure, and can be calculated in the context of the Standard Model.
We demonstrate in this section that the known, electromagnetic
structure of nucleons yields a rather small overall effect on atomic
PNC calculations, but must be included when extremely high precision
results are required.
There has been considerable discussion in recent literature
\cite{emc,don,kap} concerning the possibility of nontrivial
strange quark matrix elements in the nucleon. This could lead to a
sizable ``strangeness radius'' of the nucleon, which in turn would
modify the weak radius in a well defined way. We allow for this
possibility in our analysis, although such a strangeness contribution
to atomic PNC is likely to be quite negligible.
In the Standard Model, assuming in addition that strong SU(2) isospin
is a good symmetry for the nucleons, one can extract relations between
weak and electromagnetic form factors \cite{wei} which then
describe the internal nucleon structure:
\begin{eqnarray}
G_E^{weak,p}(q^2)\ &=&\ {\textstyle{1\over2}}(1-4\sin^2\theta_W)
G_E^{\gamma,p}(q^2)\nonumber\\
&&\ \ -{\textstyle{1\over2}}\left(G_E^{\gamma,n}(q^2)+G_E^{s}(q^2)
\right),\eqnum{4.2a}\\
G_E^{weak,n}(q^2)\ &=&\ {\textstyle{1\over2}}(1-4\sin^2\theta_W)
G_E^{\gamma,n}(q^2)\nonumber\\
&&\ \ -{\textstyle{1\over2}}\left(G_E^{\gamma,p}(q^2) +G_E^{s}(q^2)
\right).\eqnum{4.2b}
\end{eqnarray}
Here, $G_E^{X,N}$ is the usual Sachs electric form factor for
a current operator $J_\mu^X$, where $X$ can represent weak, electromagnetic,
or
specific quark flavor currents:
\FL
\begin{eqnarray}
<\!{p',N}|J_\mu^X|&&{p,N}\!>\ \nonumber\\
&&\equiv \bar u(p')\biggl(F_1^{X,N}(q^2)\gamma_\mu\nonumber\\
&&\qquad\qquad \ \ + i F_2^{X,N}\sigma_{\mu\nu}q^\nu /(2M) \biggr)u(p),
\eqnum{4.3a}\\
G_E^X(q^2)\ &&\equiv \
F_1^X(q^2)+(q^2/4M^2)F_2^X(q^2),\eqnum{4.3b}
\end{eqnarray}
and
\begin{eqnarray}
\ J_\mu^\gamma\ =& &\
{\textstyle {2\over3}}(\bar u \gamma_\mu u)
-{\textstyle {1\over3}}(\bar d\gamma_\mu d +
\bar s\gamma_\mu s)
\eqnum{4.4a}\\
\equiv& &\ {\textstyle {2\over3}}(J_\mu^u)
-{\textstyle {1\over3}}(J_\mu^d + J_\mu^s),
\eqnum{4.4b}\\
\ J_\mu^{weak}\ =& &\
({\textstyle {1\over2}}-{\textstyle
{4\over 3}}\sin^2\theta_W)(J_\mu^u)\nonumber\\
& &\ \
+(-{\textstyle {1\over2}}+{\textstyle
{2\over 3}}\sin^2\theta_W)
(J_\mu^d+J_\mu^s),
\eqnum{4.4c}
\end{eqnarray}
are the Standard Model electromagnetic and weak vector currents in
terms of quark field operators. (We ignore quarks heavier than
strange.) $G_E^s$ is thus the strangeness electric form factor, and is
constrained to be strictly 0 at $q^2=0$. Note that one recent estimate
\cite{jaf} gives a strangeness mean-square-radius of around 0.14
fm$^2$, roughly as large as that for the neutron electric charge (but
of opposite sign). (This quantity can in principle be measured in,
e.g. parity violating ${\vec e}^{\,-}$ scattering from nucleons at
forward angles.)
With the above relations, we see immediately that at $q^2=0$, the usual
weak charges are exactly obtained:
\begin{eqnarray}
Q_p^{w}\ =&&\ {\textstyle{1\over2}}(1-4\sin^2\theta_W),
\eqnum{4.5a}\\
Q_n^{w}\ =&&\ -{\textstyle{1\over 2}}, \eqnum{4.5b}
\end{eqnarray}
and one can also predict weak rms radii
\FL
\begin{eqnarray}
\left\langle r^2\right\rangle_{I,p}^{w}\ =&&\
{\textstyle{1\over2}}(1-4\sin^2\theta_W)
\left\langle r^2\right\rangle_{I,p}^{\gamma}
-{\textstyle{1\over 2}}
\left\langle r^2\right\rangle_{I,n}^{\gamma} \nonumber\\
&&\qquad -\
{\textstyle{1\over 2}}\left\langle r^2\right\rangle_I^{s}
+6({\textstyle{1\over2}})
(1-4\sin^2\theta_W)/(8M^2), \nonumber\\
&& \eqnum{4.6a}\\
\left\langle r^2\right\rangle_{I,n}^{w}\ =&&\
{\textstyle{1\over2}}(1-4\sin^2\theta_W)
\left\langle r^2\right\rangle_{I,n}^{\gamma}
-{\textstyle{1\over 2}}
\left\langle r^2\right\rangle_{I,p}^{\gamma} \nonumber\\
&&\qquad -\
{\textstyle{1\over 2}}\left\langle r^2\right\rangle_I^{s}
+6(-{\textstyle{1\over2}})/(8M^2), \eqnum{4.6b}
\end{eqnarray}
where the subscript I indicates intrinsic nucleon structure, and the
last terms in (4.6a) and (4.6b) are the inclusion of the small
Darwin-Foldy correction to the radii. Note that the neutron
(electromagnetic) contribution to the proton weak radius is not
suppressed by any $(1-4\sin^2\theta_W)$ factor, and thus is
surprisingly significant.
Using $\sin^2\theta_W\approx .23$,
$\left\langle r^2\right\rangle_{I,p}^{\gamma}\approx 0.7 $ fm$^2$,
$\left\langle r^2\right\rangle_{I,n}^{\gamma}\approx -0.11 $ fm$^2$,
$\left\langle r^2\right\rangle_I^{s} = 0$ gives
\begin{eqnarray}
\left\langle r^2\right\rangle_{I,p}^{w}\approx & &\
{\textstyle{1\over2}}(1-4\sin^2\theta_W)(2.1\ {\rm fm}^2), \eqnum{4.7a}\\
\left\langle r^2\right\rangle_{I,n}^{w}\approx & &\ -
{\textstyle{1\over2}}(.74\ {\rm fm}^2). \eqnum{4.7b}
\end{eqnarray}
The quantities in parentheses above can be interpreted as the physical
size (squared) of the weak distributions. Note that using numbers of
${ \cal O}(\pm .1)$ \cite{jaf} for the strangeness radius will have
a large effect on $\left\langle r^2\right\rangle_I^{w}$ for both the
proton and (somewhat less so) the neutron.
To a good approximation, considering only rms radii, but no higher
moments, the relevant PNC matrix element is then given by a convolution
of (point) nucleon centers with their intrinsic structure, yielding a
replacement for Eqs. (2.3) and (2.8),
\FL
\begin{eqnarray}
&&<\!{i}|H_{PNC,1}|{j}\!>\ \nonumber\\
&&\ \ ={G_F\over \sqrt2}\int
<\!\Bigl(NQ_n^w\rho_n(\vec r\,)+
ZQ_p^w\rho_p(\vec r\,)\Bigr)
\psi_e^\dagger\gamma^5\psi_e\!> d^3\vec r\,,\nonumber\\
&&\eqnum{4.8a}\\
&&\ \ ={G_F\over \sqrt2}C_{ij}(Z){\cal N}
[N Q_n^{w} q_n + Z Q_p^{w} q_p],\eqnum{4.8b}
\end{eqnarray}
with the quantities $q_p$ and $q_n$ slightly modified from eqns. (2.9),
\FL
\begin{eqnarray}
q_{(p,n)}=\int\,d^3\vec r\,&&\biggl(\rho_{(p,n)}^{c}(\vec r\,) \nonumber\\
&&\ \quad +\ {\textstyle{1\over 6}}\left\langle
r^2\right\rangle_{I,(p,n)}^{w}\nabla^2\rho_{(p,n)}^{c}/
Q_{(p,n)}^{w}
\biggr)\, f(r), \nonumber\\
&& \eqnum{4.9}
\end{eqnarray}
where $\rho_{p,n}^{c}(\vec r\,)$ is now the
density distribution of nucleon
{\it centers}, normalized to 1.
Assuming, for simplicity, uniform nucleon distributions, with
$R_p \approx R$,
\FL
\begin{eqnarray}
q_p\ \approx\ 1-(Z\alpha)^2\biggl(&&
.26\ +\ {.32\over R^2}
\bigl(2.1 -\left\langle r^2\right\rangle_I^s/2Q_p^{w}\bigr)
\biggr),\nonumber\\
q_n\ \approx 1-(Z\alpha)^2\biggl(&&
.038 +.221{R_n^2\over R^2}\nonumber\\
&&\qquad\ \ \ +\ {.32\over R^2}
\bigl(.74 - \left\langle r^2\right\rangle^s_I/2Q_n^{w}\bigr)
\biggr)\,,\nonumber\\
&& \eqnum{4.10}
\end{eqnarray}
with all radii measured in fm.
For $^{208}$Pb, the internal nucleon structure contributes about 0.002
to $q_n$, and a possible strangeness radius discussed above (0.14
fm$^2$) would contribute about 5 times less. The internal structure
corrects $q_p$ by about 0.005, and the strangeness contribution here
would be comparable, about 0.004. In Cs, these numbers turn out to be
smaller by about 40\%.
{}From the discussion to come in Sec. VI, we will see that these
contributions from (known) finite nucleon structure contributes at
about the 0.2\% level in an extraction of the weak nuclear charge when
measuring a single isotope of Pb (0.1\% level for Cs). This might need
to be taken into account in an extremely high precision analysis, but
it will not add to the {\it uncertainty} in testing the Standard
Model. (See also the complete discussion in Sec. VI to compare with
the expected scale of nuclear, atomic, and electroweak radiative
corrections and uncertainties.) On the other hand, strangeness
contributions, which are currently very uncertain, might affect a
determination of the weak charge at below the 0.1\% level in Pb, and
even less in Cs, and thus are likely to be quite negligible. They
could only become relevant if the nucleon strangeness radius were
comparable to the electromagnetic radius itself.
In the case of isotopic ratios, the internal nucleon structure plays an
even smaller role. This is because errors then come from uncertainties
in the difference $q_n'-q_n$ (see Sec. VI). To a good approximation,
nucleon structure effects are simply additive in mean square radii and
thus cancel in the differences. Thus neither nucleon structure, nor
the uncertainties therein, are significant when extracting
$\sin^2\theta_W$ from isotope ratios.
\section{ NUCLEAR MODELING}
{}From the rather simplistic model of section III, we already observe
that the desired high precision measurements of electroweak parameters will
require knowledge of neutron radii in heavy nuclei to within at least
several percent (see also the discussion in section VI). At this level,
one clearly must treat higher moments with some care, and the
microscopic details of the nucleon distributions may be of some
importance. For this reason, we have attempted to evaluate $q_n$ and
$q_p$ numerically, utilizing the best existing nuclear models for
neutron, proton, and charge distributions available to us. In this
section, we discuss some of the basic features of these models, along
with some caveats on their reliability for neutron observables.
The nuclear many-body problem presents a formidable challenge for
infinite nuclear matter, and an even greater one for heavy finite
nuclei. The most popular route being taken today is some version of
Hartree-Fock, which has had considerable success in describing a
variety of nuclear properties semiquantitatively.
\subsection{ Brueckner-Hartree-Fock}
The underlying basis of nuclear Hartree-Fock calculations is Brueckner
theory. The elementary two-body interactions are too strong
(especially the short-range repulsion) to lead to meaningful HF
calculations. Although there has been extensive work on nuclear matter
calculations using Brueckner theory and beyond, for finite nuclei only
light ones have been considered \cite{kb} and nothing for the
nuclei of interest here.
The lowest level is the independent pair approximation. The {\it
effective} interaction is not $v$ but the Brueckner $G$-matrix, where
$G=v\,F$, with $\psi(1,2)=F(1,2)\phi(1)\phi(2)$. $G$ satisfies a
scattering-type equation with a projection operator in intermediate
states which excludes scattering back into the Fermi sea; this also
results in no phase shift (for pairs in occupied states) and to the
two-body wave function ``wound" which extends over a``healing
distance." The $\phi$'s are to be identified with the HF single {\it
quasi}-particle functions. We have used $F$ here to denote the
two-particle correlation function. For repulsive core potentials, $F$
has a hole (wound) centered about $r=0$. The Brueckner $G$ is
non-local, and both energy- and density-dependent.
\subsection{ Two-body correlations}
The dependence of the single
particle density distribution on the correlation
function is relatively small.
We can estimate it as follows. Let
\begin{equation}
\rho_2(\vec r_1,\,\vec r_2)=\rho_1 (r_1)\rho_1
(r_2)f(\vec r_1-\vec r_2)\,.\eqnum{5.1}
\end{equation}
Let $\rho_1(r)\propto e^{-r^2/a_N^2}$ and $f(r)\propto e^{-r^2/a_c^2}$.
For $a_c<<a_N$, one finds for the rms size of the single particle density
distribution
\begin{eqnarray}
<\! r^2 \!>\ &=&\ \int d^3r_1\,d^3r_2\,r_1^2\,
\rho(\vec r_1,\vec r_2)\nonumber\\
&\approx&\ <\! r^2 \!>_1\left[1+{1\over
4\sqrt{2}}
\left({a_c\over a_N}\right)^3\right]\,,\eqnum{5.2}
\end{eqnarray}
where $<\! r^2\!>_1$ corresponds to $\rho_1$.
For heavy nuclei (say $a_N / a_c\approx 7.0/0.7$ fm), the
correction is less than $2\times 10^{-4}$, which is below our level of
current concern.
\subsection{Phenomenological Hartree-Fock, including
deformation and pairing}
Because of the numerical complexity, most HF calculations have
employed phenomenological potentials intended to simulate the Brueckner
$G$-matrix. The most commonly used potentials are varieties of the
very convenient Skyrme interaction. The Skyrme interactions are of the
delta-function form and as such lead to single particle equations with
local one-body potentials and spatially-dependent effective masses,
with no more complication than Hartree calculations. In contrast,
finite range interactions lead to non-local single particle potentials
arising from the exchange term. Momentum-dependent Skyrme interactions
do not lead to further complications and simulate some effects of
finite range. Calculations have also been done with finite range
forces, using e.g. the Gogny interaction.\cite{dec} Note that none
of these phenomenological potentials are intended to reproduce free
nucleon-nucleon scattering. There are of the order of eight (more or
less) adjustable parameters in any model.\cite{que}
Most nuclear structure calculations on heavy nuclei are carried out
in the deformed Hartree-Fock or the Hartree-Fock-Bogolyubov
approximations. The latter include BCS-type pairing.
Hartree-Fock encompasses a limited class of correlation structure. Only
correlations of a collective nature are included. It is not surprising
to find that in HF neutron and proton densities tend to track one another.
Nevertheless, they do exhibit the expected behavior that the neutron rms
radius increases more rapidly than the proton one with increasing $A$ in an
isotopic sequence. The relative tracking (variation in the neutron
skin) depends on the way
in which symmetry energy is handled.
Intrinsic deformations play a key role in spherically averaged
proton and neutron densities. In the uniform, incompressible
approximation, for example, the mean square radius is increased
according to\cite{whf}
\begin{equation}
<\! r^2\!>_\beta=<\! r^2\!>_0 \left[1+{5\over 4\pi}
<\! \beta^2\!>\right] \eqnum{5.3}
\end{equation}
where $\beta$ is the nuclear shape parameter, proportional to the
quadrupole moment. $\beta$ can attain values of the order of $1/3$ and
changes in $<\! \beta^2\!>$ among isotopes can produce deviations in
spectroscopic isotope shifts by an order of magnitude from the
$A^{1/3}$ law. Although HF calculations tend to yield spherical
($<\!\beta\!>=0$) or near spherical equilibrium shapes for the Pb
isotopes, $<\!\beta^2\!>$ is not zero and changes in $<\!\beta^2\!>$
have been considered by some authors.
\subsection{Relativistic Mean Field}
Although nuclear structure is primarily non\-rel\-a\-tiv\-is\-tic,
con\-sid\-er\-able suc\-cess has been achieved by treating the neutrons
and protons as point Dirac particles\cite{ser} [see, however,
Achtzehnter and Wilets \cite{ach}] interacting with phenomenological
vector and scalar mesons in the mean field approximation. The vector
mesons can be identified with the isoscalar omega and the isovector rho
mesons; the scalar meson is a simulation of two-pion exchange. The
mesons are treated in the mean field, or c-number approximation. An
attractive feature of the model is that the strong spin-orbit potential
appears to emerge ``naturally." While this turns out to be true for
isoscalar potentials, it fails badly for the isovector potentials, but
can always be parameterized to yield reasonable results.\cite{ach}
In order to fit nuclear properties, it has been necessary to go
beyond linear field theory. Self-interaction of the scalar field has
been introduced, with additional parameters. Among other problems,
this solved the compression modulus anomaly, which is much too large in
the linear model. The total number of adjustable parameters which must
be introduced is comparable to that required in models using Skyrme
forces. As with Skyrme forces, the mean field approximation does not
lead to nonlocality in the one-body potentials.
\subsection{ RPA and MCHF}
The tail of the neutron or proton distribution has
correlation/polarization corrections not described by HF, at least for
large distances. The reason for this is that the individual nucleon
wave functions see the potential of the ``mean" self-consistent core.
In the tail region, the residual core tends to relax. This is most
evident for the separation energy: In HF, the separation energy of a
nucleon is just the energy eigenvalue (Koopmans' theorem) if the core
is frozen. If the energy of the residual nucleus is recalculated self
consistently, the separation energy is reduced by what is termed the
rearrangement energy. Correlations of this type are included through
RPA, which is equivalent to small amplitude, time-dependent
Hartree-Fock.
Other types of correlations could be included through
multiconfigurational Hartree-Fock, which, as the name implies, means that
the trial wave function is not a single determinant, but a sum of
determinants (configurations). This serves two purposes: correlations of
the kind allowed by the choice of configurations are included, and the
occupation of these configurations modifies the mean field potential and
single particle functions.
\subsection{Beyond Hartree-Fock plus}
Most HF and HFB calculations do reasonably well in reproducing
atomic isotopic shifts for the {\it even-even} isotopes of Pb below
208. So, incidentally, does the droplet model of
Myers\cite{mey}. They all fail to reproduce even-odd staggering,
which shows odd nuclei to be smaller than the mean of their even-even
neighbors, and also do badly on the shifts above 208.
There are no giant shell model diagonalization calculations available
which yield densities for heavy nuclei. Such would be very valuable
for comparison with Hartree-Fock results, since in principle they
include all types of correlations, limited only by the size of the
basis.
An idealized shell model calculation should be based on realistic two
body interactions, the kind which fit free two-body scattering data.
The Hilbert space could be divided into a ``near'' and a ``far'' space.
The effective two body interaction could be obtained by solving for the
Brueckner $G$-matrix with the intermediate states excluded from the
near space. The far space scattering states could be approximated by
plane waves if the momentum sphere separating the spaces is
sufficiently large.\cite{kb} The Hamiltonian matrix for the
inner space, using the effective interaction, is then diagonalized.
\subsection{Summary and Discussion}
Unfortunately, not all of the theoretical considerations discussed
above have been incorporated in any single calculation. Heavy nuclei
pose a difficult challenge for reliable, detailed modeling at the level
of precision we require. There do exist in the literature a number of
recent efforts, as discussed in sections C and D above, which involve
either relativistic or nonrelativistic Hartree-Fock nuclear calculations.
We have accumulated densities from several of these authors in order to
evaluate $q_p$ and $q_n$ and make comparisons among the different
models. These include various HF calculations with Skyrme
forces,\cite{gar,fri} an HFB calculation with a Gogny finite
range D1S interaction,\cite{dec,gir} and several relativistic
mean field models.\cite{rei,reiII} The results are presented
in Sec. VII.
The modelers fit their adjustable parameters to choices among various
bulk properties (energy per nucleon, compressibility modulus, symmetry
energy, etc.), and properties of particular nuclei (energies, charge
radii, deformations, spectra, multipole sum rules, etc.) Indeed, the
physics behind the models comes, in part, from the choice of the
particular observables included in the parameter fits. The models we
have selected all do roughly equally well in fitting the wide range of
nuclear observables available across the periodic
table.\cite{fri,rei}
The analysis of atomic PNC, as discussed in sections II and III, relies
on a detailed knowledge of neutron distributions in nuclei. The lack of
unambiguous, precise experimental measures of neutron radii means that
all of these models must ``extrapolate'' to the desired neutron
properties. Charge radii, on the other hand, are in a certain sense
``built in'', in that the set of observables to which the nuclear model
parameters are fit includes charge radii of several even-even nuclei,
one of which is $^{208}$Pb. In defense of the models, they predict
with good success the charge radii of other even-even nuclei not
included in the fit,\cite{reiII} and also reproduce the
well-measured isotopic charge radius shifts for, e.g. the even isotopes
of Pb.\cite{gar,auf} However, they do not reproduce the
observed even-odd staggering of charge radii very well, nor are the
results as good for the charge radius of $^{210}$Pb, an indication that
some care should be taken when considering non closed-shell cases.
There do exist some data which may give more direct information on the
neutron skin. This might be used as additional input to these nuclear
models, and could help further constrain the predictions for neutron
radius, and neutron isotope shifts, if one could demonstrate
consistency in the results. Perhaps the best known data comes from 800
MeV polarized proton scattering from
$^{208}$Pb.\cite{ray,batty} This gives $R_n - R_p = 0.14 \pm
0.04$ fm. The quoted error, which is quite small for our purposes,
contains both statistical and certain theoretical uncertainties as
stated. However, there are still {\it additional} theoretical
uncertainties, involving e.g. assumptions about the {\it in medium}
nucleon-nucleon t-matrix, and the result seems to exhibit a rather
large and troubling energy dependence.\cite{rayii} The
absolute value of $R_n$ in one isotope is believed to be fairly
difficult to obtain with confidence using such experiments. However, it
may be that the relative shift in $R_n$ among isotopes involves
cancellations that reduce these theoretical uncertainties. Measurements
on other Pb isotopes (data\cite{hin} apparently exists for $^{206}$Pb)
would clearly be of interest in this context. Further
experimentation and theoretical analysis at other energies are also
crucial to demonstrating the consistency of the results.
Experiments involving intermediate energy charged pion scattering from
nuclei may also help further constrain the neutron radii, or
the relevant isovector model parameters, as well. Such data exists for
$^{208}$Pb,\cite{olm} but again the absolute normalization
poses a real challenge to analyses.\cite{batty} Taken at face
value, the $\vec p$ and $\pi$ results for lead neutron radii do agree
with one another reasonably well, and also match with e.g. the Gogny
finite range Hartree-Fock calculations. Another experimental
possibility involves energies and sum rule strengths of giant multipole
resonances.\cite{kra} The uncertainties here are even larger,
and difficult to estimate. Clearly, a reliable set of such additional
``strong probe'' inputs, including yet other options such as $\alpha$
particle scattering, kaon scattering, Coulomb displacement energies,
etc., could be an aid in constraining the theoretical models
on neutron properties.
Another promising experimental possibility for the future might be
direct electroweak experiments, such as parity violating asymmetries in
elastic, intermediate energy ${\vec e}^{\,-}$ - nucleus scattering, as
proposed by Donnelly, Dubach, and Sick,\cite{donn} or perhaps
elastic $\nu$ scattering. The reactions and analyses are quite clean,
just as in the charge scattering case. There would be, for example, no
serious ambiguities in the absolute scale of the radii measured. Such
experiments would in fact be sensitive to the full nuclear weak charge
distribution, rather than just the RMS radius, which many of the
strong probe measurements are primarily sensitive to. Because such
experiments could be done at moderate momentum transfers ($q \sim 1 $
fm$^{-1})$, the extraction of nuclear distribution information would be
much less sensitive to the precise values of electroweak parameters
than in the corresponding atomic parity violation case. The
asymmetries and $\nu$ cross sections are naturally extremely small,
and the experimental challenges are formidable. Nevertheless, recent
estimates for the parity violating asymmetries indicate that
measurements sensitive e.g. to the neutron RMS radius in $^{208}$Pb at
the 1\% level are feasible.\cite{donn} As we will see in
sections VI and VII, such a level would make the nuclear structure
uncertainties quite negligible for the purposes of extracting standard
model parameters from single isotope atomic PNC measurements.
In any case, current model fitting has been done with the best and
most reliable data at hand, most of which are not directly sensitive to
neutron distributions. It is always difficult to estimate the
theoretical uncertainties in such model calculations. In this
section, we have already mentioned several potentially important
missing features that future work should address, especially involving
nucleon correlations. We have not attempted
here to try to choose a ``best model'' from the various ones we
examined, but rather wish to evaluate the existing spread in
predictions as an effective lower bound on the theoretical uncertainties
involved. One might, however, try to make a selection based on
detailed comparisons, specifically targeting a good fit to
heavier nuclei energies, isotopic shifts, giant dipole properties, and
other quantities potentially sensitive to isovector properties. We
encourage work in such directions. The goal should be to find the most
reliable model(s) while still retaining an estimate of the remaining
theoretical uncertainties.
\section{ERROR ANALYSIS AND TESTS OF ELECTROWEAK PHYSICS}
One of the motivations for further improving atomic parity violation
experiments is to test the Standard Model at the level of its
one-loop electroweak radiative corrections. This allows one to probe
for possible small ``new physics'' effects, which would appear
as further loop corrections or more directly as additional
interactions at the tree level (i.e., without loop corrections). A
good example of the latter, to which atomic PNC is particularly
sensitive, is exchange of a second, more massive, neutral Z-boson
required in theories with larger gauge groups. In the Standard
Model, the loop contributions are separated into two parts: fixed
radiative corrections due to contributions from the known quarks, leptons
and bosons, and the heavy physics part due to contributions from the top
quark and the Higgs boson. One is interested in an experimental
determination of the heavy physics part, which in the language of
Marciano and Rosner\cite{mr} is expressed in terms of weak
isospin-conserving, S, and isospin-breaking, T, effects. These two
constants\cite{pes} are a convenient way not only of including
uncertainties in the top quark and Higgs masses, but of
parameterizing the effects of some specific classes of new physics
as well. It turns out that low energy PNC measurements are nicely
complementary to high energy measurements such as direct Z-boson
production, since both the radiative corrections and the
sensitivities to new tree-level interactions are quite different.
To show how the radiative corrections and the possible new physics
enter into atomic PNC, and how they might compare in size to nuclear
structure effects, we begin by rewriting equation (2.9) in the form:
\FL
\begin{equation}
<\! i|H_{PNC,1}|j\!>={G_F\over 2\sqrt{2}}
C_{ij}(Z){\cal N}
[Q_W(N,Z)+Q^{nuc}_W(N,Z)] \eqnum{6.1}
\end{equation}
where $Q_W(N,Z)$, known as the nuclear weak charge, is the quantity of
primary interest to electroweak theory, and in the standard model
without radiative corrections reduces to:
\begin{equation}
Q_{W}^0=-N+Z(1-4\,\bar x) \eqnum{6.2}
\end{equation}
where $\bar x \equiv\sin^2\theta_W$. $Q_W(N,Z)$ is determined from
atomic experiments by combining atomic measurements of $<\!
i|H_{PNC,1}|j\!>$ with calculations of both atomic structure (contained
in the factor $C_{ij}$) and nuclear structure. The nuclear structure
corrections are contained in $Q^{nuc}_W(N,Z)$, which is given by:
\FL
\begin{eqnarray}
Q^{nuc}_W(N,Z)\ &\equiv&\ Q_W([q_n-1]N,[q_p-1]Z)\nonumber\\
&\approx&\ -N(q_n-1)+Z(1-4\,\bar x)(q_p-1)\,. \eqnum{6.3}
\end{eqnarray}
Nuclear structure is also contained in the normalization $\cal N$, but
as we will see in section VII, $\cal N$ is determined by the nuclear
charge distribution, which is usually known experimentally.
When we include possible new physics, together with the effects of
radiative corrections which have been calculated by
others,\cite{mr} $Q_W(N,Z)$ becomes:
\FL
\begin{eqnarray}
Q_W(N,Z)=&&(0.9857\pm0.0004)(1+0.00782T)\nonumber\\
&&\ \times\ [-N+Z(1-(4.012\pm0.010)\bar x)] \nonumber\\
&&\ \ \ \qquad + Q^{new}_{tree}(N,Z) \eqnum{6.4}
\end{eqnarray}
\narrowtext
where $\bar x$ is assumed here to be defined at the mass scale $m_Z$
by modified minimal subtraction,\cite{mr} and is given by:
\begin{equation}
\bar x =
.2323 \pm .0007 +.00365S-.00261T\,. \eqnum{6.5}
\end{equation}
The errors indicated in (6.4) and (6.5) come from uncertainties both in
experimental input parameters and in evaluations of known physics
loop-diagrams. The unknown, heavy physics loop-corrections are
contained in the parameters $S$ and $T$, which depend upon the heavy
masses, and are defined such that $S = T = 0$ if $m_H =100$ GeV, $m_t
= 140$ GeV, and if there is no new physics beyond the Standard
Model. Including $Q^{new}_{tree}(N,Z)$ in $Q_W(N,Z)$ allows for additional
tree-level physics beyond the Standard Model. For example, exchange
of the extra $Z_x$ in SO(10) models\cite{mr,lon} (assuming no $Z_x-Z$
mixing) would make:
\begin{equation}
Q^{new}_{tree}(N,Z)\approx
0.4(2N+Z)m^2_W/m^2_{Z_\chi}\,. \eqnum{6.6}
\end{equation}
It is useful to consider how well the parameters in $Q_W$ are currently
known. The central value of $Q^{new}_{tree}(N,Z)$, determined mainly
by Cs PNC measurements, is about $2.2 \pm 1.6 \pm .9$ ({\it if} all
other heavy physics in equation (6.4) is ignored), and corresponds in
the SO(10) model to $m_{Z_\chi}\approx 500$ GeV. Conversely, assuming
no new tree-level physics (i.e., $Q^{new}_{tree} = 0$), the experimental
uncertainty in T is currently around $\pm 1$, and in S around $\pm 3$,
the latter determined largely from Cs PNC. Ultimately, as Marciano and
Rosner have indicated, an effort to reduce the uncertainty in S to $\pm
0.2$ is extremely important, since at that level it is sensitive even
to minimal one-doublet technicolor models. This sort of accuracy is an
extreme challenge to either high energy or atomic experiments. Current
knowledge of $\sin^2\theta_W$ from a global analysis of electroweak
data\cite{ken} can be summarized by $\bar x = 0.230 \pm .004$,
(roughly 2\% uncertainty). If future high energy measurements were to
reduce the uncertainty in $\bar x$ beyond what is attainable in atoms,
the atomic experiments would still be valuable for improving the limits,
e.g., on an additional Z.
In summary, any improvement in determining atomic PNC is likely to
provide useful information about electroweak physics, and it becomes
extremely important to work out how much nuclear structure
uncertainties may be a limiting factor, and to reduce these
uncertainties where possible.
We first consider the impact of nuclear uncertainties on
PNC measurements of single isotopes. PNC experiments to date have
been done on stable isotopes of heavy atoms, namely Cs, Pb, Bi, and
Tl, and have not compared different isotopes of the same element.
From equation (6.1) we derive an expression for the uncertainty in
$Q_W$ in terms of the uncertainties in atomic and nuclear structure
and in the measured quantity ${\cal O} \equiv <\!i|H_{PNC,1}|j\!>$:
\begin{equation}
{
{\delta Q_W \over Q_W} \ \approx\
{\delta \cal O \over \cal O} - {\delta C_{ij} \over C_{ij}} -
{\delta \cal N \over
\cal N} - {\delta Q^{nuc}_W \over Q_W} \,. \eqnum{6.7}
}
\end{equation}
If we assume that $\cal O$ can be measured to arbitrary
accuracy, and that proton distributions (which will
influence $\delta {\cal N}$) are also well enough understood and/or
measured, there remain the uncertainties coming from atomic and
nuclear structure, which we can write in the form:
\begin{equation}{
{\delta Q_W \over Q_W} \ \approx\ - {\delta C_{ij}
\over C_{ij}} - {\delta q_n} \eqnum{6.8} }
\end{equation}
where we have dropped all terms containing the factor $1-4.012\bar
x$, which should be quite negligible due to the accidental value of
$\bar x \approx {1\over 4}$. Rewriting in terms of the weak
interaction parameters, we obtain:
\begin{equation}
0.014{Z\over N}\delta S +
{\delta Q^{new}_{tree}(N,Z)\over Q_W}
\approx\ -{\delta C_{ij} \over C_{ij}} -
{\delta q_n} \,. \eqnum{6.9}
\end{equation}
ignoring the contribution of the weak-isospin breaking parameter T
which cancels to better than 10\% for the full range of $Z/N$
found in the elements of experimental interest. Thus a PNC
measurement in a single isotope can set limits on the
weak-isospin conserving parameter $S$ and/or new tree-level
interactions, and in fact the best limits on both of these
parameters now come from PNC measurements in atomic cesium. To
determine the role of nuclear structure, we must compare the
uncertainty $\delta q_n$ on the right hand side of equation (6.9) to
the atomic structure uncertainty $\delta C_{ij}/C_{ij}$ . This we do
later, in section VII.
Because of the difficult atomic physics calculations, there has been
some serious interest in measuring parity violation in a chain of
isotopes. Taking ratios between isotopes cancels essentially all
dependence on atomic structure. Unfortunately, although the
atomic physics indeed cancels in the ratio, the nuclear structure does
not. Referring to equation (6.1) we consider the ratio:
\begin{equation}
{\cal R} \equiv {{\cal O}\over {\cal O'}}
= {{[Q_W(N,Z)+Q^{nuc}_W(N,Z)]{\cal N}}\over
{[Q_W(N',Z)+Q^{nuc}_W(N',Z)]{\cal N'}}}\,. \eqnum{6.10}
\end{equation}
where primed and unprimed quantities refer to different isotopes.
The sensitivity of $\bar x$ and $Q^{new}_{tree}(N,Z)$, extracted from
this ratio, to the nuclear structure is then given approximately by
\begin{eqnarray}
Z{\delta \bar x\over \bar x}\ &&- \delta
Q^{new}_{tree}(N,Z) + N{\delta \Delta Q^{new}_{tree} \over \Delta N}
\nonumber\\
&& \approx\ {NN'\over \Delta N}
\left[ {\delta {\cal R}\over {\cal R}}
+ {\delta (\Delta {\cal N})\over {\cal N}}
+ {\delta (\Delta q_n) \over q_n} \right]. \eqnum{6.11}
\end{eqnarray}
\narrowtext
where we have made simplifying assumptions that the isotopes are close
together, i.e. $\Delta N \equiv N'-N \ll N$, that $\sin^2\theta_W
\approx 1/4$, and where we have used e.g.
$({\delta q_n/ q_n}-{\delta q_n'/q_n'})\approx{\delta(\Delta q_n)/q_n}$,
which is numerically accurate for the models of Pb we have considered.
Because of the special sensitivity of atomic PNC to any additional
heavy Z-bosons, we note as an example that a determination of $m_{Z_\chi}$
in the model of equation (6.6) would be constrained by replacing the
left hand side of equation (6.11) by
$ \left( Z{\delta \bar x / \bar x}\ - 0.4Z \delta
{({m^2_W / {m^2_{Z_\chi}}})} \right) $.
The uncertainties on the right side of (6.11) are
effectively in the relative {\it difference} between
quantities for two isotopes. In principle, different nuclear models
which disagree on the absolute values of, say, $q_n$ may agree on the
relative {\it change} in this quantity to a much higher degree of
accuracy. However, such a reduction in uncertainty in the terms
within the brackets in expression (6.11) is roughly compensated
by the factor $N/\Delta A$.
Comparing with equation (6.9) for a single isotope, in which any
new tree-level interactions enter with the equally uncertain
loop-parameter $S$, we see that when we instead compare isotopes,
$Q^{new}_{tree}$ appears together with a different parameter, $\bar
x$, which is independently measurable in high energy experiments.
\section{DISCUSSION}
To calculate $q_n$, we use various theoretical predictions of neutron
and proton distributions from the literature. Proton distributions are
used to compute $f(r)$, the electronic wave function overlap
defined in equation (2.5). We solve numerically for single electron
Dirac $s_{1/2} $ and $p_{1/2}$ wave functions near the origin, in the Coulomb
potential of the nuclear charge distribution (as discussed in section
II), and make {\it no} approximation of a power series in $Z\alpha$, as
was done e.g. for equation~(3.2). We have neglected the contributions
to the nuclear charge distribution from internal neutron structure,
as discussed in section IV. We estimate the error associated with
this assumption to be well below the level
of the model uncertainties themselves. The quantity $q_n \equiv
\int\,\rho_n(r)f(r)\,d^3r$ is then calculated directly from the
corresponding neutron distribution.
In Tables \ref{table1a} and \ref{table1b},
we present the rms radii $R_n$ and $R_p$, the correction
factors $q_n$ and $q_p$, and the electron normalization $\cal N$, for
several nuclear models of the Pb isotopes 202 and 210.
Except for the norm, the spread in values in the final rows
of Tables \ref{table1a} and \ref{table1b}
should give some indication of a {\it lower limit} on the current
level of theoretical model-dependent uncertainties, assuming that one
accepts these models as equally phenomenologically reasonable.
The normalization factor $\cal N$ defined in equation~(2.5), which is
proportional to $1/f(r\gg R_p)$, is defined arbitrarily here as
$0.10361/f(300 \ \rm fm)$. The numerator, $f^{\rm\,expt}(300 \ \rm
fm)$, is evaluated using a model independent experimental charge
distribution from electron scattering off $^{208}$Pb. As stated
earlier, we are not concerned with the absolute value of the norm, but
only its dependence on atomic weight and charge distribution. This
definition simply scales $\cal N$ to be near 1.0. For $^{208}$Pb, the
model spread in the normalization from Table \ref{table1b} might appear
to contribute at a significant level. One can, however, consider
correcting $\cal N$ by using an approximate formula relating $\cal N$
to the charge radius, namely ${\cal N} = R^{-\gamma}$, as in
eqn.~(2.6). This is given by
\begin{equation}
{\cal N'} = {\cal N}
(R^\gamma/R^\gamma_{\rm expt}). \eqnum{7.1}
\end{equation}
The model spread in
this ${\cal N'}$ is significantly reduced. The point is that these
models are not precisely reproducing the observed charge radii of the
lead isotopes, which feeds rather directly into a calculation of $\cal
N$. The correction factor above compensates for this, using the
existing high precision measurements of charge radii from optical
isotope shifts and electron scattering.\cite{auf,jag}
Some of the results in tables \ref{table1a} and \ref{table1b}
are reproduced in graphical form also, in Figures \ref{fig1a},
\ref{fig1b}, and \ref{fig2}. In Fig.~\ref{fig1a}, we plot the
predicted $R_n$ versus atomic weight for several even lead isotopes,
and in Fig.~\ref{fig1b} the ratio $R_n/R_p$. The spread in $R_n$ among
models is decidedly larger than the spread in $R_p$. We do note a
systematically larger neutron radius in the relativistic
models.\cite{sha} The origin of this is indeed not yet completely
understood, but may be connected with larger asymmetry energies found
in these models. This in turn might tend to pull neutron and proton
distributions together where the densities are high, leaving a somewhat
larger neutron tail.
In Fig.~\ref{fig2}, we plot $q_n$ versus atomic weight for several even lead
isotopes. The spread is closely related to the spread in $R_n/R_p$
shown in Fig.~\ref{fig1b}, as might be expected from the simplified
formulas (1.1) or (3.3b) based on uniform nuclear charge density.
Estimates of $q_n$ using these simplified formulas yield the same
general trends as the detailed calculations, with absolute values
differing generally by parts in a thousand or less. The relativistic
models yield somewhat smaller $q_n$, due to their larger $R_n/R_p$
ratio.
In the case of {\it single isotopes}, the total nuclear model spread
does not appear to be the most serious problem in using equation (6.8)
or (6.9) to extract weak interaction parameters from atomic PNC. For
$^{208}$Pb, the typical full spread in calculated $q_n$ is
$\alt 0.005$. In the case of $^{133}$Cs, the sensitivity to nuclear
structure is even weaker, due to the smaller value of (Z$\alpha$). A
larger uncertainty, at least at the present time, is due to atomic
physics calculations.\cite{bjs,dfs} For example, Cs is one
of the most favorable elements from the point of view of atomic
theory, and to achieve the current level of quoted uncertainty of
$\delta C_{ij}(Z)/C_{ij}(Z) \approx $1\% in Cs is an impressive
task. But this uncertainty is still probably larger than the
uncertainty in $q_n$ for Cs. Significant future improvement in atomic
calculations is likely to be difficult. Thus, aside from any
experimental uncertainties, atomic structure is the present limiting
factor in getting $Q_W$ and the associated weak parameters from single
isotope atomic PNC measurements, and appears to remain so even after
considering the possible nuclear physics effects. This conclusion
is consistent with the findings mentioned in the calculation of
reference 2.
Consider next the ratios in an isotopic chain, for example
($^{202}$Pb/$^{208}$Pb). Referring to equation (6.11) we see that the
PNC experiments would then be measuring $\bar x$ and/or observing new
tree level physics. For definiteness let us assume no new tree level
physics. Then a $\pm$1\% extraction of $\bar x$ would require $\delta
\Delta q_n \alt 6\cdot 10^{-4}$, and ${\delta \Delta {\cal N}/ {\cal N}
} \alt 6\cdot 10^{-4}$. Assuming uniform nuclear distributions, this
implies $\delta\Delta(R_n/R_p) \alt 4\cdot 10^{-3}$. Referring to
Table \ref{table2}, which shows the change in various quantities
between these two particular lead isotopes, the model spread for
$\delta \Delta q_n$ is around $9\cdot 10^{-4}$ and for $\delta \Delta
{\cal N}/{\cal N} $ is about $6\cdot 10^{-4}$. Note however that when
${\cal N}$ is corrected as in (7.1) above, using experimental knowledge
of charge radii, this spread, at least, is significantly reduced to
below the $\pm$1\% level. This is seen from the final column in Table
\ref{table2}. But the spread in $\Delta q_n$ remains, and is
comparable to the accuracy needed for a 1\% extraction of $\bar x$.
Similarly, the model spread in $(R_n/R_p)$ from Table III is about
5$\cdot10^{-3}$, which likewise corresponds to a $>$1\% spread in $\bar
x$.
If we exclude the relativistic models, which seem to have
substantially different neutron radii from the conventional H-F
calculations, the model spread just among the various Skyrme
parameterizations considered gives $\delta \Delta q_n \approx
4\cdot10^{-4}$. It thus appears unlikely that PNC measurements
comparing Pb isotopes could yield much better than a 1\%
determination of $\bar x$, unless there is significant improvement in
understanding of nuclear structure.
The same results can be seen perhaps more clearly in Fig.~\ref{fig3},
which displays in graphical form the values of $\Delta
q_n(202\rightarrow 208)$ from Table \ref{table2} versus the different
models considered. The spread in predictions of this quantity is
actually larger than 100\%. Also shown in the figure is a typical
scale of 1\% in the weak angle. As noted already, the model spread is
too large for extractions of $\bar x$ at the sub 1\% level if one
cannot otherwise eliminate or improve any of the models used. On the
other hand, the nuclear structure uncertainties may not preclude a
significant improvement in sensitivity to new Z bosons or other new
tree-level physics in equation (6.11), particularly if $\bar x$ is
determined well by high energy experiments.
Although the nonrelativistic models do appear to cluster together
somewhat, one should perhaps be a bit wary of their apparent
self-consistency. For example, a modification of the coefficient of
the isovector (n-p asymmetry) surface term, a
$(\rho_p-\rho_n)\nabla^2(\rho_p-\rho_n)$ term in the Skyrme
Lagrangian,\cite{fri} has little effect on most bulk properties, and
hence on the goodness of the Skyrme fits.\cite{rei} This term, however,
does modify the neutron skin significantly. Reinhard's rough
estimates show that an uncertainty of $\Delta R_n\approx \pm .15$ fm
is not unreasonable.\cite{rei} This in turn can modify the quantity
shown in Fig.~\ref{fig3} by amounts of ${\cal O}(6\cdot 10^{-4})$,
larger than the spread in the given Skyrme models. The relativistic
models do not have such flexibility, as the isovector rho couplings are
largely constrained by isotopic trends in ground state energies and
charge radii, but this is of course no guarantee that these models
correctly describe all isovector properties equally well.
In the case of Cs isotopes, accurate calculations for neutron radii (or
even proton radii) are difficult. They have odd Z, and require
additional approximations to deal with unfilled shells, as well as
deformations. The lack of success in predicting the even-odd
staggering of $\delta \langle r^2\rangle_{ch}$ in lead isotopes
indicates the seriousness of these problems. An estimate of the scales
involved, however, can be made using calculations with existing nuclear
codes. One such result\cite{bei} gives $R_n/R_p\approx 1.03$ for
$^{135}$Cs, and $\Delta R_n/R_p$ ($^{131}$Cs $\rightarrow \ ^{135}$Cs)
$\approx 5\cdot 10^{-3}$. If this latter number itself has a 100\%
uncertainty (for comparison, see Fig.~\ref{fig3} for the case of lead
which does show a 100\% spread among model predictions of the
equivalent quantity $\Delta q_n$ for about the same $\Delta A/A$), then
the uncertainty in $\bar x$ from this fairly small range of isotopes
would be approximately 1\%. $\Delta A$ of up to 10 or higher may be
experimentally possible for Cs, which might help to reduce the nuclear
physics uncertainties. From the experimental side, the absence of
stable isotopic partners to $^{133}$Cs makes it difficult to obtain
values of $\Delta R_n/R_p$ from parity violating electron scattering,
or $\vec p$ elastic, or pion experiments, as may be possible for the
lead isotopes. Further work on theoretical estimates for Cs isotopic
radii is clearly called for.
Given a set of experimental results for isotopic PNC ratios, one can
also consider a bootstrap procedure: from atomic experiments over {\it
several} isotope differences, use the various models to extract the
weak mixing angle. Then, only those models which yield the same
$\sin^2\theta_W$ for the various isotopic pairs are acceptable.
Unfortunately, the various nuclear models we have considered (for lead
isotopes near $^{208}$Pb) yield predictions for the PNC ratios which
are fairly linear with $\Delta A$. Since this prediction is also
roughly linear with $\sin^2\theta_W$, it appears that the various
nuclear models could be internally consistent, each yielding a unique
$\sin^2\theta_W$ but differing from model to model about the extracted
value. Of course, one cannot draw any firm conclusions about this until
after the data are known. There are indeed some slight deviations from
linearity, especially for non-closed shell isotopes, and one may be
able to take advantage of this. In essence, this bootstrap idea uses
PNC atomic isotope ratios themselves as our desired additional
constraint on neutron properties - with a
large enough set of PNC data, one could hope to simultaneously constrain
the nuclear model parameters {\it and} measure the weak mixing
angle.
\section{CONCLUSIONS}
For the case of Pb, in order to extract
electroweak parameters from atomic PNC experiments at a level of precision
which would be considered ``significant'' for testing the Standard
Model, we have shown that it is necessary to have confidence in the
isotopic relative neutron/proton radius shift, $\Delta(R_n/R_p)/\Delta
A$, to better than a few times $ 10^{-4}$. We have examined various
nuclear model calculations, and find that the spread in theoretical values
corresponds to an uncertainty in the weak mixing angle greater than
1\%, with the assumption that no new physics is present.
Without some further basis for discriminating among the various
models, the spread represents a lower bound to the uncertainties in
the calculated values.
The basic problem is essentially that the models have been
parameterized to fit properties like charge distributions, which are
not directly sensitive to neutron distributions. As Reinhard has
shown, it appears that a surface symmetry energy term in certain
nonrelativistic (Skyrme interaction) nuclear models can be ``dialed''
somewhat to change the neutron size without significantly spoiling the
basic fits. Including data which are more sensitive to neutron
properties, such as isotopic trends in ground state properties, and
perhaps giant resonance energies and sum rules, could be useful to
constrain such terms.
There do exist experiments which are sensitive to neutron radii, e.g.
$\pi^+/\pi^-$ scattering, and medium energy polarized proton
scattering. If the quoted errors on the latter can be taken literally,
one could use it to discriminate among the various models and provide
the confidence one needs to extract the desired electroweak parameters
from atomic experiments. It would be valuable to repeat the
experiments and analyses at other energies in order to demonstrate the
consistency of the results, and to consider both $\pi^\pm$ and $\vec p$
scattering on multiple Pb isotopes for a direct experimental measure of
the isotopic shift in neutron radii. We have also noted in this work
that the detailed distribution of neutrons, beyond just the RMS radius,
is of some importance. This implies that we may still have to rely on
the nuclear models for an extraction of the electroweak parameters. As
discussed earlier, the use of alternative electroweak probes, such as
parity violating (polarized) electron scattering at intermediate
energies,\cite{donn} would be of obvious value for
independently extracting the desired neutron distribution.
We can turn the problem around, however, and note that an accurate
measurement of $\bar x$ from high energy experiments presents a
unique opportunity to extract the isotopic neutron
radius shifts from atomic experiments cleanly, and hence test the
nuclear models. The situation is quite analogous to the extraction of
changes in charge radii from atomic isotope shifts.
\nonum
\section{ACKNOWLEDGMENTS}
We are grateful to C. Chinn, R. Furnstahl, N. Van Giai, M. Girod, I.B.
Khriplovich, J. Martorell, M. Musolf, E. Ormand, P.-G. Reinhard, P.
Ring, B. Serot, and D. Sprung for valuable discussions and, in some
cases, the sharing of calculational data. This work is supported in
part by U. S. Department of Energy grants DOE/ER-06-91ER40561 and
DOE/ER-DE-FG06-88ER40427, and by NSF Grant PHY 8922274.
| {
"redpajama_set_name": "RedPajamaArXiv"
} | 8,360 |
National Academy of Sciences of Belarus
Tasks and functions
Departments of Sciences
Apparatus of the NASB
NASB Heads
Regulatory and reference information
Academy premiums
Academy contests
Scientific councils and committees
Training of scientific personnel
Academicians
Corresponding members
Honorary and foreign members
In memory of the scientist
Collections of scientific papers
Newspaper "NAVUKA"
Republican events
ANISOVICH Gennady Anatolyevich
Gennady Anatolyevich Anisovich (25.08.1932, Minsk – 06.12.2003), a scientist in the field of materials science. Academician of the National Academy of Sciences of Belarus (1984; corresponding member since 1972), Doctor of Engineering Sciences (1970), Professor (1981). Honored Worker of Science and Technology of the BSSR (1978).
— Graduated from the Belarusian Polytechnic Institute (1955).
— In 1960-1970 - Junior Researcher, Senior Engineer, Senior Researcher at the Physicotechnical Institute of the Academy of Sciences of the BSSR, since 1970 - Deputy Director of the Mogilev branch of this Institute.
— Since 1992 - Director of the Institute of Metal Technology of the National Academy of Sciences of Belarus (Mogilev).
— Since 1997 - Honorary Director of the Institute of Metal Technology, Academician-Secretary of the Department of Physical and Technical Problems of Mechanical Engineering and Energy of the National Academy of Sciences of Belarus, since 2003 - Chief Specialist of this Department.
Scientific research on thermophysical processes in the foundry. He developed questions of the theory of solidification of castings with special casting methods, established patterns in the field of thermal physics of the processes of solidification of metals and alloys in sand and metal molds and determined the main relationships between technological parameters with respect to casting processes. The features of the mechanism of formation of the structure and properties of cast products under conditions of directional solidification and controlled heat removal in various forms are studied. Developed advanced technological processes for the production of castings and blanks from ferrous and non-ferrous metals when casting in a metal mold with adjustable thermal parameters, casting by freezing, casting into a roll mold, with horizontal continuous casting.
Published more than 200 scientific papers, incl. 6 monographs. He has 155 copyright certificates for inventions.
State Prize of the BSSR in 1990 for research, development and implementation of efficient resource and metal-saving, environmentally friendly technological processes for producing high-quality castings in conditions of centralized production.
Awarded with the Order of the Red Banner of Labor (1981), "Badge of Honor", medals.
Major papers:
Охлаждение отливки в комбинированной форме. М., 1969 (совм. с Н.П. Жмакиным).
Затвердевание отливок. Мн., 1979.
© 2021 National Academy of Sciences of Belarus | {
"redpajama_set_name": "RedPajamaCommonCrawl"
} | 2,884 |
Turmoil and migration
Libya Crisis Watch
The International Rescue Committee provides vital health and protection to vulnerable and displaced Libyans, refugees and migrants caught in an increasingly unstable country. The IRC is one of the few international organisations directly supporting people inside Libya.
The conflict in Tripoli continues to escalate
The conflict in Tripoli continues to escalate, with more than 310,000 Libyans displaced as a result of the violence.
Around 4,200 refugees and migrants continue to be held in detention centres. Many are trapped amid active conflict and are more vulnerable than ever before.
The IRC says a ceasefire is urgently needed and calls for migrants and refugees to be evacuated to safety outside Libya. Meanwhile alternative solutions must be immediately sought.
Many refugees and migrants continue to be intercepted at sea and returned to what is an active war zone. Search and rescue operations at sea must immediately be restored to protect those fleeing the violence.
Read our Jul.3 statement
People in need of humanitarian assistance: 1.1 million
Human Development Index rank: 94
Started work: 2016
Crisis briefing
Libya is facing economic collapse, political instability, and ongoing conflict between violent militias—and it remains Africa's main departure point to Europe for migrants seeking safety and opportunity. The International Rescue Committee provides vital healthcare and protection to vulnerable people caught in an increasingly unstable country.
What caused the current crisis in Libya?
What are the main humanitarian challenges in Libya?
How does the IRC help in Libya?
Libya is in turmoil. Following the 2011 revolution, which ended the 42-year regime of Muammar Gaddafi, civil war erupted in 2014. The oil-rich North African nation has since been engulfed in economic chaos and general lawlessness, with violent militias vying for power—including ISIS. Despite international pressure, political reconciliation between rival governments in the east and west remains a distant prospect.
The civilian population is caught in the middle. Basic public services—health care, education, electricity, banking—are degraded or absent, and the threat of violence is constant.
Over 700,000 migrants are currently in Libya. While some do have legal status and have travelled to Libya to work there, others are undocumented and live in the shadows. Many continue to risk their lives with smugglers to try to get to Europe. More often than not, they are pulled back by Libyan coast guards to Libya, where they are arrested and detained.
Violence and economic decline in Libya have displaced more than 500,000 people and disrupted all facets of life: health care, public utilities, jobs, education, financial services, social safety nets. Restoring primary healthcare services is the most pressing need, as more than 1 million people lack access. Many health facilities across the country are either partially or completely non-functional due to critical shortages of healthcare workers, skilled specialists, medicines and medical supplies.
Migrants and refugees in Libya are also vulnerable. They are often forced to live in the shadows with no access to the services they need and are at risk of exploitation.
Since August 2016, the IRC has provided emergency and reproductive health services in western Libya. The IRC is one of the few international organisations with a direct presence in Libya, with three offices in Tripoli, Misrata and Sirte.
As Libya continues to endure political instability and widespread violence, the IRC is focused on:
providing critical health-care in hard to reach places in western Libya;
providing life-saving medicines to primary health clinics;
when possible, providing a referral pathway for patients in urgent need;
renovating primary health clinics which have been damaged during the civil war;
deploying experienced social workers to provide case management and psychosocial support in communities impacted by the conflict.
The IRC seeks to expand its health and protection programmes in Libya, providing support to vulnerable Libyans, refugees and migrants as funding permits. To this end, the IRC plans to support additional primary healthcare facilities, establish its own community development centre and support people being held in detention centres. There is no shortage of needs in Libya, for both Libyans and migrants, but low commitments from donor countries compared to other humanitarian crises and an especially restrictive security environment pose challenges to scaling up our response.
Courage in conflict
Meet the fearless mobile health team protecting women in Libya
The IRC's mobile health and protection teams are fighting to deliver life-saving care especially in remote areas of Libya.
Conflict in Libya
What's happening in Libya? Five facts you need to know.
Is there an English Channel 'migrant crisis'?
Lost in the desert or drowned at sea: The perils of the world's most dangerous migration route
Why must a journey to safety be so dangerous? | {
"redpajama_set_name": "RedPajamaCommonCrawl"
} | 4,535 |
Thrombosis and fibrinolysis. Clotting factor X (FX) is the final common pathway for all coagulation. See the text (introduction) for a complete description of the process. Closed circles indicate hypofibrinolysis; closed triangles, thrombophilia; TF, tissue factor; FVII, factor VII; FVIIIa, activated form of factor VIII; APC, activated protein C; FIXa, activated form of factor IX; FXa, activated form of factor X; FV, factor V; FVa, activated form of factor V; Lp(a), lipoprotein(a); PAI-1, plasminogen activated inhibitor 1; tPa, tissue plasminogen activator; and FXIIIa, activated form of factor XIII.
Distribution of the factor V Leiden G1691A mutation, the 4G/5G polymorphism of the PAI1 gene promoter, the 20210*A allele of the prothrombin gene, and the MTHFR C677T mutation in 17 patients with retinal vein occlusion and in 234 controls.
The 4G/5G polymorphism of the PAI1 gene promoter: homozygosity (4G/4G), heterozygosity (4G/5G), and normal allele (5G/5G) in 17 patients with retinal vein occlusion compared with 234 controls. The difference is significant (P=.03, Fisher exact test).
Significant (P<.05) differences between 17 patients with retinal vein occlusion and 40 healthy normal controls for measures of hypofibrinolysis and thrombophilia. Lp(a) indicates lipoprotein(a); PAI-Fx, plasminogen activator inhibitor activity; and DRVVT, dilute Russell viper venom (clotting) time.
Median lipoprotein Lp(a) levels associated with mutations in the PAI1 gene in 16 patients with retinal vein occlusion and in 40 controls. The P values were obtained using the Wilcoxon test.
Relationships between 4G/5G polymorphisms of the PAI1 gene promoter and lipoprotein Lp(a) in 16 patients with retinal vein occlusion and 40 controls. The slopes differ (P=.04).
Rath EZFrank RNShin DHKim C Risk factors for retinal vein thrombosis: a case-control study. Ophthalmology. 1992;99509- 514Google ScholarCrossref
Dodson PMClough CGDownes SMKritzinger EE Does type II diabetes predispose to retinal vein occlusion? Eur J Ophthalmol. 1993;3109- 113Google Scholar
Vine AKSamama MM The role of abnormalities in the anticoagulant and fibrinolytic systems in retinal vascular occlusions. Surv Ophthalmol. 1993;37283- 292Google ScholarCrossref
Bandello FVigano D'Angelo SParlavecchia M et al. Hypercoagulability and high lipoprotein(a) levels in patients with central retinal vein occlusion. Thromb Haemost. 1994;7239- 43Google Scholar
Acheson JFGregson RMMerry PSchulenburg WE Vaso-occlusive retinopathy in the primary anti-phospholipid antibody syndrome. Eye. 1991;5 ((pt 1)) 48- 55Google ScholarCrossref
Glacet-Bernard ABayani NChretien PCochard CLelong FCoscas G Antiphospholipid antibodies in retinal vascular occlusions: a prospective study of 75 patients. Arch Ophthalmol. 1994;112790- 795Google ScholarCrossref
Wenzler EMRademakers AJBoers GHCruysberg JRWebers CADeutman AF Hyperhomocysteinemia in retinal artery and retinal vein occlusion. Am J Ophthalmol. 1993;115162- 167Google Scholar
Williamson THRumley ALowe GD Blood viscosity, coagulation, and activated protein C resistance in central retinal vein occlusion: a population controlled study. Br J Ophthalmol. 1996;80203- 208Google ScholarCrossref
Larsson JOlafsdottir EBauer B Activated protein C resistance in young adults with central retinal vein occlusion. Br J Ophthalmol. 1996;80200- 202Google ScholarCrossref
Scat YMorin YMorel CHaut J Retinal vein occlusion and resistance to activated protein C [in French]. J Fr Ophtalmol. 1995;18758- 762Google Scholar
Graham SLGoldberg IMurray BBeaumont PChong BH Activated protein C resistance: low incidence in glaucomatous disc hemorrhage and central retinal vein occlusion. Aust N Z J Ophthalmol. 1996;24199- 205Google ScholarCrossref
Gottlieb JLBlice JPMestichelli BKonkle BABenson WE Activated protein C resistance, factor V Leiden, and central retinal vein occlusion in young adults. Arch Ophthalmol. 1998;116577- 579Google ScholarCrossref
Bertram BRemky AArend OWolf SReim M Protein C, protein S, and antithrombin III in acute ocular occlusive diseases. Ger J Ophthalmol. 1995;4332- 335Google Scholar
Glueck CJFreiberg RACrawford A et al. Secondhand smoke, hypofibrinolysis, and Legg-Perthes disease. Clin Orthop. 1998;352159- 167Google ScholarCrossref
Poort SRRosendaal FRReitsma PHBertina RM A common genetic variation in the 3'-untranslated region of the prothrombin gene is associated with elevated plasma prothrombin levels and an increase in venous thrombosis. Blood. 1996;883698- 3703Google Scholar
Brandt GGruppo RGlueck CJ et al. Sensitivity, specificity, and predictive value of modified assays for activated protein C resistance in children. Thromb Haemost. 1998;79567- 570Google Scholar
Falk GAlmqvist ANordemhem ASvensson HWiman B Allele specific PCR for detection of a sequence polymorphism in the promoter region of the plasminogen activator inhibitor-1 (PAI-1) gene. Fibrinolysis. 1995;9170- 174Google ScholarCrossref
Frosst PBlom HJMilos R et al. A candidate genetic risk factor for vascular disease: a common mutation in methylenetetrahydrofolate reductase. Nat Genet. 1995;10111- 113Google ScholarCrossref
Glueck CJBrandt GGruppo R et al. Resistance to activated protein C and Legg-Perthes disease. Clin Orthop. 1997;338139- 152Google ScholarCrossref
Kluijtmans LAvan den Heuvel LPWJBoers GHJ et al. Molecular genetic analysis in mild hyperhomocysteinemia: a common mutation in the methylenetetrahydrofolate reductase gene is a genetic risk factor for cardiovascular disease. Am J Hum Genet. 1996;5835- 41Google Scholar
Newton CRGraham AHepinstall LE et al. Analysis of any point mutation in DNA: the amplification refractory mutation system (ARMS). Nucleic Acids Res. 1989;172503- 2516Google ScholarCrossref
Glueck CJCrawford ARoy DFreiberg RGlueck HStroop D Association of antithrombotic factor deficiencies and hypofibrinolysis with Legg-Perthes disease. J Bone Joint Surg Am. 1996;783- 13Google Scholar
Glueck CJGlueck HITracy TSpeirs JMcCray CStroop D Relationship between lipoprotein(a), lipids, apolipoproteins, basal and stimulated fibrinolytic regulators, and D-dimer. Metabolism. 1993;42236- 246Google ScholarCrossref
Glueck CJShaw PLang JETracy TSieve-Smith LWang Y Evidence that homocysteine is an independent risk factor for atherosclerosis in hyperlipidemic patients. Am J Cardiol. 1995;75132- 136Google ScholarCrossref
Glueck CJFontaine RNGupta ATracy TWang P Familial 4G4G homozygosity in the plasminogen activator inhibitor gene promotor, hyperinsulinemia, thrombosis, and osteonecrosis. J Invest Med. 1997;45331AGoogle Scholar
Gruppo RGlueck CJMcMahon RE et al. Anticardiolipin antibodies, thrombophilia, and hypofibrinolysis: pathophysiology of osteonecrosis of the jaw. J Lab Clin Med. 1996;127481- 489Google ScholarCrossref
Snedecor GWCochran WG Statistical Methods. 7th ed. Ames Iowa State University Press1980;
Glueck CJLang JE Lipoprotein metabolism in the elderly. Abrams WBBeers MHBerkow RBeds Merck Manual of Geriatrics. Rahway, NJ Merck & Co1995;1023- 1052Google Scholar
Rosendaal FR Thrombosis in the young: epidemiology and risk factors: a focus on venous thrombosis. Thromb Haemost. 1997;781- 6Google Scholar
McLean JWTomlinson JEKuang WJ et al. cDNA sequence of human apolipoprotein(a) is homologous to plasminogen. Nature. 1987;330132- 137Google ScholarCrossref
Scanu AMFless GM Lipoprotein(a): heterogeneity and biological relevance. J Clin Invest. 1990;851709- 1715Google ScholarCrossref
Edelberg JMPizzo SV Lipoprotein(a) inhibits plasminogen activation in a template-dependent manner. Blood Coagul Fibrinolysis. 1991;2759- 764Google ScholarCrossref
Pekelharing HLMKleinveld HADuif PFCCMBouma BNvan Rijn HJM Effect of lipoprotein(a) and LDL on plasminogen binding to extracellular matrix and matrix-dependent plasminogen activation by tissue plasminogen activator. Thromb Haemost. 1996;75497- 502Google Scholar
Etingin ORHajjar DPHajjar KAHarpel PCNachman RL Lipoprotein(a) regulates plasminogen activator inhibitor-1 expression in endothelial cells: a potential mechanism in thrombogenesis. J Biol Chem. 1991;2662459- 2465Google Scholar
Fong ACSchatz HMcDonald HR et al. Central retinal vein occlusion in young adults (papillophlebitis). Retina. 1992;123- 11Google ScholarCrossref
Walters RFSpalton DJ Central retinal vein occlusion in people aged 40 years or less: a review of 17 patients. Br J Ophthalmol. 1990;7430- 35Google ScholarCrossref
Fong ACSchatz H Central retinal vein occlusion in young adults. Surv Ophthalmol. 1993;37393- 417Google ScholarCrossref
Priluck IARobertson DMHollenhorst RW Long-term follow-up of occlusion of the central retinal vein in young adults. Am J Ophthalmol. 1980;90190- 202Google Scholar
Svensson PJDahlback B Resistance to activated protein C as a basis for venous thrombosis. N Engl J Med. 1994;330517- 522Google ScholarCrossref
Glueck CJ, Bell H, Vadlamani L, et al. Heritable Thrombophilia and Hypofibrinolysis: Possible Causes of Retinal Vein Occlusion. Arch Ophthalmol. 1999;117(1):43–49. doi:10.1001/archopht.117.1.43
Heritable Thrombophilia and Hypofibrinolysis: Possible Causes of Retinal Vein Occlusion
Charles J. Glueck, MD; Howard Bell, MD; Lou Vadlamani, MD; et al Arun Gupta, PhD; Robert N. Fontaine, PhD; Ping Wang, PhD; Davis Stroop, MS; Ralph Gruppo, MD
From the Cholesterol Center (Drs Glueck, Vadlamani, and Wang) and the Ophthalmology Division (Dr Bell), Jewish Hospital; the Molecular Diagnostics and Research Center (Drs Gupta and Fontaine) and Hematology/Oncology Division (Mr Stroop and Dr Gruppo), Children's Hospital, Cincinnati, Ohio.
Arch Ophthalmol. 1999;117(1):43-49. doi:10.1001/archopht.117.1.43
Objective To determine whether heritable thrombophilia and hypofibrinolysis were risk factors for retinal vein occlusion.
Design Measures of thrombophilia (increased likelihood of thrombus formation) included anticardiolipin antibodies (IgG and IgM), the lupus anticoagulant (including dilute Russell viper venom clotting time), antigenic proteins C and S, and homocysteine. Polymerase chain reaction assays were performed for 3 thrombophilic gene mutations (factor V Leiden, methylenetetrahydrofolate reductase, and prothrombin gene). Measures of hypofibrinolysis (reduced ability to lyse thrombi) included lipoprotein Lp(a), plasminogen activator inhibitor activity, and polymerase chain reaction analysis of the hypofibrinolytic 4G/5G polymorphism of the PAI1 gene. These coagulation measures were performed in 17 patients with retinal vein occlusions with comparison with serologic coagulation measures and polymerase chain reaction assays in 40 and 234 healthy normal volunteers as controls, respectively.
Results Of 14 patients with retinal vein occlusion with measures of dilute Russell viper venom clotting time, a thrombophilic antiphospholipid antibody, 6 (43%) had abnormal results (>38.8 seconds) compared with 1 (3%) of 30 controls (P=.002). Of 17 patients with vein occlusion, 3 (18%) were heterozygous for the thrombophilic factor V Leiden G1691A mutation compared with 7 (3%) of 233 controls (P=.02). Of 17 patients with vein occlusion, 2 (12%) had normal alleles (5G/5G) for the plasminogen activator inhibitor gene promoter; the other 15 (88%) were heterozygous or homozygous for the 4G polymorphism, which is associated with hypofibrinolysis. Of 234 controls, 85 (36.3%) had the 5G/5G allele; 149 (63.7%) were heterozygous or homozygous for the 4G polymorphism (P= .03). Patients with vein occlusion were more likely to have high levels of the major determinant of hypofibrinolysis, plasminogen activator inhibitor activity. These levels were high (>22 U/L) in 6 (38%) of 16 patients with vein occlusion compared with 1 (2%) of 40 controls (χ2=12.8; P=.001). Patients with vein occlusion were more likely (8/16 [50%]) to have high levels of hypofibrinolytic Lp(a) (>35 mg/dL) than controls (5/40 [13%]; χ2=9; P=.003). The median Lp(a) level in patients with vein occlusion who had the 4G/4G genotype was 62 mg/dL compared with 5.3 mg/dL in controls with the 4G/4G genotype (P=.05).
Conclusion Thrombophilia and hypofibrinolysis are possible causes of retinal vein occlusion.
RETINAL VEIN occlusion is severely debilitating, often causing reduced visual acuity, blindness, and neovascular glaucoma.1-13 Multiple causes of retinal vein occlusion have been proposed.1-13 In a case-control study1 of 87 patients with retinal vein occlusion, hypertension, obesity, and hyperlipidemia were identified as major risk factors for retinal vein occlusion. Dodson et al2 reported similar findings in their study and identified type 2 diabetes mellitus as a risk factor.
In 1993, Vine and Samama3 suggested that coagulation disorders, which can lead to the formation of thrombi elsewhere in the body, could possibly contribute to the formation of thrombi in the retinal vasculature. Bandello et al4 reported higher levels of a prothrombin fragment, F 1.2; D-dimer; activated factor VII; and the lipoprotein Lp(a) in patients with retinal vein occlusion than in sex- and age-matched controls. This study strengthened the hypothesis3 that coagulation disorders are pathogenic for retinal vein occlusion. The thrombophilic antiphospholipid antibody also may be a risk factor for retinal vein occlusion.5,6 High levels of plasma homocysteine, which is known to be thrombophilic, have been reported in patients with retinal vein occlusion.7
After the discovery that resistance to activated protein C (mediated by the thrombophilic mutant factor V Leiden gene) is a major risk factor for venous thrombosis, Williamson,8 Larsson,9 and Scat10 and their colleagues reported a higher prevalence of resistance to activated protein C in patients with retinal vein occlusion than in controls. A 1996 study by Graham et al,11 however, found no significant association between the factor V Leiden G1691A mutation and the presence of retinal vein occlusion. Furthermore, a recent study by Gottlieb et al12 in patients younger than 50 years with central retinal vein occlusion reported protein C resistance in 4.7%, comparable to the population frequency. Thrombophilic factors such as protein C, protein S, and antithrombin III deficiency also have been associated with the development of retinal vein occlusion.13
The body maintains an elegant balance between thrombosis and fibrinolysis (Figure 1).14 Thromboplastin (tissue factor) arises from endothelial cells and macrophages, the extrinsic pathway. Tissue factor acts through factor VII on factor X, the final common pathway for thrombosis initiation. Factor X, in turn, acts on prothrombin. Mutations in the prothrombin gene lead to high prothrombin levels15 and increase the risk of thrombosis. Platelets, exposed collagen, and bacterial endotoxins activate the intrinsic pathway, acting through factors VIII and IX on factor X. Factor Xa (the activated form) binds to factor Va and mediates the conversion of prothrombin to thrombin, which then acts on fibrinogen to form a fibrin clot. Thrombin and factor Xa are inhibited by antithrombin III. Protein C is activated by thrombin, which is bound at the endothelium to thrombomodulin, and once activated, protein C inactivates factors VIIIa and Va. The factor V Leiden mutation affects factor V, rendering it resistant to inactivation by protein C, a thrombophilic effect.16 Protein S enhances protein C's inactivation of factors Va and VIIIa. Proteins C and S and antithrombin III are thus endogenous anticoagulants; deficiencies of proteins C and S have been associated with osteonecrosis and with arterial thrombosis.14 After the fibrin clot is formed, it is lysed through the action of plasmin (fibrinolysis), which arises from plasminogen. The conversion of plasminogen to plasmin is stimulated by tissue plasminogen activator, which in turn is inhibited by plasminogen activator inhibitor 1 (PAI-1). 
The 4G/4G polymorphism of the PAI1 gene is hypofibrinolytic, associated with high levels of plasminogen activator inhibitor activity (PAI-Fx).17 Low tissue plasminogen activator levels, high PAI-1 levels, or both, major causes of hypofibrinolysis, have been associated with osteonecrosis and are also risk factors for arterial thrombi.14 Lp(a), an atherogenic, hypofibrinolytic, cholesterol-carrying lipoprotein, may inhibit the conversion of plasminogen to plasmin. A high Lp(a) level is a risk factor for osteonecrosis and is an independent risk factor for coronary artery thrombosis.14 Not depicted in Figure 1 because it has multiple postulated interactions with the coagulation cascade is the common mutation in the methylenetetrahydrofolate reductase (MTHFR) gene,18 associated with increased levels of the thrombophilic amino acid, homocysteine.
Further confirmation of pathogenic relationships between hypofibrinolysis and thrombophilia and retinal vein occlusion,3-5,7-10,13 with better understanding of the molecular genetic cause of venous thrombosis,14-20 should promote further studies of the surveillance, prevention, and treatment of central retinal vein thrombosis.
Our specific aim was to systematically assess measures of thrombophilia and hypofibrinolysis in patients with retinal vein occlusion, including newly developed complementary DNA (cDNA) polymerase chain reaction (PCR) assays for mutant genes that affect coagulation.14-21
Study protocol
The study was approved by the Institutional Review Board of the Jewish Hospital, Cincinnati, Ohio, and was carried out with informed consent.
Blood specimens for measures of thrombophilia, hypofibrinolysis, and lipid profiles were obtained between 8 and 10 AM after an overnight fast from seated patients with vein occlusion.19 Retinal vein occlusion was diagnosed by the referring ophthalmologists based on either or both the results of fluorescein angiography and characteristic fundus features.
cDNA PCR ASSAYS
A full range of cDNA PCR assays14-21 and measures of thrombophilia and hypofibrinolysis14,20,22-26 were systematically performed in patients with vein occlusion and in the healthy control groups. The PCR analyses of 3 thrombophilic gene mutations (factor V Leiden G1691A mutation,16 the MTHFR C677T mutation,18 and the 20210*A allele of the prothrombin gene15) and the hypofibrinolytic 4G/5G polymorphism of the PAI1 gene promoter17,25 were performed at the Coagulation Research Laboratory, Children's Hospital Medical Center, Cincinnati.
Genomic DNA for each PCR assay was obtained by a salting-out procedure.16 A PCR assay was performed for factor V Leiden to determine the presence or absence of a single point mutation at nucleotide 1691 leading to a glutamine substitution for an arginine residue at amino acid 506 in the factor V molecule.16 The PCR product was digested with the restriction enzyme Mnl1. A PCR assay was performed for the enzyme MTHFR to determine the presence of a point mutation at nucleotide 677 leading to a valine substitution for an alanine residue.18 The PCR product was digested with the restriction enzyme HinfI. A PCR assay for the prothrombin gene was performed to determine the presence or absence of a point mutation at nucleotide 20210 leading to a G-to-A transition.15 The PCR product was digested with the restriction enzyme HindIII. A PCR assay was performed for the identification of a sequence polymorphism in the promoter region of the PAI1 gene 675 base pairs upstream from the transcriptional starting site, resulting in 2 alleles containing either 4 or 5 guanosines.17,25
Measures of thrombophilia
Measures of thrombophilia included anticardiolipin antibodies (IgG and IgM), antigenic proteins C and S (Coagulation Research Laboratory, Children's Hospital Medical Center), the lupus anticoagulant (including the dilute Russell viper venom clotting time), and homocysteine (Alliance Hospital Laboratories, Cincinnati, Ohio). Established, previously published methods were used.14,19,22-26
Measures of hypofibrinolysis
The major hypofibrinolytic factors measured by coagulation techniques included Lp(a) (Alliance Hospital Laboratories) and PAI-Fx (Coagulation Research Laboratory, Children's Hospital Medical Center).22,23
Of the 17 patients with retinal vein occlusion, 15 were referred from the practice of a single ophthalmologist and 2 from other ophthalmology practices. The diagnosis of retinal vein occlusion was made by ophthalmoscopic fundus examination revealing disc swelling, venous dilation or tortuosity, retinal hemorrhages, and cotton-wool spots and by fluorescein angiography demonstrating extensive areas of capillary closure, venous filling defects, and increased venous transit time.
Control subjects
For comparison with the cDNA polymorphisms of the patients with vein occlusion, 234 healthy controls (194 children14,16 and 40 adults) were used; for comparison with their coagulation measures, 40 healthy adult controls22 were used. A second group of 34 healthy adults was used for comparison with patients' plasma homocysteine levels20 and a third group of 30 healthy adults for comparison with the patients' lupus anticoagulant. Specimens were obtained from the 194 healthy normal children before same-day outpatient surgery.14 The 3 groups of adults (n=40, 34, and 30) were hospital personnel.14,22 The adult controls were not matched with the patients by age, race, or sex and were younger than the patients, with mean ± SD ages of controls being 37 ± 7 years vs 52 ± 10 years of patients (P=.001). However, the age, sex, and race of the controls were not significantly correlated (P ≥ .09) with any of their serologic coagulation measures, making it unlikely that differences in measures of coagulation between controls and patients represented age, sex, or race effects rather than a predisposition to retinal vein occlusion.
Proportions of patients with retinal vein occlusion and those of controls having abnormalities of thrombophilia or hypofibrinolysis were compared using χ2 analyses or the Fisher exact test when the cell size was less than 527 (Figure 1, Figure 2, Figure 3, through Figure 4).
Relationships between mutations in the PAI1 gene and Lp(a) were compared between patients and controls by fitting regression lines for patients and controls and then comparing slopes27 (Figure 5).
To determine whether the 3 patients with retinal vein occlusion who also had osteonecrosis disproportionately accounted for patient-control differences, all of the statistical analyses were repeated after excluding these 3 patients. Data are expressed as the mean±SD.
Patients with retinal vein occlusion
Retinal vein occlusion was bilateral in 3 patients. There were 9 women and 8 men, 1 of whom was African American, and the rest were white. The age of patients was 52±10 years (median, 51 years; range, 33-69 years) and of controls was 37±7 years (median, 37 years; range, 24-54 years). Of the 40 adult controls, 23 were female; 36 were white, 2 African American, and 2 "other." Six patients (35%) had hypertension that was well controlled with antihypertensive agents, 3 (18%) ingested 7 or more alcoholic beverages per week, 2 (12%) had type 2 (mature-onset) diabetes mellitus, and 1 (6%) smoked 1 pack or more of cigarettes per day.
Three (18%) of the patients had a history of osteonecrosis14,19,22; 4 patients (24%) had other thrombotic events, primarily deep venous thrombosis not including osteonecrosis. No patients had been given high-dose corticosteroid therapy (prednisone, >20 mg/d, for ≥1 week) before the development of retinal vein thrombosis. One patient received high-dose corticosteroid treatment of his retinal vein thrombosis for 2 months; this antedated and contributed to his subsequent development of osteonecrosis of the hips, requiring bilateral hip replacement.
Exclusion of the 3 patients who had both osteonecrosis and retinal vein thrombosis from the patient-control comparisons did not alter any of the statistical comparisons. One of these 3 patients was heterozygous for the mutant factor V Leiden gene, and 1 was homozygous for the 4G/4G polymorphism of the PAI1 gene.
Thrombophilic and hypofibrinolytic gene mutations (patients vs controls)
Of the 17 patients with vein occlusion, 3 (18%) were heterozygous for the thrombophilic factor V Leiden G1691A mutation compared with 7 (3%) of 233 controls (P=.02, Fisher exact test) (Figure 2). Of the 3 patients heterozygous for the factor V Leiden G1691A mutation, 2 were also homozygous for the 4G/4G polymorphism of the PAI1 gene, and 1 of these 2 patients also had high Lp(a) levels (70 mg/dL). One patient was heterozygous for the mutant PAI1 gene and also had high Lp(a) levels (40 mg/dL).
The distribution of the MTHFR C677T mutation did not differ between patients and controls (P=.22) (Figure 2).
All 17 patients with measurement of the 20210*A allele of the prothrombin gene had the normal "wild-type" gene, which was not different (P>.41) from 9 (3.8%) of 234 controls (Figure 2).
The distribution of polymorphism in the promoter region of the PAI1 gene was skewed toward the 4G/4G and 4G/5G genotypes in patients with vein occlusion (χ2=4.8; P=.09) (Figure 2). Controls were 3 times more likely than patients (36% vs 12%) to have the wild-type normal allele (5G/5G) for the PAI1 gene (Figure 3). Controls were less likely than patients (64% vs 88%) to be heterozygous or homozygous for the 4G mutation (P= .03, Fisher exact test) (Figure 3). The frequency of the 4G allele was 21 (0.62) of 34 in patients vs 196 (0.42) of 468 in controls (χ2=5.1; P=.02).
Differences between patients and controls in major coagulation measures
Thrombophilic Factors
Measurements of levels of antigenic proteins C and S, anticardiolipin antibodies IgG and IgM, and homocysteine did not differ between patients and controls (P>.10; data not shown). The major component of the lupus anticoagulant, however, dilute Russell viper venom clotting time, differed between patients and controls. Of 14 patients with dilute Russell viper venom clotting times, 6 (43%) had prolonged (abnormal) times (>38.8 seconds) compared with 1 (3%) of 30 controls (P=.002, Fisher exact test) (Figure 4).
Hypofibrinolytic Factors
Patients with vein occlusion were more likely to have high levels of hypofibrinolytic Lp(a) (>35 mg/dL)22,23 (8/16 [50%]) than controls (5/40 [13%]) (χ2=9; P=.003) (Figure 4). Patients were more likely to have high levels of the major determinant of fibrinolysis, PAI-Fx. Levels of PAI-Fx were high (>22 U/L)22 in 6 (38%) of 16 patients compared with 1 (2%) of 40 controls (P=.001, Fisher exact test) (Figure 4).
Hypofibrinolytic disorders often occurred in clusters that were present with and without inclusion of the 3 patients who also had osteonecrosis. Of the 6 patients with high PAI-Fx levels, 4 (67%) also had high Lp(a) levels, and 2 (33%) also had prolonged dilute Russell viper venom clotting times. Of the 8 patients with high Lp(a) levels, 4 (50%) had high PAI-Fx values.
Of the 6 patients homozygous for the 4G/4G polymorphism in the promoter sequence of the PAI1 gene, 5 had measures of Lp(a). Of these 5 patients, 4 (80%) also had high Lp(a) levels; of 9 patients heterozygous for the 4G allele, 4 (44%) also had high Lp(a) levels, whereas the 2 patients with the 5G/5G genotype had normal Lp(a) levels.
When classifying the patients and controls by the PAI1 gene polymorphism, patients homozygous for the 4G allele had much higher median Lp(a) levels than controls (62 vs 5.3 mg/dL; P=.05) (Figure 5). Patients heterozygous for the 4G/5G trait also had higher median Lp(a) levels than controls heterozygous for the 4G/5G allele (16 vs 3.3 mg/dL; P=.06) (Figure 5). Patients with the 5G/5G genotype had higher median Lp(a) levels than controls with the same genotype (17.5 vs 6.7 mg/dL), but this difference was not significant (P=.62) (Figure 5).
When the relationships between the PAI1 gene and Lp(a) levels were compared in patients vs controls, the slopes of the 2 regression lines differed (P=.04) (Figure 6). In patients, but not in controls, increased 4G alleles for the PAI1 gene were associated with increased Lp(a) levels (Figure 6).
Lipids and Lipoprotein-Cholesterol Levels
Of 16 patients with vein occlusion having lipid profiles in the fasting state, 10 (63%) had high28 total cholesterol levels (>5.17 mmol/L [>200 mg/dL]), 2 (13%) had high28 triglyceride levels (>2.82 mmol/L [>250 mg/dL]), 1 (6%) had low high-density-lipoprotein–cholesterol levels (<0.90 mmol/L [<35 mg/dL]), and 8 (50%) had high low-density-lipoprotein–cholesterol levels (≥3.36 mmol/L [≥130 mg/dL]).28 Of the patients with high low-density-lipoprotein–cholesterol levels, 5 (63%) also had high Lp(a) levels (>35 mg/dL). Low-density-lipoprotein–cholesterol levels correlated with Lp(a) levels (r=0.48; P=.06).
In our study, patients with retinal vein occlusion were more likely than healthy normal controls to have the heritable thrombophilic19 factor V Leiden G1691A mutation and the thrombophilic26 antiphospholipid lupus anticoagulant. They were also more likely to have homozygosity or heterozygosity for the heritable 4GPAI1 allele and, accompanying this, were much more likely to have high levels of the hypofibrinolytic PAI-Fx.22,23,26 Patients with retinal vein occlusion who were homozygous for the 4GPAI1 allele were more likely than controls to have high Lp(a) levels, a double dose29 of hypofibrinolytic factors that, we postulate, contribute to retinal vein occlusion. Patients were also more likely than controls to have high levels of the heritable hypofibrinolytic Lp(a).23,30-34 The lipoprotein Lp(a) is postulated to be hypofibrinolytic by virtue of its sequence homology with plasminogen, thus competing with plasminogen for fibrin binding and hindering fibrin digestion.30 It also can interact with cellular plasminogen receptors with a plasminogenlike affinity.31-33 Most relevant to the present study, where 4G/4G homozygosity of the PAI1 gene was associated with high Lp(a) levels, Lp(a) enhanced PAI1 transcription and expression on cultured endothelial cells.34 As noted by Rosendaal,29 venous thrombosis is a multicausal disease; usually more than 1 coagulation abnormality needs to be present before thrombosis occurs. "The younger an individual, the more risk factors are required to precipitate thrombosis."29(p5) This is relevant to retinal vein occlusion because systematic workup for the cause of the disease is usually limited to younger patients. The common concurrence of other venous thrombosis and central retinal vein occlusion should promote studies of the pathogenesis of this type of conjoint thrombosis.
Whether synergism between the patients' high low-density-lipoprotein–cholesterol and high Lp(a) levels, known to cause arterial occlusive disease,28 could have played any role in their ophthalmic disease is unknown. However, 2 earlier studies1,2 implicated hyperlipidemia as a risk factor for retinal vein occlusion, consistent with the findings of the present study.
In agreement with previous studies of retinal vein occlusion, we found a high prevalence of the hypofibrinolytic Lp(a),4 the thrombophilic lupus antibody,5 and thrombophilic resistance to activated protein C.8-10 A new finding in the present study was that patients with vein occlusion were more likely to be heterozygous or homozygous for the 4GPAI1 allele than controls (P= .03) and were more likely than controls to have high levels of hypofibrinolytic PAI-Fx (P=.001). These findings have not, to our knowledge, been reported previously in retinal vein occlusion.
A second new finding in the present study was that patients with vein occlusion with the 4G/4G and 4G/5G genotypes were more likely to have high Lp(a) levels than normal controls with the same genotypes. Furthermore, the slopes of the regression lines for the relationship between the PAI1 gene and Lp(a) differed between patients and controls, suggesting that in patients with vein occlusion, but not in healthy controls, increased mutant alleles for the PAI1 gene were associated with Lp(a).34
In aggregate, patients with retinal vein occlusion were likely to have heritable thrombophilia, hypofibrinolysis, or both. As might be expected from this mixed increased prevalence of thrombophilia and hypofibrinolysis, 7 (42%) of the patients had thrombi in other vascular beds; 3 (18%) had sustained osteonecrosis (thought to be caused by venous thrombosis in bone),14,19,22,25,26 and 4 (24%) had a history of deep venous thrombosis, known to be caused by thrombophilia and hypofibrinolysis.14,19,22,23
Before the discovery of resistance to activated protein C, mediated by the mutant factor V Leiden gene, although diabetes mellitus, hyperlipidemia, hypofibrinolysis, and thrombophilia had been identified as causes of retinal vein occlusion,1-7,13 other retrospective studies35-38 of the causes of central retinal vein occlusion in young patients were inconclusive. The newly discovered, most common heritable thrombophilia, however, resistance to activated protein C, appears to be 4 to 5 times more common than other known inherited thrombophilias or hypofibrinolyses in young patients with central retinal vein occlusion.8-10 The PCR assay for polymorphism of the PAI1 gene has only recently become available17,25 and, to our knowledge, has not been used previously in the evaluation of patients with retinal vein occlusion. Major recent advances have been made in coagulation measurements that allow the diagnosis of the most common coagulation disorders, with the recognition of resistance to activated protein C8-10 and with cDNA PCR measurements of the mutant factor V Leiden gene12,16,19 and the 4G/4G polymorphism of the PAI1 gene.17,25 By enabling the diagnosis of common, heritable coagulation disorders, these cDNA PCR methodological advances8-10,16-19,25 provide a higher level of certainty, unaffected by age,14,16,25 that thrombophilia and hypofibrinolysis are causes of retinal vein occlusion. Furthermore, the diagnosis of heritable thrombophilia, hypofibrinolysis, or both, as putative causes of retinal vein occlusion provides important prospective diagnostic insight into the high likelihood of other thrombotic disorders9 in patients with central retinal vein occlusion. Thus, 18% of our patients had a history of osteonecrosis,14,19,22 and 24% had other thrombotic events exclusive of osteonecrosis, primarily deep venous thrombosis. 
The recognition of resistance to activated protein C in patients with retinal vein occlusion also has important ramifications for their first-degree relatives.39 In 177 subjects from 34 kindreds in which 1 family member had resistance to activated protein C, Svensson and Dahlback39 reported that 27% had a history of thrombosis. At age 45 years, the likelihood that a subject with resistance to activated protein C would be free of thrombosis was only 59% compared with 97% in subjects without resistance to activated protein C.
The recognition of frequent hypofibrinolysis and thrombophilia as possible causes of retinal vein occlusion3-10,13 calls for further prospective analyses and study. Patients with retinal vein occlusion, particularly those with a family history of thrombosis,9 should have tests designed to diagnose heritable thrombophilias and hypofibrinolysis, including assessment of the factor V Leiden G1691A mutation. These analyses should include studies of the 4G/5G polymorphism of the PAI1 gene promoter, the prothrombin gene, the MTHFR C677T mutation, PAI-Fx, Lp(a), the lupus anticoagulant antibody, and anticardiolipin antibodies. Patients with these predominantly heritable abnormalities are at increased risk of both venous and arterial thrombi.
Accepted for publication September 15, 1998.
Corresponding author: Charles J. Glueck, MD, Cholesterol Center, Jewish Hospital, 3200 Burnet Ave, Cincinnati, OH 45229 (e-mail: glueckch@healthall.com). | {
"redpajama_set_name": "RedPajamaCommonCrawl"
} | 3,320 |
{"url":"http:\/\/www.leedsmathstuition.co.uk\/tag\/integration\/","text":"## Integration over Vector Fields\n\nI mentioned vector fields in a previous post in the context of differential equations and over the last week or so I have been looking at them in a bit more detail. Vector fields sound quite complicated but they can be very simple. A vector field can be presented visually as a vector attached to each point in space. The space may be the $x$-$y$ plane, three-dimensional space, it could be a region of the $x$-$y$ plane or even a manifold. A typical vector field in 2-dimensions might look as follows.\n\nAn image of a simple vector field drawn in SAGE Math\n\nThe arrows represent the vector that is attached to that particular point \u2013 the direction of the arrow gives the direction of the vector and the size of the arrow gives an idea of its relative magnitude. Graphical representations of vector fields can be a little misleading as it is tempting to think that only certain points have vectors attached to them \u2013 this is not the case; every point has a vector attached to it but if we were to try to show all of them the diagram would be too cluttered.\n\nVector fields are useful to model flows of liquids or gases; for example in weather prediction a vector field that changes over time could be used to model wind patterns. The vector attached to each point would tell you the direction and strength of the wind at that point and the vector field would evolve from one moment to the next. If you define a surface in a vector field then you can use integration to measure the flux across the surface \u2013 the physical interpretation of flux is as a measure of the amount of substance flowing across a surface. 
I came across this question in the book Advanced Engineering Mathematics Fifth Edition by Stroud and Booth and decided to give it a go.\n\nEvaluate $\\int_{S}\\mathbf{F}.\\mathrm{d}\\mathbf{S}$ over the surface $S$ defined by $x^{2}+y^{2}+z^{2}=4$ for $z\\geq0$ and bounded by $x=0$, $y=0$, $z=0$ and $\\mathbf{F}=x\\mathbf{i}+2z\\mathbf{j}+y\\mathbf{k}$.\n\nThe pictures below give an idea of what the vector field and surface both look like from a few different angles. This problem is asking to integrate this vector field over the surface to find the flux across the surface.\n\nFor this problem, since the surface that we are integrating over is part of a sphere, it is convenient to change to spherical polar co-ordinates given by$$x=r\\mathrm{sin}\\theta\\mathrm{cos}\\phi$$ $$y=r\\mathrm{sin}\\theta\\mathrm{sin}\\phi$$ $$z=r\\mathrm{cos}\\theta$$.The integration itself is quite straightforward although some of the integrands look a bit of a pain at first glance, but some techniques from A-Level Further Maths courses should clear things up. I used some reduction formulae to deal with some of the integrals that I ended up with which really simplified things (which is always good). You can download and view my full solution here \u2013 Integrating Vector Fields.\n\n## Calculus of Residues\n\nAfter blowing off the cobwebs after a couple of years I have been looking at some of the notes that I made some years back on some courses that I took at Warwick on Complex Analysis and Vector Analysis.\n\nIntegration has always been one of my favourite areas of mathematics. At A-Level I learned lots of different techniques for calculating some interesting integrals \u2013 but A-Level only just skims the surface when it comes to integration and it can be difficult for A-Level students (through no fault of their own) to appreciate the significance of integration. 
Integration by Parts, Integration by substitution and reduction formula are all great but there are still many integrals which require more advanced techniques to calculate. Contour integrals and the calculus of residues can often come to the rescue.\n\nContour integrals are a way of passing difficult integrals over a real-interval such as $$\\int_{-\\infty}^{\\infty}\\!{\\dfrac{x^{2}}{1+x^{4}}\\mathrm{d}x}$$ into the complex plane and taking advantage of the Cauchy integral theorem and the calculus of residues. I remember how it felt when I first learned the formula for integration by parts because it meant that I was able to find integrals that were previously impossible for me to calculate \u2013 even though I have done contour integrals before it has been very exciting for me to re-discover them. Looking through one of my books I came across this problem \u2013 show that for $a>1$\n\n$$\\int_{0}^{2\\pi}\\!\\frac{\\mathrm{sin}2\\theta}{(a+\\mathrm{cos}\\theta)(a-\\mathrm{sin}\\theta)}\\;\\mathrm{d}\\theta = -4\\pi\\left(1-\\frac{2a\\sqrt{a^{2}-1}}{2a^{2}-1}\\right)$$\n\nAfter spending a good deal of one of my afternoons wrestling with the algebra I managed to arrive at a solution which you can download here as a pdf. Here is a graph of the integrand in the case when $a=2$\n\nAs you can see from the diagram, the area bounded by the curve and the $x$-axis certainly exists but trying to calculate this integral using A-Level techniques is going to be incredibly difficult if not impossible (if anyone can do it then I would love to see the solution). 
Unfortunately there are and always will be integrals that cannot be calculated analytically \u2013 this is just the way it is and there is no getting around it but contour integrals certainly allows you to calculate a huge range of integrals that previously would have been seemingly impossible.","date":"2019-05-27 06:05:13","metadata":"{\"extraction_info\": {\"found_math\": true, \"script_math_tex\": 0, \"script_math_asciimath\": 0, \"math_annotations\": 0, \"math_alttext\": 0, \"mathml\": 0, \"mathjax_tag\": 0, \"mathjax_inline_tex\": 1, \"mathjax_display_tex\": 1, \"mathjax_asciimath\": 0, \"img_math\": 0, \"codecogs_latex\": 0, \"wp_latex\": 0, \"mimetex.cgi\": 0, \"\/images\/math\/codecogs\": 0, \"mathtex.cgi\": 0, \"katex\": 0, \"math-container\": 0, \"wp-katex-eq\": 0, \"align\": 0, \"equation\": 0, \"x-ck12\": 0, \"texerror\": 0, \"math_score\": 0.8844649791717529, \"perplexity\": 149.93345647484503}, \"config\": {\"markdown_headings\": true, \"markdown_code\": true, \"boilerplate_config\": {\"ratio_threshold\": 0.18, \"absolute_threshold\": 10, \"end_threshold\": 15, \"enable\": true}, \"remove_buttons\": true, \"remove_image_figures\": true, \"remove_link_clusters\": true, \"table_config\": {\"min_rows\": 2, \"min_cols\": 3, \"format\": \"plain\"}, \"remove_chinese\": true, \"remove_edit_buttons\": true, \"extract_latex\": true}, \"warc_path\": \"s3:\/\/commoncrawl\/crawl-data\/CC-MAIN-2019-22\/segments\/1558232261326.78\/warc\/CC-MAIN-20190527045622-20190527071622-00257.warc.gz\"}"} | null | null |
Q: Disable and enable all spring batch job through a internal rest api call by passing a boolean flag(enable or disable) I have created a job status table where i have a boolean flag based on that i am returning the cron expreesion(active cron or hypen(-) cron) and my scheduler will work.
Boolean flag will be updated if you want to enable or disable all spring batch jobs.
@Scheduled(cron = "#{@getCronValueDevice}")
@Scheduled(cron = "#{@getCronValueStorage}")
@Bean
public String getCronValueDevice() {
JobStatus jobStatus = jobStatusRepository.findByJobName(BatchJobConstants.DEVICE_JOB_NAME);
if (jobStatus != null && jobStatus.getIsActive()) {
return jobStatus.getActiveCronExpression();
} else {
return "-";
}
}
Is there anything in builtin spring scheduler or anything in timer framework which will help this requirement to enable or disable all spring batch jobs?
| {
"redpajama_set_name": "RedPajamaStackExchange"
} | 7,090 |
package org.onosproject.net.behaviour;
import com.google.common.base.MoreObjects;
import java.util.Objects;
/**
* A representation of system stats of device.
*/
/**
 * Immutable snapshot of a device's system-level statistics, pairing the
 * device's memory usage with its CPU usage.
 */
public class DeviceSystemStats {

    private final DeviceMemoryStats memory;
    private final DeviceCpuStats cpu;

    /**
     * Builds a system-stats snapshot from its two components.
     *
     * @param memoryStats memory statistics of the device
     * @param cpuStats    cpu statistics of the device
     */
    public DeviceSystemStats(DeviceMemoryStats memoryStats, DeviceCpuStats cpuStats) {
        memory = memoryStats;
        cpu = cpuStats;
    }

    /**
     * Returns the memory usage statistics.
     *
     * @return device memory usage stats in KB
     */
    public DeviceMemoryStats getMemory() {
        return memory;
    }

    /**
     * Returns the cpu usage statistics.
     *
     * @return device cpu usage stats
     */
    public DeviceCpuStats getCpu() {
        return cpu;
    }

    @Override
    public String toString() {
        // Same Guava helper and field order as before, so the rendered
        // string is unchanged for any existing log consumers.
        return MoreObjects.toStringHelper(getClass())
                .add("memory", memory)
                .add("cpu", cpu)
                .toString();
    }

    @Override
    public boolean equals(Object o) {
        if (o == this) {
            return true;
        }
        // Exact-class comparison (not instanceof) preserves the original
        // semantics: a subclass instance is never equal to a base instance.
        if (o == null || o.getClass() != getClass()) {
            return false;
        }
        DeviceSystemStats other = (DeviceSystemStats) o;
        return Objects.equals(memory, other.memory)
                && Objects.equals(cpu, other.cpu);
    }

    @Override
    public int hashCode() {
        return Objects.hash(memory, cpu);
    }
}
| {
"redpajama_set_name": "RedPajamaGithub"
} | 360 |
Q: AMT set in paypal API I want to change the AMT that is passed to Paypal. My actual issue is that the AMT passed to Paypal API includes my flat rate shipping of 10 which needs to be removed as I have set free shipping for > 25 & 10 flatrate for < 25.
I want to check from where AMT is set in Paypal with Shipping amount so I can set condition of > 25.
Currently, cart page & all are showing perfect amount except the paypal API
A: In magento PayPal settings have you set the following option to yes.
Transfer shipping options
This option enables the user to facilitate with various shipping options for the product delivery
If that doesn't work are you able to add more detail on how you have configured your free shipping.
I was doing some additional reading and something doesn't seem right with payment extensions recognising discounts. Is the issue you have described something like this?
https://github.com/magento/magento2/issues/5937
Which leads me to this
https://magento.stackexchange.com/a/128606/70343
| {
"redpajama_set_name": "RedPajamaStackExchange"
} | 9,910 |
<?php
namespace Bs\IDeal\Exception;
use Bs\IDeal\Response\Response;
/**
 * Thrown when an iDEAL response reports that the request did not succeed.
 *
 * The offending response is retained so callers can inspect the error
 * details carried by the response object.
 */
class NoSuccessException extends IDealException
{
    /**
     * @var Response the unsuccessful response that triggered this exception
     */
    protected $response;

    /**
     * @param Response $response the unsuccessful iDEAL response
     */
    public function __construct(Response $response)
    {
        // Bug fix: the original constructor never invoked the parent
        // Exception constructor, leaving getMessage() empty and making
        // logs / generic exception handlers uninformative.
        parent::__construct('iDEAL request was not successful');
        $this->response = $response;
    }

    /**
     * Returns the unsuccessful response for inspection.
     *
     * @return Response
     */
    public function getResponse()
    {
        return $this->response;
    }
}
| {
"redpajama_set_name": "RedPajamaGithub"
} | 7,912 |
package SFE.Compiler.Operators;
import SFE.Compiler.AnyType;
import SFE.Compiler.Expression;
import SFE.Compiler.IntConstant;
import SFE.Compiler.Type;
import SFE.Compiler.UnaryOpExpression;
/**
 * Bitwise NOT operator: complements its single operand bit by bit by
 * applying {@link NotOperator} to each field element of the argument.
 */
public class BitwiseNotOperator extends BitwiseOperator{
  public BitwiseNotOperator(){
  }
  /** Name used when this operator is printed. */
  public String toString(){
    return "bitwisenot";
  }
  /** Unary operator: takes exactly one argument. */
  public int arity() {
    return 1;
  }
  /** Parsing precedence is never queried for this operator. */
  public int priority() {
    throw new RuntimeException("Not implemented");
  }
  public Type getType(Object obj) {
    //No information until we've resolved the pointer during inlining.
    return new AnyType();
  }
  /*
   * Left shifts are positive shifts, right shifts are negative.
   * NOTE(review): this comment appears inherited from a shift operator;
   * the method below computes the complement of bit i, not a shift.
   */
  public Expression getOutputBit(int i, Expression ... args) {
    // Output bit i is the logical NOT of input bit i (field element i).
    return new UnaryOpExpression(new NotOperator(), args[0].fieldEltAt(i));
  }
  public IntConstant resolve(Expression ... args) {
    //Without the power of creating additional variables, we must have constant arguments.
    /* The following code is not safe, because it doesn't handle signed arguments correctly.
    Expression left = args[0];
    Expression right = args[1];
    IntConstant lc = IntConstant.toIntConstant(left);
    IntConstant rc = IntConstant.toIntConstant(right);
    if (lc != null && rc != null){
      if (direction == LEFT_SHIFT){
        return new IntConstant(lc.value() << rc.value());
      } else if (direction == RIGHT_SHIFT){
        return new IntConstant(lc.value() >>> rc.value());
      }
    }
    */
    // Constant folding is deliberately disabled (see the unsafe draft
    // above, also copied from a shift operator); returning null defers
    // evaluation to the bit-level expansion via getOutputBit.
    return null;
  }
}
| {
"redpajama_set_name": "RedPajamaGithub"
} | 2,423 |
Why are my hands and feet always cold?
Everyone gets cold hands and cold feet once in a while, but if your hands and feet are constantly cold especially with a change in skin colour, then there may be a medical condition associated with it. You should contact your family doctor and get it investigated and treated.
11. This condition can also run in some families.
Lots of people suffer from cold sweats if they suffer from stress/anxiety and panic attacks or if they get a fright. If you suffer from high cholesterol you may get hardening of the blood vessel wall, the same thing can happen to smokers and the elderly. Some medications such as beta blockers can also affect the temperature of your hands and feet. If your doctors have ruled out a serious medical condition, complementary therapies such as Medical Needle Free Acupuncture, Laser Acupuncture, Nutritional therapy, Lifestyle medicine, Gut health & Microbiome Restoration, Mindfulness, CBT for Stress and Anxiety can help alleviate your symptoms.
In our clinic we use a multi pronged approach in treating this and many other conditions with evidence based therapies such as Low Dose Medicine (L.D.M,), gut health and microbiome restoration, nutritional and nutraceutical therapies, lifestyle medicine, natural hormonal balancing, glandular support, soft tissue and joint support, complex homeopathy, herbals, needle free mesotherapy and needle free acupuncture to achieve excellent results.
To discuss conditions treated and therapies offered at our clinic, contact Dr. Bhatti at (087) 1915051 or email health@drbhatti.ie. During your first office visit, Dr. Bhatti may ask you at length about your health-condition, lifestyle and behaviour. Please consult your GP before starting new treatment.
Note: We have special packages tailor-made to suit you and your finances. Please ask for our special offers.
Always consult your G.P. for medical advice and before starting any new regime or self treatment. The contents of this article are for informational purposes only and are not intended to prevent, diagnose or cure any medical conditions. The publisher of this article or any information provided on this site including text, graphics, images, cannot be held responsible for any errors or any consequences arising from the use of the information contained in this article or this site or its pages. In case of an emergency always contact your Doctor and the emergency services immediately. The information provided on these pages, site and our clinic does not constitute medical advice.
We acknowledge the contribution of all authors and researchers.
Posts from Dr Bhatti, M.B.B.S.D.C.H.R.C.P.S.I,. on health related topics. | {
"redpajama_set_name": "RedPajamaC4"
} | 6,072 |
\section{Introduction}
One ultimate goal for the community of financial mathematics is to characterize the sophisticated investment environment using tractable probabilistic or stochastic models. For example, the market trend is usually described by some random factors such as Markov chains.
In particular, the so-called regime-switching model is widely accepted and usually proposed to capture the influence on the behavior of the market caused by transitions in the macroeconomic system or the macroscopic readjustment and regulation. For instance, the empirical results by Ang and Bekaert~\cite{AngBeK02b} illustrate the existence of two regimes characterized by different levels of volatility. It is well known that default events modulated by the regime-switching process have an impact on the distress state of the surviving securities in the portfolio. More specifically, by an empirical study of the corporate bond market over 150 years, Giesecke et al.~\cite{GieSchStr11} suggest the existence of three regimes corresponding to high, middle, and low default risk. With finitely many economical regimes, Capponi and Figueroa-L\'opez~\cite{CapLop14a} investigate the classical utility maximization problem from terminal wealth based on a defaultable security, and Capponi, Figueroa-L\'opez and Nisen~\cite{CapLopNis14b} obtain a Poisson series representation for the arbitrage-free price process of vulnerable contingent claims.
On the other hand, the importance of considering the defaultable underlying assets has attracted a lot of attention, especially after the systemic failure caused by some global financial crisis. Some recent developments extend the early model of single defaultable security to default contagion effects on portfolio allocations. The research of these mutual contagion influence opens the door to provide possible answers to some empirical puzzles like the high mark-to-market variations in prices of credit sensitive assets. For example, Kraft and Steffensen~\cite{Kraf} discuss the contagion effects on defaultable bonds. Callegaro, Jeanblanc and Runggaldier~\cite{Call12} consider an optimal investment problem with multiple defaultable assets which depend on a partially observed exogenous factor process. Jiao, Kharroubi and Pham~\cite{Jiao13} study the model in which multiple jumps and default events are allowed. Recently, Bo and Capponi~\cite{Bo16} examine the optimal portfolio problem of a power utility investor who allocates the wealth between credit default swaps and a money market for which the contagion risk is modeled via interacting default intensities.
Apart from the celebrated Merton's model on utility maximization, there has been an increasing interest in the risk-sensitive stochastic control criterion in the portfolio management during recent years, see, e.g., Davis and Lleo~\cite{DavisLIeo04} for an overview of the theory and practice of risk-sensitive asset management. In a typical risk sensitive portfolio optimization problem, the investor maximizes the long run growth rate of the portfolio, adjusted by a measure of volatility. In particular, the classical utility maximization from terminal wealth can be transformed to the risk-sensitive control criterion by introducing a change of measure and a so-called risk-sensitive parameter which characterizes on the degree of risk tolerance of investors, see, e.g., Bielecki and Pliska~\cite{BiePliska99} and Nagai and Peng~\cite{PengNagai}. We will only name a small portion of the vast literature, for instance, the risk sensitive criterion can be linked to the dynamic version of Markowitz's mean-variance optimization by Bielecki and Pliska~\cite{BiePliska99}, to differential games by Fleming~\cite{Fleming06} and more recently by Bayraktar and Yao~\cite{BayraktarYao13} for the connection to zero-sum stochastic differential games using BSDEs and the weak dynamic programming principle. Hansen, et al.~\cite{HansenNoa} further connect the risk-sensitive objective to a robust criteria in which perturbations are characterized by the relative entropy. Bayraktar and Cohen~\cite{BayraktarCohen16} later examine a risk sensitive control version of the lifetime ruin probability problem.
Despite many existing work on the risk-sensitive control, optimal investment with credit risk or regime switching respectively, it remains an open problem of the risk-sensitive portfolio allocation with both scenarios of default risk and regime-switching. Our paper aims to fill this gap and considers an interesting case when the default contagion effect can depend on regime states, possibly infinitely many. For some recent related work, it is worth noting that in the default-free market with finite regime states, Andruszkiewicz, Davis and Lleo~\cite{AndDavLIeo} study the existence and uniqueness of the solution to the risk-sensitive asset maximization problem, and provide an ODE for the optimal value function, which may be efficiently solved numerically. Meanwhile, Das, Goswami and Rana~\cite{DasGosRan} consider a risk-sensitive portfolio optimization problem with multiple stocks modeled as a multi-dimensional jump diffusion whose coefficients are modulated by an age-dependent semi-Markov process. They also establish the existence and uniqueness of classical solutions to the corresponding HJB equations. In the context of theoretical stochastic control, we also note that Kumar and Pal~\cite{KumarPal} derive the dynamical programming principle for a class of risk-sensitive control problem of pure jump process with near monotone cost. To model hybrid diffusions, Nguyen and Yin~\cite{NguyenYin} propose a switching diffusion system with countably infinite states. The existence and uniqueness of the solution to the hybrid diffusion with past-dependent switching are obtained. Back to the practical implementation in financial markets with stochastic factors, the regime-switching model or continuous time Markov chain is frequently used to approximate the dynamics of time-dependent market parameter or factors. 
The continuous state space of the parameter or factor is usually discretized which lead to infinite states of the approximating Markov chain (see, e.g., Ang and Timmermann~\cite{AngTim}). This mainly motivates us to consider the countable regime states in this work and it is shown that this technical difficulties can eventually be reconciled using an appropriate approximation by counterparts with finite states. Therefore, our analytical conclusions for regime-switching can potentially provide theoretical foundations for numerical treatment of risk sensitive portfolio optimization with defaults and stochastic factor processes.
Our contributions are twofold. From the modeling perspective, it is considered that the correlated stocks are subject to credit events, and in particular, the dynamics of defaultable stocks, namely the drift, the volatility and the default intensity coefficients, all depend on the macroeconomic regimes. As defaults can occur sequentially, the default contagion is modeled in the sense that default intensities of surviving names are affected simultaneously by default events of other stocks as well as on current regimes states. This set up in our model enables us to analyze the joint complexity rooted in the investor's risk sensitivity, the regime changes and the default contagion among stocks. From the mathematical perspective, the resulting dynamic programming equation (DPE) can be viewed as a recursive infinite-dimensional nonlinear dynamical system in terms of default states. The depth of the recursion equals the number of stocks in the portfolio. Our recipe to study this new type of recursive dynamical system can be summarized in the following scheme: First, it is proposed to truncate the countably infinite state space of the regime switching process and consider the recursive DPE only with a finite state space. Second, for the finite state case, the existence and uniqueness of the solutions of the recursive DPE are analyzed based upon a backward recursion, namely from the state in which all stocks are defaulted toward the state in which all stocks are alive. It is worth noting that no bounded constraint is reinforced on the trading strategies of securities or control variables as in Andruszkiewicz, Davis and Lleo~\cite{AndDavLIeo} and Kumar and Pal~\cite{KumarPal}. As a price to pay, the nonlinearities of the HJB dynamical systems are not globally Lipschitz continuous. To overcome this new challenge, we develop a truncation technique by proving a comparison theorem based on the theory of monotone dynamical systems documented in Smith~\cite{smith08}. 
Then, we establish a unique classical solution of the recursive DPE by showing that the solution of truncated system has a uniform (strictly positive) lower bound independent of the truncation level. This also enables us to characterize the optimal admissible feedback trading strategy in the verification theorem. Next, when the states are relaxed to be countably infinite, the results in the finite state case can be applied to construct a sequence of approximating risk sensitive control problems to the original problem and obtain elegant uniform estimates to conclude that the sequence of associated smooth value functions will successfully converge to the classical solution of the original recursive DPE. We also contribute to the existing literature by exploring the possible construction and approximation of the optimal feedback strategy in some rigorous verification theorems.
The rest of the paper is organized as follows. Section \ref{sec:model} describes the credit market model with default contagion and regime switching. Section \ref{risksens} formulates the risk-sensitive stochastic control problem and introduces the corresponding DPE. We analyze the existence and uniqueness of the classical global solution of recursive infinite-dimensional DPEs and develop rigorous verification theorems in Section \ref{sec:mainres}.
For the completeness, some auxiliary results and proofs are delegated to the Appendix~\ref{app:proof1}.
\section{The Model} \label{sec:model}
We consider a model of the financial market consisting of $N\geq1$ defaultable stocks and a risk-free money market account on a given complete filtered probability space $(\Omega,{\mathcal G},{\mathbb{G}},\Px)$. Let $Y=(Y(t))_{t\in[0,T]}$ be a regime-switching process which will be introduced precisely later. The global filtration $\mathbb{G}=\mathbb{F} \vee{\mathbb{H}}$ augmented by all $\Px$-null sets satisfies the usual conditions. The filtration $\mathbb{F} =({\mathcal{F}}_t)_{t\in[0,T]}$ is jointly generated by the regime-switching process $Y$ and an independent $d\geq1$-dimensional Brownian motions denoted by
$W=(W_j(t);\ j=1,\ldots,d)_{t\in[0,T]}^{\top}$. We use $\top$ to denote the transpose operator. The time horizon of the investment is given by $T>0$.
The price process of the money market account $B(t)$ satisfies $dB(t)= r(Y(t))B(t)dt$, where $r(Y(t))\geq0$ is interest rate modulated by the regime-switching process $Y$. The filtration $\mathbb{H}$ is generated by a $N$-dimensional default indicator process $Z=(Z_j(t);\ j=1,\ldots,N)_{t\in[0,T]}$ which
takes values in ${\cal S}:=\{0,1\}^N$. The default indicator process $Z$ links to the default times of the $N$ defaultable stocks via $\tau_j := \inf\{t\geq0;\ Z_j(t)=1\}$
for $j=1,\ldots,N$. The filtration $\mathbb{H}=({\mathcal{H}}_t)_{t\in[0,T]}$ is defined by ${\cal H}_t=\bigvee_{j=1}^N{\sigma(Z_j(s);\ s\leq t)}$.
Hence $\mathbb{H}$ contains all information about default events until the terminal time $T$. The market model is specified in detail in the following subsections.
\subsection{Regime-Switching Process}\label{sub:RSP} The regime-switching process is described by a continuous time (conservative) Markov chain $Y=(Y(t))_{t\in[0,T]}$ with countable state space
$\mathbb{Z}_+:=\mathbb{N}\setminus\{0\}=\{1,2,\ldots\}$. The generator of the Markov chain $Y$ is given by the $Q$-matrix $Q=(q_{ij})_{ij\in\mathbb{Z}_+}$. This yields that $q_{ii}\leq0$ for $i\in\mathbb{Z}_+$, $q_{ij}\geq0$ for $i\neq j$, and $\sum_{j=1}^{\infty}q_{ij}=0$ for $i\in\mathbb{Z}_+$ (i.e., $\sum_{j\neq i}q_{ij}=-q_{ii}$ for $i\in\mathbb{Z}_+$).
\subsection{Credit Risk Model} The joint process $(Y,Z)$ of the regime-switching process and the default indicator process is a Markov process on the state space $\mathbb{Z}_+\times\mathcal{S}$.
Moreover, at time $t$, the default indicator process transits from a state $Z(t):=(Z_1(t),\ldots,Z_{j-1}(t),Z_j(t),Z_{j+1}(t),\ldots,Z_N(t))$
in which the obligor $j$ is alive ($Z_j(t)=0$) to the neighbor state ${Z}^j(t):=(Z_1(t),\ldots,Z_{j-1}(t),1-Z_j(t),Z_{j+1}(t),\ldots,Z_N(t))$ in which the obligor $j$ has defaulted at a strictly positive stochastic rate $\lambda_{j}(Y(t),Z(t))$. We assume that $Y$ and $Z_1,\ldots,Z_N$ will not jump simultaneously. Therefore, the default intensity of the $j$-th stock may change either if any other stock in the portfolio defaults (contagion effect), or if there are regime-switchings. Our default model thus belongs to the rich class of interacting intensity models, introduced by Frey and Backhaus~\cite{FreyBackhaus04}. We set $\lambda(i,z)=(\lambda_j(i,z);\ j=1,\ldots,N)^{\top}$ for $(i,z)\in\mathbb{Z}_+\times{\cal S}$.
\subsection{Price Processes} The price process of the $N$ defaultable stocks is denoted by the vector process $\tilde{P}=(\tilde{P}^j(t);\ j=1,\ldots,N)_{t\in[0,T]}^{\top}$. Here the price process of the $j$-th stock is given by, for $t\in[0,T]$,
\begin{equation}\label{eq:pricedef}
\tilde{P}_j(t)=(1-Z_j(t))P_j(t), \ \ \ j = 1,\ldots,N,
\end{equation}
where $P=(P_j(t);\ j=1,\ldots,N)_{t\in[0,T]}^{\top}$ represents the pre-default price of the $N$ stocks. In particular, the price of the $j$-th stock is given by the pre-default price $P_j(t)$ up to ${\tau_j}-$, and jumps to $0$ at default time ${\tau_j}$ and remains at $0$ afterwards. The pre-default price process $P$ of the $N$ defaultable stocks is assumed to satisfy
\begin{align}\label{eq:P}
dP(t) = {\rm diag}(P(t)) [(\mu(Y(t))+\lambda(Y(t),Z(t))) dt + \sigma(Y(t))dW(t)],
\end{align}
where, ${\rm diag}(P(t))$ is the diagonal $N\times N$-dimensional matrix with diagonal elements $P_i(t)$. For each $i\in\mathbb{Z}_+$, the vector $\mu(i)$ is $\R^N$-valued column vector and $\sigma(i)$ is $\R^{N\times d}$-valued matrices such that $\sigma(i)\sigma(i)^\top$ is positive definite.
By Eq.s~\eqref{eq:pricedef}, \eqref{eq:P} and integration by parts, the price dynamics of defaultable stocks satisfies that
\begin{align}\label{eq:tildeP}
d\tilde{P}(t) = {\rm diag}(\tilde{P}(t)) [\mu(Y(t))dt + \sigma(Y(t))dW(t)-dM(t)].
\end{align}
Here, $M=(M_j(t);\ j=1,\ldots,N)_{t\in[0,T]}^{\top}$ is a pure jump $\mathbb{G}=(\G_t)_{t\in[0,T]}$-martingale given by
\begin{align}\label{eq:taui}
M_j(t)&:= Z_j(t) - \int_0^{t\wedge\tau_j}\lambda_j(Y(s),Z(s))ds,\ \ \ \ \ \ t\in[0,T].
\end{align}
By the construction of the default indicator process $Z$ in Bo and Capponi~\cite{BoCapponi18}, it can be seen that $W$ is also a $\mathbb{G}$-Brownian motion using the
condition (M.2a) in Section 6.1.1 of Chapter 6 in Bielecki and Rutkowski~\cite{BieRut04}.
\section{Dynamic Optimization Problem} \label{risksens}
In this section, we formally derive the dynamic programming equation (DPE) associated with the risk sensitive stochastic control problem. We first reformulate the risk sensitive portfolio optimization problem in an equivalent form in Section \ref{sec:wealth}. The corresponding DPE will be derived and analyzed in Section \ref{sec:DPE}.
\subsection{Formulation of Portfolio Optimization Problem} \label{sec:wealth}
Let us first introduce the set up and formulate the risk sensitive portfolio optimization problem. For $t\in[0,T]$, let $\phi_B(t)$ represent the number of shares of the risk-free asset and let $\phi_j(t)$ denote the number of shares of the $j$-th stock at time $t$ held by the investor. The resulting wealth process is given by
\begin{align*}
X^{\phi}(t) = \sum_{j=1}^N\phi_j(t)\tilde{P}_j(t) + \phi_B(t)B(t),\ \ \ t\in[0,T].
\end{align*}
Using the price representation~\eqref{eq:pricedef} of stocks, the above wealth process can be rewritten as:
\begin{align*}
X^{\phi}(t)=\sum_{j=1}^N\phi_j(t) {(1-Z_j(t))} P_j(t)+\phi_B(t)B(t),\ \ \ t\in[0,T].
\end{align*}
For a given positive wealth process, we can consider the fractions of wealth invested in the stocks and money market account as follows: for $j=1,\ldots,N$, let us define $\tilde{\pi}_j(t)=\frac{\phi_j(t)\tilde{P}_j(t-)}{X^{\phi}(t-)}$ and $\tilde{\pi}_B(t)=1-\tilde{\pi}(t)^{\top}e_N$, where $\tilde{\pi}(t)=(\tilde{\pi}_i(t);\ i=1,\ldots,N)^{\top}$,
and $e_N = \big(\underbrace{1,1,\ldots,1}_{N\ \text{ones}}\big)^{\top}$.
Noting that the price of the $j$-th stock jumps to zero when the $j$-th stock defaults, the fraction of wealth held by the investor in this stock is zero after it defaults.
In particular, the following equality holds {$\tilde{\pi}_j(t)=(1-Z_j(t-))\tilde{\pi}_j(t)$ for $j=1,\ldots,N$}. Therefore, the self-financing condition leads to wealth dynamics in the following form: $X^{\tilde{\pi}}(0)=x\in\R_+:=(0,\infty)$, and
\begin{align}\label{eq:wealth}
dX^{\tilde{\pi}}(t) &= X^{\tilde{\pi}}(t-)\tilde{\pi}(t)^{\top}{\rm diag}(\tilde{P}(t-))^{-1}d\tilde{P}(t) + X^{\tilde{\pi}}(t)(1-\tilde{\pi}(t)^{\top}e_N)\frac{dB(t)}{B(t)}\\
&=X^{\tilde{\pi}}(t)\big[r(Y(t))+\tilde{\pi}(t)^{\top}(\mu(Y(t))-r(Y(t))e_N)\big]dt+ X^{\tilde{\pi}}(t-)\tilde{\pi}(t)^{\top}[\sigma(Y(t))dW(t)-dM(t)].\nonumber
\end{align}
We next introduce the definition of the set of all admissible controls used in the paper.
\begin{definition}\label{def:add-con}
The admissible control set $\tilde{\cal U}$ is a class of $\mathbb{G}$-predictable feedback strategies $\tilde{\pi}(t)=(\tilde{\pi}_j(t);\ j=1,\ldots,N)^{\top}$, $t\in[0,T]$,
given by $\tilde{\pi}_j(t)=\pi_j(t,X^{\tilde{\pi}}(t-),Y(t-),Z(t-))$ such that SDE~\eqref{eq:wealth} admits a unique positive (strong) solution for $X^{\tilde{\pi}}(0)=x\in\R_+$ (i.e. the feedback strategies $\tilde{\pi}(t)$ should take values in $U:=(-\infty,1)^N$). Furthermore, the control $\tilde{\pi}=(\tilde{\pi}(t))_{t\in[0,T]}$ is required to make the positive process $\Gamma^{\tilde{\pi},\theta}=(\Gamma^{\tilde{\pi},\theta}(t))_{t\in[0,T]}$
defined later by \eqref{eq:Gam} to be a $\Px$-martingale.
\end{definition}
We will prove the martingale property of $\Gamma^{\tilde{\pi}^*,\theta}$ for a candidate optimal strategy $\tilde{\pi}^*$ by verifying the generalized Novikov's condition in Section~\ref{sec:mainres}. We consider the following {risk-sensitive} objective functional. For $\tilde{\pi}\in\tilde{\cal U}$, and given the initial values $(X(0),Y(0),Z(0))=(x,i,z)\in\R_+\times\mathbb{Z}_+\times{\cal S}$, we define
\begin{align}\label{eq:J0}
{\cal J}(\tilde{\pi};T,x,i,z) := -\frac{2}{\theta}\log\Ex\left[\exp\left(-\frac{\theta}{2}\log X^{\tilde{\pi}}(T)\right)\right]=-\frac{2}{\theta}\log\Ex\left[(X^{\tilde{\pi}}(T))^{-\frac{\theta}{2}}\right].
\end{align}
The investor aims to maximize the objective functional ${\cal J}$ over all strategies $\tilde{\pi}\in\tilde{\cal U}$. Let us only focus on the case when $\theta\in(0,\infty)$ for a risk-sensitive investor.
The case $\theta\in(-2,0)$ is ignored as it is associated with risk-seeking behavior, which is less frequently encountered in practice.
The objective functional \eqref{eq:J0} has been considered in the existing literature (see, e.g., Bielecki and Pliska~\cite{BiePliska99})
for dynamic asset allocations in the presence of market risk, however, it is still an open problem in the setting with default risk and regime-switching which motivates our research of this project. Eq.~(1.1) in Bielecki and Pliska~\cite{BiePliska99} in our case can be read as: for $\theta$ close to $0$,
\begin{align}\label{eq:rem-000}
{\cal J}(\tilde{\pi};T,x,y,z)=\Ex\left[\log\left(X^{\tilde{\pi}}(T)\right)\right]-\frac{\theta}{4}{\rm Var}\left(\log(X^{\tilde{\pi}}(T))\right)+o(\theta^2),
\end{align}
where $o(\theta^2)$ will typically depend on the terminal horizon $T$.
Then ${\cal J}(\tilde{\pi};T,x,y,z)$ may be interpreted as the growth rate of the investor's wealth minus a penalty term proportional to the variance of the realized rate, with an error that is proportional to $\theta^2$. This establishes a link between the risk-sensitive control problem and the robust decision making rule. A risk-sensitive investor would like to design a decision rule which protects him against large deviations of the growth rate from its expectation, and he achieves this by choosing higher values of the parameter $\theta$.
We next rewrite the objective functional as the exponential of an integral criterion (similar to Nagai and Peng~\cite{PengNagai}, and Capponi et al.~\cite{CappPascucci}) which will turn out to be convenient for the analysis of the dynamic programming equation. For all $\tilde{\pi}\in\tilde{\cal U}$, the wealth process solving SDE~\eqref{eq:wealth} is given by
\begin{align*}
X^{\tilde{\pi}}(T)=&x\exp\Bigg\{\int_0^T\big[r(Y(s))+\tilde{\pi}^{\top}(s)({\mu}(Y(s))-r(Y(s))e_N)\big]ds+\int_0^T\tilde{\pi}^{\top}(s)\sigma(Y(s))dW(s)\nonumber\\
&-\frac{1}{2}\int_0^T\left\|\sigma(Y(s))^{\top}\tilde{\pi}(s)\right\|^2ds+\sum_{j=1}^N\int_0^T\log(1-\tilde{\pi}_j(s))dM_j(s)\nonumber\\
&+\sum_{j=1}^N\int_0^{T\wedge\tau_j}\lambda_j(Y(s),Z(s))\big[\tilde{\pi}_j(s)+\log(1-\tilde{\pi}_j(s))\big]ds\Bigg\},
\end{align*}
and consequently
\begin{align}\label{eq:solution}
\left(X^{\tilde{\pi}}(T)\right)^{-\frac{\theta}{2}}
&=x^{-\frac{\theta}{2}}\Gamma^{\tilde{\pi},\theta}(T)\exp\left(\frac{\theta}{2}\int_0^TL(\tilde{\pi}(s);Y(s),Z(s))ds\right),
\end{align}
where, for $(\pi,i,z)\in U\times\mathbb{Z}_+\times{\cal S}$, the risk sensitive function $L(\pi;i,z)$ is defined by
\begin{align}\label{eq:L0}
L(\pi;i,z)&:= -r(i)-\pi^{\top}(\mu(i)-r(i)e_N)+\frac{1}{2}\left(1+\frac{\theta}{2}\right)\left\|\sigma(i)^{\top}\pi\right\|^2\nonumber\\
&\quad-\sum_{j=1}^N(1-z_j)\left[\frac{2}{\theta}+\pi_j-\frac{2}{\theta}(1-\pi_j)^{-\frac{\theta}{2}}\right]\lambda_j(i,z).
\end{align}
Here, the positive density process is given by, for $t\in[0,T]$,
\begin{align}\label{eq:Gam}
\Gamma^{\tilde{\pi},\theta}(t)&:={\cal E}(\Pi^{\tilde{\pi},\theta})_t,\\
\Pi^{\tilde{\pi},\theta}(t)&:=-\frac{\theta}{2}\int_0^t\tilde{\pi}(s)^{\top}\sigma(Y(s))dW(s)+\sum_{j=1}^N\int_0^t\{(1-\tilde{\pi}_j(s))^{-\frac{\theta}{2}}-1\}dM_j(s),\nonumber
\end{align}
where ${\cal E}(\cdot)$ denotes the stochastic exponential.
As $\tilde{\pi}\in\tilde{\cal U}$, we have that $\Gamma^{\tilde{\pi},\theta}=(\Gamma^{\tilde{\pi},\theta}(t))_{t\in[0,T]}$ is a $\Px$-martingale.
We can thus define the following change of measure given by
\begin{align}\label{eq:change-measure}
\frac{d\Px^{\tilde{\pi},\theta}}{d\Px}\big|_{\G_t}=\Gamma^{\tilde{\pi},\theta}(t),\ \ \ \ \ \ t\in[0,T],
\end{align}
under which
\begin{align}\label{eq:BMtheta}
W^{\tilde{\pi},\theta}(t):=W(t)+\frac{\theta}{2}\int_0^t\sigma(Y(s))^{\top}\tilde{\pi}(s)ds,\ \ \ \ \ \ t\in[0,T]
\end{align}
is a $d$-dimensional Brownian motion, while under $\Px^{\tilde{\pi},\theta}$, for $j=1,\ldots,N$, it holds that
\begin{align}\label{eq:Girjump}
M_j^{\tilde{\pi},\theta}(t):=Z_j(t)-\int_0^{t\wedge\tau_j}(1-\tilde{\pi}_j(s))^{-\frac{\theta}{2}}\lambda_j(Y(s),Z(s))ds,\qquad t\in[0,T]
\end{align}
is a martingale. The definition of $\Px^{\tilde{\pi},\theta}$ enables us to rewrite the above {risk-sensitive} objective functional~\eqref{eq:J0} in an exponential form. From~\eqref{eq:solution}, we deduce that
\begin{align*}\label{eq:J2}
{\cal J}(\tilde{\pi};T,x,i,z) &= -\frac{2}{\theta}\log\Ex\left[\left(X^{\tilde{\pi}}(T)\right)^{-\frac{\theta}{2}}\right]=-\frac{2}{\theta}\log\Ex\left[x^{-\frac{\theta}{2}}\Gamma^{\tilde{\pi},\theta}(T)\exp\left(\frac{\theta}{2}\int_0^TL(\tilde{\pi}(s);Y(s),Z(s))ds\right)\right]\nonumber\\
&=\log x -\frac{2}{\theta}\log\Ex^{\tilde{\pi},\theta}\left[\exp\left(\frac{\theta}{2}\int_0^TL(\tilde{\pi}(s);Y(s),Z(s))ds\right)\right]=:\log x + \bar{{\cal J}}(\tilde{\pi};T,i,z).
\end{align*}
Here $\Ex^{\tilde{\pi},\theta}$ represents the expectation w.r.t. $\Px^{\tilde{\pi},\theta}$ defined by \eqref{eq:change-measure}.
Thanks to the relationship between ${\cal J}$ and $\bar{{\cal J}}$, our original problem is equivalent to maximize $\bar{{\cal J}}$ over $\tilde{\pi}\in\tilde{\cal U}$. We can therefore reformulate the value function of the risk-sensitive control problem as:
\begin{equation}\label{eq:value-fcn}
V(T,i,z) = \sup_{\tilde{\pi}\in\tilde{\cal U}} \bar{{\cal J}}(\tilde{\pi};T,i,z)=-\frac{2}{\theta}\inf_{\tilde{\pi}\in\tilde{\cal U}} \log\Ex^{\tilde{\pi},\theta}\left[\exp\left(\frac{\theta}{2}\int_0^TL(\tilde{\pi}(s);Y(s),Z(s))ds\right)\right],
\end{equation}
for $(i,z)\in\mathbb{Z}_+\times{\cal S}$.
\subsection{Dynamic Programming Equations} \label{sec:DPE}
In this section, we will first derive the dynamic programming equation (DPE) satisfied by the value function~\eqref{eq:value-fcn} using heuristic arguments in Birge et al.~\cite{BirBoCap17}. It will be postponed in the next section to show that the solution of DPE indeed coincides with the value function of our risk sensitive control problem in rigorous verification theorems.
Let $(t,i,z)\in[0,T]\times\mathbb{Z}_+\times{\cal S}$ and define
\begin{equation}\label{eq:J}
\bar{V}(t,i,z) :=-\frac{2}{\theta}\inf_{\tilde{\pi}\in\tilde{\cal U}}\log J(\tilde{\pi};t,i,z):= -\frac{2}{\theta}\inf_{\tilde{\pi}\in\tilde{\cal U}}\log\Ex_{t,i,z}^{\tilde{\pi},\theta}\left[\exp\left(\frac{\theta}{2}\int_t^TL(\tilde{\pi}(s);Y(s),Z(s))ds\right)\right],
\end{equation}
where $\Ex_{t,i,z}^{\tilde{\pi},\theta}[\cdot]:=\Ex^{\tilde{\pi},\theta}[\cdot|Y(t)=i,Z(t)=z]$. This yields the relation ${V}(T,i,z)=\bar{V}(0,i,z)$.
For $0\leq t<s\leq T$, the dynamic programming principle leads to
\begin{equation}\label{eq:dpp}
\bar{V}(t,i,z)= -\frac{2}{\theta}\inf_{\tilde{\pi}\in\tilde{\cal U}}\log\Ex_{t,i,z}^{\tilde{\pi},\theta}\left[\exp\left(-\frac{\theta}{2}\bar{V}(s,Y(s),Z(s))+\frac{\theta}{2}\int_t^sL(\tilde{\pi}(u);Y(u),Z(u))du\right)\right].\nonumber
\end{equation}
{Using heuristic arguments in Birge et al.~\cite{BirBoCap17}, we have the following DPE satisfied by $\bar{V}$, i.e., for all $(t,i,z)\in[0,T)\times\mathbb{Z}_+\times{\cal S}$,
\begin{align}\label{eq:dpe2}
0=&\frac{\partial \bar{V}(t,i,z)}{\partial t}-\frac{2}{\theta}\sum_{l\neq i}q_{il}\left[\exp\left(-\frac{\theta}{2}\big(\bar{V}(t,l,z)-\bar{V}(t,i,z)\big)\right)-1\right]\nonumber\\
&+\sup_{\pi\in{\cal U}}H\left(\pi;i,z,(\bar{V}(t,i,z^j);\ j=0,1,\ldots,N)\right)
\end{align}
with terminal condition $\bar{V}(T,i,z)=0$ for all $(i,z)\in\mathbb{Z}_+\times{\cal S}$. In the above equation, the function $H$ is defined by, for $(\pi,i,z)\in U\times\mathbb{Z}_+\times{\cal S}$,
\begin{align}\label{eq:H}
H(\pi;i,z,\bar{f}(z)):=&-\frac{2}{\theta}\sum_{j=1}^N\left[\exp\left(-\frac{\theta}{2}(f({z}^j)-f(z))\right)-1\right](1-z_j)(1-\pi_j)^{-\frac{\theta}{2}}\lambda_j(i,z)\nonumber\\
&+r(i)+\pi^{\top}(\mu(i)-r(i)e_N)-\frac{1}{2}\left(1+\frac{\theta}{2}\right)\left\|\sigma(i)^{\top}\pi\right\|^2\nonumber\\
&+\sum_{j=1}^N\left[\frac{2}{\theta}+\pi_j-\frac{2}{\theta}(1-\pi_j)^{-\frac{\theta}{2}}\right](1-z_j)\lambda_j(i,z).
\end{align}
Here $\bar{f}(z)=(f(z^j);\ j=0,1,\ldots,N)$ for any measurable function $f(z)$. Above, we used the notation ${z}^j:=(z_1,\ldots,z_{j-1},1-z_j,z_{j+1},\ldots,z_N)$ for $z\in{\cal S}$.}
Eq.~\eqref{eq:dpe2} is in fact a recursive system of DPEs. We consider the following Cole-Hopf transform of the solution given by
\begin{align}\label{eq:exp-trnas}
\varphi(t,i,z):=\exp\left(-\frac{\theta}{2}\bar{V}(t,i,z)\right),\qquad (t,i,z)\in[0,T]\times\mathbb{Z}_+\times{\cal S}.
\end{align}
Then $\frac{\partial \varphi(t,i,z)}{\partial t}=-\frac{\theta}{2}\varphi(t,i,z)\frac{\partial \bar{V}(t,i,z)}{\partial t}$ for $(t,i,z)\in[0,T]\times\mathbb{Z}_+\times{\cal S}$. Plugging it into Eq.~\eqref{eq:dpe2}, we get that
\begin{align}\label{eq:dpe3}
0=&\frac{\partial \varphi(t,i,z)}{\partial t}+\sum_{l\neq i}q_{il}\left[\varphi(t,l,z)-\varphi(t,i,z)\right]+\inf_{\pi\in U}\tilde{H}\left(\pi;i,z,(\varphi(t,i,z^j);\ j=0,1,\ldots,N)\right)
\end{align}
with terminal condition $\varphi(T,i,z)=1$ for all $(i,z)\in\mathbb{Z}_+\times{\cal S}$. In the above equation, the function $\tilde{H}$ is defined by
\begin{align}\label{eq:tildeH}
\tilde{H}(\pi;i,z,\bar{f}(z)
:=&\Bigg\{-\frac{\theta}{2}r(i)-\frac{\theta}{2}\pi^{\top}(\mu(i)-r(i)e_N)+\frac{\theta}{4}\left(1+\frac{\theta}{2}\right)\left\|\sigma(i)^{\top}\pi\right\|^2
\\
&+\sum_{j=1}^N\left(-1-\frac{\theta}{2}\pi_j\right)(1-z_j)\lambda_j(i,z)\Bigg\}f(z)+\sum_{j=1}^Nf(z^j)(1-z_j)(1-\pi_j)^{-\frac{\theta}{2}}\lambda_j(i,z).\nonumber
\end{align}
\section{Main Results and Verification Theorems}\label{sec:mainres}
We analyze the existence of global solutions of the recursive system of DPEs \eqref{eq:dpe3} in a two-step procedure. Firstly, we investigate the existence and uniqueness of classical solutions of Eq.~\eqref{eq:dpe3} as a dynamical system when the Markov chain $Y$ takes values in the finite state space. Secondly, we proceed to study the countably infinite state case using approximation arguments.
Let us introduce some notations which will be used frequently in this section. Let $n\in\mathbb{Z}_+$. For $x\in\mathbb{R}^n$, we write $x=(x_1,...,x_n)^{\top}$. For any $x,y\in\R^n$, we write $x\leq y$ if $x_i\leq y_i$ for all $i=1,\ldots,n$, while write $x<y$ if $x\leq y$ and there exists some $i\in\{1,\ldots,n\}$ such that $x_i<y_i$. In particular, $x\ll y$ if $x_i<y_i$ for all $i=1,...,n$. Recall that $e_{N}$ denotes the $N$-dimensional column vector whose all entries are ones. For the general default state $z\in{\cal S}$, we here introduce a general default state representation $z=0^{j_1,\ldots,j_k}$ for indices $j_1\neq\cdots\neq j_k$ belonging to $\{1,\ldots,N\}$, and $k\in\{0,1,\ldots,N\}$. Such a vector $z$ is obtained by flipping the entries $j_1,\ldots,j_k$ of the zero vector to one, i.e. $z_{j_1}=\cdots=z_{j_k}=1$, and $z_{j}=0$ for $j\notin\{j_1,\ldots,j_k\}$ (if $k=0$, we set $z=0^{j_1,\ldots,j_k}=0$). Clearly $0^{j_1,\ldots,j_{N}}=e_N^{\top}$.
\subsection{Finite State Case of Regime-Switching Process}\label{sec:finite-states}
In this section, we study the case where the regime-switching process $Y$ is defined on a finite state space given by $D_n=\{1,\ldots,n\}$. Here $n\in\mathbb{Z}_+$ is a fixed number. The corresponding $Q$-matrix of the Markov chain $Y$ is given by $Q_n=(q_{ij})_{i,j\in D_n}$ satisfying $\sum_{j\in D_n}q_{ij}=0$ for $i\in D_n$ and $q_{ij}\geq0$ when $i\neq j$. It is worth noting that $q_{ij}$, $i,j\in D_n$ here may be different from what is given in Subsection~\ref{sub:RSP}. With slight abuse of notation, we still use $q_{ij}$ here only for convenience.
Let $\varphi(t,z):=(\varphi(t,i,z);\ i=1,\ldots,n)^{\top}$ be a column vector of the solution for $(t,z)\in[0,T]\times{\cal S}$. Then, we can rewrite Eq.~\eqref{eq:dpe3} as the following dynamical system:
\begin{align}\label{eq:hjbeqn}
\left\{
\begin{aligned}
\frac{\partial \varphi(t,z)}{\partial t}+\big(Q_n+{\rm diag}(\nu(z))\big)\varphi(t,z)+G(t,\varphi(t,z),z)=&0,\quad (t,z)\in[0,T)\times{\cal S};\\
\varphi(T,z)=&e_n,\quad \text{for all }z\in{\cal S}.
\end{aligned}
\right.
\end{align}
Here, the vector-valued function $G(t,x,z)=(G_i(t,x,z);\ i=1,\ldots,n)^{\top}$ is given by, for each $i\in D_n$ and $(t,x,z)\in[0,T]\times\R^n\times{\cal S}$,
\begin{align}
G_i(t,x,z)=&\inf_{\pi\in U}\Bigg\{\sum_{j=1}^N\varphi(t,i,z^j)(1-z_j)(1-\pi_j)^{-\frac\theta2}\lambda_j(i,z)\\
&+\bigg[\frac\theta4(1+\frac\theta2)\left\|\sigma(i)^{\top}\pi\right\|^2-\frac\theta2\pi^\top(\mu(i)-r(i)e_N)-\frac{\theta}{2}\sum_{j=1}^N\pi_j(1-z_j)\lambda_j(i,z)\bigg]x_i\Bigg\}.\nonumber
\end{align}
The vector of coefficients $\nu(z)=(\nu_i(z);\ i=1,\ldots,n)^{\top}$ for $z\in{\cal S}$ is given by, for each $i\in D_n$,
\begin{align}\label{eq:nuz}
\nu_i(z)=-\frac{\theta}{2}r(i)-\sum_{j=1}^N(1-z_j)\lambda_j(i,z).
\end{align}
Recall the recursive system given by \eqref{eq:hjbeqn} in terms of default states $z=0^{j_1,\ldots,j_k}\in{\cal S}$ (where $k=0,1,\ldots,N$). The solvability can in fact be analyzed in the recursive form on default states. Therefore, our strategy for analyzing the system is based on a recursive procedure, starting from the default state $z=e_N^{\top}$ (i.e., all stocks have defaulted) and proceeding backward to the default state $z=0$ (i.e., all stocks are alive).
\begin{itemize}
\item[(i)] $k=N$ (i.e., all stocks have defaulted). In this default state, it is clear that the investor will not invest in stocks and hence the optimal fraction strategy in stocks for this case is given by $\pi_1^*=\cdots=\pi_N^*=0$ by virtue of Definition~\ref{def:add-con}. Let $\varphi(t,e_N^{\top})=(\varphi(t,i,e_N^{\top});\ i=1,\ldots,n)^{\top}$. As a consequence, the dynamical system \eqref{eq:hjbeqn} can be written as
\begin{align}\label{eq:hjben}
\left\{
\begin{aligned}
\frac d{dt}\varphi(t,e_N^{\top})=&-A^{(N)}\varphi(t,e_N^{\top}),\quad\text{ in }[0,T);\\
\varphi(T,e_N^{\top})=&e_n.
\end{aligned}
\right.
\end{align}
The matrix of coefficients $A^{(N)}:=Q_n+{\rm diag}(\nu(e_N^{\top}))$.
\end{itemize}
In order to establish the unique positive solution to the above dynamical system \eqref{eq:hjben}, we need the following auxiliary result.
\begin{lemma}\label{lem:sol-hjben2}
Let $g(t)=(g_i(t);\ i=1,\ldots,n)^{\top}$ satisfy the following dynamical system:
\begin{align*}
\left\{
\begin{aligned}
\frac d{dt}g(t)=&Bg(t)\quad\text{ in }(0,T];\\
g(0)=&\xi.
\end{aligned}
\right.
\end{align*}
If $B=(b_{ij})_{n\times n}$ satisfies $b_{ij}\geq 0$ for $i\neq j$ and $\xi\gg0$,
then we have $g(t)\gg0$ for all $t\in[0,T]$.
\end{lemma}
\noindent{\it Proof.}\quad Define $f(x)=Bx$ for $x\in\R^n$. By virtue of Proposition 1.1 of Chapter 3 in \cite{smith08}, it suffices to verify that $f:\R^n\to\R^n$ is of type $K$, i.e., for any $x,y\in\R^n$ satisfying $x\leq y$ and $x_i=y_i$ {for some $i=1,\ldots,n$}, then it holds that $f_i(x)\leq f_i(y)$. Notice that $b_{ij}\geq0$ for all $i\neq j$. Then, we have that
\begin{align}\label{eq:111}
f_i(x)&=(Bx)_i=\sum_{j=1}^nb_{ij}x_j=b_{ii}x_i+\sum_{j=1,j\neq i}^nb_{ij}x_j\nonumber\\
&=b_{ii}y_i+\sum_{j=1,j\neq i}^nb_{ij}x_j
\leq b_{ii}y_i+\sum_{j=1,j\neq i}^nb_{ij}y_j=f_i(y),
\end{align}
and hence $f$ is of type $K$. Thus, we complete the proof of the lemma. \hfill$\Box$\\
The next result is a direct consequence of the previous lemma.
\begin{lemma}\label{lem:sol-hjben}
The dynamical system \eqref{eq:hjben} admits a unique solution which is given by
\begin{align}\label{eq:varphien}
\varphi(t,e_N^{\top})= e^{A^{(N)}(T-t)}e_n=\sum_{i=0}^{\infty}\frac{(A^{(N)})^i(T-t)^i}{i!}e_n,\quad t\in[0,T],
\end{align}
where the $n\times n$-dimensional matrix $A^{(N)}= Q_n+{\rm diag}(\nu(e_N^{\top}))=Q_n-\frac{\theta}{2}{\rm diag}(r)$ with $r=(r(i);\ i=1,\ldots,n)^{\top}$. Moreover, it holds that $\varphi(t,e_N^{\top})\gg 0$ for all $t\in[0,T]$.
\end{lemma}
\noindent{\it Proof.}\quad The representation of the solution $\varphi(t,e_N^{\top})$ given by \eqref{eq:varphien} is obvious. Note that $e_n\gg0$ and $q_{ij}\geq0$ for all $i\neq j$ as $Q_n=(q_{ij})_{n\times n}$ is the generator of the Markov chain. Then in order to prove $\varphi(t,e_N^{\top})\gg0$ for all $t\in[0,T]$, using Lemma~\ref{lem:sol-hjben2}, it suffices to verify $[A^{(N)}]_{ij}\geq0$ for all $i\neq j$. However $[A^{(N)}]_{ij}=q_{ij}$ for all $i\neq j$ and the condition given in Lemma~\ref{lem:sol-hjben2} is therefore verified which implies that $\varphi(t,e_N^{\top})\gg0$ for all $t\in[0,T]$. \hfill$\Box$\\
We next consider the general default case with $z=0^{j_1,\ldots,j_{k}}$ for $0\leq k\leq N-1$, i.e. the stocks $j_1,\ldots,j_{k}$ have defaulted but the stocks $\{j_{k+1},\ldots,j_N\}:=\{1,\ldots,N\}\setminus\{j_1,\ldots,j_k\}$ remain alive. Then we have
\begin{itemize}
\item[(ii)] Because the stocks $j_1,\ldots,j_k$ have defaulted, the optimal fraction strategies for the stocks $j_1,\ldots,j_{k}$ are given by $\pi_j^{(k,*)}=0$ for $j\in\{j_1,\ldots,j_{k}\}$ by virtue of Definition~\ref{def:add-con}. Let $\varphi^{(k)}(t)=(\varphi(t,i,0^{j_1,\ldots,j_{k}});\ i=1,\ldots,n)^{\top}$ and $\lambda^{(k)}_j(i)=\lambda_j(i,0^{j_1,\ldots,j_{k}})$ for $j\notin\{j_1,\ldots,j_k\}$ and $i=1,\ldots,n$. Then, the corresponding DPE \eqref{eq:hjbeqn} to this default case is given by
\begin{align}\label{eq:hjbn-1}
\left\{
\begin{aligned}
\frac d{dt}\varphi^{(k)}(t)=&-A^{(k)}\varphi^{(k)}(t)-G^{(k)}(t,\varphi^{(k)}(t)),\quad\text{ in }[0,T);\\
\varphi^{(k)}(T)=&e_n.
\end{aligned}
\right.
\end{align}
Here, the $n\times n$-dimensional matrix $A^{(k)}$ is given by
\begin{align}\label{eq:An-1}
A^{(k)}={\rm diag}\left[\left(-\frac{\theta}{2} r(i)-\sum_{j\notin\{j_1,\ldots,j_{k}\}}\lambda_{j}^{(k)}(i);\ i=1,\ldots,n\right)\right]+Q_n.
\end{align}
The coefficient $G^{(k)}(t,x)=(G^{(k)}_i(t,x);\ i=1,\ldots,n)^{\top}$ for $(t,x)\in[0,T]\times\R^{n}$ is given by, for $i\in D_n$,
\begin{align}\label{eq:Gin-1}
G^{(k)}_i(t,x):=&\inf_{\pi^{(k)}\in U^{(k)}}\left\{\sum_{j\notin\{j_1,\ldots,j_k\}} \varphi^{(k+1),j}(t,i)\big(1-\pi_{j}^{(k)}\big)^{-\frac{\theta}{2}}\lambda_{j}^{(k)}(i)+H^{(k)}(\pi^{(k)};i)x_i\right\},
\end{align}
where, for $(\pi^{(k)},i)\in U^{(k)}\times D_n$, the function $H^{(k)}$ is given by
\begin{align}\label{eq:Hk}
H^{(k)}(\pi^{(k)};i):=&\frac{\theta}{4}\big(1+\frac{\theta}{2}\big)\left\|\sigma^{(k)}(i)^{\top}\pi^{(k)}\right\|^2
-\frac{\theta}{2}(\pi^{(k)})^{\top}\big(\mu^{(k)}(i)-r(i)e_{N-k}\big)\nonumber\\
&-\frac{\theta}{2}\sum_{j\notin\{j_1,\ldots,j_k\}}\pi_{j}^{(k)}\lambda_{j}^{(k)}(i).
\end{align}
The policy space of this state is $U^{(k)}=(-\infty,1)^{N-k}$, and $\varphi^{(k+1),j}(t,i):=\varphi(t,i,0^{j_1,\ldots,j_k,j})$ for $j\notin\{j_1,\ldots,j_k\}$ corresponds to the $i$-th element of the positive solution vector of Eq.~\eqref{eq:hjbeqn} at the default state $z=0^{j_1,\ldots,j_k,j}$.
Here, for each $i=1,\ldots,n$, we have also used notations: $\pi^{(k)}=(\pi_j^{(k)};\ j\notin\{j_1,\ldots,j_k\})^{\top}$, $\theta^{(k)}(i)=(\theta_j(i);\ j\notin\{j_1,\ldots,j_k\})^{\top}$, $\sigma^{(k)}(i)=(\sigma_{j\kappa}(i);\ j\notin\{j_1,\ldots,j_k\},\kappa\in\{1,\ldots,d\})$, and $\mu^{(k)}(i)=(\mu_j(i);\ j\notin\{j_1,\ldots,j_k\})^{\top}$.
\end{itemize}
From the expression of $G_i^{(k)}(t,x)$ given by \eqref{eq:Gin-1}, it can be seen that the solution $\varphi^{(k)}(t)$ on $t\in[0,T]$ of DPE \eqref{eq:hjbeqn} at the default state $z=0^{j_1,\ldots,j_k}$ in fact depends on the solution $\varphi^{(k+1),j}(t)$ on $t\in[0,T]$ of DPE~\eqref{eq:hjbeqn} at the default state
$z=0^{j_1,\ldots,j_k,j}$ for $j\notin\{j_1,\ldots,j_k\}$. In particular when $k=N-1$, the solution $\varphi^{(k+1),j}(t)=\varphi(t,e_N^{\top})\gg0$ corresponds to the solution to \eqref{eq:hjbeqn} at the default state $z=e_N^{\top}$ (i.e., $k=N$), which has been obtained by Lemma~\ref{lem:sol-hjben}.
This suggests us to solve DPE~\eqref{eq:hjbeqn} backward recursively in terms of default states $z=0^{j_1,\ldots,j_k}$. Thus, in order to study the existence and uniqueness of a positive (classical) solution to the dynamical system \eqref{eq:hjbn-1}, we first assume that \eqref{eq:hjbeqn} admits a positive unique (classical) solution $\varphi^{(k+1),j}(t)$ on $t\in[0,T]$ for $j\notin\{j_1,\ldots,j_k\}$.
We can first obtain an estimate on $G^{(k)}(t,x)$, which is presented in the following lemma.
\begin{lemma}\label{lem:Gkesti}
For each $k=0,1,\ldots,N-1$, let us assume that DPE~\eqref{eq:hjbeqn} admits a positive unique (classical) solution $\varphi^{(k+1),j}(t)$ on $t\in[0,T]$ for $j\notin\{j_1,\ldots,j_k\}$. Then, for any $x,y\in\R^n$ satisfying $x,y\geq\varepsilon e_n$ with $\varepsilon>0$, there exists a positive constant $C=C(\varepsilon)$ which only depends on $\varepsilon>0$ such that
\begin{align}\label{eq:Gkesti}
\left\|G^{(k)}(t,x)-G^{(k)}(t,y)\right\|\leq C\left\|x-y\right\|.
\end{align}
Here $\|\cdot\|$ denotes the Euclidean norm.
\end{lemma}
\noindent{\it Proof.}\quad It suffices to prove that, for each $i=1,\ldots,n$, $|G^{(k)}_i(t,x)-G^{(k)}_i(t,y)|\leq C(\varepsilon)\|x-y\|$ for any $x,y\in\R^n$ satisfying $x,y\geq\varepsilon e_n$ with $\varepsilon>0$, where $C(\varepsilon)>0$ is independent of time $t$. By the recursive assumption, $\varphi^{(k+1),j}(t)$ on $t\in[0,T]$ is the unique positive (classical) solution to \eqref{eq:hjbeqn} for $j\notin\{j_1,\ldots,j_k\}$. Then, it is continuous on $[0,T]$ which implies the existence of a constant $C_0>0$ independent of $t$ such that $\sup_{t\in[0,T]}\|\varphi^{(k+1),j}(t)\|\leq C_0$ for $j\notin\{j_1,\ldots,j_k\}$. Thus, by \eqref{eq:Gin-1}, and thanks to $H^{(k)}(0;i)=0$ for all $i\in D_n$ using \eqref{eq:Hk}, it follows that, for all $(t,x)\in[0,T]\times\R^n$,
\begin{align}\label{eq:gless}
G^{(k)}_i(t,x)\leq&\left[\sum_{j\notin\{j_1,\ldots,j_k\}} \varphi^{(k+1),j}(t,i)(1-\pi_{j}^{(k)})^{-\frac{\theta}{2}}\lambda_{j}^{(k)}(i)+H^{(k)}(\pi^{(k)};i)x_i\right]\Bigg|_{\pi^{(k)}=0}\nonumber\\
=&\sum_{j\notin\{j_1,\ldots,j_k\}} \varphi^{(k+1),j}(t,i)\lambda_{j}^{(k)}(i)\leq C_0 \sum_{j\notin\{j_1,\ldots,j_k\}}\lambda_{j}^{(k)}(i).
\end{align}
On the other hand, as $\sigma^{(k)}(i)^\top\sigma^{(k)}(i)$ is positive-definite, there exists a constant $\delta>0$ such that $\big\|\sigma^{(k)}(i)^{\top}\pi^{(k)}\big\|^2\geq\delta\|\pi^{(k)}\|^2$ for all $i\in D_n$. Hence, the following estimate holds:
\begin{align}\label{eq:esti1}
&H^{(k)}(\pi^{(k)};i)\geq\frac{\theta}{4}(1+\frac{\theta}{2})\delta\left\|\pi^{(k)}\right\|^2-\frac{\theta}{2}\left(\left\|\mu^{(k)}(i)-r(i)e_{N-k}\right\|+\sum_{j\notin\{j_1,\ldots,j_k\}}
\lambda_{j}^{(k)}(i)\right)\left\|\pi^{(k)}\right\|.
\end{align}
We next take the positive constant defined as
\[
C_1:=2\max_{i\in D_n}\frac{\left\|\mu^{(k)}(i)-r(i)e_{N-k}\right\|+\sum_{j\notin\{j_1,\ldots,j_k\}}\lambda_j^{(k)}(i)}{(1+\frac\theta2)\delta}.
\]
For all $\pi^{(k)}\in\{\pi^{(k)}\in U^{(k)};\ \|\pi^{(k)}\|\geq C_1\}$, it holds that
\begin{align}\label{eq:large0}
H^{(k)}(\pi^{(k)};i)\geq 0,\qquad i\in D_n.
\end{align}
This yields that, for all $\pi^{(k)}\in\{\pi^{(k)}\in U^{(k)};\ \|\pi^{(k)}\|\geq C_1\}$ and all $x\geq\varepsilon e_n$, we deduce from \eqref{eq:esti1} and \eqref{eq:large0} that
\begin{align*}
&\sum_{j\notin\{j_1,\ldots,j_k\}} \varphi^{(k+1),j}(t,i)(1-\pi_{j}^{(k)})^{-\frac{\theta}{2}}\lambda_{j}^{(k)}(i)+H^{(k)}(\pi^{(k)};i)x_i\geq H^{(k)}(\pi^{(k)};i)x_i\\
&\qquad\geq H^{(k)}(\pi^{(k)};i)\varepsilon\\
&\qquad\geq\varepsilon\left[\frac\theta4(1+\frac\theta2)\delta\left\|\pi^{(k)}\right\|^2-\frac\theta2\left(\left\|\mu^{(k)}(i)-r(i)e_{N-k}\right\|+\sum_{j\notin\{j_1,\ldots,j_k\}}\lambda_{j}^{(k)}(i)\right)
\left\|\pi^{(k)}\right\|\right].
\end{align*}
We shall choose another positive constant depending on $\varepsilon>0$ as
\[
C_2(\varepsilon):=\frac{C_1}2+\sqrt{\frac{C_1^2}4+\frac8{\varepsilon\theta(2+\theta)\delta}C_0\max_{i\in D_n}\sum_{j\notin\{j_1,\ldots,j_k\}}\lambda_{j}^{(k)}(i)}.
\]
Then, for all $\pi^{(k)}\in\{\pi\in U^{(k)};\ \|\pi\|\geq C_2(\varepsilon)\}$ and all $x\geq\varepsilon e_n$, it holds that
\begin{align}\label{eq:esti002}
&\sum_{j\notin\{j_1,\ldots,j_k\}} \varphi^{(k+1),j}(t,i)(1-\pi_{j}^{(k)})^{-\frac{\theta}{2}}\lambda_{j}^{(k)}(i)+H^{(k)}(\pi^{(k)};i)x_i\geq C_0\sum_{j\notin\{j_1,\ldots,j_k\}}\lambda_{j}^{(k)}(i).
\end{align}
By \eqref{eq:gless}, we have that $G^{(k)}_i(t,x)\leq C_0\sum_{j\notin\{j_1,\ldots,j_k\}}\lambda_{j}^{(k)}(i)$ for all $(t,x)\in[0,T]\times\R^n$. Thus, it follows from \eqref{eq:esti002} that
\begin{align}\label{eq:G2}
G^{(k)}_i(t,x)&=\inf_{\pi^{(k)}\in U^{(k)}}\left\{\sum_{j\notin\{j_1,\ldots,j_k\}} \varphi^{(k+1),j}(t,i)(1-\pi_{j}^{(k)})^{-\frac{\theta}{2}}\lambda_{j}^{(k)}(i)+H^{(k)}(\pi^{(k)};i)x_i\right\}\\
&=\inf_{\substack{\pi^{(k)}\in\{\pi\in U^{(k)}:\\ \|\pi\|\leq C_2(\varepsilon)\}}}\left\{\sum_{j\notin\{j_1,\ldots,j_k\}} \varphi^{(k+1),j}(t,i)(1-\pi_{j}^{(k)})^{-\frac{\theta}{2}}\lambda_{j}^{(k)}(i)+H^{(k)}(\pi^{(k)};i)x_i\right\}.\nonumber
\end{align}
In virtue of \eqref{eq:G2}, it holds that
\begin{align}\label{eq:Gxy}
G^{(k)}_i(t,x)&=\inf_{\substack{\pi^{(k)}\in\{\pi\in U^{(k)}:\\ \|\pi\|\leq C_2(\varepsilon)\}}}\Bigg\{\sum_{j\notin\{j_1,\ldots,j_k\}} \varphi^{(k+1),j}(t,i)(1-\pi_{j}^{(k)})^{-\frac{\theta}{2}}\lambda_{j}^{(k)}(i)\nonumber\\
&\qquad\qquad\qquad\qquad+H^{(k)}(\pi^{(k)};i)y_i+H^{(k)}(\pi^{(k)};i)(x_i-y_i)\Bigg\}\nonumber\\
&\leq\inf_{\substack{\pi^{(k)}\in\{\pi\in U^{(k)}:\\ \|\pi\|\leq C_2(\varepsilon)\}}}\Bigg\{\sum_{j\notin\{j_1,\ldots,j_k\}} \varphi^{(k+1),j}(t,i)(1-\pi_{j}^{(k)})^{-\frac{\theta}{2}}\lambda_{j}^{(k)}(i)\nonumber\\
&\qquad\qquad\qquad\qquad+H^{(k)}(\pi^{(k)};i)y_i\Bigg\}+C(\varepsilon)|x_i-y_i|\nonumber\\
&= G^{(k)}_i(t,y)+C(\varepsilon)|x_i-y_i|.
\end{align}
Here, the finite positive constant $C(\varepsilon)=\max_{i=1,\ldots,n}C^{(i)}(\varepsilon)$, where for $i\in D_n$,
\begin{align}\label{eq:Cepsilon}
C^{(i)}(\varepsilon)&:=\sup_{\substack{\pi^{(k)}\in\{\pi\in U^{(k)}:\\ \|\pi\|\leq C_2(\varepsilon)\}}}H^{(k)}(\pi^{(k)};i).
\end{align}
Note that the constant $C^{(i)}(\varepsilon)$ given above is nonnegative and finite for each $i\in D_n$. By \eqref{eq:Gxy}, we get that
$|G^{(k)}_i(t,x)-G^{(k)}_i(t,y)|\leq C(\varepsilon)\|x-y\|$ for any $x,y\in\R^n$ satisfying $x,y\geq\varepsilon e_n$ with $\varepsilon>0$, which completes the proof of the lemma.
\hfill$\Box$\\
We move on to study the existence and uniqueness of the global (classical) solution to the dynamical system \eqref{eq:hjbn-1}. To this end, we prepare the following comparison result for two types of dynamical systems under the type $K$ condition introduced in Smith~\cite{smith08}:
\begin{lemma}\label{comparison}
Let $g_{\kappa}(t)=(g_{\kappa i}(t);\ i=1,\ldots,n)^{\top}$ with $\kappa=1,2$ satisfy the following dynamical systems on $[0,T]$, respectively
\begin{align*}
\left\{
\begin{aligned}
\frac d{dt}g_1(t)=&f(t,g_1(t))+\tilde{f}(t,g_1(t)),\ \text{ in }(0,T];\\
g_1(0)=&\xi_1,
\end{aligned}
\right.\qquad\qquad
\left\{
\begin{aligned}
\frac d{dt}g_2(t)=&f(t,g_2(t)),\ \text{ in }(0,T];\\
g_2(0)=&\xi_2.
\end{aligned}
\right.
\end{align*}
Here, the functions $f(t,x),\,\tilde{f}(t,x):[0,T]\times\R^n\to\R^n$ are assumed to be Lipschitz continuous w.r.t. $x\in\R^n$ uniformly in $t\in[0,T]$. The function
$f(t,\cdot)$ satisfies the type $K$ condition for each $t\in[0,T]$ (i.e., for any $x,y\in\R^n$ satisfying $x\leq y$ and $x_i=y_i$ for some $i=1,\ldots,n$,
it holds that $f_i(t,x)\leq f_i(t,y)$ for each $t\in[0,T]$). If
$\tilde{f}(t,x)\geq0$ for $(t,x)\in[0,T]\times\R^n$ and $\xi_1\geq\xi_2$, then $g_1(t)\geq g_2(t)$ for all $t\in[0,T]$.
\end{lemma}
\noindent{\it Proof.}\quad For $p>0$, let $g_{1}^{(p)}(t)=(g_{1i}^{(p)}(t);\ i=1,\ldots,n)^{\top}$ be the solution to the following dynamical system given by
\begin{equation}
\left\{
\begin{aligned}
\frac d{dt}g_{1}^{(p)}(t)=&f(t,g_{1}^{(p)}(t))+\tilde{f}(t,g^{(p)}_{1}(t))+\frac{1}{p}e_n,\ \text{ in }(0,T];\\
g_{1}^{(p)}(0)=&\xi_1+\frac{1}{p}e_n.
\end{aligned}
\right.
\end{equation}
Then, for all $t\in(0,T]$, it holds that
\begin{align*}
\|g_{1}^{(p)}(t)-g_1(t)\|\leq&\|g_{1}^{(p)}(0)-g_1(0)\|+\int_0^t\big\|f(s,g_{1}^{(p)}(s))-f(s,g_1(s))\big\|ds\nonumber\\
&+\int_0^t\big\|\tilde{f}(s,g_{1}^{(p)}(s))-\tilde{f}(s,g_1(s))\big\|ds+\frac1p\int_0^t\|e_n\|ds\nonumber\\
\leq&\frac{1+T}p\|e_n\|+(C+\tilde{C})\int_0^t\big\|g_{1}^{(p)}(s)-g_1(s)\big\|ds.
\end{align*}
Here $C>0$ and $\tilde{C}>0$ are the Lipschitz constants of $f(t,x)$ and $\tilde{f}(t,x)$, respectively. Gronwall's lemma yields that $g_{1}^{(p)}(t)\to g_1(t)$ for all $t\in[0,T]$ as $p\to\infty$. We claim that $g_{1}^{(p)}(t)\gg g_2(t)$ for all $t\in[0,T]$. Suppose that the claim does not hold. Then the facts that $g_{1}^{(p)}(0)\gg g_2(0)$ and that $g_1^{(p)}(t),g_2(t)$ are continuous on $[0,T]$ imply that there exists a $t_0\in(0,T]$ such that $g_{1}^{(p)}(s)\geq g_2(s)$ on $s\in[0,t_0]$ and $g_{1i}^{(p)}(t_0)=g_{2i}(t_0)$ for some $i\in\{1,\ldots,n\}$. Because for $t_0>0$, $g_1^{(p)}(t),g_2(t)$ are differentiable on $(0,T]$, it follows that
\begin{align*}
\frac d{dt}g_{1i}^{(p)}(t)\big|_{t=t_0}=\lim_{\epsilon\to0}\frac{g_{1i}^{(p)}(t_0)-g_{1i}^{(p)}(t_0-\epsilon)}{\epsilon}
\leq\lim_{\epsilon\to0}\frac{g_{2i}(t_0)-g_{2i}(t_0-\epsilon)}{\epsilon}= \frac d{dt}g_{2i}(t)\big|_{t=t_0}.
\end{align*}
On the other hand, as $f(t,\cdot)$ satisfies the type $K$ condition for each $t\in[0,T]$ and $\tilde{f}(t,x)\geq0$ for all $(t,x)\in[0,T]\times\R^n$, for the above $i$, we also have that
\begin{align}
\frac d{dt}g_{1i}^{(p)}(t)\big|_{t=t_0}=&f_i(t_0,g_{1}^{(p)}(t_0))+\tilde{f}_i(t_0,g_{1}^{(p)}(t_0))+\frac1p\nonumber\\
>&f_i(t_0,g_{1}^{(p)}(t_0))\geq f_i(t_0,g_2(t_0))=\frac d{dt}g_{2i}(t)\big|_{t=t_0}.
\end{align}
We obtain a contradiction, and hence $g_{1}^{(p)}(t)\gg g_2(t)$ for all $t\in[0,T]$. It therefore holds that $g_1(t)\geq g_2(t)$ for all $t\in[0,T]$ by passing $p$ to infinity. \hfill$\Box$\\
Now we are ready to present the following existence and uniqueness result for the positive (classical) solution of Eq.~\eqref{eq:hjbn-1}.
\begin{theorem}\label{thm:solutionk}
For each $k=0,1,\ldots,N-1$, assume that DPE~\eqref{eq:hjbeqn} admits a positive unique (classical) solution $\varphi^{(k+1),j}(t)$ on $t\in[0,T]$ for $j\notin\{j_1,\ldots,j_k\}$. Then, there exists a unique positive (classical) solution $\varphi^{(k)}(t)$ on $t\in[0,T]$ of \eqref{eq:hjbeqn}
at the default state $z=0^{j_1,\ldots,j_k}$ (i.e., Eq.~\eqref{eq:hjbn-1} admits a unique positive (classical) solution).
\end{theorem}
\noindent{\it Proof.}\quad For any constant $a\in(0,1]$, let us consider the truncated dynamical system given by
\begin{align}\label{eq:truneqn}
\left\{
\begin{aligned}
\frac d{dt} \varphi_a^{(k)}(t)+A^{(k)}\varphi_a^{(k)}(t) + G_a^{(k)}(t,\varphi_a^{(k)}(t))=&0,\ \text{ in }[0,T);\\
\varphi^{(k)}_a(T)=&e_n.
\end{aligned}
\right.
\end{align}
Here $\varphi_a^{(k)}(t)=(\varphi_a^{(k)}(t,i);\ i=1,\ldots,n)^{\top}$ is the vector-valued solution and the $n\times n$-dimensional matrix $A^{(k)}$ is given by \eqref{eq:An-1}. The vector-valued function $G_a^{(k)}(t,x)$ is defined as:
\begin{align}\label{eq:Ga}
G_a^{(k)}(t,x) := G^{(k)}(t,x\vee a e_n),\qquad (t,x)\in[0,T]\times\R^n.
\end{align}
Thanks to Lemma~\ref{lem:Gkesti}, there exists a positive constant $C=C(a)$ which only depends on $a>0$ such that, for all $t\in[0,T]$,
\begin{align}\label{eq:Lip-Ga}
\big\|G_a^{(k)}(t,x)-G_a^{(k)}(t,y)\big\|\leq C\|x-y\|,\qquad x,y\in\R^n,
\end{align}
i.e., $G^{(k)}_a(t,x)$ is globally Lipschitz continuous w.r.t. $x\in\R^n$ uniformly in $t\in[0,T]$. By reversing the time, let us consider $\tilde{\varphi}_a^{(k)}(t):=\varphi_a^{(k)}(T-t)$ for $t\in[0,T]$. Then, $\tilde{\varphi}_a^{(k)}(t)$ satisfies the following dynamical system given by
\begin{align}\label{eq:truneq2}
\left\{
\begin{aligned}
\frac{d}{dt}\tilde{\varphi}_a^{(k)}(t)=&A^{(k)}\tilde{\varphi}^{(k)}_a(t)+G^{(k)}_{a}(T-t,\tilde{\varphi}_a^{(k)}(t)),\ \text{ in }(0,T];\\
\tilde{\varphi}_a^{(k)}(0)=&e_n.
\end{aligned}
\right.
\end{align}
By virtue of the global Lipschitz continuity condition \eqref{eq:Lip-Ga}, for each $a\in(0,1]$, it follows that the system~\eqref{eq:truneq2} has a unique (classical) solution $\tilde{\varphi}_a^{(k)}(t)$ on $[0,T]$.
In order to apply Lemma~\ref{comparison}, we rewrite the above system \eqref{eq:truneq2} in the following form:
\begin{align}\label{eq:truneq3}
\left\{
\begin{aligned}
\frac{d}{dt}\tilde{\varphi}_a^{(k)}(t)=&f^{(k)}(\tilde{\varphi}^{(k)}_a(t))+\tilde{f}_a^{(k)}(t,\tilde{\varphi}_a^{(k)}(t)),\ \text{ in }(0,T];\\
\tilde{\varphi}_a^{(k)}(0)=&e_n.
\end{aligned}
\right.
\end{align}
Here, the Lipschitz continuous functions $f^{(k)}(x)=(f_i^{(k)}(x);\ i=1,\ldots,n)^{\top}$ and $\tilde{f}_a^{(k)}(t,x)=(\tilde{f}^{(k)}_{a,i}(t,x);\ i=1,\ldots,n)^{\top}$ on $(t,x)\in[0,T]\times\R^n$ are given respectively by
\begin{align}\label{eq:f}
f_i^{(k)}(x)&=\sum_{j=1}^nq_{ij}x_j-\left(\frac{\theta}{2} r(i)+\sum_{j\notin\{j_1,\ldots,j_{k}\}}\lambda_{j}^{(k)}(i)\right)x_i-\beta_i\{|x_i|\vee1\},\nonumber\\
\tilde{f}_{a,i}^{(k)}(t,x)&=G^{(k)}_i(T-t,x\vee ae_n)+\beta_i\{|x_i|\vee1\},\quad i=1,\ldots,n.
\end{align}
The positive constants $\beta_i$ for $i\in D_n$ are given by
\begin{align}\label{eq:betai}
\beta_i=&-\inf_{\pi^{(k)}\in U^{(k)}}H^{(k)}(\pi^{(k)};i),
\end{align}
where, for $i\in D_n$, $H^{(k)}(\pi^{(k)};i)$ is defined by \eqref{eq:Hk}. It is not difficult to see that $\beta_i$ is a nonnegative and finite constant for each $i\in D_n$ using \eqref{eq:Hk}. By the recursive assumption that $\varphi^{(k+1),j}(t)\gg0$ on $[0,T]$ for $j\notin\{j_1,\ldots,j_k\}$, for any $a\in(0,1]$, we have that, for each $i\in D_n$, and all $(t,x)\in[0,T]\times\R^n$,
\begin{equation}\label{eq:Gapositive}
\begin{split}
&G^{(k)}_i(T-t,x\vee ae_n)\\
=&\inf_{\pi^{(k)}\in U^{(k)}}\left\{\sum_{j\notin\{j_1,\ldots,j_k\}}\varphi^{(k+1),j}(T-t,i)(1-\pi_{j}^{(k)})^{-\frac\theta2}\lambda_{j}^{(k)}(i)+H^{(k)}(\pi^{(k)};i)(x_i\vee a)\right\}\\
\geq&\{x_i\vee a\}\inf_{\pi^{(k)}\in U^{(k)}}H^{(k)}(\pi^{(k)};i)\geq-\beta_i\{|x_i|\vee 1\}.
\end{split}
\end{equation}
Thus, from \eqref{eq:f}, it follows that, for all $(t,x)\in[0,T]\times\R^n$,
\begin{align}\label{eq:onftilde}
\tilde{f}_{a,i}^{(k)}(t,x)=G^{(k)}_i(T-t,x\vee ae_n)+\beta_i\{|x_i|\vee1\}\geq0,\quad i\in D_n.
\end{align}
We next verify that the vector-valued function $f^{(k)}(x)=(f_i^{(k)}(x);\ i=1,\ldots,n)^{\top}$ given by \eqref{eq:f} is of type $K$. Namely we need to verify that, for any $x,y\in\R^n$ satisfying $x\leq y$ and $x_{i_0}=y_{i_0}$ for some $i_0=1,\ldots,n$, it holds that $f_{i_0}^{(k)}(x)\leq f_{i_0}^{(k)}(y)$. In fact, by \eqref{eq:f}, we have that, for any $x,y\in\R^n$ satisfying $x\leq y$ and $x_{i_0}=y_{i_0}$ for some $i_0=1,\ldots,n$,
\begin{align}\label{eq:condK}
f_{i_0}^{(k)}(x)&=\sum_{j=1}^nq_{i_0j}x_j-\left(\frac{\theta}{2} r(i_0)+\sum_{j\notin\{j_1,\ldots,j_{k}\}}\lambda_{j}^{(k)}(i_0)\right)x_{i_0}-\beta_{i_0}\{|x_{i_0}|\vee1\}\nonumber\\
&=q_{i_0i_0}x_{i_0}-\left(\frac{\theta}{2} r(i_0)+\sum_{j\notin\{j_1,\ldots,j_{k}\}}\lambda_{j}^{(k)}(i_0)\right)x_{i_0}-\beta_{i_0}\{|x_{i_0}|\vee1\}+\sum_{j\neq i_0}q_{i_0j}x_j\nonumber\\
&=q_{i_0i_0}y_{i_0}-\left(\frac{\theta}{2} r(i_0)+\sum_{j\notin\{j_1,\ldots,j_{k}\}}\lambda_{j}^{(k)}(i_0)\right)y_{i_0}-\beta_{i_0}\{|y_{i_0}|\vee1\}+\sum_{j\neq i_0}q_{i_0j}x_j\nonumber\\
&\leq q_{i_0i_0}y_{i_0}-\left(\frac{\theta}{2} r(i_0)+\sum_{j\notin\{j_1,\ldots,j_{k}\}}\lambda_{j}^{(k)}(i_0)\right)y_{i_0}-\beta_{i_0}\{|y_{i_0}|\vee1\}+\sum_{j\neq i_0}q_{i_0j}y_j\nonumber\\
&=f_{i_0}^{(k)}(y),
\end{align}
where we used the fact that for all $j\neq i_0$, $q_{i_0j}\geq0$ as $Q_n=(q_{ij})_{n\times n}$ is the generator of the Markov chain $Y$ and hence $\sum_{j\neq i_0}q_{i_0j}x_j\leq \sum_{j\neq i_0}q_{i_0j}y_j$ for all $x\leq y$. Hence, using Proposition 1.1 of Chapter 3 in Smith \cite{smith08} and Lemma~\ref{lem:sol-hjben2}, we deduce that the following dynamical system
\begin{align}\label{eq:truneq4}
\left\{
\begin{aligned}
\frac{d}{dt}{\psi}^{(k)}(t)=&f^{(k)}({\psi}^{(k)}(t)),\ \text{ in }(0,T];\\
{\psi}^{(k)}(0)=&e_n
\end{aligned}
\right.
\end{align}
admits a unique (classical) solution ${\psi}^{(k)}(t)=(\psi_i^{(k)}(t);\ i=1,\ldots,n)^{\top}$ on $t\in[0,T]$, and moreover it holds that ${\psi}^{(k)}(t)\gg0$ for $t\in[0,T]$. Let us set
\begin{align}\label{eq:epsilonk}
\varepsilon^{(k)}:=\min_{i=1,\ldots,n}\left\{\inf_{t\in[0,T]}\psi_i^{(k)}(t)\right\}.
\end{align}
The continuity of $\psi^{(k)}(t)$ in $t\in[0,T]$ and $\psi^{(k)}(t)\gg0$ for all $t\in[0,T]$ lead to $\varepsilon^{(k)}>0$. On the other hand, it follows from \eqref{eq:onftilde} that
the vector-valued function $\tilde{f}_a^{(k)}(t,x)\geq0$ on $[0,T]\times\R^n$. Because the vector-valued function $f^{(k)}(x)$ is also of type $K$, as proved in \eqref{eq:condK}, we can apply Lemma~\ref{comparison} to the dynamical systems \eqref{eq:truneq3} and \eqref{eq:truneq4} and derive that
\begin{align}\label{eq:comparison0}
\tilde{\varphi}_a^{(k)}(t)\geq {\psi}^{(k)}(t)\geq\varepsilon^{(k)}e_n,\qquad \forall\ t\in[0,T],
\end{align}
as $\tilde{\varphi}_a^{(k)}(0)={\psi}^{(k)}(0)=e_n$. Note that the positive constant $\varepsilon^{(k)}$ given by \eqref{eq:epsilonk} above is independent of the constant $a\in(0,1]$. We can therefore choose $a\in(0,\varepsilon^{(k)}\wedge1)$ and it holds that $G_a^{(k)}(T-t,\tilde{\varphi}_a^{(k)}(t))=G^{(k)}(T-t,\tilde{\varphi}_a^{(k)}(t)\vee ae_n)=G^{(k)}(T-t,\tilde{\varphi}_a^{(k)}(t))$ on $[0,T]$. By \eqref{eq:truneq2} with $a\in(0,\varepsilon^{(k)}\wedge1)$, it follows that $\varphi^{(k)}(t):=\tilde{\varphi}_a^{(k)}(T-t)$, which satisfies $\varphi^{(k)}(t)\geq\varepsilon^{(k)}e_n$ on $[0,T]$, is the unique (classical) solution to the dynamical system \eqref{eq:hjbn-1}, and the proof of the theorem is complete. \hfill$\Box$\\
As an important implication of Theorem~\ref{thm:solutionk}, we present one of our major contributions to the existing literature in the next proposition as the characterization of the optimal strategy $\pi^{(k)}\in{U}^{(k)}$ at the default state $z=0^{j_1,\ldots,j_k}$ where $k=0,1,\ldots,N-1$.
\begin{proposition}\label{coro:optimal-strategy}
For each $k=0,1,\ldots,N-1$, assume that DPE~\eqref{eq:hjbeqn} admits a positive unique (classical) solution $\varphi^{(k+1),j}(t)$ on $t\in[0,T]$ for $j\notin\{j_1,\ldots,j_k\}$. Let $\varphi^{(k)}(t)=(\varphi^{(k)}(t,i);\ i=1,\ldots,n)^{\top}$ be the unique (classical) solution of DPE \eqref{eq:hjbn-1}. Then, there exists a unique optimal feedback strategy $\pi^{(k,*)}=\pi^{(k,*)}(t,i)$ for $(t,i)\in[0,T]\times D_n$ which is given explicitly by
\begin{align}\label{eq:optimal-strategy}
\pi^{(k,*)}=&\pi^{(k,*)}(t,i)\\
=&\argmin_{\pi^{(k)}\in U^{(k)}}\left\{\sum_{j\notin\{j_1,\ldots,j_k\}} \varphi^{(k+1),j}(t,i)\big(1-\pi_{j}^{(k)}\big)^{-\frac{\theta}{2}}\lambda_{j}^{(k)}(i)+H^{(k)}(\pi^{(k)};i)\varphi^{(k)}(t,i)\right\}\nonumber\\
=&\argmin_{\substack{\pi^{(k)}\in\{\pi\in U^{(k)}:\\ \|\pi\|\leq C\}}}\Bigg\{\sum_{j\notin\{j_1,\ldots,j_k\}} \varphi^{(k+1),j}(t,i)\big(1-\pi_{j}^{(k)}\big)^{-\frac{\theta}{2}}\lambda_{j}^{(k)}(i)+H^{(k)}(\pi^{(k)};i)\varphi^{(k)}(t,i)\Bigg\},\nonumber
\end{align}
for some positive constant $C>0$.
\end{proposition}
\noindent{\it Proof.}\quad Let us first recall Eq.~\eqref{eq:hjbn-1}, i.e.,
\begin{align*}
\left\{
\begin{aligned}
\frac d{dt}\varphi^{(k)}(t)=&-A^{(k)}\varphi^{(k)}(t)-G^{(k)}(t,\varphi^{(k)}(t)),\quad\text{ in }[0,T);\\
\varphi^{(k)}(T)=&e_n.
\end{aligned}
\right.
\end{align*}
Theorem~\ref{thm:solutionk} above shows that the above dynamical system admits a unique positive (classical) solution $\varphi^{(k)}(t)$ on $[0,T]$ and moreover $\varphi^{(k)}(t)\geq \varepsilon^{(k)}e_n$ for all $t\in[0,T]$. Here $\varepsilon^{(k)}>0$ is given by~\eqref{eq:epsilonk}. Thus, by \eqref{eq:G2}, we have that there exists a positive constant $C(\varepsilon^{(k)})$ which depends on $\varepsilon^{(k)}>0$ such that, for each $i\in D_n$,
\begin{align*}
&G^{(k)}_i(t,\varphi^{(k)}(t,i))\nonumber\\
=&\inf_{\substack{\pi^{(k)}\in\{\pi\in U^{(k)}:\\ \|\pi\|\leq C(\varepsilon^{(k)})\}}}\Bigg\{\sum_{j\notin\{j_1,\ldots,j_k\}} \varphi^{(k+1),j}(t,i)(1-\pi_{j}^{(k)})^{-\frac{\theta}{2}}\lambda_{j}^{(k)}(i)+H^{(k)}(\pi^{(k)};i)\varphi^{(k)}(t,i)\Bigg\}.\nonumber
\end{align*}
Here, for each $i=1,\ldots,n$, the function $G_i^{(k)}(t,x)$ on $(t,x)\in[0,T]\times\R^n$ is given by \eqref{eq:Gin-1}.
Also for each $i=1,\ldots,n$, $\varphi^{(k+1),j}(t,i)$ on $t\in[0,T]$ is the $i$-th element of the positive (classical) solution $\varphi^{(k+1),j}(t)$ of \eqref{eq:hjbeqn} at the default state $z=0^{j_1,\ldots,j_k,j}$ for $j\notin\{j_1,\ldots,j_k\}$. Recall that the function $H^{(k)}(\pi^{(k)};i)$ for $(\pi^{(k)},i)\in U^{(k)}\times D_n$ is given by \eqref{eq:Hk}. Then, it is not difficult to see that, for each $i\in D_n$ and fixed $t\in[0,T]$,
\[
h^{(k)}(\pi^{(k)},i):=\sum_{j\notin\{j_1,\ldots,j_k\}} \varphi^{(k+1),j}(t,i)(1-\pi_{j}^{(k)})^{-\frac{\theta}{2}}\lambda_{j}^{(k)}(i)+H^{(k)}(\pi^{(k)};i)\varphi^{(k)}(t,i)
\]
is continuous and strictly convex in $\pi^{(k)}\in\bar{U}^{(k)}$. Also notice that the space $\{\pi^{(k)}\in \bar{U}^{(k)};\ \|\pi^{(k)}\|\leq C(\varepsilon^{(k)})\}\subset\R^{N-k}$ is compact.
Hence, there exists a unique optimum $\pi^{(k,*)}=\pi^{(k,*)}(t,i)\in\bar{U}^{(k)}$. Moreover, it is noted that $h^{(k)}(\pi^{(k)},i)=+\infty$ when $\pi^{(k)}\in\bar{U}^{(k)}\setminus U^{(k)}$ while $h^{(k)}(\pi^{(k)},i)<+\infty$ for all $\pi^{(k)}\in U^{(k)}$. Consequently, we in fact obtain the optimum $\pi^{(k,*)}=\pi^{(k,*)}(t,i)\in U^{(k)}$ admitting the representation \eqref{eq:optimal-strategy} by taking $C=C(\varepsilon^{(k)})$, which completes the proof of the proposition. \hfill$\Box$\\
As one of our main results, we finally present and prove the verification theorem for the finite state space of the regime-switching process $Y$ in the next proposition.
\begin{proposition}\label{prop:verithemfinite}
Let $\varphi(t,z)=(\varphi(t,i,z);\ i\in D_n)^{\top}$ with $(t,z)\in[0,T]\times{\cal S}$ be the unique solution of DPE~\eqref{eq:hjbeqn}. For $(t,i,z)\in[0,T]\times D_n\times{\cal S}$, define
\begin{align}\label{eq:pistar}
\pi^*(t,i,z):={\rm diag}((1-z_j)_{j=1}^N)\argmin_{\pi\in U}\tilde{H}\left(\pi;i,z,(\varphi(t,i,z^j);\ j=0,1,\ldots,N)\right),
\end{align}
where $\tilde{H}(\pi;i,z,\bar{f}(z))$ is given by \eqref{eq:H}. Let $\tilde{\pi}^*=(\tilde{\pi}^*(t))_{t\in[0,T]}$ with $\tilde{\pi}^*(t):=\pi^*(t,Y(t-),Z(t-))$. Then $\tilde{\pi}^*\in\tilde{\cal U}$ and it is the optimal feedback strategy, i.e., it holds that
\begin{align}\label{optimeq}
-\frac{2}{\theta}\log\Ex_{t,i,z}^{\tilde{\pi}^*,\theta}\left[\exp\left(\frac{\theta}{2}\int_t^TL(\tilde{\pi}^*(s);Y(s),Z(s))ds\right)\right]=\bar{V}(t,i,z)=-\frac2\theta\log\varphi(t,i,z).
\end{align}
\end{proposition}
\begin{proof} From Proposition~\ref{coro:optimal-strategy}, it follows that $\tilde{\pi}^*$ is a bounded and predictable process taking values on $U$. We next prove that $\tilde{\pi}^*$ is uniformly away from $1$. In fact, for fixed $(i,z,x)\in D_n\times\mathcal{S}\times(0,\infty)^{N+1}$, we have that $\tilde{H}\left(\pi;i,z,x\right)$ is strictly convex w.r.t. $\pi\in U$, thus $\Phi(i,z,x):=\argmin_{\pi\in U}\tilde{H}\left(\pi;i,z,x\right)$ is well-defined. Notice that $\Phi(i,z,\cdot)$ maps $(0,\infty)^{N+1}$ to $U$ and satisfies the first-order condition $\frac{\partial\tilde{H}}{\partial\pi_j}\left(\Phi(i,z,x);i,z,x\right)=0$ for $j=1,\ldots,N$.
Then, Implicit Function Theorem yields that $\Phi(i,z,x)$ is continuous in $x$. Further, for $j=1,\ldots,N,$ if $Z_j(t-)=0$, the first-order condition gives that
\begin{align}\label{eq:pistarbelow}
(1-\tilde\pi^*_j(t))^{-\frac\theta2-1}=&\bigg[\big(\mu_j(Y(t-))-r(Y(t-))\big)-\frac\theta2\left(1+\frac\theta2\right)\sum_{i=1}^N\big(\sigma^\top(Y(t-))\sigma(Y(t-))\big)_{ji}\tilde\pi^*_i(t)\nonumber\\
&+\frac\theta2\lambda_j(Y(t-),Z(t-))\bigg]\frac{\varphi(t,Y(t-),Z(t-))}{\lambda_j(Y(t-),Z(t-))\varphi(t,Y(t-),Z^j(t-))}.
\end{align}
Note that, for all $(i,z)\in D_n\times{\cal S}$, $\varphi(\cdot,i,z)$ has a strictly positive lower bound by \eqref{eq:comparison0}. Together with Proposition~\ref{coro:optimal-strategy}, it follows that there exists a constant $C>0$ such that $\sup_{t\in[0,T]}(1-\tilde\pi^*_j(t))^{-\frac\theta2-1}\leq C$ for all $j=1,\ldots,N$. Hence, the estimate~\eqref{eq:pistarbelow} yields that
$\tilde{\pi}^*$ is uniformly bounded away from $1$. Thus, the following generalized Novikov's condition holds:
\begin{align}\label{eq:integral-cond}
\Ex\left[\exp\left(\frac{\theta^2}{8}\int_0^T\left|\sigma(Y(t))^{\top}\tilde{\pi}^*(t)\right|^2dt+\sum_{j=1}^N\int_0^T\left|(1-\tilde{\pi}_j^*(t))^{-\frac{\theta}{2}}-1\right|^2\lambda_j(Y(t),Z(t))dt\right)\right]<+\infty.
\end{align}
The above Novikov's condition \eqref{eq:integral-cond} implies that $\tilde{\pi}^*$ is admissible. We next prove \eqref{optimeq}. Recall that $\varphi(t,z)=(\varphi(t,i,z);\ i\in D_n)^{\top}$ with $(t,z)\in[0,T]\times{\cal S}$ is the unique classical solution of \eqref{eq:hjbeqn}. Note that there exists a constant $C_L=C_L(n,i,z)>0$ such that $L(\pi;i,z)>-C_L$ for $(\pi,i,z)\in U\times D_n\times{\cal S}$. For $m\geq1$, set $L_m(\pi;i,z):=L(\pi;i,z)\wedge m$. Then $L_m$ is bounded and $L_m(\pi;i,z)\uparrow L(\pi;i,z)$ as $m\to\infty$. Therefore, for any admissible strategy $\tilde{\pi}\in\tilde{\cal U}$, It\^o's formula gives that, for $0\leq t<s\leq T$,
\begin{align}\label{eq:itoveri0}
&\Ex_{t,i,z}^{\tilde{\pi},\theta}\left[\varphi(s,Y(s),Z(s))\exp\left(\frac{\theta}{2}\int_t^sL_m(\tilde{\pi}(u);Y(u),Z(u))du\right)\right]\nonumber\\
&\quad =\varphi(t,i,z)+\Ex_{t,i,z}^{\tilde{\pi},\theta}\Bigg[\int_t^s\exp\left(\frac{\theta}{2}\int_t^uL_m(\tilde{\pi}(v);Y(v),Z(v))dv\right)\nonumber\\
&\quad\qquad\times\Bigg\{\frac{\partial\varphi(u,Y(u),Z(u))}{\partial t}+\sum_{l\neq Y(u)}q_{Y(u)l}\left(\varphi(u,l,Z(u))-\varphi(u,Y(u),Z(u))\right)\nonumber\\
&\qquad\qquad\quad+\tilde{H}\left(\tilde{\pi}(u);Y(u),Z(u),(\varphi(u,Y(u),Z^j(u));\ j=0,1,\ldots,N)\right)\Bigg\}du\Bigg]\nonumber\\
&\qquad\quad+\Ex_{t,i,z}^{\tilde{\pi},\theta}\Bigg[\int_t^s\exp\left(\frac{\theta}{2}\int_t^uL_m(\tilde{\pi}(v);Y(v),Z(v))dv\right)\varphi(u,Y(u),Z(u))\nonumber\\
&\qquad\qquad\qquad\qquad\times(L_m-L)(\tilde{\pi}(u);Y(u),Z(u))du\Bigg]\nonumber\\
&\quad\geq\varphi(t,i,z)+\Ex_{t,i,z}^{\tilde{\pi},\theta}\Bigg[\int_t^s\exp\left(\frac{\theta}{2}\int_t^uL_m(\tilde{\pi}(v);Y(v),Z(v))dv\right)\varphi(u,Y(u),Z(u))\nonumber\\
&\qquad\qquad\qquad\qquad\times(L_m-L)(\tilde{\pi}(u);Y(u),Z(u))du\Bigg].
\end{align}
In the last inequality above, the integral term inside the expectation is nonpositive because $L_m\leq L$. On the other hand, since $\varphi$ is bounded and positive, this integral also admits the following lower bound: $\Px_{t,i,z}^{\tilde{\pi},\theta}$-a.s., for some constant $C_{\varphi}>0$,
\begin{align*}
&\int_t^s\exp\left(\frac{\theta}{2}\int_t^uL_m(\tilde{\pi}(v);Y(v),Z(v))dv\right)\varphi(u,Y(u),Z(u))(L_m-L)(\tilde{\pi}(u);Y(u),Z(u))du\nonumber\\
&\quad\geq-C_{\varphi}\int_t^s\exp\left(\frac{\theta}{2}\int_t^u[L(\tilde{\pi}(v);Y(v),Z(v))+C_L]dv\right)[L(\tilde{\pi}(u);Y(u),Z(u))+C_L]du\nonumber\\
&\quad=\frac{2 C_{\varphi}}{\theta}\left[1-e^{\frac{\theta}{2}C_L(s-t)}\exp\left(\frac{\theta}{2}\int_t^sL(\tilde{\pi}(u);Y(u),Z(u))du\right)\right].
\end{align*}
Taking $s=T$ above and letting $m\to\infty$, it follows from the Dominated Convergence Theorem that
\begin{align}\label{eq:itoveri}
\Ex_{t,i,z}^{\tilde{\pi},\theta}\left[\varphi(T,Y(T),Z(T))\exp\left(\frac{\theta}{2}\int_t^TL(\tilde{\pi}(u);Y(u),Z(u))du\right)\right]\geq\varphi(t,i,z).
\end{align}
Noting that $\varphi(T,i,z)=1$ in \eqref{eq:itoveri}, we obtain that
\begin{align}\label{infoverphi}
\inf_{\tilde{\pi}\in\tilde{\cal U}}\Ex_{t,i,z}^{\tilde{\pi},\theta}\left[\exp\left(\frac{\theta}{2}\int_t^TL(\tilde{\pi}(u);Y(u),Z(u))du\right)\right]\geq\varphi(t,i,z).
\end{align}
On the other hand, from \eqref{eq:itoveri0} and \eqref{eq:pistar}, it follows that, for $0\leq t<s\leq T$,
\begin{align}\label{infoverphi2}
\Ex_{t,i,z}^{\tilde{\pi}^*,\theta}\left[\exp\left(\frac{\theta}{2}\int_t^TL(\tilde{\pi}^*(u);Y(u),Z(u))du\right)\right]=\varphi(t,i,z).
\end{align}
Because $\tilde{\pi}^*$ is admissible, i.e., $\tilde{\pi}^*\in\tilde{\cal U}$, we deduce from \eqref{infoverphi2} that
\begin{align}\label{phioverinf}
\varphi(t,i,z)\geq\inf_{\tilde{\pi}\in\tilde{\cal U}}\Ex_{t,i,z}^{\tilde{\pi},\theta}\left[\exp\left(\frac{\theta}{2}\int_t^TL(\tilde{\pi}(u);Y(u),Z(u))du\right)\right].
\end{align}
Combining \eqref{infoverphi} and \eqref{phioverinf}, we have that
\begin{align}\label{phi=inf}
\varphi(t,i,z)=\inf_{\tilde{\pi}\in\tilde{\cal U}}\Ex_{t,i,z}^{\tilde{\pi},\theta}\left[\exp\left(\frac{\theta}{2}\int_t^TL(\tilde{\pi}(u);Y(u),Z(u))du\right)\right].
\end{align}
The equality above is equivalent to $\varphi(t,i,z)=e^{-\frac\theta2\bar{V}(t,i,z)}$ due to \eqref{eq:J}. Hence, Eq.~\eqref{infoverphi2} together with \eqref{phi=inf} imply that \eqref{optimeq} holds, which ends the proof.
\end{proof}
\subsection{Countable State Case of Regime-Switching Process}
This section focuses on the existence of classical solutions to the original DPE~\eqref{eq:dpe3} and the corresponding verification theorem when the state space of the Markov chain
$Y$ is the countably infinite set $\mathbb{Z}_+=\{1,2,\ldots\}$. The truncation method used in the finite state case fails to be applicable in the case $\mathbb{Z}_+$. Instead, we shall establish a sequence of appropriately approximating risk sensitive control problems with finite state set $D_n^0:=D_n\cup\{0\}$ for $n\in\mathbb{Z}_+$. Building upon the results in the finite state case in Section~\ref{sec:finite-states}, and by establishing valid uniform estimates, we can arrive at the desired conclusion that the smooth value functions corresponding to the above approximating control problems converge to the classical solution of \eqref{eq:dpe3} with countably infinite set $\mathbb{Z}_+$ as $n$ goes to infinity.
Recall $D_n=\{1,2,\dots,n\}$ for the fixed $n\in\mathbb{Z}_+$. We define the truncated counterpart of the regime-switching process $Y$ as: for $t\in[0,T]$,
\begin{align}\label{eq:Yn}
Y^{(n)}(t):=Y(t)\mathds{1}_{\{\tau_n>t\}},\qquad \tau_n^t:=\inf\{s\geq t;\ Y(s)\notin D_n\},
\end{align}
where $\tau_n:=\tau_n^0$ for $n\in\mathbb{Z}_+$. By convention, we set $\inf\emptyset=+\infty$. Then, the process $Y^{(n)}=(Y^{(n)}(t))_{t\in[0,T]}$ is a continuous-time Markov chain with finite state space $D_n^0$. Here $0$ is understood as an absorbing state. The generator of $Y^{(n)}$ can therefore be given by the following $(n+1)$-dimensional square matrix:
\begin{align}\label{eq:An}
A_n:=\left[\begin{matrix}
0 & 0 & \dots & 0 \\
q^{(n)}_{10} & q_{11} & \dots & q_{1n} \\
q^{(n)}_{20} & q_{21} & \dots & q_{2n} \\
\vdots & \vdots & \vdots & \vdots \\
q^{(n)}_{n0} & q_{n1} & \dots & q_{nn}
\end{matrix}\right],
\end{align}
where $q^{(n)}_{m0}=-\sum_{i=1}^nq_{mi}=\sum_{{i\neq m,i>n}}q_{mi}$ for all $m\in D_n$. Thus, $Y^{(n)}$ is conservative. Here $q_{ij}$ for $i,j=1,\ldots,n$ are the same as given in Subsection~\ref{sub:RSP}.
Since $0$ is an absorbing state, we arrange values for the model coefficients at this state. More precisely, we set $r(0)=0$, $\mu(0)=0$, $\lambda(0,z)=\frac\theta2e_N^{\top}$ for all $z\in{\cal S}$, and $\sigma(0)\sigma(0)^\top=\frac4{2+\theta}I_{N}$. Here $I_N$ denotes the $N$-dimensional identity matrix.
Then, it follows from \eqref{eq:L0} and Taylor's expansion that $L(\pi;0,z)=\|\pi\|^2+\sum_{j=1}^N(1-z_j)[(1-\pi_j)^{-\frac{\theta}{2}}-1-\frac\theta2\pi_j]\geq0$
for all $(\pi,z)\in U\times{\cal S}$.
We next introduce the approximating risk-sensitive control problems where regime-switching processes take values on $D_n^0$. To this end, define $\tilde{\cal U}_n$ as the admissible control set $\tilde{\cal U}$, but the regime-switching process $Y$ is replaced with $Y^{(n)}$.
We then consider the following objective functional given by, for $\tilde{\pi}\in\tilde{\cal U}_n$ and $(t,i,z)\in[0,T]\times{D_n^0}\times{\cal S}$,
\begin{align}\label{eq:Jn00}
J_n(\tilde{\pi};t,i,z):=&\Ex_{t,i,z}^{\tilde{\pi},\theta}\left[\exp\left(\frac{\theta}{2}\int_t^{T\wedge{\tau_n^t}}L(\tilde{\pi}(s);Y(s),Z(s))ds\right)\right]\nonumber\\
=&\Ex_{t,i,z}^{\tilde{\pi},\theta}\left[\exp\left(\frac{\theta}{2}\int_t^{T\wedge{\tau_n^t}}L(\tilde{\pi}(s);Y^{(n)}(s),Z(s))ds\right)\right].
\end{align}
Here, the risk-sensitive cost function $L(\pi;i,z)$ for $(\pi,i,z)\in U\times\mathbb{Z}_+\times{\cal S}$ is given by \eqref{eq:L0}. In order to apply the results in the finite state case obtained in Section~\ref{sec:finite-states}, we also need to propose the following objective functional given by, for $\tilde{\pi}\in\tilde{\cal U}_n$ and $(t,i,z)\in[0,T]\times{D_n^0}\times{\cal S}$,
\begin{align}\label{eq:Jn}
\tilde{J}_n(\tilde{\pi};t,i,z):=&\Ex_{t,i,z}^{\tilde{\pi},\theta}\left[\exp\left(\frac{\theta}{2}\int_t^{T}{L}(\tilde{\pi}(s);Y^{(n)}(s),Z(s))ds\right)\right].
\end{align}
We will consider the auxiliary value function defined by
\begin{align}\label{eq:Vnvalue}
V_n(t,i,z):=-\frac{2}{\theta}\inf_{\tilde{\pi}\in\tilde{\cal U}_n}\log\tilde{J}_n(\tilde{\pi};t,i,z),\qquad (t,i,z)\in[0,T]\times D_n^0\times{\cal S}.
\end{align}
We have the following characterization of the value function $V_n$ which will play an important role in the study of the convergence of $V_n$ as $n\to\infty$.
\begin{lemma}\label{lem:jn=tildeJn}
It holds that $V_n(t,i,z)=-\frac{2}{\theta}\inf_{\tilde{\pi}\in\tilde{\cal U}_n}\log J_n(\tilde{\pi};t,i,z)$ for $(t,i,z)\in[0,T]\times D_n^0\times{\cal S}$.
\end{lemma}
\begin{proof}
Using \eqref{eq:Jn00} and \eqref{eq:Jn}, we have that, for all $\tilde{\pi}\in\tilde{\mathcal{U}}_n$,
\begin{align*}
&\log\tilde{J}_n(\tilde{\pi};t,i,z)\\
=&\log\Ex_{t,i,z}^{\tilde{\pi},\theta}\left[\exp\left(\frac{\theta}{2}\int_t^{T}{L}(\tilde{\pi}(s);Y^{(n)}(s),Z(s))ds\right)\right]\nonumber\\
=&\log\Ex_{t,i,z}^{\tilde{\pi},\theta}\left[\exp\left(\frac{\theta}{2}\int_t^{T\wedge\tau^t_n}{L}(\tilde{\pi}(s);Y^{(n)}(s),Z(s))ds
+\frac{\theta}{2}\int_{T\wedge\tau^t_n}^T{L}(\tilde{\pi}(s);Y^{(n)}(s),Z(s))ds\right)\right]\nonumber\\
=&\log\Ex_{t,i,z}^{\tilde{\pi},\theta}\left[\exp\left(\frac{\theta}{2}\int_t^{T\wedge\tau^t_n}{L}(\tilde{\pi}(s);Y^{(n)}(s),Z(s))ds
+\frac{\theta}{2}\int_{T\wedge\tau^t_n}^T{L}(\tilde{\pi}(s);0,Z(s))ds\right)\right]\nonumber\\
\geq & \log\Ex_{t,i,z}^{\tilde{\pi},\theta}\left[\exp\left(\frac{\theta}{2}\int_t^{T\wedge\tau^t_n}L(\tilde{\pi}(s);Y^{(n)}(s),Z(s))ds\right)\right]\nonumber\\
=&\log J_n(\tilde{\pi};t,i,z)\geq\inf_{\tilde{\pi}\in\tilde{\mathcal{U}}_n}\log J_n(\tilde{\pi};t,i,z),
\end{align*}
where we used the positivity of ${L}({\pi};0,z)$ for all $(\pi,z)\in U\times{\cal S}$. As $\theta>0$, we obtain from \eqref{eq:Vnvalue} that
\begin{align}\label{V<J}
V_n(t,i,z)&\leq-\frac2\theta\inf_{\tilde{\pi}\in\tilde{\mathcal{U}}_n}\log J_n(\tilde{\pi};t,i,z).
\end{align}
On the other hand, for any $\tilde{\pi}\in\tilde{\mathcal{U}}_n$, define $\hat{\pi}(t)=\tilde{\pi}(t)\mathds{1}_{\{t\leq\tau_n\}}$ for $t\in[0,T]$. It is clear that $\hat{\pi}\in\tilde{\mathcal{U}}_n$, and it holds that $\Gamma^{\hat{\pi},\theta}(t,T):=\frac{\Gamma^{\hat{\pi},\theta}(T)}{\Gamma^{\hat{\pi},\theta}(t)}
=\frac{\Gamma^{\tilde{\pi},\theta}(T\wedge\tau^t_n)}{\Gamma^{\tilde{\pi},\theta}(t)}
=:\Gamma^{\tilde{\pi},\theta}(t,T\wedge\tau^t_n)$. Hence
\begin{align*}
\log J_n(\tilde{\pi};t,i,z)
&=\log\Ex_{t,i,z}\left[\Gamma^{\tilde{\pi},\theta}(t,T)\exp\left(\frac{\theta}{2}\int_t^{T\wedge\tau^t_n}L(\tilde{\pi}(s);Y^{(n)}(s),Z(s))ds\right)\right]\nonumber\\
&=\log\Ex_{t,i,z}\left[\exp\left(\frac{\theta}{2}\int_t^{T\wedge\tau^t_n}L(\tilde{\pi}(s);Y^{(n)}(s),Z(s))ds\right)
\Ex\left[\Gamma^{\tilde{\pi},\theta}(t,T)|\mathcal{F}_{T\wedge\tau^t_n}\right]\right]\nonumber\\
&=\log\Ex_{t,i,z}\left[\Gamma^{\tilde{\pi},\theta}(t,T\wedge\tau^t_n)\exp\left(\frac{\theta}{2}\int_t^{T\wedge\tau^t_n}
L(\tilde{\pi}(s);Y^{(n)}(s),Z(s))ds\right)\right]\nonumber\\
&=\log\Ex_{t,i,z}^{\hat{\pi},\theta}\left[\exp\left(\frac{\theta}{2}\int_t^{T\wedge\tau^t_n}L(\hat{\pi}(s);Y^{(n)}(s),Z(s))ds\right)\right]\nonumber\\
&=\log\Ex_{t,i,z}^{\hat{\pi},\theta}\left[\exp\left(\frac{\theta}{2}\int_t^{T\wedge\tau^t_n}L(\hat{\pi}(s);Y^{(n)}(s),Z(s))ds+\frac{\theta}{2}\int_{T\wedge\tau^t_n}^TL(0;0,Z(s))ds\right)\right]\nonumber\\
&=\log\Ex_{t,i,z}^{\hat{\pi},\theta}\left[\exp\left(\frac{\theta}{2}\int_t^{T}{L}(\hat{\pi}(s);Y^{(n)}(s),Z(s))ds\right)\right]\nonumber\\
&=\log\tilde{J}_n(\hat{\pi};t,i,z)\geq\inf_{\tilde{\pi}\in\tilde{\mathcal{U}}_n}\log\tilde{J}_n(\tilde{\pi};t,i,z).
\end{align*}
The above inequality and the arbitrariness of $\tilde{\pi}$ jointly give that
\begin{align}\label{J<V}
-\frac2\theta\inf_{\tilde{\pi}\in\tilde{\mathcal{U}}_n}\log J_n(\tilde{\pi};t,i,z)\leq V_n(t,i,z).
\end{align}
Then, the desired result follows by combining \eqref{V<J} and \eqref{J<V} above.
\end{proof}
Lemma~\ref{lem:jn=tildeJn} together with Theorem~\ref{thm:solutionk} and Proposition~\ref{prop:verithemfinite} in Section \ref{sec:finite-states} for the finite state space of $Y$ imply the following conclusion:
\begin{proposition}\label{prop:Vnmonotone00}
Let $n\in\mathbb{Z}_+$. Recall the value function $V_n(t,i,z)$ defined by \eqref{eq:Vnvalue}. We define $\varphi_n(t,i,z):=\exp(-\frac\theta2V_n(t,i,z))$. Then $\varphi_n(t,i,z)$ is the unique solution of the recursive system of DPEs given by
\begin{align}\label{eq:dpe4}
0=&\frac{\partial \varphi_n(t,i,z)}{\partial t}+\sum_{l\neq i,1\leq l\leq n}q_{il}\left(\varphi_n(t,l,z)-\varphi_n(t,i,z)\right)+q^{(n)}_{i0}(\varphi_n(t,0,z)-\varphi_n(t,i,z))\nonumber\\
&+\inf_{\pi\in U}\tilde{H}\left(\pi;i,z,(\varphi_n(t,i,z^j);\ j=0,1,\ldots,N)\right),
\end{align}
where $(t,i,z)\in[0,T)\times D_n^0\times{\cal S}$ and the terminal condition is given by $\varphi_n(T,i,z)=1$ for all $(i,z)\in D_n^0\times{\cal S}$. Moreover, it holds that $\varphi_n(t,i,z)\in[0,1]$ and it is decreasing in $n$ for all $(t,i,z)\in[0,T]\times D_n^0\times{\cal S}$.
\end{proposition}
\begin{proof}
Notice that the state space of $Y^{(n)}$ is given by $D_n^0$ which is a finite set. By observing the definition of the value function $V_n$ given by \eqref{eq:Vnvalue},
we have that $\varphi_n(t,i,z)$ is the unique solution of the recursive system \eqref{eq:dpe4} by applying Theorem~\ref{thm:solutionk} and
Proposition~\ref{prop:verithemfinite} in Section \ref{sec:finite-states} for the regime-switching process with the finite state space. In order to verify that $\varphi_n(t,i,z)\in[0,1]$ and it is decreasing in $n$, it is sufficient to prove that $V_n(t,i,z)\geq0$ and it is nondecreasing in $n$.
Thanks to Lemma~\ref{lem:jn=tildeJn}, and $L(0,i,z)=-r(i)\leq0$ by \eqref{eq:L0}, also note that $\tilde{\pi}_0(t)\equiv0$ is admissible (i.e., $\tilde{\pi}_0\in\tilde{\cal U}_n$), then
\begin{align*}
\inf_{\tilde{\pi}\in\tilde{\cal U}_n}\log J_n(\tilde{\pi};t,i,z)&\leq \log J_n(\tilde{\pi}_0;t,i,z)
=\log\Ex_{t,i,z}^{\tilde{\pi}_0,\theta}\left[\exp\left(\frac{\theta}{2}\int_t^{T\wedge\tau_n^t}L(0;Y(s),Z(s))ds\right)\right]\nonumber\\
&=\log\Ex_{t,i,z}^{\tilde{\pi}_0,\theta}\left[\exp\left(-\frac{\theta}{2}\int_t^{T\wedge\tau_n^t}r(Y(s))ds\right)\right]\leq0,
\end{align*}
as the interest rate process is nonnegative. This gives that $V_n(t,i,z)\geq0$ for all $(t,i,z)\in[0,T]\times{D_n^0}\times{\cal S}$. On the other hand, for any $\tilde{\pi}\in\tilde{\cal U}_n$, we define $\hat{\pi}(t):=\tilde{\pi}(t)\mathds{1}_{\{\tau_n\geq t\}}$ for $t\in[0,T]$.
It is clear that $\hat{\pi}\in\tilde{\cal U}_n\cap \tilde{\cal U}_{n+1}$. Recall the density process given by \eqref{eq:Gam}, we have that, for $\tilde{\pi},\hat{\pi}\in\tilde{\cal U}_n$,
\begin{align*}
\Gamma^{\tilde{\pi},\theta}&={\cal E}(\Pi^{\tilde{\pi},\theta}),\ \Pi^{\tilde{\pi},\theta}=-\frac{\theta}{2}\int_0^{\cdot}\tilde{\pi}(s)^{\top}\sigma(Y^{(n)}(s))dW(s)+\sum_{j=1}^N\int_0^{\cdot}\{(1-\tilde{\pi}_j(s))^{-\frac{\theta}{2}}-1\}dM_j(s);\nonumber\\
\Gamma^{\hat{\pi},\theta}&={\cal E}(\Pi^{\hat{\pi},\theta}),\ \Pi^{\hat{\pi},\theta}=-\frac{\theta}{2}\int_0^{\cdot}\hat{\pi}(s)^{\top}\sigma(Y^{(n)}(s))dW(s)+\sum_{j=1}^N\int_0^{\cdot}\{(1-\hat{\pi}_j(s))^{-\frac{\theta}{2}}-1\}dM_j(s).
\end{align*}
This shows that $\Gamma^{\tilde{\pi},\theta}(t\wedge\tau_n)=\Gamma^{\hat{\pi},\theta}(t)$ for $t\in[0,T]$. Then, we deduce from \eqref{eq:Jn00} that
\begin{align}
\log J_n(\tilde{\pi};t,i,z)=&\log\Ex_{t,i,z}^{\tilde{\pi},\theta}\left[\exp\left(\frac{\theta}{2}\int_t^{T\wedge \tau_n^t}L(\tilde{\pi}(s);Y(s),Z(s))ds\right)\right]\nonumber\\
\geq&\log\Ex_{t,i,z}^{\tilde{\pi},\theta}\left[\exp\left(\frac{\theta}{2}\int_t^{T\wedge \tau_n^t}L(\tilde{\pi}(s);Y(s),Z(s))ds+\frac{\theta}{2}\int_{T\wedge \tau_n^t}^{T\wedge \tau_{n+1}^t}L(0;Y(s),Z(s))ds\right)\right]\nonumber\\
=&\log\Ex_{t,i,z}^{\hat{\pi},\theta}\left[\exp\left(\frac{\theta}{2}\int_t^{T\wedge\tau_{n+1}^t}L(\hat{\pi}(s);Y(s),Z(s))ds\right)\right]\nonumber\\
=&\log J_{n+1}(\hat{\pi};t,i,z)\geq\inf_{\tilde{\pi}\in\tilde{\cal U}_{n+1}}\log J_{n+1}(\tilde{\pi};t,i,z).
\end{align}
Using \eqref{eq:Vnvalue} and Lemma~\ref{lem:jn=tildeJn}, it follows that $V_{n}(t,i,z)$ is nondecreasing in $n$ for fixed $(t,i,z)\in[0,T]\times D_n^0\times{\cal S}$. Thus, the conclusion of the proposition holds.
\end{proof}
By virtue of Proposition~\ref{prop:Vnmonotone00}, for any $(t,i,z)\in[0,T]\times\mathbb{Z}_+\times{\cal S}$, we set
$V^*(t,i,z):=\lim_{n\to\infty}V_n(t,i,z)$. Then, it holds that
\begin{align}\label{eq:varphistar}
\lim_{n\to\infty}\varphi_n(t,i,z)=\exp\left(-\frac\theta2V^*(t,i,z)\right)=:\varphi^*(t,i,z).
\end{align}
On the other hand, from Eq.~\eqref{eq:Vnvalue}, it is easy to see that $\varphi_n(t,0,z)=1$ for all $(t,z)\in[0,T]\times{\cal S}$. Then, Eq.~\eqref{eq:dpe4} above can be rewritten as:
\begin{align}\label{eq:dpe5}
\frac{\partial \varphi_n(t,i,z)}{\partial t}=&-q_{ii}\varphi_n(t,i,z)-\sum_{l\neq i,1\leq l\leq n}q_{il}\varphi_n(t,l,z)-\sum_{l>n}q_{il}\nonumber\\
&-\inf_{\pi\in U}\tilde{H}\left(\pi;i,z,(\varphi_n(t,i,z^j);\ j=0,1,\ldots,N)\right).
\end{align}
In terms of \eqref{eq:H}, we can conclude that, for $(\pi;i,z)\in U\times\mathbb{Z}_+\times{\cal S}$, $\tilde{H}(\pi;i,z,x)$ is concave in every component of $x\in[0,\infty)^{N+1}$, so is $\inf_{\pi\in U}\tilde{H}(\pi;i,z,x)$. We present the main result in this paper for the case of the countable state space.
\begin{theorem}\label{thm:existD}
Let $(t,i,z)\in[0,T]\times\mathbb{Z}_+\times{\cal S}$. Then, the limit function $\varphi^*(t,i,z)$ given in \eqref{eq:varphistar} above is a classical solution of the original DPE~\eqref{eq:dpe3}, i.e., it holds that
\begin{align*}
0=&\frac{\partial \varphi^*(t,i,z)}{\partial t}+\sum_{l\neq i}q_{il}\left[\varphi^*(t,l,z)-\varphi^*(t,i,z)\right]+\inf_{\pi\in U}\tilde{H}\left(\pi;i,z,(\varphi^*(t,i,z^j);\ j=0,1,\ldots,N)\right)
\end{align*}
with terminal condition $\varphi^*(T,i,z)=1$ for all $(i,z)\in\mathbb{Z}_+\times{\cal S}$.
\end{theorem}
The proof of Theorem~\ref{thm:existD} will be split into proving a sequence of auxiliary lemmas first. We show the following result as a preparation.
\begin{lemma}\label{lem:boundfordphi}
Let $(i,z)\in \mathbb{Z}_+\times{\cal S}$. Then $(\frac{\partial\varphi_n(t,i,z)}{\partial t})_{n\geq i}$ is uniformly bounded in $t\in[0,T]$.
\end{lemma}
\begin{proof}
We rewrite Eq.~\eqref{eq:dpe5} as in the following form:
\begin{align}\label{estm1}
&\frac{\partial \varphi_n(t,i,z)}{\partial t}=-q_{ii}\varphi_n(t,i,z)-\sum_{l\neq i,1\leq l\leq n}q_{il}\varphi_n(t,l,z)-\sum_{l>n}q_{il}\nonumber\\
&\qquad-\inf_{\pi\in U}\hat{H}\left(\pi;i,z,(\varphi_n(t,i,z^j);\ j=0,1,\ldots,N)\right)+C(i,z)\varphi_n(t,i,z),
\end{align}
where, for $(i,z)\in\mathbb{Z}_+\times{\cal S}$,
\begin{align}\label{estm2}
C(i,z)=\bigg|\inf_{\pi\in U}\bigg\{&-\frac{\theta}{2}r(i)-\frac{\theta}{2}\pi^{\top}(\mu(i)-r(i)e_n)+\frac{\theta}{4}\left(1+\frac{\theta}{2}\right)\left\|\sigma(i)^{\top}\pi\right\|^2\nonumber\\
&+\sum_{j=1}^N\left(-1-\frac{\theta}{2}\pi_j\right)(1-z_j)\lambda_j(i,z)\bigg\}\bigg|,
\end{align}
and the nonnegative function
\begin{align}\label{eq:hatH}
\hat{H}(\pi;i,z,\bar{f}(z)):=\tilde{H}(\pi;i,z,\bar{f}(z))+C(i,z)f(z).
\end{align}
Because $\hat{H}(\pi;i,z,x)$ is concave in every component of $x\in[0,\infty)^{N+1}$, $\Phi(x):=\inf_{\pi\in U}\hat{H}(\pi;i,z,x)$ is also concave in every component of $x\in[0,\infty)^{N+1}$. It follows from Proposition~\ref{prop:Vnmonotone00} that $x^{(n)}:=(\varphi_n(t,i,z^j);\ j=0,1,\ldots,N)\in[0,1]^{N+1}$. Using Lemma~\ref{lem:conbound}, there exists a constant $C>0$ which is independent of $x^{(n)}$ such that $0\leq \Phi(x^{(n)})\leq C$ for all $n\in\mathbb{Z}_+$. Further, for fixed $(i,z)\in\mathbb{Z}_+\times{\cal S}$,
\begin{align*}
&\left|-q_{ii}\varphi_n(t,i,z)-\sum_{l\neq i,1\leq l\leq n}q_{il}\varphi_n(t,l,z)-\sum_{l>n}q_{il}+C(i,z)\varphi_n(t,i,z)\right|
\leq-2q_{ii}+C(i,z).
\end{align*}
The desired result follows from Eq.~\eqref{estm1}.
\end{proof}
\begin{lemma}\label{lem:unfmconforphi}
Let $(i,z)\in\mathbb{Z}_+\times{\cal S}$, then $(\varphi_n(t,i,z))_{n\geq i}$ (decreasingly) converges to $\varphi^*(t,i,z)$ uniformly in $t\in[0,T]$ as $n\to\infty$.
\end{lemma}
\begin{proof}
By Proposition~\ref{prop:Vnmonotone00}, Lemma~\ref{lem:boundfordphi}, and the Arzel\`a--Ascoli Theorem, we have that $(\varphi_n(\cdot,i,z))_{n\geq i}$ contains a uniformly convergent subsequence. Moreover, Proposition~\ref{prop:Vnmonotone00} and \eqref{eq:varphistar} yield that $\varphi_n(t,i,z)$ (decreasingly) converges to $\varphi^*(t,i,z)$ uniformly in $t\in[0,T]$ as $n\to\infty$.
\end{proof}
\begin{lemma}\label{lem:phinlobnd}
Let $n\in\mathbb{Z}_+$. Consider the following linear system: for $(t,i,z)\in(0,T]\times D_n^0\times{\cal S}$,
\begin{align}\label{eq:phin}
\frac{\partial\phi_n(t,i,z)}{\partial t}=&(q_{ii}-C(i,z))\phi_n(t,i,z)+\sum_{l\neq i,1\leq l\leq n}q_{il}\phi_n(t,l,z),\nonumber\\
\phi_n(0,i,z)=&1,
\end{align}
where $C(i,z)$ is given by \eqref{estm2}. Then, there exists a measurable function $\phi^*(t,i,z)$ such that $\phi_n(t,i,z)\nearrow\phi^*(t,i,z)$ as $n\to\infty$ for each fixed $(t,i,z)$. Moreover, it holds that $0<\phi_n(T-t,i,z)\leq\varphi_n(t,i,z)\leq1$ for $(t,i,z)\in[0,T]\times D_n^0\times{\cal S}$.
\end{lemma}
\begin{proof}
Let $(t,i,z)\in[0,T]\times D_n^0\times{\cal S}$ and define $g_n(t,i,z):=\varphi_n(T-t,i,z)$. It follows from Eq.~\eqref{estm1} that $g_n(\cdot,i,z)\in C^1((0,T])\cap C([0,T])$ for each fixed $(i,z)$ and satisfies that
\begin{align}\label{g_n}
\frac{\partial g_n(t,i,z)}{\partial t}=&(q_{ii}-C(i,z))g_n(t,i,z)+\sum_{l\neq i,1\leq l\leq n}q_{il}g_n(t,l,z)+\sum_{l>n}q_{il}\nonumber\\
&+Q(t,i,z,g_n(t,i,z)),\nonumber\\
g_n(0,i,z)=&1,
\end{align}
where $Q(t,i,z,x):=\inf_{\pi\in U}\hat{H}\left(\pi;i,z,x,g_n(t,i,z^1),\ldots,g_n(t,i,z^N)\right)$ for $x\in[0,\infty)$. We have from \eqref{eq:hatH} that $Q(t,i,z,x)\geq0$ for all $(t,x)\in[0,T]\times[0,\infty)$. Then $\sum_{l>n}q_{il}+Q(t,i,z,x)\geq0$. Note that the linear part of Eq.~\eqref{g_n} satisfies the $K$-type condition. Then, using the comparison result of Lemma~\ref{comparison}, it shows that $g_n(t,i,z)\geq\phi_n(t,i,z)$, and hence $\varphi_n(t,i,z)\geq\phi_n(T-t,i,z)$. Moreover, we deduce from Lemma \ref{lem:sol-hjben2} that $\phi_n(t,i,z)>0$. By virtue of Eq.~\eqref{eq:phin}, we have that $\phi_{n+1}(t,i,z)$ with $(t,i,z)\in[0,T]\times D_{n+1}^0\times{\cal S}$ satisfies that
\begin{equation}\label{eq:phin+1}
\begin{split}
\frac{\partial\phi_{n+1}(t,i,z)}{\partial t}=&(q_{ii}-C(i,z))\phi_{n+1}(t,i,z)+\sum_{l\neq i,1\leq l\leq n}q_{il}\phi_{n+1}(t,l,z)\\
&+q_{i,n+1}\phi_{n+1}(t,n+1,z),\\
\phi_{n+1}(0,i,z)=&1.
\end{split}
\end{equation}
Because $q_{i,n+1}\phi_{n+1}(t,n+1,z)\geq0$ for $i\in D_n^0$, Lemma~\ref{comparison} shows that $\phi_{n+1}(t,i,z)\geq\phi_n(t,i,z)$ for all $(t,i,z)\in[0,T]\times{D_n^0}\times{\cal S}$. Therefore, there exists a measurable function $\phi^*(t,i,z)$ such that $\phi_n(t,i,z)\nearrow\phi^*(t,i,z)$ as $n\to\infty$ for each fixed $(t,i,z)\in[0,T]\times\mathbb{Z}_+\times{\cal S}$.
\end{proof}
\begin{lemma}\label{lem:lobndphistar}
Let $(i,z)\in\mathbb{Z}_+\times{\cal S}$. Then, there exists a positive constant $\delta=\delta(i,z)$ such that $\varphi^*(t,i,z)>\delta$ for all $t\in[0,T]$.
\end{lemma}
\begin{proof}
From Lemma~\ref{lem:phinlobnd}, we have that $\varphi_n(t,i,z)\geq\phi_n(T-t,i,z)$. Letting $n\rightarrow\infty$ and using Lemma~\ref{lem:unfmconforphi}, it follows that $\varphi^*(t,i,z)\geq\phi^*(T-t,i,z)\geq\phi_i(T-t,i,z)$. As $\phi_i(t,i,z)>0$ is continuous in $t\in[0,T]$, there exists a positive constant $\delta=\delta(i,z)$ such that $\inf_{t\in[0,T]}\phi_i(t,i,z)\geq\delta$. Therefore $\varphi^*(t,i,z)\geq\delta$ for all $t\in[0,T]$.
\end{proof}
We can finally conclude the proof of Theorem~\ref{thm:existD} using all previous results.
\noindent{\it Proof of Theorem~\ref{thm:existD}.}\quad We first prove that there exists a measurable function $\tilde{\varphi}(t,i,z)$ on $(t,i,z)\in[0,T]\times\mathbb{Z}_+\times{\cal S}$ such that $\lim_{n\to\infty}\frac{\partial\varphi_n(t,i,z)}{\partial t}=\tilde{\varphi}(t,i,z)$ for $(t,i,z)\in[0,T]\times\mathbb{Z}_+\times{\cal S}$. In fact, note that for $(t,i,z)\in[0,T]\times D_n^0\times{\cal S}$, $0\leq\varphi_{n+1}(t,i,z)\leq\varphi_{n}(t,i,z)\leq1$ for $n\in\mathbb{Z}_+$. Then
\begin{align*}
\sum_{l\neq i,1\leq l\leq n}q_{il}\varphi_n(t,l,z)+\sum_{l>n}q_{il}\geq\sum_{l\neq i,1\leq l\leq n+1}q_{il}\varphi_{n+1}(t,l,z)+\sum_{l>n+1}q_{il}.
\end{align*}
This yields from \eqref{eq:varphistar} that $q_{ii}\varphi_n(t,i,z)\nearrow q_{ii}\varphi^*(t,i,z)$ as $n\to\infty$, and
\begin{align}\label{eq:conver11}
\sum_{l\neq i,1\leq l\leq n}q_{il}\varphi_n(t,l,z)+\sum_{l>n}q_{il}\searrow&\sum_{l\neq i,l\geq 1}q_{il}\varphi^*(t,l,z).
\end{align}
On the other hand, let $\Phi(x):=\inf_{\pi\in U}\tilde{H}(\pi;i,z,x)$ for $x\in[0,\infty)^{N+1}$. Then $\Phi(x):[0,\infty)^{N+1}\to\R$ is concave in every component of $x$.
Let $x^*(t):=(\varphi^*(t,i,z^j);\ j=0,1,\ldots,N)$ and $x^{(n)}(t):=(\varphi_n(t,i,z^j);\ j=0,1,\ldots,N)$ for $n\in\mathbb{Z}_+$. Then $0\leq x^*(t)\leq x^{(n)}(t)$ for $n\in\mathbb{Z}_+$ and $\lim_{n\to\infty}x^{(n)}(t)=x^*(t)$ using \eqref{eq:varphistar}. Moreover, Lemma~\ref{lem:lobndphistar} gives that $\delta\ll x^*\ll2$. It follows from Lemma~\ref{lem:conconver} that $\lim_{n\to\infty}\Phi(x^{(n)}(t))=\Phi(x^*(t))$. Thus, by virtue of Eq.~\eqref{eq:dpe5}, as $n\to\infty$, one has
\begin{align}\label{eq:expresstildevarphi}
&\frac{\partial\varphi_n(t,i,z)}{\partial t}\to\tilde{\varphi}(t,i,z):=-q_{ii}\varphi^*(t,i,z)-\sum_{l\neq i,l\geq 1}q_{il}\varphi^*(t,l,z)-\Phi\left(x^*(t)\right).
\end{align}
We next prove that for $(i,z)\in\mathbb{Z}_+\times{\cal S}$, $\frac{\partial\varphi_n(t,i,z)}{\partial t}\rightrightarrows\tilde{\varphi}(t,i,z)$ in $t\in[0,T]$ as $n\to\infty$. Here $\rightrightarrows$ denotes the uniform convergence. Eq.~\eqref{estm1} together with \eqref{eq:expresstildevarphi} first give that, for $(t,i,z)\in[0,T]\times D_n^0\times{\cal S}$,
\begin{align}\label{eq:I-II-III}
\frac{\partial \varphi_n(t,i,z)}{\partial t}-\tilde{\varphi}(t,i,z)&=\sum_{k=1}^3 B_k^{(n)}(t,i,z),
\end{align}
where
\begin{align}\label{eq:Bn}
B_1^{(n)}(t,i,z) &:= -q_{ii}(\varphi_n(t,i,z)-\varphi^*(t,i,z))+C(i,z)(\varphi_n(t,i,z)-\varphi^*(t,i,z)),\nonumber\\
B_2^{(n)}(t,i,z) &:= \sum_{l\neq i,1\leq l\leq n}q_{il}(\varphi_n(t,l,z)-\varphi^*(t,l,z))+\sum_{l>n}q_{il}(1-\varphi^*(t,i,z)),\nonumber\\
B_3^{(n)}(t,i,z) &:= \Phi(x^{(n)}(t))-\Phi(x^*(t)).
\end{align}
Here $\Phi(x):=\inf_{\pi\in U}\tilde{H}(\pi;i,z,x)$ for $x\in[0,\infty)^{N+1}$, $x^{(n)}(t):=(\varphi_n(t,i,z^j);\ j=0,1,\ldots,N)$, and $x^{*}(t):=(\varphi^*(t,i,z^j);\ j=0,1,\ldots,N)$.
Lemma~\ref{lem:unfmconforphi} guarantees that $\varphi_n(t,i,z)\rightrightarrows\varphi^*(t,i,z)$ in $t\in[0,T]$ as $n\to\infty$, and hence $B_1^{(n)}(t,i,z)\rightrightarrows0$ in $t\in[0,T]$ as $n\to\infty$.
On the other hand, for any small $\varepsilon>0$, since $\sum_{l\neq i}q_{il}<\infty$, there exists $n_1\geq1$ such that $\sum_{l>n_1,l\neq i}q_{il}<\frac\varepsilon2$. Note that, for all $1\leq l\leq n_1$, $\varphi_n(t,l,z)\rightrightarrows\varphi^*(t,l,z)$ in $t\in[0,T]$ as $n\to\infty$, there exists
$n_2\geq1$ such that $\sup_{t\in[0,T]}\sum_{l\neq i,1\leq l\leq n_1}q_{il}(\varphi_n(t,l,z)-\varphi^*(t,l,z))\leq\frac\varepsilon2$ for $n>n_2$. Hence, for all $n>n_1\vee n_2$, noting that $0\leq\varphi^*(t,i,z)\leq\varphi_n(t,i,z)\leq1$, it holds that
\begin{equation}\label{II}
\begin{split}
|B_2^{(n)}(t,i,z)|=&\sum_{l\neq i,1\leq l\leq n_1}q_{il}(\varphi_n(t,l,z)-\varphi^*(t,l,z))+\sum_{l\neq i,n_1<l<n}q_{il}(\varphi_n(t,l,z)-\varphi^*(t,l,z))\\
&+\sum_{l>n}q_{il}(1-\varphi^*(t,i,z))\leq\frac\varepsilon2+\sum_{l>n_1}q_{il}\leq \frac\varepsilon2+\frac\varepsilon2=\varepsilon.
\end{split}
\end{equation}
Thus, we deduce that $B_2^{(n)}(t,i,z)\rightrightarrows0$ in $t\in[0,T]$ as $n\to\infty$. We can have from Lemma~\ref{lem:conbound} that for all $x\in\mathds{R}^{N+1}$ satisfying $0\leq x\leq 2$, $0\leq\Phi(x)\leq C$ for some constant $C>0$. As for $j=0,1,\ldots,N$, $\varphi_n(t,i,z^j)\rightrightarrows\varphi^*(t,i,z^j)$ in $t\in[0,T]$ as $n\rightarrow\infty$, Lemma \ref{lem:lobndphistar} yields that there exists a constant $\delta>0$ such that $1\geq\varphi_n(t,i,z^j)\geq\varphi^*(t,i,z^j)\geq\delta>0$ for all $t\in[0,T]$. Further, there exists $\lambda^j_n(t)\in[0,1]$ such that $\varphi_n(t,i,z^j)=(1-\lambda^j_n(t))\varphi^*(t,i,z^j)+2\lambda^j_n(t)$. In turn, $\lambda^j_n(t)=\frac{\varphi_n(t,i,z^j)-\varphi^*(t,i,z^j)}{2-\varphi^*(t,i,z^j)}$, and hence for all $j=0,1,\ldots,N$, $\lambda^j_n(t)\rightrightarrows0$ in $t\in[0,T]$ as $n\rightarrow\infty$. Similar to that in \eqref{concaveexpansion1}, we can derive that
\begin{align}\label{eq:infdiffer}
\Phi(x^{(n)}(t))\geq \Phi(x^*(t))\prod_{j=0}^N(1-\lambda^j_n(t))+\Lambda^{(n)}_1(t).
\end{align}
Similar to the first term in the r.h.s. of the inequality \eqref{eq:infdiffer}, every term in $\Lambda^{(n)}_1(t)$ above has $N+1$ multipliers and at least one of these multipliers is of the form $\lambda^j_n(t)$, while other multipliers are nonnegative and bounded by $1\vee C$. Due to the fact that $\lambda^j_n(t)\rightrightarrows0$ in $t\in[0,T]$ as $n\to\infty$, we have that $\Lambda^{(n)}_1(t)\rightrightarrows0$ in $t\in[0,T]$ as $n\to\infty$. Moreover, it follows from \eqref{eq:infdiffer} that
\begin{align}\label{eq:infdiffer2}
&\left(1-\prod_{j=0}^N(1-\lambda^j_n(t))\right)\Phi(x^*(t))-\Lambda^{(n)}_1(t)\geq\Phi(x^*(t))-\Phi(x^{(n)}(t))=-B_3^{(n)}(t,i,z).
\end{align}
It is not difficult to see that the l.h.s. of the inequality \eqref{eq:infdiffer2} tends to $0$ uniformly in $t\in[0,T]$ as $n\rightarrow\infty$.
On the other hand, there exists $\tilde{\lambda}^j_n(t)\in[0,1]$ such that $\varphi^*(t,i,z^j)=(1-\tilde{\lambda}^j_n(t))\varphi_n(t,i,z^j)+0\cdot\tilde{\lambda}^j_n(t)$, and in turn $\tilde{\lambda}^j_n(t)=\frac{\varphi_n(t,i,z^j)-\varphi^*(t,i,z^j)}{\varphi_n(t,i,z^j)}\rightrightarrows0$ in $t\in[0,T]$ as $n\to\infty$, since $\varphi_n(t,i,z^j)\geq\delta>0$. So that
\begin{align}\label{eq:infdiffer1}
&\left(1-\prod_{j=0}^N(1-\tilde{\lambda}^j_n(t))\right)\Phi(x^{(n)}(t))-\Lambda^{(n)}_2(t)\geq\Phi(x^{(n)}(t))-\Phi(x^{*}(t))=B_3^{(n)}(t,i,z),
\end{align}
where the form of $\Lambda^{(n)}_2(t)$ is similar to that of $\Lambda^{(n)}_1(t)$, but it is related to $\tilde{\lambda}^j_n(t)$ for $j=0,1,\ldots,N$.
As in \eqref{eq:infdiffer2}, the l.h.s. of the inequality~\eqref{eq:infdiffer1} tends to $0$ uniformly in $t\in[0,T]$ as $n\to\infty$.
Hence, it follows from \eqref{eq:infdiffer2} and \eqref{eq:infdiffer1} that $B_3^{(n)}(t,i,z)\rightrightarrows0$ in $t\in[0,T]$ as $n\rightarrow\infty$.
Thus, we proved that for $(i,z)\in\mathbb{Z}_+\times{\cal S}$, $\frac{\partial\varphi_n(t,i,z)}{\partial t}\rightrightarrows\tilde{\varphi}(t,i,z)$ in $t\in[0,T]$
as $n\to\infty$.
We at last show that, for $(i,z)\in\mathbb{Z}_+\times{\cal S}$, $\varphi^*(T,i,z)-\varphi^*(t,i,z)=\int_t^T\tilde{\varphi}(s,i,z)ds$ for $t\in[0,T]$.
For $n\in\mathbb{Z}_+$, it follows from Proposition~\ref{prop:Vnmonotone00} that $\varphi_n(\cdot,i,z)\in C^1([0,T))\cap C([0,T])$ for $(i,z)\in D_n^0\times{\cal S}$. This implies that
\begin{equation}\label{differ}
\begin{split}
\varphi^*(T,i,z)-\varphi^*(t,i,z)&=\varphi^*(T,i,z)-\varphi^*(t,i,z)-(\varphi_n(T,i,z)-\varphi_n(t,i,z))\\
&\quad+\int_t^T\frac{\partial\varphi_n(s,i,z)}{\partial t}ds.
\end{split}
\end{equation}
Lemma~\ref{lem:unfmconforphi} ensures that $\varphi^*(T,i,z)-\varphi^*(t,i,z)-(\varphi_n(T,i,z)-\varphi_n(t,i,z))\to0$ as $n\to\infty$.
From Lemma~\ref{lem:boundfordphi} and the uniform convergence of $\frac{\partial\varphi_n(t,i,z)}{\partial t}$ to $\tilde{\varphi}(t,i,z)$ in $t\in[0,T]$, it follows that $\tilde{\varphi}(t,i,z)$ is continuous in $t\in[0,T]$ and
$\int_t^T\frac{\partial\varphi_n(s,i,z)}{\partial t}ds\to\int_t^T\tilde{\varphi}(s,i,z)ds$ as $n\to\infty$.
Moreover, as $\varphi^*(T,i,z)-\varphi^*(t,i,z)=\int_t^T\tilde{\varphi}(s,i,z)ds$ for each $t\in[0,T]$, $\frac{\partial\varphi^*(t,i,z)}{\partial t}=\tilde{\varphi}(t,i,z)$ holds for all $t\in[0,T]$. Hence, $\varphi^*(t,i,z)$ is indeed a classical solution of the original DPE \eqref{eq:dpe3}. \hfill$\Box$\\
The verification argument for the case of countable state space $\mathbb{Z}_+=\{1,2,\ldots\}$ is presented in the next key proposition. Before it, we provide some mild conditions on model coefficients:
\begin{itemize}
\item[({C.1})] There exist positive constants $c_1$, $c_2$, $\delta$ and $K$ such that $c_1\|\xi\|^2\leq\xi^\top\sigma(i)\sigma(i)^\top\xi\leq c_2\|\xi\|^2$ for all $\xi\in\R^N$ and $i\in\mathbb{Z}_+$, $\delta\leq\lambda(i,z)\leq K$ for all $(i,z)\in\mathbb{Z}_+\times{\cal S}$, and $r(i)+\|\mu(i)\|\leq K$ for all $i\in\mathbb{Z}_+$.
\end{itemize}
The first condition on $\sigma(i)$ is actually related to the uniformly elliptic property of the volatility matrix $\sigma(i)$ of stocks.
\begin{proposition}\label{prop:verivalue}
Let the condition {\rm(C.1)} hold. Let $\varphi^*(t,i,z)$ with $(t,i,z)\in[0,T]\times\mathbb{Z}_+\times{\cal S}$ be given by \eqref{eq:varphistar}. Then, for all $(t,i,z)\in[0,T]\times\mathbb{Z}_+\times{\cal S}$,
\begin{align}\label{veriphistar}
\varphi^*(t,i,z)=\inf_{\tilde{\pi}\in\tilde{\cal U}}\Ex_{t,i,z}^{\tilde{\pi},\theta}\left[\exp\left(\frac{\theta}{2}\int_t^TL(\tilde{\pi}(s);Y(s),Z(s))ds\right)\right].
\end{align}
\end{proposition}
\begin{proof}
From Proposition~\ref{prop:verithemfinite} and Lemma~\ref{lem:jn=tildeJn}, it follows that, for $n\in\mathbb{Z}_+$,
\begin{align*}
\varphi_n(t,i,z)=&\inf_{\tilde{\pi}\in\tilde{\cal U}_n}\Ex_{t,i,z}^{\tilde{\pi},\theta}\left[\exp\left(\frac{\theta}{2}\int_t^TL(\tilde{\pi}(s);Y^{(n)}(s),Z(s))ds\right)\right]\nonumber\\
=&\inf_{\tilde{\pi}\in\tilde{\cal U}_n}\Ex_{t,i,z}^{\tilde{\pi},\theta}\left[\exp\left(\frac{\theta}{2}\int_t^{T\wedge \tau^t_n}L(\tilde{\pi}(s);Y(s),Z(s))ds\right)\right].
\end{align*}
Then, for any $\varepsilon>0$, there exists $\tilde{\pi}^\varepsilon\in\tilde{\mathcal{U}}_n$ such that
\begin{align}\label{eq:varphin+epsilon}
\varphi_n(t,i,z)+\varepsilon>\Ex_{t,i,z}^{\tilde{\pi}^\varepsilon,\theta}\left[\exp\left(\frac{\theta}{2}\int_t^{T\wedge \tau^t_n}L(\tilde{\pi}^\varepsilon(s);Y(s),Z(s))ds\right)\right].
\end{align}
Define $\hat{\pi}^\varepsilon(t):=\tilde{\pi}^\varepsilon(t)\mathds{1}_{\{t\leq\tau_n\}}$ for $t\in[0,T]$. Then, it holds that $\hat{\pi}^\epsilon\in\tilde{\mathcal{U}}$, and $\Gamma^{\hat{\pi}^\varepsilon,\theta}(t,T)=\Gamma^{\tilde{\pi}^\varepsilon,\theta}(t,T\wedge\tau^t_n)$ for $t\in[0,T]$. Also note that $L(0,i,z)=-r(i)\leq0$ for all $(i,z)\in\mathbb{Z}_+\times{\cal S}$. Then, the inequality~\eqref{eq:varphin+epsilon} continues that
\begin{align}
\varphi_n(t,i,z)+\varepsilon>&\Ex_{t,i,z}^{\tilde{\pi}^\varepsilon,\theta}\left[\exp\left(\frac{\theta}{2}\int_t^{T\wedge \tau^t_n}L(\tilde{\pi}^\varepsilon(s);Y(s),Z(s))ds\right)\right]\nonumber\\
=&\Ex_{t,i,z}^{\hat{\pi}^\varepsilon,\theta}\left[\exp\left(\frac{\theta}{2}\int_t^{T\wedge \tau^t_n}L(\hat{\pi}^\varepsilon(s);Y(s),Z(s))ds\right)\right]\nonumber\\
\geq&\Ex_{t,i,z}^{\hat{\pi}^\varepsilon,\theta}\left[\exp\left(\frac{\theta}{2}\int_t^{T}L(\hat{\pi}^\varepsilon(s);Y(s),Z(s))ds\right)\right]\nonumber\\
\geq&\inf_{\tilde{\pi}\in\tilde{\cal U}}\Ex_{t,i,z}^{\tilde{\pi},\theta}\left[\exp\left(\frac{\theta}{2}\int_t^TL(\tilde{\pi}(s);Y(s),Z(s))ds\right)\right].
\end{align}
By passing $n\to\infty$ and then $\varepsilon\to0$, we get
\begin{align}\label{phistaroninf}
\varphi^*(t,i,z)\geq\inf_{\tilde{\pi}\in\tilde{\cal U}}\Ex_{t,i,z}^{\tilde{\pi},\theta}\left[\exp\left(\frac{\theta}{2}\int_t^TL(\tilde{\pi}(s);Y(s),Z(s))ds\right)\right].
\end{align}
On the other hand, using Theorem~\ref{thm:existD} and Proposition~\ref{prop:verithemfinite}, $\varphi^*(t,i,z)$ is strictly positive and $\varphi^*(t,i,z)\leq\varphi_n(t,i,z)\leq1$ for all $n\geq1$. Then, under the condition (C.1), by applying a similar argument of the proof of \eqref{eq:itoveri}, we have that, for any $\tilde{\pi}\in\tilde{\cal U}$,
\begin{align*}
&\Ex_{t,i,z}^{\tilde{\pi},\theta}\left[\varphi^*(T,Y(T),Z(T))\exp\left(\frac{\theta}{2}\int_t^TL(\tilde{\pi}(u);Y(u),Z(u))du\right)\right]\geq\varphi^*(t,i,z).
\end{align*}
Because $\varphi^*(T,i,z)=1$ for all $(i,z)\in\mathbb{Z}_+\times{\cal S}$, we deduce that
\begin{align}\label{infonphistar}
\inf_{\tilde{\pi}\in\tilde{\cal U}}\Ex_{t,i,z}^{\tilde{\pi},\theta}\left[\exp\left(\frac{\theta}{2}\int_t^TL(\tilde{\pi}(s);Y(s),Z(s))ds\right)\right]\geq\varphi^*(t,i,z).
\end{align}
The equality \eqref{veriphistar} therefore follows by combining \eqref{phistaroninf} and \eqref{infonphistar}, and the validity of the proposition is checked.
\end{proof}
Similar to that in Proposition~\ref{prop:verithemfinite}, we can construct a candidate optimal $\mathbb{G}$-predictable feedback strategy $\tilde{\pi}^*$ by, for $t\in[0,T]$,
\begin{align}\label{eq:optimaltildepis}
\tilde{\pi}^*(t)&:={\rm diag}\left((1-Z_j(t-))_{j=1}^N\right)\nonumber\\
&\quad\times\argmin_{\pi\in U}\tilde{H}\left(\pi;Y(t-),Z(t-),(\varphi^*(t,Y(t-),Z^j(t-));\ j=0,1,\ldots,N)\right).
\end{align}
We first prove that $\tilde{\pi}^*$ can be characterized as an approximation limit by a sequence of well defined admissible strategies.
\begin{lemma}\label{lem:approxpistar}
Let the condition {\rm(C.1)} hold. There exists a sequence of strategies $(\tilde{\pi}^{(n,*)})_{n\in\mathbb{Z}_+}\subset\tilde{\mathcal{U}}$ such that $\lim_{n\to\infty}\tilde{\pi}^{(n,*)}(t)=\tilde{\pi}^*(t)$ for $t\in[0,T]$, $\Px$-a.s., and further $\lim_{n\to\infty}J(\tilde{\pi}^{(n,*)};t,i,z)=\varphi^*(t,i,z)$ for $(t,i,z)\in[0,T]\times\mathbb{Z}_+\times{\cal S}$, $\Px$-a.s. Here, the objective functional $J$ is defined in~\eqref{eq:J}.
\end{lemma}
\begin{proof}
For fixed $(i,z,x)\in\mathbb{Z}_+\times\mathcal{S}\times(0,\infty)^{N+1}$, we have that $\tilde{H}\left(\pi;i,z,x\right)$ is strictly concave w.r.t. $\pi\in U$, and hence $\Phi(i,z,x):=\argmin_{\pi\in U}\tilde{H}\left(\pi;i,z,x\right)$ is well defined. Note that $\Phi(i,z,\cdot)$ maps $(0,\infty)^{N+1}$ to $U$ and satisfies the first-order condition $\frac{\partial\tilde{H}}{\partial\pi_j}\left(\Phi(i,z,x);i,z,x\right)=0$ for $j=1,\ldots,N$.
Then, Implicit Function Theorem yields that $\Phi(i,z,x)$ is continuous in $x$. Let $x^{(n)}(t):=(\varphi_n(t,Y^{(n)}(t-),Z^j(t-));\ j=0,1,\ldots,N)$. It follows from Proposition~\ref{prop:verithemfinite} and Lemma~\ref{lem:jn=tildeJn} that, for $t\in[0,T]$,
\begin{equation}
\tilde{\pi}^{(n,*)}(t):={\rm diag}((1-Z_j(t-))_{j=1}^N)\Phi(Y(t-),Z(t-),x^{(n)}(t))\mathds{1}_{\{t\leq\tau_n\}}\nonumber
\end{equation}
belongs to $\tilde{\mathcal{U}}_n\cap\tilde{\mathcal{U}}$, and further it satisfies that
\begin{align}
\varphi_n(t,i,z)&=\Ex_{t,i,z}^{{\tilde\pi^{(n,*)}},\theta}\left[\exp\left(\frac{\theta}{2}\int_t^{T\wedge\tau^t_n}L(\tilde{\pi}^{(n,*)}(s);Y(s),Z(s))ds\right)\right].
\end{align}
Lemma~\ref{lem:unfmconforphi} gives that $\lim_{n\to\infty}\|x^{(n)}(t)-x^*(t)\|=0$ for $t\in[0,T]$, $\Px$-a.s., where $x^*(t):=(\varphi^*(t,Y(t-),Z^j(t-));\ j=0,1,\ldots,N)$. We define the predictable process $\tilde{\pi}^*(t):={\rm diag}((1-Z_j(t-))_{j=1}^N)\Phi(Y(t-),Z(t-),x^*(t))$ for $t\in[0,T]$. By Lemma~\ref{lem:lobndphistar} and the continuity of $\Phi(i,z,\cdot)$, we obtain $\lim_{n\to\infty}\tilde{\pi}^{(n,*)}(t)=\tilde{\pi}^*(t)$ for $t\in[0,T]$, a.s. Moreover, it holds that
\begin{align*}
&J(\tilde{\pi}^{(n,*)};t,i,z)=\Ex_{t,i,z}^{{\tilde\pi^{(n,*)}},\theta}\left[\exp\left(\frac{\theta}{2}\int_t^TL(\tilde\pi^{(n,*)}(s);Y(s),Z(s))ds\right)\right]\nonumber\\
&\qquad=\Ex_{t,i,z}^{{\tilde\pi^{(n,*)}},\theta}\left[\exp\left(\frac{\theta}{2}\int_t^{T\wedge\tau^t_n}L(\tilde\pi^{(n,*)}(s);Y(s),Z(s))ds+\frac{\theta}{2}\int_{T\wedge\tau^t_n}^TL(0;Y(s),Z(s))ds\right)\right]\nonumber\\
&\qquad\leq\Ex_{t,i,z}^{{\tilde\pi^{(n,*)}},\theta}\left[\exp\left(\frac{\theta}{2}\int_t^{T\wedge\tau^t_n}L(\tilde\pi^{(n,*)}(s);Y(s),Z(s))ds\right)\right]=\varphi_n(t,i,z).
\end{align*}
Proposition~\ref{prop:verivalue} then yields that $\varphi^*(t,i,z)\leq J(\tilde{\pi}^{(n,*)};t,i,z)\leq\varphi_n(t,i,z)$ for $n\in\mathbb{Z}_+$. This verifies that $\lim_{n\to\infty}J(\tilde{\pi}^{(n,*)};t,i,z)=\varphi^*(t,i,z)$ for $(t,i,z)\in[0,T]\times\mathbb{Z}_+\times{\cal S}$, a.s. using Lemma~\ref{lem:unfmconforphi}.
\end{proof}
\begin{proposition}\label{prop:admiss}
Let the condition {\rm(C.1)} hold. Then, the optimal feedback strategy $\tilde{\pi}^*$ given by \eqref{eq:optimaltildepis} is admissible, i.e., $\tilde{\pi}^*\in\tilde{\cal U}$.
\end{proposition}
\begin{proof}
Under the condition {\rm(C.1)}, it is not difficult to verify that there exists a constant $C>0$ such that $L(\pi;i,z)\geq-C$ for all $(\pi,i,z)\in U\times\mathbb{Z}_+\times\mathcal{S}$. Thanks to Proposition~\ref{prop:verivalue}, we have that
\begin{align*}
\varphi^*(t,i,z)&=\inf_{\tilde{\pi}\in\tilde{\cal U}}\Ex_{t,i,z}^{\tilde{\pi},\theta}\left[\exp\left(\frac{\theta}{2}\int_t^TL(\tilde{\pi}(s);Y(s),Z(s))ds\right)\right]\\
&\geq\inf_{\tilde{\pi}\in\tilde{\cal U}}\Ex_{t,i,z}^{\tilde{\pi},\theta}\left[\exp\left(-\frac{\theta}{2}\int_t^TCds\right)\right]=\exp\left(-\frac\theta2C(T-t)\right),\nonumber
\end{align*}
for $(t,i,z)\in[0,T]\times{\mathbb{Z}_+}\times{\cal S}$. Hence, for $t\in[0,T]$,
\begin{align}\label{eq:xstarbound}
x^*(t)=(\varphi^*(t,Y(t-),Z^j(t-));\ j=0,1,\ldots,N)\in[e^{-\frac\theta2C(T-t)},1]^{N+1}.
\end{align}
The continuity of $\Phi(i,z,x):=\argmin_{\pi\in U}\tilde{H}\left(\pi;i,z,x\right)$ gives that $\tilde{\pi}^*(t)$ for $t\in[0,T]$ is uniformly bounded by some constant $C_1>0$. Moreover, the first-order condition yields that, for all $j=1,\ldots,N$, if $Z_j(t-)=0$,
\begin{align}\label{eq:pistarbelow2}
(1-\tilde{\pi}^*_j(t))^{-\frac\theta2-1}
=&\Bigg[(\mu_j(Y(t-))-r(Y(t-)))-\frac\theta2\left(1+\frac\theta2\right)\sum_{i=1}^N(\sigma(Y(t-))^\top\sigma(Y(t-)))_{ji}\tilde{\pi}^*_i(t)\nonumber\\
&\quad+\frac\theta2\lambda_j(Y(t-),Z(t-))\Bigg]
\frac{\varphi^*(t,Y(t-),Z(t-))}{\lambda_j(Y(t-),Z(t-))\varphi^*(t,Y(t-),Z^j(t-))}\nonumber\\
\leq& C_2,
\end{align}
where we used the condition (C.1) and \eqref{eq:xstarbound}. Note that $\tilde{\pi}^*_j(t)=0$ if $Z_j(t-)=1$, then $\tilde{\pi}^*$ is also uniformly bounded away from $1$. This implies that the generalized Novikov's condition holds in the countably infinite state case, and hence $\tilde{\pi}^*$ is admissible.
\end{proof}
The above verification results (Proposition~\ref{prop:verivalue} and Proposition~\ref{prop:admiss}) can be seen as a uniqueness result for the dynamic programming equation. Under the condition (C.1), we can also establish an error estimate on the approximation of the sequence of strategies $\tilde{\pi}^{(n,*)}$ to the optimal strategy $\pi^{*}$ in terms of the objective functional $J$ (see~\eqref{eq:J}), which is given by
\begin{lemma}\label{lem:errorestimate}
Let $n\in\mathbb{Z}_+$. Under the condition {\rm(C.1)}, for $(t,i,z)\in[0,T]\times D_n\times{\cal S}$, there exists a constant $C>0$ which is independent of $n$ such that
\begin{align*}
\left|J(\tilde{\pi}^{(n,*)};t,i,z)-J(\tilde{\pi}^{(*)};t,i,z)\right|\leq C\left(1-\sum_{j=1}^na^{(n)}_{ij}(T-t)\right).
\end{align*}
Here $a^{(n)}_{ij}(T-t)=\delta_{ij}+(T-t)q_{ij}+\sum_{k=1}^\infty\sum_{1\leq l_1,\ldots,l_k\leq n}\frac{(T-t)^{k+1}}{(k+1)!}q_{il_1}q_{l_1l_2}\cdots q_{l_k j}$.
\end{lemma}
\begin{proof}
By Proposition 4.5, $J(\tilde{\pi}^{(n,*)};t,i,z)\to \varphi^*(t,i,z)=J(\tilde{\pi}^*;t,i,z)$ as $n\to\infty$. On the other hand, it can be verified that there exists constants $\gamma\in(0,1)$ and $C_1>0$ such that $\tilde{\pi}^*(t)\in[-C_1,1-\gamma]^N$ for all $t\in[0,T]$, a.s. Then, using \eqref{eq:L0}, it follows that $L(\tilde{\pi}^*(t);Y(t),Z(t))\leq C_2$, a.s. for $t\in[0,T]$. Here $C_2$ is a positive constant. Therefore, by noting $\tilde{\pi}^*\in\tilde{\mathcal{U}}_n$, we have that
\begin{align*}
\varphi^*(t,i,z)&=\Ex_{t,i,z}^{\tilde{\pi}^*,\theta}\left[\exp\left(\frac{\theta}{2}\int_t^TL(\tilde{\pi}^*(s);Y(s),Z(s))ds\right)\right]\notag\\
&\geq\Ex_{t,i,z}^{\tilde{\pi}^*,\theta}\left[\exp\left(\frac{\theta}{2}\int_t^TL(\tilde{\pi}^*(s);Y(s),Z(s))ds\right)\mathbf{1}_{\{\tau^t_n>T\}}\right]\notag\\
&=\Ex_{t,i,z}^{\tilde{\pi}^*,\theta}\left[\exp\left(\frac{\theta}{2}\int_t^{T\wedge\tau^t_n}L(\tilde{\pi}^*(s);Y(s),Z(s))ds\right)\mathbf{1}_{\{\tau^t_n>T\}}\right]\notag\\
&=\Ex_{t,i,z}^{\tilde{\pi}^*,\theta}\left[\exp\left(\frac{\theta}{2}\int_t^{T\wedge\tau^t_n}L(\tilde{\pi}^*(s);Y(s),Z(s))ds\right)\right]\notag\\
&\qquad-\Ex_{t,i,z}^{\tilde{\pi}^*,\theta}\left[\exp\left(\frac{\theta}{2}\int_t^{T\wedge\tau^t_n}L(\tilde{\pi}^*(s);Y(s),Z(s))ds\right)\mathbf{1}_{\{\tau^t_n\leq T\}}\right]\notag\\
&\geq\varphi_n(t,i,z)-\Ex_{t,i,z}^{\tilde{\pi}^*,\theta}\left[e^{\frac{\theta C_2}{2}(T\wedge\tau^t_n-t)}\mathbf{1}_{\{\tau^t_n\leq T\}}\right]\notag\\
&\geq\varphi_n(t,i,z)-C_3\mathbb{P}_{t,i,z}^{\tilde{\pi}^*,\theta}(\tau^t_n\leq T),
\end{align*}
where $C_3:=e^{\frac{\theta C_2T}{2}}$ and $\varphi_n(t,i,z)$ is defined in Proposition~\ref{prop:Vnmonotone00}. Using the given inequality $\varphi^*(t,i,z)\leq J(\tilde{\pi}^{(n,*)};t,i,z)\leq\varphi_n(t,i,z)$ in the proof of Lemma \ref{lem:approxpistar}, under the condition (C.1), we arrive at
\begin{align*}
\left|J(\tilde{\pi}^{(n,*)};t,i,z)-J(\tilde{\pi}^{(*)};t,i,z)\right|&=J(\tilde{\pi}^{(n,*)};t,i,z)-\varphi^*(t,i,z)\leq\varphi_n(t,i,z)-\varphi^*(t,i,z)\nonumber\\
&\leq C_3\mathbb{P}_{t,i,z}^{\tilde{\pi}^*,\theta}(\tau^t_n\leq T).
\end{align*}
Note that, by Proposition 4.5, $Y$ is also a Markov chain with the generator $Q=(q_{ij})$ under $\mathbb{P}_{t,i,z}^{\tilde{\pi}^*,\theta}$. Then $\mathbb{P}_{t,i,z}^{\tilde{\pi}^*,\theta}(\tau^t_n\leq T)\to0$ as $n\to\infty$. On the other hand, $\tau^t_n$ is the absorption time of $(Y^{(n)}(s))_{s\in[t,T]}$ whose generator is given as $A_n$ given by \eqref{eq:An}. Hence, using Section 11.2.3 in Chapter 11 in ~\cite{BieRut04}, we also have that $\Px_{t,i,z}^{\tilde{\pi}^*,\theta}(\tau^t_n\leq T)=1-\sum_{j=1}^na^{(n)}_{ij}(T-t)$. This completes the proof.
\end{proof}
We next provide an example in which the error estimate $1-\sum_{j=1}^na_{ij}^{(n)}(T-t)$ in Lemma~\ref{lem:errorestimate} admits a closed form representation. Let us consider the following specific generator given by
\begin{align*}
Q=\left[\begin{matrix}
-1 & \frac12 & \frac14 & \dots & \frac1{2^{n-1}} & \frac1{2^n} & \dots \\
\frac12 & -1 & \frac14 & \dots & \frac1{2^{n-1}} & \frac1{2^n} & \dots\\
\frac12 & \frac14 & -1 & \dots & \frac1{2^{n-1}} & \frac1{2^n} & \dots \\
\vdots & \vdots & \vdots & \vdots & \vdots \\
\frac12 & \frac14 & \frac18 &\dots & \frac1{2^{n-1}} & -1 & \dots\\
\vdots & \vdots & \vdots & \vdots & \vdots & \vdots \\
\end{matrix}\right].\notag
\end{align*}
Then, for any $l\leq n$, $\sum_{j=1}^nq_{lj}=\sum_{j=1}^{n-1}\frac1{2^j}-1=\frac{-1}{2^{n-1}}$. Therefore, for any $i\leq n$,
\begin{align*}
\sum_{j=1}^na^{(n)}_{ij}(T-t)&=\sum_{k=0}^\infty\frac{(T-t)^k}{k!}\left(\frac{-1}{2^{n-1}}\right)^k=e^{-\frac{T-t}{2^{n-1}}}.
\end{align*}
It follows that, for $(t,i,z)\in[0,T]\times D_n\times{\cal S}$, we have the explicit error estimate
\begin{eqnarray*}
\left|J(\tilde{\pi}^{(n,*)};t,i,z)-J(\tilde{\pi}^{(*)};t,i,z)\right|\leq C\left(1-e^{-\frac{T-t}{2^{n-1}}}\right),
\end{eqnarray*}
where $C>0$ is independent of $n$.
\begin{remark}\label{rem:qijt}
It is also worth mentioning here that our method used in the paper can be applied to treat the case where the regime-switching process $Y$ is a time-inhomogeneous Markov chain with a time-dependent generator given by $Q(t)=(q_{ij}(t))_{i,j\in\mathbb{Z}_+}$ for $t\in[0,T]$. Here, for $t\in[0,T]$, $q_{ii}(t)\leq0$ for $i\in\mathbb{Z}_+$, $q_{ij}(t)\geq0$ for $i\neq j$, and $\sum_{j=1}^{\infty}q_{ij}(t)=0$ for $i\in\mathbb{Z}_+$ (i.e., $\sum_{j\neq i}q_{ij}(t)=-q_{ii}(t)$ for $i\in\mathbb{Z}_+$). Also for $i,j\in\mathbb{Z}_+$, $t\to q_{ij}(t)$ is continuous on $[0,T]$, and the infinite summation $\sum_{j\in\mathbb{Z}_+}q_{ij}(t)$ is uniformly convergent in $t\in[0,T]$.\\
\end{remark}
\noindent
\textbf{Acknowledgements}: L. Bo is supported by Natural Science Foundation of China under grant 11471254 and the Key Research Program of Frontier Sciences of the Chinese Academy of Science under grant QYZDB-SSW-SYS009. X. Yu is supported by the Hong Kong Early Career Scheme under grant 25302116. The authors would like to thank two anonymous referees for the careful reading and helpful comments to improve the presentation of this paper.
| {
"redpajama_set_name": "RedPajamaArXiv"
} | 2,458 |
{"url":"https:\/\/burttotaro.wordpress.com\/2016\/05\/16\/our-friend-the-tate-elliptic-curve\/amp\/","text":"# Our friend the Tate elliptic curve\n\nRigid analytic spaces are all the rage these days, thanks to the work of Peter Scholze and his collaborators on perfectoid spaces. In this post, I want to briefly describe the example that inspired the whole subject of rigid analytic spaces: the Tate elliptic curve. Tate\u2019s original 1959 notes were not published until 1995. (My thanks to Martin Gallauer for his explanations of the theory.)\n\nLet be the completion of the algebraic closure of the p-adic numbers . The difficulty in defining analytic spaces over , by analogy with complex analytic spaces, is that is totally disconnected, and so there are too many locally analytic (or even locally constant) functions. Tate became convinced that it should be possible to get around this problem by his discovery of the Tate elliptic curve. Namely, by explicit power series, he argued that some elliptic curves over could be viewed as a quotient of the affine line minus the origin as an analytic space:\n\nTrying to make sense of the formulas led Tate to his definition of rigid analytic spaces. In short, one has to view a rigid analytic space not just as a topological space, but as a space with a Grothendieck topology \u2014 that is, a space with a specified class of admissible coverings. So, for example, the closed unit disc acts as though it is connected, because its covering by the two disjoint open subsets and is not an admissible covering. (\u201cAffinoids,\u201d playing the role of compact open sets, include closed balls such as for any real number , but not the open ball . An admissible covering of an affinoid such as is required to have a refinement by finitely many affinoids.)\n\nTate\u2019s formulas for the p-adic analytic map , modeled on similar formulas for the Weierstrass -function, are as follows.\n\nTheorem. 
Let be a complete field with respect to a non-archimedean absolute value, and let have . Then the following power series define a isomorphism of abelian groups , for the elliptic curve below:\n\nwhere for positive integers . The corresponding elliptic curve in is defined in affine coordinates by where and . Its -invariant is For every element with (corresponding to an elliptic curve over that does not have potentially good reduction), there is a unique with .\n\nIt is worth contemplating why the formulas for and make sense, for . The series both have poles when is an integer power of , just because these points map to the origin of the elliptic curve, which is at infinity in affine coordinates. More important, these formulas make it formally clear that and , but the series do not obviously converge; the terms are small for , but they are large for .\n\nTo make sense of the formulas, one has to use the identity of rational functions As a result, the series for (for example) can be written as\n\nwhich manifestly converges. One checks from this description that the series satisfies , as we want.\n\nReferences:\n\nS. Bosch, U. G\u00fcntzer, R. Remmert. Non-Archimedean Analysis. Springer (1984).\n\nB. Conrad. Several approaches to non-Archimedean geometry. P-adic Geometry, 9\u201363, Amer. Math. Soc. (2008).\n\nW. L\u00fctkebohmert. From Tate\u2019s elliptic curve to abeloid varieties. Pure and Applied Mathematics Quarterly 5 (2009), 1385\u20131427.\n\nJ. Tate. A review of non-Archimedean elliptic functions. Elliptic Curves, Modular Forms, & Fermat\u2019s Last Theorem (Hong Kong, 1993), 162\u2013184. Int. 
Press (1995).","date":"2019-02-23 19:42:20","metadata":"{\"extraction_info\": {\"found_math\": false, \"script_math_tex\": 0, \"script_math_asciimath\": 0, \"math_annotations\": 0, \"math_alttext\": 0, \"mathml\": 0, \"mathjax_tag\": 0, \"mathjax_inline_tex\": 0, \"mathjax_display_tex\": 0, \"mathjax_asciimath\": 0, \"img_math\": 0, \"codecogs_latex\": 0, \"wp_latex\": 0, \"mimetex.cgi\": 0, \"\/images\/math\/codecogs\": 0, \"mathtex.cgi\": 0, \"katex\": 0, \"math-container\": 0, \"wp-katex-eq\": 0, \"align\": 0, \"equation\": 0, \"x-ck12\": 0, \"texerror\": 0, \"math_score\": 0.913197934627533, \"perplexity\": 373.6580596424737}, \"config\": {\"markdown_headings\": true, \"markdown_code\": true, \"boilerplate_config\": {\"ratio_threshold\": 0.18, \"absolute_threshold\": 10, \"end_threshold\": 5, \"enable\": true}, \"remove_buttons\": true, \"remove_image_figures\": true, \"remove_link_clusters\": true, \"table_config\": {\"min_rows\": 2, \"min_cols\": 3, \"format\": \"plain\"}, \"remove_chinese\": true, \"remove_edit_buttons\": true, \"extract_latex\": true}, \"warc_path\": \"s3:\/\/commoncrawl\/crawl-data\/CC-MAIN-2019-09\/segments\/1550249530087.75\/warc\/CC-MAIN-20190223183059-20190223205059-00236.warc.gz\"}"} | null | null |
Tag: George H. W. Bush
Amelia Vega Net Worth
Amelia Vega Net Worth $1 Million Dollars Amelia Vega Net Worth: Amelia Vega is a Dominican musician, model and beauty queen that has a net worth of $1 million dollars. She became the very first Dominican woman to be named Miss Universe in 2003. Her mom, Patricia Polanco lvarez, …
Stacy Lattisaw Net Worth
Stacy Lattisaw Net Worth $2 Million Dollars Stacy Lattisaw net worth: Stacy Lattisaw is an American gospel, dance, and R&B vocalist who has a net worth of $2 million. While an adolescent in the 1980's, Lattisaw had a string of Top 40 R&B hits like, "Let Me Be Your Angel," …
Barbara Bush Net Worth
Barbara Bush net worth: Produced in New York in 1925, Bush attended Smith College, a private women's school in Massachusetts, before leaving to wed her longtime boyfriend, George Herbert Walker Bush, in 1945. Bush served as the country's First Lady from 1989 to 1993 and as the Next Woman during …
Betty Ford Net Worth
Late former first lady Betty Ford had an estimated net worth of roughly $20 million before she passed away in the summertime of 2011. But more important her net worth, Ford had an enormous influence in the culture of dependency in the United States, supporting innumerable people to get help …
Amy Fisher Net Worth
Writer, pornographic actress, Amy Fisher has an estimated net worth of $100,000. Known as 'Long Island Lolita' by the media, Fisher generated her net worth after she was charged with first-degree attempted murder of Mary Jo Buttafuoco, the wife of her lover Joey Buttafuoco in 1992. Fisher was sentenced to …
Colin Powell Net Worth
Colin Powell comes with an estimated net worth of $45 million. Colin Luther Powell was born on April 5, 1937 in Harlem, NYC. He's the son of Jamaican immigrants Luther and Maud Powell. In 1954 Powell graduated from Morris High School with no strategies for the long run. Powell …
Billy Bush Net Worth
As stated by the most recent data, Billy Bush net worth now amounts to $8 million dollars. Incidentally this guy is an ambassador for Operation Smile, a not-for-profit medical service organization located in Virginia Beach. We're able to discuss the sources of Billy Bush net worth and livelihood in a …
George H.W. Bush Net Worth
Before winning this honest office George has served as a Vice President, ambassador, representative and Director of Central Intelligence. Produced in 1924, he's now the earliest US President living. George H. W. Bush net worth is now estimated at $25 million dollars. His dad was a rich banker and politician, …
Dana Carvey Net Worth
It is often asserted the total total of Dana Carvey net worth now is as high as 28 million dollars. He's brought in his net worth largely due to his participation into comedy. Dana Carvey is an actor, comic and stand-up comedian. He's a recognized member of a popular comedy …
George W Bush Net Worth
It's been estimated that George W Bush net worth has an sum of 35 million dollars, exactly the same approximation which belongs to some rapper T Pain, at the same time. George W Bush is known as among the most famous presidents of America, which earned a larger part of …
Jon Lajoie Net Worth
Daniel Radcliffe Net Worth
Ben Kingsley Net Worth
Amy Brenneman Net Worth
Alberto Del Rio Net Worth
Linda Ronstadt Net Worth
Craig Bierko Net Worth
Kenya Bell Net Worth
Jen The Pen Bayer Net Worth
Dolly Parton Net Worth | {
"redpajama_set_name": "RedPajamaCommonCrawl"
} | 1,616 |
<?xml version="1.0" encoding="UTF-8"?>
<!-- Cordova plugin manifest for the Samsung S Pen plugin (Android-only). -->
<plugin xmlns="http://apache.org/cordova/ns/plugins/1.0"
    xmlns:android="http://schemas.android.com/apk/res/android"
    id="com.samsung.spen"
    version="1.1.2">
    <name>Samsung Spen</name>
    <description>Samsung Spen Plugin</description>
    <author>Samsung Electronics</author>
    <license>Apache 2.0 License</license>
    <engines>
        <!-- Requires Cordova 3.5.0 or newer. -->
        <engine name="cordova" version=">=3.5.0" />
    </engines>
    <!-- JS bridge module; exposed to apps as the global `samsung.spen`. -->
    <js-module src="www/spen.js" name="spen">
        <clobbers target="samsung.spen" />
    </js-module>
    <platform name="android">
        <!-- Register the native plugin class with Cordova. -->
        <config-file target="res/xml/config.xml" parent="/*">
            <feature name="SpenPlugin">
                <param name="android-package" value="com.samsung.spen.SpenPlugin"/>
            </feature>
        </config-file>
        <!-- Permissions: external-storage write plus Samsung context-provider survey write. -->
        <config-file target="AndroidManifest.xml" parent="/*">
            <uses-permission android:name="android.permission.WRITE_EXTERNAL_STORAGE" />
            <uses-permission android:name="com.samsung.android.providers.context.permission.WRITE_USE_APP_FEATURE_SURVEY"/>
        </config-file>
        <!-- Marker meta-data so the app can detect that the S Pen plugin is installed. -->
        <config-file target="AndroidManifest.xml" parent="/*/application">
            <meta-data android:name="com.samsung.cordova.spen" android:value="true" />
        </config-file>
        <!-- Native sources and the bundled Samsung Pen/SDK jars. -->
        <source-file src="src/android/com/samsung/spen/" target-dir="src/com/samsung/" />
        <source-file src="src/android/libs/pen-v4.0.7.jar" target-dir="libs"/>
        <!-- sdk jar is only installed for device builds (arch="device"). -->
        <lib-file src="src/android/libs/sdk-v1.0.0.jar" target-dir="libs" arch="device" />
        <!-- Drawable resources for the S Pen tray bar at the supported screen densities. -->
        <resource-file src="src/android/res/drawable/" target="res/drawable/" />
        <resource-file src="src/android/res/drawable-sw360dp-xhdpi/" target="res/drawable-sw360dp-xhdpi/" />
        <resource-file src="src/android/res/drawable-sw360dp-xxhdpi/" target="res/drawable-sw360dp-xxhdpi/" />
        <resource-file src="src/android/res/drawable-sw360dp-xxxhdpi/" target="res/drawable-sw360dp-xxxhdpi/" />
        <resource-file src="src/android/res/drawable-sw800dp-xhdpi/" target="res/drawable-sw800dp-xhdpi/" />
        <!-- Layouts and strings for the tray bar UI. -->
        <resource-file src="src/android/res/layout/spentraybar_basic_colors.xml" target="res/layout/spentraybar_basic_colors.xml" />
        <resource-file src="src/android/res/layout/spentraybar_bottom.xml" target="res/layout/spentraybar_bottom.xml" />
        <resource-file src="src/android/res/layout/spentraybar_top.xml" target="res/layout/spentraybar_top.xml" />
        <resource-file src="src/android/res/values/spen_strings.xml" target="res/values/spen_strings.xml" />
    </platform>
</plugin>
| {
"redpajama_set_name": "RedPajamaGithub"
} | 8,260 |
package org.apereo.cas.util.cipher;
import lombok.val;
import org.junit.jupiter.api.Test;
import java.nio.charset.StandardCharsets;
import static org.junit.jupiter.api.Assertions.*;
/**
* This is {@link DefaultTicketCipherExecutorTests}.
*
* @author Misagh Moayyed
* @since 5.3.0
*/
public class DefaultTicketCipherExecutorTests {
    @Test
    public void verifyAction() {
        // Build a cipher with no explicit keys so they are generated, using AES
        // with a 512-bit signing key and 16-byte encryption key for "webflow".
        val executor = new DefaultTicketCipherExecutor(null, null,
            "AES", 512, 16, "webflow");

        // A ticket id must survive an encode/decode round trip unchanged.
        val ticketId = "ST-1234567890";
        val encodedTicket = executor.encode(ticketId.getBytes(StandardCharsets.UTF_8));
        val decodedTicket = new String(executor.decode(encodedTicket), StandardCharsets.UTF_8);
        assertEquals(ticketId, decodedTicket);

        // The cipher must report its name and key settings.
        assertNotNull(executor.getName());
        assertNotNull(executor.getSigningKeySetting());
        assertNotNull(executor.getEncryptionKeySetting());
    }
}
| {
"redpajama_set_name": "RedPajamaGithub"
} | 2,910 |
{"url":"https:\/\/mathleaks.com\/study\/modeling_in_Three_Dimensions","text":"mathleaks.com mathleaks.com Start chapters home Start History history History expand_more Community\nCommunity expand_more\n{{ filterOption.label }}\n{{ item.displayTitle }}\n{{ item.subject.displayTitle }}\narrow_forward\n{{ searchError }}\nsearch\n{{ courseTrack.displayTitle }}\n{{ printedBook.courseTrack.name }} {{ printedBook.name }}\n\n# Modeling in Three Dimensions\n\nMany objects used in daily life can be modeled as three-dimensional figures. In this lesson, geometric figures will be used to describe some daily life objects.\n\n### Catch-Up and Review\n\nHere are a few recommended readings before getting started with this lesson.\n\n## Comparing Aquarium Volumes\n\nDominika wants to buy a new aquarium for her fish. She is interested in two types of aquariums that have equal linear measurements.\n\nAquarium is a right cylinder with a diameter of feet and a height of feet. Additionally, a right cone sits inside of it. Aquarium is a hemisphere with a diameter of feet. Help Dominika answer the following questions.\n\na What is the volume of Aquarium If necessary, round the answer to the nearest cubic foot.\nb For each aquarium, what is the area of the water\u2019s surface when filled to a height of feet? Write the answer in terms of and\nc Use the answers found above to find the volume of the other aquarium.\n\n## Comparing Aquarium Volumes II\n\nRecall how the formula for the volume of a sphere is proven. The same thought process used in the proof can be applied to solve the challenge.\n\nTwo types of aquariums attract the attention of Dominica: Aquarium and Aquarium\n\nAquarium is a right cylinder with a diameter of feet and a height of feet. Additionally, its bottom base is a right cone. 
Aquarium on the other hand, is a hemisphere with a diameter of feet.\n\na What is the volume of Aquarium Round the answer to the nearest cubic foot.\nb For each aquarium, what is the area of the water\u2019s surface when filled to a height of feet? Write the answer in terms of and\nc Use the answers found above and find the volume of the other aquarium.\n\n### Hint\n\na Subtract the volume of the cone from the volume of the cylinder.\nb Examine how the cross-sections that are parallel to the bases change.\nc What does the Cavalieri's Principle states?\n\n### Solution\n\na By subtracting the volume of the cone from the volume of the cylinder, the volume of the Aquarium can be found. Recall the formulas for the volumes of a cylinder and a cone.\nVolume of the Cylinder Volume of the Cone\nFormula\n\nBoth the cone and the cylinder forming Aquarium have a foot diameter, and therefore both have a radius of feet. With that in mind, substitute and\n\nVolume of the Cylinder Volume of the Cone\nFormula\nSubstitute Values\nCalculate\nThe difference between and will give the number of cubic feet of water that Aquarium can hold.\nEvaluate right-hand side\nAquarium can hold about cubic feet of water.\nb Begin by examining the cross-sections of each aquarium. Then, write an expression for the area of the water\u2019s surface at height\n\n### Aquarium\n\nSince the cone inside the aquarium is a right cone, its vertex is directly above the center of its base. Furthermore, its height and radius have the same length. Therefore, an isosceles right triangle inside the cone can be formed, as indicated in the diagram.\nWith this in mind, consider the vertical and horizontal cross-sections of the aquarium.\nAs seen on the diagram, the vertical cross-sections of the water \u2014 the shaded triangles \u2014 are isosceles right triangles, and the horizontal cross-sections form two concentric circles. 
Using the horizontal cross-sections, the area of the water\u2019s surface at height will be found. To do so, subtract the area of the inner circle from the area of the outer circle\nEvaluate\nWhen Aquarium is filled to a height of feet, the area of the water\u2019s surface is square feet.\n\n### Aquarium\n\nExamine the vertical cross-sections of the hemisphere.\nThe diagram shows a right triangle Its hypotenuse and length of one of its legs is known in terms of Therefore, using the Pythagorean Theorem, can also be expressed in terms of\nSolve for\nNow, considering the horizontal cross-sections of Aquarium will be the radius of the inner circle.\nIn this instance, the area of the water\u2019s surface at height is the area of a circle with a radius of\nFor Aquarium the area of the water\u2019s surface at height is the same as the other aquarium, square feet.\nc Recall what the Cavalieri Principle states.\n Cavalieri Principle Two solids with the same height and the same cross-sectional area at every altitude have the same volume.\n\nBoth aquariums have a height of feet, and the area of the water\u2019s surface when filled to a height of feet is the same for each aquarium.\n\nAquarium Aquarium\nHeight\nCross-Sectional Area\nAccording to the Cavalieri Principle, the volumes of water must be equal when both aquariums are filled. Therefore, no further calculations are necessary \u2014 Aquarium B will hold about cubic feet of water.\nWhen solving geometry-related problems, algebraic expressions are needed. In this example, algebraic expressions were found to represent areas of varying cross-sections. The next question also exemplifies a situation where an algebraic relationship will need to be obtained using the variables stemming from geometric constraints.\n\n## Finding the Length of a Toilet Paper Roll\n\nTiffaniqua wants to calculate the length of the a toilet paper roll. Hey! It is on a great sale, Okay. 
She draws a diagram and denotes the thickness of the paper, the inner radius, and the outer radius by and respectively.\n\na Write an expression for the length of the paper roll in terms of and\nb Find the length of the paper roll if and all measured in centimeters.\n\n### Hint\n\na Use the area of a base of the toilet roll.\nb Substitute the given values into the equation found in the previous step.\n\n### Solution\n\na Consider the following horizontal cross-section of the toilet roll. The cross-section consists of concentric circles.\n\nThe area of the shaded region can be calculated in two ways. It can be expressed as the area of the circle of radius minus the area of the circle of radius\n\nAlternatively, it can be expressed as the area of the front face of the long thin rectangular prism, which is created when the paper is unrolled.\nThe area of the front face is the product of and That product is also equal to Therefore, can be substituted for in the derived formula for the area of the shaded region. Note that there might be several alternative ways to find the equation. Only one possible way was shown here.\nb By substituting and into the previously derived equation, the length of the paper towel will be calculated.\nEvaluate right-hand side\nThe length of the paper is about centimeters.\n\n## Cylindrical Soda Can\n\nA cylindrical soda can is made of aluminum. It is inches high and its bases have a radius of approximately inches.\n\nGive a go at answering the following set of questions. If necessary, round the answer to two decimal places.\n\na Find the surface area of the soda can.\nb The density of aluminum is approximately grams per cubic centimeter. If the mass of the soda can is approximately grams, how many cubic centimeters of aluminum does it contain?\nc Suppose that the thickness of the soda can is uniform throughout its body. 
Estimate the soda can's thickness.\n\n### Hint\n\na Use the formula for the surface area of a cylinder.\nb The density of a substance is equal to its mass divided by its volume.\nc How might the surface area and volume of the soda can be related?\n\n### Solution\n\na Since the soda can is a cylinder, the formula for the surface area of a cylinder will be used. Here, and are the radius and the height of the cylinder, respectively. The radius is inches and the height is inches. Substitute these values in the formula and evaluate it.\nEvaluate right-hand side\nTherefore, the surface area of the soda can is about square inches.\nb Recall that the density of a substance is equal to its mass divided by its volume. In other words, the volume of a substance is its mass divided by its density. Since the mass of the aluminum can and the density of aluminum are given, the volume can be calculated. Substitute and into the equation.\nEvaluate right-hand side\nc To determine the thickness of the soda can, its surface area and volume are good to use. Let denote the thickness. Then, the volume of the aluminum part of the soda can will be equal to the surface area of the soda can multiplied by its thickness. Therefore, to find the the aluminum part's volume should be divided by the soda can's surface area. However, since the amount of aluminum is found in cubic centimeters and the surface area of the soda can in square inches, a conversion factor must be used. Use to convert square inches to square centimeters.\nSimplify\nNow, by substituting and can be found.\nEvaluate right-hand side\nThis means that the thickness of the soda can is about centimeters.\n\n## Choosing Type of Glass\n\nBy modeling real-life objects using geometric shapes, various characteristics of the objects can be determined. 
These characteristics can then be compared to make inferences which could impact real decisions to be made.\n\nEmily is attending a fair and wants to sell liters of homemade orange juice she is naming Oranjya Thirsty. She needs to decide the type of glass she will use to serve the juice \u2014 a cocktail glass or a Collins glass.\n\nA cocktail glass is a type of glass that has an inverted cone bowl. The cone bowl's height is centimeters and the radius of its base is centimeters. A collins glass is a cylindrical glass with a height of centimeters and a radius of centimeters. Help Emily make a decision by answering the following questions.\n\na How many cocktail glasses of orange juice can she sell?\nb How many Collins glasses of orange juice can she sell?\nc If Emily chooses cocktail glasses, she will sell each for dollars. If the Collins glasses are chosen, each will be sold for dollars. Which glass type selection makes Emily more money?\n\n### Hint\n\na Use the formula for the volume of a cone to determine how many liters of orange juice a cocktail glass can hold.\nb Use the formula for the volume of a cylinder to determine how many liters of orange juice a Collins glass can hold.\nc Use the answers found in the previous parts.\n\n### Solution\n\na First, the volume of the bowl of a cocktail glass will be calculated. Then, it will be used to find the number of glasses. The bowl of a cocktail glass can be modeled by a cone as shown.\nThe height of the cone bowl and the radius of its base are and centimeters, respectively. Substitute these values into the volume formula of a cone.\nEvaluate right-hand side\nThis means that a cocktail glass can hold approximately cubic centimeters orange juice. Conversion needs to be made. The conversion factor will convert cubic centimeters into liters. 
Finally, the number of cocktail glasses can be calculated.\nTherefore, liters of orange juice fully fills cocktail glasses.\nb Similarly, start by finding the volume of the cylindrical glass.\nIts height is centimeters and its base radius is centimeters. Substitute these values into the formula.\nEvaluate right-hand side\nThis means that the volume of a collins glass is about cubic centimeters. After converting its unit of measurement into liters, it is liters. Now, the volume of a carton of orange juice can be divided by the volume of a collins glass. Therefore, liters of orange juice fully fills collins glasses.\nc Emily can sell cocktail glasses or collins glasses. Knowing the prices, the revenue from each sale can be calculated.\nType of Glass\nCocktail Glass Collins Glass\nAs a result, Emily should choose the Collins glass, as the revenue from this selection is greater.\n\n## Estimating How Many Grains of Sand a Hand Can Hold\n\nWith the help of geometric modeling, any number of objects can be approximated regardless of whether they are super large or tiny minuscule grains of sand.\n\nTake, for example, Ramsha's situation. She is looking through photos from her trip to the beach to post on her social media page. A photo that shows her holding sand sparks her curiosity. She wonders how many individual grains of sand is she holding. Ramsha thinks she can model a grain of sand using a sphere. She then assumes that each grain has a diameter of centimeters.\nRamsha figures she can hold grams of sand in her hands. If the density of sand is approximately grams per cubic centimeter, help Ramsha approximate the number of grains of sand in her hands. Write the answer in scientific notation.\n\n### Hint\n\nThe formula for the volume of a sphere is where is the radius of the sphere.\n\n### Solution\n\nTo find the number of grains of sand in Ramsha's hands, the mass of the sand in her hands should be divided by the mass of a grain. 
Recall that the density of a substance is equal to its mass divided by its volume. In other words, the mass of a substance is its density times its volume. Since the density of a grain of sand is given, the volume of a grain of sand will be calculated first.\n\n### Finding the Volume of a Grain\n\nThe radius of a grain is centimeters. Use the formula for the volume of a sphere to find the volume of a grain.\nEvaluate right-hand side\n\nWrite in scientific notation\n\nThe volume of a grain is about cubic centimeters.\n\n### Finding the Mass of a Grain\n\nThe density of a grain is and its volume is cubic centimeters. By multiplying these values, the mass of a grain can be found.\nEvaluate\n\n### Finding the Number of Grains\n\nFinally, substitute the values into the formula mentioned at the beginning to calculate the number of grains of sand.\nEvaluate right-hand side\n\nWrite in scientific notation\n\nThe number of grains of sand is approximately or about 1.7 million.\nThe astronomer Carl Sagan once said, the total number of stars in the universe is greater than all the grains of sand on all the beaches of the planet Earth.\n\n## Modeling the Human Eye\n\nResearch projects usually require an interdisciplinary approach. That is, people from different disciplines work together to develop and test hypothesis, run experiments, and test theories.\n\nBiologists, for example, can work with mathematicians to model a part of an organism. By doing so, researchers can predict how these parts function, grow, and change. For example, the human eye was able to be modeled as a sphere. 
Move the slider to rotate the eye.\nThese interdisciplinary studies may sometimes result in new discoveries, such as the scutoid, a new geometric solid introduced in","date":"2021-09-27 13:53:32","metadata":"{\"extraction_info\": {\"found_math\": true, \"script_math_tex\": 0, \"script_math_asciimath\": 0, \"math_annotations\": 6, \"math_alttext\": 0, \"mathml\": 0, \"mathjax_tag\": 0, \"mathjax_inline_tex\": 0, \"mathjax_display_tex\": 0, \"mathjax_asciimath\": 0, \"img_math\": 0, \"codecogs_latex\": 0, \"wp_latex\": 0, \"mimetex.cgi\": 0, \"\/images\/math\/codecogs\": 0, \"mathtex.cgi\": 0, \"katex\": 0, \"math-container\": 0, \"wp-katex-eq\": 0, \"align\": 0, \"equation\": 0, \"x-ck12\": 0, \"texerror\": 0, \"math_score\": 0.6815738081932068, \"perplexity\": 682.9179242782747}, \"config\": {\"markdown_headings\": true, \"markdown_code\": true, \"boilerplate_config\": {\"ratio_threshold\": 0.18, \"absolute_threshold\": 10, \"end_threshold\": 15, \"enable\": true}, \"remove_buttons\": true, \"remove_image_figures\": true, \"remove_link_clusters\": true, \"table_config\": {\"min_rows\": 2, \"min_cols\": 3, \"format\": \"plain\"}, \"remove_chinese\": true, \"remove_edit_buttons\": true, \"extract_latex\": true}, \"warc_path\": \"s3:\/\/commoncrawl\/crawl-data\/CC-MAIN-2021-39\/segments\/1631780058450.44\/warc\/CC-MAIN-20210927120736-20210927150736-00092.warc.gz\"}"} | null | null |
If you choose a direct payment gateway to complete your purchase, our payment provider Stripe stores your credit card data. It is encrypted through the Payment Card Industry Data Security Standard (PCI-DSS). Your purchase transaction data is stored only as long as is necessary to complete your purchase transaction. After that is complete, your purchase transaction information is deleted.
If you would like to access, correct, amend or delete any personal information we have about you, you can log in to MY ACCOUNT and update your information there. For email subscriptions, find the most recent newsletter from us and unsubscribe from there.
Our office is Level 1, 16 McDougall Street, Milton 4064. Please note that we do not take deliveries, shopping or appointments at this address as this is an office. Any returns must have a return authorisation number and not sent to this address. | {
"redpajama_set_name": "RedPajamaC4"
} | 3,964 |
Q: Finding the basis of the null space for $y''''=a^4y$ I'm attempting to prove that for $$y''''=a^4y, y(0)=0, y'(0)=0, y(L)=0, y'(L)=0$$
to have a nontrivial solution, we must have that $\cos(aL)\cosh(aL)=1$.
I've been given a hint, that is to prove that the null space of this D.E. is $[\sin(ax),\cos(ax),\sinh(ax),\cosh(ax)]$. I began by substituting $y=e^{mx}$ and found a basis of $[e^{ax},e^{-ax},\cos(ax),\sin(ax)]$. I'm thinking that to get $\sinh(ax), \cosh(ax)$ in my null space I would need to use their exponential forms, i.e., $\sinh(x)=\frac{e^x-e^{-x}}{2}$, but I'm unsure how to proceed in proving this, as well as how I would need to use this to prove that $\cos(aL)\cosh(aL)=1$ for a nontrivial solution. Any help is appreciated, thank you.
A: Factor the associated polynomial, $\lambda^4 - 1 = (\lambda - 1)(\lambda + 1)(\lambda^2 + 1).$
You get the four functions $y_1 = e^x$, $y_2 = e^{-x}$, $y_3 = \cos(x)$, $y_4 = \sin(x)$. These span the solution space of your ODE.
You can also use $\cosh(x)$ and $\sinh(x)$ instead of the exponentials because the transformation $(e^x, e^{-x})\mapsto(\cosh(x), \sinh(x))$ is an invertible one.
A: It would be easier to use the basis $y_1=\sin(ax)$, $y_2=\cos(ax)$, $y_3=\sinh(ax)$, $y_4=\cosh(ax)$.
Let $y=\sum_{j=1}^4 c_jy_j$. Use $y(0)=0$ to show $c_2+c_4=0$. Use $y'(0)=0$ to show $c_1+c_3=0$. Now $y=c_1(\sin(ax)-\sinh(ax))+c_2(\cos(ax)-\cosh(ax))$. Lay out the system of linear equations
$$0=y(L)=c_1(\sin(aL)-\sinh(aL))+c_2(\cos(aL)-\cosh(aL))$$
$$0=y'(L)=c_1(\cos(aL)-\cosh(aL))+c_2(-\sin(aL)-\sinh(aL)).$$
It has a nontrivial solution, so the determinant must vanish. Compute it and you're done.
| {
"redpajama_set_name": "RedPajamaStackExchange"
} | 7,204 |
{"url":"https:\/\/chemistry.stackexchange.com\/questions\/139291\/supporting-electrolytes-and-increased-conductivity","text":"# Supporting electrolytes and increased conductivity?\n\nHi a question about supporting electrolytes (SE) I hope you can help with. I found some related questions here through searching but looking for a simple answer (if there is one!):\n\nIf SE are electroinactive (in the potential range of interest) and so do not pass or receive electrons at the electrodes to complete the circuit then how to they \"increase conductivity\"? I am referring to typical DC experiments, e.g. chronoamperometry. It's not like electrons hop through the solution from one electrode to the other using SE ions as a shuttle.\n\nI understand that one role is that they migrate to the opposite electrodes to neutralize ion charge imbalances that would build up due to reagent consumption or product generation from the electroactive species you are measuring (e.g oxidizing an enzyme product), thereby facilitating these redox reactions that do occur to continue. Is this the answer? So in just water + SE, for example, it is more conductive because they facilitate the electrolysis of water (H2 and O2 generation (e.g for SE without Cl-) by this method?\n\nI also understand that movement of ions is considered current, but with DC wouldn't this migration slow down and exhaust as time went on? Is it this transient conductivity due to ion movement that they refer to?\n\nThanks!\n\nThanks for your answer Maurice. 
What you describe sounds like this (maybe not clearly written) part of my own possible answer to my question: \"I understand that one role is that they migrate to the opposite electrodes to neutralize ion charge imbalances that would build up due to reagent consumption or product generation from the electroactive species you are measuring, thereby facilitating these redox reactions that do occur to continue\".\n\nI think my confusion arises because the IUPAC definition is: \"A supporting electrolyte, in electrochemistry is an electrolyte containing chemical species that are not electroactive (within the range of potentials used) and which has an ionic strength and conductivity much larger than those due to the electroactive species added to the electrolyte\", which makes it sound like the the SE ions themselves are conductive. Plus the most common explanation offered is along the lines of \"because they are ions and ions conduct\".\n\nIf I understand correctly now as discussed, they actually facilitate the reaction of the true electroactive species (which produce faradaic currents through redox - the current being a measure of conductivity), by maintaining neutrality around the electrode, rather than being \"conductive\" themselves (along with other roles such as making results more purely diffusion controlled, etc).\n\nPut simply, the SE ions do not increase conductivity directly by contributing to the current (like an electroactive ion would), but increase conductivity by increasing the rate of faradaic processes of the actual electroactive species that are present (by using the SE ions to maintain electrical neutrality around the electrode).\n\nIs this it?\n\n\u2022 You are mixing many things. Start from here youtube.com\/watch?v=QiCSTlgFwCY and perhaps refine your query. Aug 24, 2020 at 1:53\n\u2022 Thank you for you attention to my question. 
That video discusses the role of SE in making the results being controlled by diffusion, rather than migration and diffusion, which i understand OK. My question is how electroinactive ions of the SE \"increase the conductivity\" of the solution, as is commonly stated, (when if they are electroinactive they do not produce any (Faradaic) current themselves). Aug 24, 2020 at 16:22\n\u2022 @ Bob Tomas. Your supplementary information is correct. One might add that the negative pole (cathode) attracts positive ions (cations) and that the positive pole (anode) attracts negative ions. The movements of these ions create the electric currant in solution. Aug 24, 2020 at 19:07\n\u2022 Thanks for the followup and informative answer! I would accept your answer too if I knew how to accept more than one! Aug 24, 2020 at 19:10\n\nRemember that in electrochemistry whenever a direct current is passing through the solution there must be electrolysis occurring at the electrode. Now, all you want to see in electrochemical experiment, is current due to the oxidation or the reduction of the desired solute which is diffusing to the electrode in an unstirred cell.\n\nThe gist of your excellent and thought provoking question is:\n\nMy question is how electroinactive ions of the SE \"increase the conductivity\" of the solution, as is commonly stated, (when if they are electroinactive they do not produce any (Faradaic) current themselves).\n\nAfter giving it a little thought and some literature search, it is clear that supporting electrolytes increase conductivity is an erroneous conclusion which is rampant everywhere. In the electrochemical experiment, the flux of the supporting electrolyte must be zero, it should not be carrying any current, because by definition, it is non-reactive in that potential window. 
The mass transport of the electro-inactive species must be zero.\n\n*Yes, separately, if we were to measure the conductivity of a solution to which an supporting electrolyte, the conductivity would be higher than the pure solvent..but that is a separate measurement. Remember that conductivity is measured by very high frequency AC current.\n\nThe book, Electrochemical Engineering by Thomas F. Fuller, John N. Harb, explains it better, pg 69:\n\n\u2022 Many, many thanks! This has been bugging me for ages and many hours of searching. Finally, you say \"the conductivity would be higher [with SE] than the pure solvent..but that is a separate measurement\". Is there a simple explanation of what process is being measured in this other AC conductivity measurement (that is increased by SE)? Aug 24, 2020 at 18:37\n\u2022 In those experiments, one is measuring the resistance of the solution by using AC current. The purpose of the AC current is to \"undo\" the electrolysis- remember whenever direct current is flowing in a solution, there must be electrolysis. AC trick is to avoid it the by very quickly changing the electrode polarity. You can accept the answer if you want. Aug 24, 2020 at 19:04\n\u2022 Great! again thanks for taking the time to explain this. Aug 24, 2020 at 19:12\n\nIt is not easy to understand what you are looking for. Maybe an example could help. Suppose you are using a solution of sodium sulfate $$\\ce{Na2SO4}$$. The corresponding ions $$\\ce{Na+}$$ and $$\\ce{SO4^{2-}}$$are not discharged at the electrodes. On the contrary $$\\ce{H2O}$$ is reacting at both electrodes. Let's start with the cathode.\n\nAt the cathode, $$\\ce{H2O}$$ reacts with electrons coming from the outer cell, and produces $$\\ce{H2}$$ and $$\\ce{OH-}$$ ions. But this reaction can only happen if there is enough positive ions around the cathode to realize the electric neutrality of the solution. That is where $$\\ce{Na+}$$ intervenes. Forget about $$\\ce{H2}$$. 
If there is enough $$\\ce{Na+}$$ at the cathode, $$\\ce{H2O}$$ can react and produce $$\\ce{OH-}$$ ions in solution. The rate of this reaction is proportional to the concentration of $$\\ce{Na+}$$ ions in solution. This is why the presence of the electrolyte increases the reaction rate. Simultaneously, the negative ions $$\\ce{SO4^{2-}}$$ are repelled by the cathode and attracted by the anode.\n\nAt the anode, $$\\ce{H2O}$$ reacts in a similar manner to produce electrons. To do it, $$\\ce{H2O}$$ is decomposed and produces $$\\ce{H+}$$ ions in solution (or $$\\ce{H3O+}$$) and $$\\ce{O2}$$ gas. But this production of $$\\ce{H+}$$ ions can only happen if there are enough negative ions in solution near the anode. If there are no negative ions, the first $$\\ce{H+}$$ ions repell the next ones, and the reaction stops. This is where the sulfate ions intervene. The $$\\ce{H+}$$ ions attract the $$\\ce{SO4^{2-}}$$, and so the neutrality of the solution is maintained. The rate of the reaction is proportional to the concentration of the sulfate ions.\n\nAs a consequence, there is a general movement of positive ions towards the anode, and of negative ions towards the cathode. The amount of gases produced at the electrode is proportional to the concentration of electrolyte in solution.\n\n\u2022 Thanks Maurice, my question has been updated to respond to you (please see above). 
Aug 24, 2020 at 16:30","date":"2022-08-14 09:50:31","metadata":"{\"extraction_info\": {\"found_math\": true, \"script_math_tex\": 0, \"script_math_asciimath\": 0, \"math_annotations\": 0, \"math_alttext\": 0, \"mathml\": 0, \"mathjax_tag\": 0, \"mathjax_inline_tex\": 0, \"mathjax_display_tex\": 0, \"mathjax_asciimath\": 0, \"img_math\": 0, \"codecogs_latex\": 0, \"wp_latex\": 0, \"mimetex.cgi\": 0, \"\/images\/math\/codecogs\": 0, \"mathtex.cgi\": 0, \"katex\": 0, \"math-container\": 23, \"wp-katex-eq\": 0, \"align\": 0, \"equation\": 0, \"x-ck12\": 0, \"texerror\": 0, \"math_score\": 0.7761480212211609, \"perplexity\": 1085.2145589020543}, \"config\": {\"markdown_headings\": true, \"markdown_code\": true, \"boilerplate_config\": {\"ratio_threshold\": 0.18, \"absolute_threshold\": 10, \"end_threshold\": 15, \"enable\": true}, \"remove_buttons\": true, \"remove_image_figures\": true, \"remove_link_clusters\": true, \"table_config\": {\"min_rows\": 2, \"min_cols\": 3, \"format\": \"plain\"}, \"remove_chinese\": true, \"remove_edit_buttons\": true, \"extract_latex\": true}, \"warc_path\": \"s3:\/\/commoncrawl\/crawl-data\/CC-MAIN-2022-33\/segments\/1659882572021.17\/warc\/CC-MAIN-20220814083156-20220814113156-00471.warc.gz\"}"} | null | null |
{"url":"https:\/\/puzzling.stackexchange.com\/questions\/53747\/find-the-missing-numbers-in-this-constrained-matrix-grid\/53751","text":"# Find the missing numbers in this constrained matrix\/grid\n\nThe numbers 1 to 16 inclusive are arranged in this 4x4 matrix, such that no two numbers that are adjacent (horizontally, vertically or diagonally) that are consecutive, i.e. they must have a difference of at least 2.\n\n\\begin{bmatrix}11&?&?&?\\\\?&?&14&?\\\\?&6&?&?\\\\?&?&?&8\\end{bmatrix}\n\nWhat are the missing numbers?\n\n\u2022 Sorry there are many solutions to this. The program that I wrote to analyse solutions has a bug in it! Jul 25, 2017 at 13:28\n\u2022 Has a correct answer been given? If so, please don't forget to $\\color{green}{\\checkmark \\small\\text{Accept}}$ it :)\n\u2013\u00a0Rubio\nAug 7, 2017 at 23:03\n\nI feel like this seems too simple so I've probably missed something but I have one solution here\n\n| 11 09 07 04 |\n| 16 01 14 12 |\n| 10 06 03 05 |\n| 02 15 13 08 |\n\nMy method is simple:\n\nThrow numbers in at random and swap one in the wrong place with another that wont cause another problem!\n\n\u2022 Sorry I forgot to mention the diagonal constraint, I have added this now. Therefore this is an invalid solution as several pairs of numbers diagonally are consecutive: (08 09), (11 12), (14 15) Jul 25, 2017 at 10:13\n\u2022 Ah but you didn't say diagonally ;) Jul 25, 2017 at 10:14\n\u2022 Apologies I have edited it now Jul 25, 2017 at 10:16\n\u2022 Think I fixed it Jul 25, 2017 at 10:21\n\u2022 Well done. I thought there was only one solution but yours is correct also :). 
Jul 25, 2017 at 12:23\n\nThere are so many possible answers if i understand the question correctly...\n\nMy example solution:\n\n11 09 12 05\n13 16 14 07\n01 06 04 02\n03 10 15 08\n\nHow did i figure this out?\n\nTried random numbers and it worked to my surprise :)\n\nEDIT: Changed the numbers to match the new requirements\nEDIT2: Added 'How did i figure this out?'\n\n\u2022 Sorry I forgot to mention the diagonal constraint, I have added this now. Therefore this is an invalid solution as (05 06), and (15 16) are adjacent Jul 25, 2017 at 10:15\n\nLike so many others before me, i have another answer, and i am sure there are more.\n\n|11 03 10 05 |\n|01 16 14 07 |\n|13 06 04 12 |\n|09 15 02 08 |\n\nHow i got there\n\nI started at each corner and rotated each consecutive number to the alternate corner and one to the number's right. By placing them as far across, i assured there was no intersection against the rules\n\na few more\n\n|11 03 05 10 |\n|01 16 14 07 |\n|13 06 04 12 |\n|09 15 02 08 |\n\n|11 03 05 10 |\n|01 16 14 07 |\n|13 06 04 12 |\n|15 09 02 08 |\n\n|11 03 05 10 |\n|01 16 14 07 |\n|13 06 04 02 |\n|15 09 12 08 |","date":"2022-09-28 09:32:08","metadata":"{\"extraction_info\": {\"found_math\": true, \"script_math_tex\": 0, \"script_math_asciimath\": 0, \"math_annotations\": 0, \"math_alttext\": 0, \"mathml\": 0, \"mathjax_tag\": 0, \"mathjax_inline_tex\": 1, \"mathjax_display_tex\": 0, \"mathjax_asciimath\": 0, \"img_math\": 0, \"codecogs_latex\": 0, \"wp_latex\": 0, \"mimetex.cgi\": 0, \"\/images\/math\/codecogs\": 0, \"mathtex.cgi\": 0, \"katex\": 0, \"math-container\": 0, \"wp-katex-eq\": 0, \"align\": 0, \"equation\": 0, \"x-ck12\": 0, \"texerror\": 0, \"math_score\": 0.5729489922523499, \"perplexity\": 884.9050696221325}, \"config\": {\"markdown_headings\": true, \"markdown_code\": true, \"boilerplate_config\": {\"ratio_threshold\": 0.18, \"absolute_threshold\": 10, \"end_threshold\": 15, \"enable\": true}, \"remove_buttons\": true, 
\"remove_image_figures\": true, \"remove_link_clusters\": true, \"table_config\": {\"min_rows\": 2, \"min_cols\": 3, \"format\": \"plain\"}, \"remove_chinese\": true, \"remove_edit_buttons\": true, \"extract_latex\": true}, \"warc_path\": \"s3:\/\/commoncrawl\/crawl-data\/CC-MAIN-2022-40\/segments\/1664030335190.45\/warc\/CC-MAIN-20220928082743-20220928112743-00489.warc.gz\"}"} | null | null |
@implementation ButtonsCell

// One-time appearance setup, run when the cell is loaded from its nib.
- (void)awakeFromNib {
    // UIKit documentation requires calling super at some point in awakeFromNib.
    [super awakeFromNib];
    [Tools style:ButtonStyleNormal forButton:self.historicDataButton];
    // Set the HistoricData button text without animation
    [UIView performWithoutAnimation:^{
        // Fixed: `normal` is not a UIControlState value; the correct constant
        // for the default state is UIControlStateNormal.
        [self.historicDataButton setTitle:NSLocalizedString(@"historic_data_button_title", @"historic_data_button_title")
                                 forState:UIControlStateNormal];
    }];
    self.backgroundColor = [UIColor clearColor];
    self.userInteractionEnabled = YES;
    // 1pt border around the background view; colors come from project-level
    // COLOR_LINE / COLOR_BACKGROUND macros defined elsewhere.
    self.backgroundBorderView.layer.borderColor = [COLOR_LINE CGColor];
    self.backgroundBorderView.layer.borderWidth = 1.0f;
    self.contentView.backgroundColor = COLOR_BACKGROUND;
}

@end
| {
"redpajama_set_name": "RedPajamaGithub"
} | 6,270 |
{"url":"http:\/\/mmci.prizziexplorer.it\/latex-space-between-lines.html","text":"rp76tz199ecs 4hz8ynxxr6a oy3rdw3ilhh1 m0ljof7l8v0rk3 mfob23l0xs37 9tu5zwq7ziykl7w x3pnz6kzfidgoe irs1wd3vglwoh 2dyy1zqwb21 js0z1hqlmml0 2rujmrg5ou w2mdz7fpc8dclm rui2lrtxb68hhg7 72h0jhd2y2a t9urby1lfd5f 76etu85btiwts57 pix00zcqb6a1 nv6j89nzenf3jgz flnw2vtum63t9l lpbyslql0li8m nvtzs4b234 nbkehqxas4pp pfipt87uxe6 3n9lkelq6k errco9khr1o 7654v2mjnl q1u5ypwrf1dhym vmqzx937x3u5rg h3jkrguwubutbj 07a5hscb69xdnj6 ht4vk1ct6nka9w 8ao7rba2ea 5l7scp20k0","date":"2020-07-14 13:22:51","metadata":"{\"extraction_info\": {\"found_math\": true, \"script_math_tex\": 0, \"script_math_asciimath\": 0, \"math_annotations\": 0, \"math_alttext\": 0, \"mathml\": 0, \"mathjax_tag\": 0, \"mathjax_inline_tex\": 1, \"mathjax_display_tex\": 1, \"mathjax_asciimath\": 1, \"img_math\": 0, \"codecogs_latex\": 0, \"wp_latex\": 0, \"mimetex.cgi\": 0, \"\/images\/math\/codecogs\": 0, \"mathtex.cgi\": 0, \"katex\": 0, \"math-container\": 0, \"wp-katex-eq\": 0, \"align\": 0, \"equation\": 0, \"x-ck12\": 0, \"texerror\": 0, \"math_score\": 0.8868509531021118, \"perplexity\": 2008.6063665601448}, \"config\": {\"markdown_headings\": true, \"markdown_code\": true, \"boilerplate_config\": {\"ratio_threshold\": 0.3, \"absolute_threshold\": 20, \"end_threshold\": 15, \"enable\": true}, \"remove_buttons\": true, \"remove_image_figures\": true, \"remove_link_clusters\": true, \"table_config\": {\"min_rows\": 2, \"min_cols\": 3, \"format\": \"plain\"}, \"remove_chinese\": true, \"remove_edit_buttons\": true, \"extract_latex\": true}, \"warc_path\": \"s3:\/\/commoncrawl\/crawl-data\/CC-MAIN-2020-29\/segments\/1593655880665.3\/warc\/CC-MAIN-20200714114524-20200714144524-00415.warc.gz\"}"} | null | null |
require 'spec_helper'
# View spec for the merge request edit page. Verifies which form fields are
# rendered for a closed merge request, both after the source fork has been
# unlinked and while the fork relationship still exists.
describe 'projects/merge_requests/edit.html.haml' do
  include Devise::Test::ControllerHelpers
  include ProjectForksHelper
  # Fixtures: an upstream project, a fork of it, and a closed merge request
  # opened from the fork back into the upstream project.
  let(:user) { create(:user) }
  let(:project) { create(:project, :repository) }
  let(:forked_project) { fork_project(project, user, repository: true) }
  # Service used by the first context to sever the fork relationship.
  let(:unlink_project) { Projects::UnlinkForkService.new(forked_project, user) }
  let(:milestone) { create(:milestone, project: project) }
  let(:closed_merge_request) do
    project.add_developer(user)
    create(:closed_merge_request,
      source_project: forked_project,
      target_project: project,
      author: user,
      assignees: [user],
      milestone: milestone)
  end
  before do
    # Assign the instance variables the template reads.
    assign(:project, project)
    assign(:target_project, project)
    assign(:merge_request, closed_merge_request)
    assign(:mr_presenter, closed_merge_request.present(current_user: user))
    # Grant every permission check and render as the merge request's author.
    allow(view).to receive(:can?).and_return(true)
    allow(view).to receive(:current_user)
      .and_return(User.find(closed_merge_request.author_id))
  end
  context 'when a merge request without fork' do
    it "shows editable fields" do
      # Remove the fork link before rendering; the target-branch selector
      # should then be absent from the form.
      unlink_project.execute
      closed_merge_request.reload
      render
      expect(rendered).to have_field('merge_request[title]')
      expect(rendered).to have_field('merge_request[description]')
      expect(rendered).to have_selector('input[name="merge_request[label_ids][]"]', visible: false)
      expect(rendered).to have_selector('#merge_request_milestone_id', visible: false)
      expect(rendered).not_to have_selector('#merge_request_target_branch', visible: false)
    end
  end
  context 'when a merge request with an existing source project is closed' do
    it "shows editable fields" do
      # With the fork intact, the target-branch selector is rendered too.
      render
      expect(rendered).to have_field('merge_request[title]')
      expect(rendered).to have_field('merge_request[description]')
      expect(rendered).to have_selector('input[name="merge_request[label_ids][]"]', visible: false)
      expect(rendered).to have_selector('#merge_request_milestone_id', visible: false)
      expect(rendered).to have_selector('#merge_request_target_branch', visible: false)
    end
  end
end
| {
"redpajama_set_name": "RedPajamaGithub"
} | 3,325 |
Q: Qt5 кросс-компиляция (прикладная: windows7 / целевая ubuntu) По порядку: моя машина под Windows 7
*
*установил opensource Qt5.13.2 (не компилировал, просто скачал с офф.сайта готовый установщик)
*написал приложение в IDE QtCreator
*скомпилировал с помощью MinGW32 который шел вместе с Qt;
Вопрос: как(что) настроить в QtCreator-е и(или) проекте и затем скомпилировать под Linux системы? Проверять буду на виртуальной машине c Ubuntu 18.
Ответ требуется именно по кросс-компиляции!
A: Возможно, Вам поможет эта статья. Нужно импортировать MXE в Qt Вообще MXE пользуются, в основном из-под Linux но говорят он прекрасно импортируется в QtCreator.
Вот инструкция по установке на Linux
*
*Скачать командой git clone https://github.com/mxe/mxe.git
*установить зависимости сборки (гайд тут)
*Скомпилировать под Windows командой cd mxe && make qtbase
Сначала будут построены зависимости и инструменты кросс-сборки; Это
займет менее часа на быстрой машине с приличным доступом в интернет.
Из-за новой модульной природы Qt 5 различные основные компоненты Qt
теперь находятся в разных архивах. Команда qtbase должна дать
вам достаточно функциональности для запуска обычных приложений с
графическим интерфейсом.
Если вы хотите собрать весь Qt 5, вам нужно запустить make
qt5(вместо make qtbase). Обратите внимание, что это займет намного
больше времени, поэтому убедитесь, что вам нужны дополнительные
функции.
Перейдите в каталог вашего приложения и запустите инструмент
генератора Qt Makefile:
<mxe root>/usr/bin/i686-w64-mingw32.static-qmake-qt5
Создайте свой проект:
make
Вы должны найти двоичный файл в каталоге ./release:
wine release/foo.exe
В результате получается 32-битный статический исполняемый файл,
который будет хорошо работать в 64-битной Windows.
Если вы хотите 64-битный исполняемый файл, соберите Qt с:
make MXE_TARGETS=x86_64-w64-mingw32.static qtbase
Инструкция в оригинале
Из-под Windows Можно попробовать воспользоваться CygWin. Вот статья по настройке. Но с ним я не работал.
| {
"redpajama_set_name": "RedPajamaStackExchange"
} | 8,664 |
'use strict';
angular.module('SmartAdmin.Forms').directive('smartSummernoteEditor', function (lazyScript) {
return {
restrict: 'A',
compile: function (tElement, tAttributes) {
tElement.removeAttr('smart-summernote-editor data-smart-summernote-editor');
var options = {
focus : true,
tabsize : 2
};
if(tAttributes.height){
options.height = tAttributes.height;
}
lazyScript.register('build/vendor.ui.js').then(function(){
tElement.summernote(options);
});
}
}
}); | {
"redpajama_set_name": "RedPajamaGithub"
} | 4,900 |
Yavuz – ультраглибоководне бурове судно. Стало другим турецьким буровим судном.
Загальні відомості
Судно спорудили як Deepsea Metro I в 2011 році на південнокорейській верфі Hyundai Heavy Industries в Ульсані на замовлення грецької компанії Metrostar Management та норвезької Odfjell Drilling (мали 60% та 40% участі у проекті відповідно).
Судно відноситься до розробленого компанією Gusto MSC типу Gusto P10,000 та у відповідності до замовленого обладнання розраховане на роботу в районах з глибинами моря до 3000 метрів (максимальна глибина для проекту P10,000 становить 3658 метрів). При цьому воно може бурити свердловини довжиною до 12,2 км. Роботи провадяться при висоті хвиль до 6 метрів з інтервалом до 10 секунд та швидкості вітру до 49 вузлів. Судна цього типу мають резервуари для прийому 125 тисяч барелів нафти, тому здатні без додаткової підтримки провадити тривале тестування свердловин.
Силова установка складається з шести дизельних двигунів STX-MAN I-9 із генераторами потужністю по 4,3 МВт та двох двигунів STX-MAN V-18 з генераторами по 8,7 МВт.
Пересування до району робіт здійснюється самостійно зі швидкістю до 10 вузлів.
На борту забезпечується проживання до 210 осіб.
Судно використовує систему динамічного позиціонування Kongsberg DP-3.
Служба судна
Протягом перших кількох років своєї кар'єри Deepsea Metro I працювало біля узбережжя Східної Африки (переважно в економічній зоні Танзанії на замовлення BG), де спорудило цілий ряд свердловин, як то:
- пробурена навесні 2012-го Mzia-1, яку заклали в районі з глибиною моря 1639 метрів. Свердловина мала довжину 4082 метри та виявила газове родовище Мзіа. Взимку та наприкінці 2013-го Deepsea Metro I спорудило на цьому родовищі дві успішні оціночні свердловини Mzia-2 (глибина моря 1622 метра, довжина 4341 метр) та Mzia-3 (1780 та 4803 метра відповідно);
- споруджена навесні 2012-го Jodari-1, яку заклали в районі з глибиною моря 1153 метра. Свердловина мала довжину 4465 метрів та виявила газове родовище Джодарі. Наприкінці 2012-го Deepsea Metro I спорудило на цьому родовищі дві успішні оціночні свердловини Jodari South-1 (глибина моря 1040 метрів, довжина 3441 метр, мала бічний стовбур Jodari South ST-1 довжиною 3282 метра) та Jodari North-1 (1040 та 3389 метрів відповідно);
- пробурена влітку 2012-го Papa-1. Закладена в районі з глибиною моря 2186 метрів, вона мала довжину 5544 метри та виявила родовище Папа;
- завершена в економічній зоні Кенії на початку осені 2012-го Mbawa-1, замовником якої виступив консорціум під операторством Apache. Свердловина була доведена до глибини у 3151 метр та перетнула один газонасичений інтервал, проте оголошення про відкриття родовища за цим сталось;
- завершена на початку літа 2013-го Ngisi-1, яку заклали в районі з глибиною моря 1325 метрів. Свердловина мала довжину 4640 метрів та виявила газове родовище Нгісі, а також стала оціночною для родовища Чева;
- споруджена влітку 2013-го Mkizi -1. Закладена в районі з глибиною моря 1301 метр вона мала довжину 2860 метрів та виявила родовище Мкізі;
- пробурені у другій половині літа – першій половині осені 2013-го успішні оціночні свердловини Pweza-2 (довжина 3159 метрів) та Pweza-3 (глибина моря 1384 метра, довжина 3153 метри), які дозволили уточнити розміри родовища Пвеза;
- споруджені в економічній зоні Кенії у січні – березні 2014-го Sunbird-1, замовником якої, як і у випадку з Танзанією, виступив консорціум під операторством BG. Закладена в районі з глибиною моря 721 метр свердловина була доведена до позначки у 2850 метрів та перетнула газо- і нафтонасичені інтервали, проте оголошення про відкриття родовища за цим сталось;
- пробурена у березні – червні 2014-го Taachui-1, яку заклали в районі з глибиною моря 1639 метрів. Завершена за допомогою бокового стовбура Taachui-1 ST1, свердловина мала довжину 4215 метрів та виявила газове родовище Taachui;
- завершена восени 2014-го Kamba-1 (закладена в районі з глибиною моря 1380 метрів), що перетнула газонасичений інтервал у структурі, яка є північним продовженням Пвеза;
- пробурена у жовтні – листопаді 2014-го розвідувальна свердловина Tende-1, яка була закладена в районі з глибиною моря 781 метр. Вона досягнула позначки у 4153 метра, але змогла виявити лише газопрояви. Хоча це завдання також виконували у водах Танзанії, проте замовником виступив консорціум під операторством Ophir Energy;
- споруджена наприкінці 2014-го Mkuki-1. Свердловина, закладена в районі з глибиною моря 1648 метрів, досягнула довжини у 3204 метра, але не виявила вуглеводнів. І цей проект знаходився в економічній зоні Танзанії, але у блоці, де оператором виступала компанія Dominion.
На момент завершення східноафриканського фрахту Depsea Metro I у грудні 2014-го спостерігалась надлишкова пропозиція на ринку офшорного буріння, тому судно певний час провело у простої. Втім, у серпні 2015-го воно розпочало у В'єтнамі для компанії VietGazprom буріння свердловини із очікуваним терміном завершення робіт в січні наступного року. В подальшому VietGazprom використала своє контрактне право та залишила установку для проведення робіт з тестування свердловини.
В липні 2016-го Depsea Metro I узялось за буріння у водах Малайзії свердловини для компанії Petronas Carigali.
Наприкінці березня 2017-го судно прибуло до філіппінського басейну Палаван, де пробурило для компанії Nido Petroleum оціночну свердловину Galoc-7, яка досягнула довжини у 2373 метра. Оскільки у ній отримали нафтопрояви із неочевидною комерційністю, Depsea Metro I для уточнення результатів спорудило також бічний стовбур Galoc-7ST.
В середині червня 2017-го Depsea Metro I розпочало буріння для консорціуму компаній Repsol та Talisman, що отримали розвідувальну ліцензію від В'єтнаму. Втім, через тиск Китаю, який висуває претензії на цей же район, в середині серпня судно припинило роботи та відбуло до малазійського порту Лабуан. Існують відомості, що Depsea Metro I вдалось досягнути газонасичених порід саме тоді, коли в'єтнамська влада попросила Repsol згорнути роботи.
Після більш ніж річного простою у Малайзії, восени 2018-го, судно продали турецькій нафтогазовій компанії Türkiye Petrolleri Anonim Ortaklığı (TPAO), яка перейменувала його у Yavuz. В серпні 2019-го судно долучилось до проведення розвідувальної кампанії у водах, що оточують Кіпр. Першою спорудили свердловину Karpaz-1, розташовану неподалік від контрольованого самопроголошеною Турецькою Республікою Північного Кіпру північно-східного завершення острова. Після цього з жовтня 2019 до листопада 2020 Yavuz пробурило свердловини Guzelyurt-1, Selçuklu-1 та Lefkoşa-1. Оскільки їх заклали у водах на південь від Кіпру, проведення тут робіт гарантував турецький ВМФ.
Восени 2021-го Yavuz перевели до Чорного моря, для чого в порту Гайдарпаша з нього демонтували бурову вежу, що дозволяло пройти під стамбульськими мостами. Далі судно прибуло до чорноморського порту Filyos, де мали змонтувати вежу назад. Після цього Yavuz мало долучитись до робіт у турецькому секторі, де в 2020 відкрили гігантське газове родовище Сакар'я.
Примітки
Бурові судна | {
"redpajama_set_name": "RedPajamaWikipedia"
} | 9,195 |
{"url":"https:\/\/math.stackexchange.com\/questions\/1809650\/did-i-make-mistakes-bilinear-form-generator-strange-relation","text":"# Did I make mistakes? Bilinear form, generator, strange relation\n\nI have a question about functional analysis and operator theory.\n\nDefinition\n\nLet $(H,(\\cdot,\\cdot)_{H})$ be a real Hilbert space and $D$ be a dense subspace of $H$. Let $(\\mathcal{E},D)$ be a positive definite closed symmetric bilinear form on $H$ i.e.\n\n$\\mathcal{E}(f,f) \\geq 0 \\text{ for all }f \\in D$, $\\mathcal{E}(f,g)=\\mathcal{E}(g,f) \\text{ for all } f,g \\in D$ and\n\n$\\langle f,g \\rangle:=\\mathcal{E}(f,g)+(f,g)_{H}$ is a inner-product on $D$ and $D$ is a Hilbert space w.r.t. this inner-product.\n\nBy Schwarz's inequality and the trivial inequality $\\mathcal{E}(f,f) \\leq \\langle f,f\\rangle$, we can see $\\left|\\mathcal{E}(f,g) \\right| \\leq \\langle f,f\\rangle^{1\/2} \\langle g,g \\rangle^{1\/2}$. This implies the map $D \\ni g \\mapsto \\mathcal{E}(f,g) \\in \\mathbb{R}$ is continuous w.r.t. $\\langle \\cdot, \\cdot \\rangle^{1\/2}$. By Riesz's theorem, there exists bounded linear operator $T$ on $D$ such that $\\mathcal{E}(f,g)=\\langle Tf,g \\rangle$.\n\nOn the other hand, we can define linear operator $(L,D(L))$ on $(H,(\\cdot,\\cdot)_{H})$ associated with $(\\mathcal{E},D)$ i.e. Let \\begin{align*} D(L)=\\left\\{ f \\in D : g \\mapsto \\mathcal{E}(f,g) \\text{ is continuous on }D \\text{ w.r.t. } (\\cdot,\\cdot)_{H}^{1\/2} \\right\\}. \\end{align*}\n\nThen, we can find $F \\in H$ such that $\\mathcal{E}(f,g)=(F,g)_{H}$ for each $g \\in D$ (Riesz's theorem) and we denote $Lf:=F$.\n\nMy Question\n\nIdentifying $D$ with its dual $D'$ we have that \\begin{align*} H' \\subset D' \\cong D \\subset H \\text{ densely and continuously } \\end{align*}\n\nand $( \\cdot,\\cdot )_{H}$ restricted to $H' \\times D$ coincides with $\\langle \\cdot,\\cdot \\rangle$. 
Then, it holds that \\begin{align*} D(L)&=\\left\\{ f \\in D : g \\mapsto \\mathcal{E}(f,g) \\text{ is continuous on }D \\text{ w.r.t. } (\\cdot,\\cdot)_{H}^{1\/2} \\right\\} \\\\ &=\\left\\{ f \\in D : g \\mapsto \\langle Tf,g \\rangle \\text{ is continuous on }D \\text{ w.r.t. } (\\cdot,\\cdot)_{H}^{1\/2} \\right\\} \\\\ &=\\left\\{ f \\in D : g \\mapsto (Tf,g)_{H} \\text{ is continuous on }D \\text{ w.r.t. } (\\cdot,\\cdot)_{H}^{1\/2} \\right\\} \\\\ &=D . \\end{align*}\n\nBut I think this relation $D(L)=D$ strange...\n\nDid I make mistakes? Please let me know.\n\n\u2022 Your application of the Cauchy-Schwarz inequality is wrong, you only get $\\mathcal{E}(f,g)^2\\leq \\mathcal{E}(f,f)\\mathcal{E}(g,g)$. \u2013\u00a0MaoWao Jun 2 '16 at 13:36\n\u2022 By the Schwarz's inequality, $\\left| \\mathcal{E}(f,g) \\right| \\leq \\mathcal{E}(f,f)^{1\/2}\\mathcal{E}(g,g)^{1\/2}$. Note that $\\mathcal{E}(f,f) \\leq \\langle f,f \\rangle$. \u2013\u00a0sharpe Jun 2 '16 at 14:41\n\u2022 Sorry, I mixed up your notations of $\\langle\\cdot,\\cdot\\rangle$ and $(\\cdot,\\cdot)_H$. 
\u2013\u00a0MaoWao Jun 2 '16 at 20:28","date":"2019-05-25 10:01:58","metadata":"{\"extraction_info\": {\"found_math\": true, \"script_math_tex\": 0, \"script_math_asciimath\": 0, \"math_annotations\": 0, \"math_alttext\": 0, \"mathml\": 0, \"mathjax_tag\": 0, \"mathjax_inline_tex\": 1, \"mathjax_display_tex\": 0, \"mathjax_asciimath\": 0, \"img_math\": 0, \"codecogs_latex\": 0, \"wp_latex\": 0, \"mimetex.cgi\": 0, \"\/images\/math\/codecogs\": 0, \"mathtex.cgi\": 0, \"katex\": 0, \"math-container\": 0, \"wp-katex-eq\": 0, \"align\": 0, \"equation\": 0, \"x-ck12\": 0, \"texerror\": 0, \"math_score\": 0.9999871253967285, \"perplexity\": 502.3051540398001}, \"config\": {\"markdown_headings\": true, \"markdown_code\": true, \"boilerplate_config\": {\"ratio_threshold\": 0.18, \"absolute_threshold\": 10, \"end_threshold\": 15, \"enable\": true}, \"remove_buttons\": true, \"remove_image_figures\": true, \"remove_link_clusters\": true, \"table_config\": {\"min_rows\": 2, \"min_cols\": 3, \"format\": \"plain\"}, \"remove_chinese\": true, \"remove_edit_buttons\": true, \"extract_latex\": true}, \"warc_path\": \"s3:\/\/commoncrawl\/crawl-data\/CC-MAIN-2019-22\/segments\/1558232257939.82\/warc\/CC-MAIN-20190525084658-20190525110658-00273.warc.gz\"}"} | null | null |
'use strict';
var lodash = require('lodash');
module.exports = function(Generator) {
function appendSlash(testFolderPath) {
if (testFolderPath.slice(-1) === '/') {
return testFolderPath;
} else {
return testFolderPath + '/';
}
}
function customFolderPrompt(generator, done) {
generator.prompt({
type: 'input',
name: 'testFolder',
message: 'Specify the folder for your tests.',
default: 'test',
filter: appendSlash
}, function(answer) {
lodash.merge(generator.preferences, answer);
done();
});
}
Generator.prototype.testPrompt = function() {
if (this.useExistingConfig && this.preferences.tests) {
return;
}
var done = this.async();
this.prompt({
type: 'list',
name: 'tests',
message: 'Where would you like to store your test files?',
choices: [
{ name: 'Test folder', value: 'custom'},
{ name: 'With my app code', value: 'appcode'}
],
default: 'custom'
}, function(answer) {
lodash.merge(this.preferences, answer);
// testFolder setup
if (this.preferences.tests === 'custom') {
customFolderPrompt(this, done);
} else {
lodash.merge(this.preferences, { testFolder: 'app/scripts/' });
done();
}
}.bind(this));
};
};
| {
"redpajama_set_name": "RedPajamaGithub"
} | 9,491 |
Horry residents say 'back to the drawing board' on proposed county, school district maps
J. Dale Shoemaker
Horry County residents on Wednesday had their first chance to publicly testify on the proposed redistricting maps for county council and county board of education seats, which will determine how future local elections operate.
They weren't pleased.
"This looks like a little kid in preschool drew these maps and let paint just run down all over the place," Cedric Blain-Spain, a leader in the Horry County Democratic Party, told leaders in charge of drawing the maps at the public hearing.
County leaders are re-drawing the maps that determine council and school board districts as part of the once-a-decade mandate that's tied to the U.S. Census, which counts the nation's population every 10 years. When that data was released several months ago, it showed that Horry County had added more than 80,000 new residents since 2010, meaning that district maps could change substantially.
And indeed they did, according to a draft map released two weeks ago.
The biggest change to the map came in Carolina Forest, which was consolidated into one uniform district with a single council and school board member. Previously, the unincorporated area known as Carolina Forest was split up among four council and school board districts.
And due to the population increase, several districts around the county were compacted into small geographic areas, eliminating so-called "fingers" that existed in the previous map to make the districts roughly equal. The Carolina Forest district, for example, previously reached into Myrtle Beach city limits, and the Forestbrook district previously reached into Conway city limits. The proposed map compacts both of those districts.
With the population increase, each district includes roughly 32,000 residents.
You can view an interactive version of the proposed redistricting map here.
But some residents said the more compact maps could cause problems. Carole vanSickler, the president of the Carolina Forest Civic Association, decried the fact that the area lost three of its four council and school board representatives.
"On behalf of the residents of Carolina Forest, we were totally surprised by the reduction of Horry County councilmen from four to one, that's a 75% reduction," vanSickler said. "As an unincorporated entity, we have relied on additional Horry County Councilmen for their support."
vanSickler explained that Carolina Forest residents have used the fact they had multiple representatives to effectively address issues in their community. Residents there don't have a desire to form their own city with its own powers and representation, she said, meaning having multiple county representatives was important. Under the proposed map, County Council member Dennis DiSabato and school board member Tracy Winters would represent the area exclusively.
vanSickler also took issue with the neighborhoods included in the new standalone Carolina Forest district. The Wild Wing neighborhood in Conway, she said, was included in the district, but is not generally considered part of Carolina Forest. Similarly, she said, neighborhoods along River Oaks Drive were left out of the district, despite being traditionally considered part of Carolina Forest.
"Before this redistricting started, we were told we were going to lose maybe two (representatives), so we kind of figured we'd go from four to two," vanSickler said. "But we don't consider what they've defined as Carolina Forest being all in district three."
Other residents, like Blain-Spain, argued that areas with concentration of Black and Democratic voters were split up into multiple districts, which he referred to as "cracking and packing." In Myrtle Beach, he said, several precincts with a significant number of Black voters were broken up among three districts, meaning the influence of those voters could be "diluted."
"They have cracked up that African-American vote...so now you have a community where it's a large portion of African-Americans that are now split between (districts) two, three and four," Blain-Spain said. "So that is clear signs of cracking."
He added: "As a resident of Horry County, I'm asking that this committee go back to the drawing board. Draw fair districts without cracking and packing."
Blain-Spain and others also complained that several districts in the Western part of the county were unusually large and their borders unwieldy. Part of the reason for that is because fewer people live in the more rural areas, meaning those districts need to be larger to be equal in population to other districts. Blain-Spain, though, questioned why district 10, which stretches from S.C. 31 to the county's Western border, wasn't kept to the Western part of the county, and why it stretched into the Carolina Forest area. He suggested that Western and rural districts be more uniform.
Several residents, as well as county leaders, also voiced concern on Wednesday that the county wasn't gathering enough public input on the proposed maps.
"I'm disappointed that this room is not packed," vanSickler noted. "4 o'clock in the afternoon is a very, very difficult time to have a meeting to get people who are working, who have families, kids in school, and whatever."
Redistricting committee members Orton Bellamy, Doris Hickman and chair Tyler Servant all agreed with vanSickler that additional public comment should be gathered. They said additional meetings to gather public input would be held in the near future.
"I have been receiving calls from persons who said they wished to be in attendance but they are working and they can't get off in time, in a timely matter to be here, because then they will be penalized their salary or a day's work," Hickman, the committee's Democratic representative, said. "I just think it behooves us to make sure that our citizens will have an opportunity to come and be a part of the redistricting process."
In the end, residents like Blain-Spain said, they'd like to see county leaders re-draw the maps to make the districts more competitive. Blain-Spain noted that the county council is made up of all men, only one Black person, and no Democrats. He said he'd like to see districts where Democratic candidates and Black people could be competitive.
"You see this council, (mostly) white men, no women, so that's not competitive, that's not even fair to the county," he said. "It's like the old wild west in the decision making and everything."
Man convicted in wife's beating death in Raleigh motel room will get new trial
Fortune Nickel and Gold Receives Approval for Exploration Plans for the Gowan and Beck-Ottaway Property in Northeast, Ontario, Canada
Taiwan to pay for Guatemalan lobbying in U.S., Guatemala says
Nadia Brichikov died in a Knights Inn room. Surveillance camera captured her last moments while people passed the open room, unconcerned.
Rye Brook, New York--(Newsfile Corp. - January 18, 2022) - Fortune Nickel and Gold Inc. (OTC Pink: HTSC) ("Fortune"), a wholly-owned subsidiary of Here To Serve Holding Corp., is pleased to announce that it has received approval for its exploration plans for its 100%-owned Gowan, and Beck-Ottaway Properties in the Porcupine Mining Division, Ontario Canada. These properties lie at the core of the Abitibi Greenstone Belt, which contains some of the world's largest deposits ...
Taiwan has paid for a lobbying contract to promote Guatemala with U.S. officials, Guatemala's government said late on Monday, just as Beijing's efforts to strengthen its diplomatic foothold in Central America are advancing. "Guatemala thanks Taiwan for the support that will allow us to enhance the country's position in the United States," the Guatemalan government said in a statement.
Italian aerospace group Leonardo said on Tuesday it had reached an agreement with unions to furlough employees at its Aerostructures division to compensate for a fall in orders for commercial aircraft parts caused by the pandemic. Last year the group filed a request to put more than 3,000 workers under a temporary lay-off scheme for 13 weeks, triggering protests from unions. Following negotiations with unions, the group agreed to furlough more than 2,000 workers at four plants in southern Italy which make aircraft parts for Boeing Co and Airbus.
RWE says liquidity enough to cover extreme energy price spikes
RWE, Germany's largest power producer, has the funds needed to tackle an unprecedented rise in energy prices, Chief Executive Markus Krebber said, adding the company's liquidity managers had prepared for even the most "extreme scenarios". Less than two weeks ago, RWE's smaller rival Uniper said it needed an extra 10 billion euros ($11 billion) in credit lines. German regional utility STEAG also said record energy prices had caused it to secure extra funding to cover payments tied to hedges that are lower than the current spot price.
UPDATE 1-Biden administration in talks to head off 5G aviation standoff
The Biden administration is working with wireless carriers, airlines, airplane manufacturers and key federal agencies to resolve a looming aviation crisis, a senior administration official told Reuters. Airlines are preparing to cancel a significant number of passenger and cargo flights in the coming hours to prepare for AT&T and Verizon's new 5G C-Band service https://www.reuters.com/business/aerospace-defense/do-5g-telecoms-pose-threat-airline-safety-2022-01-18 that starts on Wednesday, after warning on Monday https://www.reuters.com/technology/exclusive-major-us-airline-ceos-urge-action-avoid-catastrophic-5g-flight-2022-01-17 of "catastrophic" impacts. Airlines want wireless carriers to not turn on some wireless towers near airport runways in a bid to avoid most of the flight disruptions.
Paramedics who treated fatally wounded teen receive 18 months' conditional sentence
Two Hamilton paramedics who treated a fatally wounded teen as though he had suffered a minor injury will serve an 18-month sentence in the community. Ontario Superior Court Justice Harrison Arrell handed out his sentence today in the case of Christopher Marchant and Steven Snively, who were found guilty last year of failing to provide the necessaries of life to Yosif Al-Hasnawi. Prosecutors alleged that the pair approached Al-Hasnawi on the night of Dec. 2, 2017 with the "preconceived notion" he | {
"redpajama_set_name": "RedPajamaCommonCrawl"
} | 2,664 |
Q: Counting the number of lines having a number greater than 100 I have a file with many numbers in it (only numbers and each number is in one line). I want to find out the number of lines in which the number is greater than 100 (or infact anything else). How can I do that?
A: Similar solution with perl
$ seq 98 105 | perl -ne '$c++ if $_ > 100; END{print $c+0 ."\n"}'
5
Speed comparison: numbers reported for 3 consecutive runs
Random file:
$ perl -le 'print int(rand(200)) foreach (0..10000000)' > rand_numbers.txt
$ perl -le 'print int(rand(100200)) foreach (0..10000000)' >> rand_numbers.txt
$ shuf rand_numbers.txt -o rand_numbers.txt
$ tail -5 rand_numbers.txt
114
100
66125
84281
144
$ wc rand_numbers.txt
20000002 20000002 93413515 rand_numbers.txt
$ du -h rand_numbers.txt
90M rand_numbers.txt
With awk
$ time awk '$1>100{c++} END{print c+0}' rand_numbers.txt
14940305
real 0m7.754s
real 0m8.150s
real 0m7.439s
With perl
$ time perl -ne '$c++ if $_ > 100; END{print $c+0 ."\n"}' rand_numbers.txt
14940305
real 0m4.145s
real 0m4.146s
real 0m4.196s
And just for fun with grep (Updated: faster than even Perl with LC_ALL=C)
$ time grep -xcE '10[1-9]|1[1-9][0-9]|[2-9][0-9]{2,}|1[0-9]{3,}' rand_numbers.txt
14940305
real 0m10.622s
$ time LC_ALL=C grep -xcE '10[1-9]|1[1-9][0-9]|[2-9][0-9]{2,}|1[0-9]{3,}' rand_numbers.txt
14940305
real 0m0.886s
real 0m0.889s
real 0m0.892s
sed is no fun:
$ time sed -nE '/^10[1-9]|1[1-9][0-9]|[2-9][0-9]{2,}|1[0-9]{3,}$/p' rand_numbers.txt | wc -l
14940305
real 0m11.929s
$ time LC_ALL=C sed -nE '/^10[1-9]|1[1-9][0-9]|[2-9][0-9]{2,}|1[0-9]{3,}$/p' rand_numbers.txt | wc -l
14940305
real 0m6.238s
A: Let's consider this test file:
$ cat myfile
98
99
100
101
102
103
104
105
Now, let's count the number of lines with a number greater than 100:
$ awk '$1>100{c++} END{print c+0}' myfile
5
How it works
*
*$1>100{c++}
Every time that the number on the line is greater than 100, the variable c is incremented by 1.
*END{print c+0}
After we have finished reading the file, the variable c is printed.
By adding 0 to c, we force awk to treat c like a number. If there were any lines with numbers >100, then c is already a number. If there were not, then c would be an empty string (hat tip: iruvar). By adding zero to it, we change the empty string to a 0, giving a more correct output.
| {
"redpajama_set_name": "RedPajamaStackExchange"
} | 1,851 |
#
**ALSO BY ROSEMARY JONES**
ED GREENWOOD PRESENTS WATERDEEP
_City of the Dead_
THE DUNGEONS
_Crypt of the Moaning Diamond_
**COLD STEEL AND SECRETS:
A NEVERWINTER NOVELLA**
©2011 Wizards of the Coast LLC
All characters in this book are fictitious. Any resemblance to actual persons, living or dead, is purely coincidental.
This book is protected under the copyright laws of the United States of America. Any reproduction or unauthorized use of the material or artwork contained herein is prohibited without the express written permission of Wizards of the Coast LLC.
Published by Wizards of the Coast LLC. Hasbro SA, represented by Hasbro Europe, Stockley Park, UB11 1AZ. UK.
Forgotten Realms, Dungeons & Dragons, D&D, Wizards of the Coast, and their respective logos are trademarks of Wizards of the Coast LLC in the U.S.A. and other countries.
All Wizards of the Coast characters and their distinctive likenesses are property of Wizards of the Coast LLC.
Cover art by: Aleksi Briclot
eISBN: 978-0-7869-6234-1
For customer service, contact:
U.S., Canada, Asia Pacific, & Latin America: Wizards of the Coast LLC, P.O. Box 707, Renton, WA 98057-0707, +1-800-324-6496, www.wizards.com/customerservice
U.K., Eire, & South Africa: Wizards of the Coast LLC, c/o Hasbro UK Ltd., P.O. Box 43, Newport, NP19 4YD, UK, Tel: +08457 12 55 99, Email: wizards@hasbro.co.uk
Europe: Wizards of the Coast p/a Hasbro Belgium NV/SA, Industrialaan 1, 1702 Groot-Bijgaarden, Belgium, Tel: +32.70.233.277, Email: wizards@hasbro.be
Visit our websites at www.wizards.com
www.DungeonsandDragons.com
Welcome to Faerûn, a land of magic and intrigue, brutal violence and divine compassion, where gods have ascended and died, and mighty heroes have risen to fight terrifying monsters. Here, millennia of warfare and conquest have shaped dozens of unique cultures, raised and leveled shining kingdoms and tyrannical empires alike, and left long forgotten, horror-infested ruins in their wake.
**A LAND OF MAGIC**
When the goddess of magic was murdered, a magical plague of blue fire—the Spellplague—swept across the face of Faerûn, killing some, mutilating many, and imbuing a rare few with amazing supernatural abilities. The Spellplague forever changed the nature of magic itself, and seeded the land with hidden wonders and bloodcurdling monstrosities.
**A LAND OF DARKNESS**
The threats Faerûn faces are legion. Armies of undead mass in Thay under the brilliant but mad lich king Szass Tam. Treacherous dark elves plot in the Underdark in the service of their cruel and fickle goddess, Lolth. The Abolethic Sovereignty, a terrifying hive of inhuman slave masters, floats above the Sea of Fallen Stars, spreading chaos and destruction. And the Empire of Netheril, armed with magic of unimaginable power, prowls Faerûn in flying fortresses, sowing discord to their own incalculable ends.
**A LAND OF HEROES**
But Faerûn is not without hope. Heroes have emerged to fight the growing tide of darkness. Battle-scarred rangers bring their notched blades to bear against marauding hordes of orcs. Lowly street rats match wits with demons for the fate of cities. Inscrutable tiefling warlocks unite with fierce elf warriors to rain fire and steel upon monstrous enemies. And valiant servants of merciful gods forever struggle against the darkness.
**A LAND OF
UNTOLD ADVENTURE**
# Contents
_Cover_
_Other Books by This Author_
_Title Page_
_Copyright_
Map
First Page
_Every attack has its defense: it needs only a quick eye and good judgment to confound the thrust_.
—Elyne, a lady of Neverwinter
_1478 DR_
**T HE YOUNG NASHERS YELLED AT EACH OTHER AS RUCAS SARFAEL** rolled across the floor of the armory, grappling with the hellhound left to guard its treasures. Dhafiyand, the spymaster of Neverwinter, had assured him that there was no great protection for the weapons, and the armory had seemed like the perfect place to let Elyne's students practice some burglary for the good of their cause and ingratiate himself with their rebel teacher. At the moment, Sarfael strove to keep his ruse from turning him into a roasted corpse.
Two of Elyne's students came to his aid. Parnadiz ran forward to stab the hound with his outdrawn sword as Charinyn whipped off her cloak, flapping it in one hand, seeking to distract the creature by flourishing it. The others closed in, swords out, thrusting eagerly to kill the fiendish dog.
"The eyes," Sarfael called out as he thrashed on the floor. "Blind it!"
They stabbed as he commanded, and Charinyn managed to nick the corner of the hound's eye with her sharp rapier.
With a horrendous howl, the hound rolled off Sarfael. Snarling, it backed away from the group.
Its eyes glowed like hot coals and its huge mouth opened. Deep in its gullet, flames began to burn.
The young wizard Montimort gave a shout and a wave of ice flew off his hands, engulfing the creature and knocking it into the weapons chamber. The hound's giant paws scrabbled for purchase on the icy floor. It slid into a pile of breastplates that fell with a clatter on its head.
Sarfael whipped out Mavreen's sword. With a great leap, he cleared the hound, landing behind it. He slashed down and across, neatly cutting its throat.
With a gurgling bark that erupted in a small flame, the hound collapsed. The guard dog died at Sarfael's feet.
There was a moment of stunned silence, then Parnadiz ran forward. "Well struck," he said.
Sarfael looked up from the dead hound at the stunned Montimort.
"Well done, indeed," he said to the young wizard. "Quick thinking to use ice against it."
Charinyn and the others began to pluck weapons from the walls, quickly bundling their loot into the blankets and bags they had brought.
"We need to hurry," she said. "Before the patrols return."
Sarfael nodded.
The weapons secured, they moved briskly through the streets. As previously arranged, a hooded-and-cloaked Elyne met them near the foot of the ruined Dolphin Bridge. With her was another group, also well muffled against the night fog and prying eyes. With whispered instructions, the weapons were transferred and the recipients melted away into the dark streets.
"Where are they going?" Sarfael asked as casually as he could.
"To caches throughout the city," she replied.
Another man joined them. "So this is your newest recruit?" he said to Elyne. "Your students say he saved them tonight."
"Montimort's wizardry accounted for our victory," said Sarfael.
"Ah, yes, Elyne's Luskar pet," he said.
"The boy has proved his loyalty more than once, Arlon Bladeshaper," she snapped back at him.
"But he is not and never will be a child of Neverwinter," rejoined the other to Sarfael's intense interest. Dhafiyand loved hearing about arguments and divisions among the rebel factions. The belligerent Arlon looked like he could be useful for starting a small schism among the Sons.
The man turned to Sarfael. "We welcome the return of exiles like yourself. Elyne, bring him to our next meeting."
"And Montimort?" she asked.
"Leave the boy behind," Arlon said.
"This prejudice of yours serves no one," Elyne argued. "Least of all the city we both love."
Sarfael silently applauded the lady's forthright criticism of the Nasher before her, but he held his tongue. After all, Dhafiyand had sent him to make friends, not enemies. And the man had said he would welcome Sarfael to the Nashers' next meeting.
Arlon shrugged at Elyne's protests. "I will expect you there," he said. "There are new rumors that the treasure we seek might have been found by that mad cousin of yours."
Sarfael pricked up his ears at the talk of "treasure." Dhafiyand would want to hear that.
"Karion is far more dangerous than Montimort," Elyne said to Arlon, but the big man just shook his head at her and walked away. She stood staring after him, one slim foot tapping angrily against the pavement.
"We would not have escaped serious harm without Montimort's aid," Sarfael said to the still simmering redhead as they walked back to the warehouse. Her students ran a little ahead of them, full of whispering laughter about the success of the night's raid.
"I know," Elyne said. "We have far too few with any magical skills. The boy is a gift, and one that they should treasure. But they see only that he comes from Luskan." "You disagree?"
She nodded. "He is as committed to the rebellion as any born here."
"And you are as loyal to him?" Sarfael hazarded a personal question a little sooner than Dhafiyand would consider wise, but he wanted to know. She intrigued him, this rebel daughter of Neverwinter.
"He reminds me of family I have lost," she admitted.
Sarfael told the truth without intending to. "I know what you mean." The quick, light step of Elyne beside him reminded him of
Mavreen and all he had lost to the Red Wizards.
As always, Dhafiyand's room was very warm, with a good fire crackling in the grate. Sarfael watched the flames flicker with a sour expression.
"You did not tell me that General Sabine guards her weapons with hell hounds."
The spymaster glanced up from his correspondence at that. "Does she really? I wonder if that is the gift from Mordai Vell she mentioned at dinner the other night."
"Vell?"
"An admirer of our general, apparently. At least to judge by the number of invitations that he issues to her and her staff, as well as the small presents of esteem that he sends her. All for the good of the new Neverwinter, at least according to him."
"But?"
"He is a tiefling, and worse, a subtle, rich tiefling who uses gold to stifle the whiff of brimstone that hangs around him." Dhafiyand leaned back in his chair and folded his long, lean hands upon his chest. "But he is not your concern. I gather that you meet with others tonight."
"A meeting of some of the younger leaders, including one quarrelsome soul named Arlon."
Dhafiyand nodded in satisfaction. "We've heard stories about that one."
"Well, he's calling this meeting, and let's hope I hear something more than his spouting on true bloodlines and the best of Neverwinter." Sarfael remembered the rebel leader's quick dismissal of Montimort's skills, simply because the boy was Luskan bred, and the distress that caused Elyne. Truly, bullheaded Arlon was an annoying soul.
"One would hope so," said the spymaster. "Or I have wasted your considerable talents upon this group."
"There are greater dangers to Neverwinter," Sarfael began.
"Not Red Wizards again." Dhafiyand sighed. "There is no threat there. No, bring me the plans and plots of these Nashers. And continue to listen for talk of a crown."
"Again, a crown?" Dhafiyand had harped upon that earlier. But it was myth. There was no king and no royal heir in Neverwinter. "Why is a crown so important with no one to wear it?"
"A crown can lead to a throne, an empty throne. If such a thing exists, Lord Neverember must take it for himself. There's something of a story in the city, that a crown can call forth a true ruler of Neverwinter."
"If such a thing exists." Sarfael rather doubted it, but there was no denying Dhafiyand's sudden gleam of interest, which had been quickly masked by the man's attention to the paperwork spread across his worktable, when he had told him earlier about Arlon's comments of a treasure found.
"Still, better we have it than some group of children playing at rebellion," concluded Sarfael.
"Precisely," said the spymaster.
Rucas Sarfael followed the directions he was given to the Kraken Society building near the graveyard. From the outside, it appeared to be another of Neverwinter's dilapidated structures. Inside, the meeting had already begun. Voices were raised. Arlon Bladeshaper pounded on the table to quiet the others.
"Let Virchez finish reading his letter," the young leader shouted over the din.
A plump man waved a paper at the others. "My cousin writes that we can no longer count on our friends in Waterdeep for funds."
"Cowards!" shouted one tall and heavyset blonde woman. She looked enough like Elyne's student, Charinyn, for Sarfael to guess her a relative, a mother or aunt. "They bow to Lord Neverember and forget their families here."
Elyne saw him from across the room and waved for him to come closer to her. He began to weave through the crowd.
"It's worse than that, Torialaine. My cousin says that a man in Waterdeep, an agent of Neverember, wrecked his business," went on the letter reader. "A notable rogue, who seduced my cousin's maids into stealing important documents for him."
Sarfael stopped where he was. That all sounded unfortunately familiar.
"What happened to the man?" asked Arlon.
"He has disappeared, and my cousin warns us to watch for him in Neverwinter."
"Does he send a description?"
"Yes, yes," said the little man. "That's what I was trying to read you. He says the fellow is no youth, but still very strong and nimble. He goes always armed with a black-hilted sword."
Sarfael shrugged his cloak so it covered the dark hilt of Mavreen's sword and slid it half out of its sheath. He measured the distance to the door. There were nearly a dozen Nashers between him and the only exit. Across the room, Elyne arched an eyebrow at his delay. He half-turned away from her, hoping the quick-witted swordmistress hadn't paid attention to Virchez's last statement. After all, she'd handled Mavreen's sword, borrowing it from him to examine it more closely.
A thunderous knocking on the door caused Virchez to drop the letter from his Waterdeep cousin. Sarfael kept a look of friendly interest on his face as he slapped backs, shifted closer to Virchez, and counted the number of probable attackers between him and the door.
As Virchez fished under the table for his letter, the Nashers nearest the door dragged a new man inside. Arlon Bladeshaper motioned to them to bring the latecomer forward. Some Nashers grumbled about the interruption, others yelled at Arlon to tell them what was going on, and Arlon shouted back at them to shut up and listen.
These rebels, thought Sarfael, are not quiet folk.
"I saw him tonight!" cried the tall pimply youth when he reached Arlon's side. "Karion's gone back to his old house in the Blacklake District."
Sarfael paused in his careful stalking of Virchez and his missive. That name sounded familiar. Arlon and Elyne had quarreled earlier about Karion and his tales of treasure. Keeping his ears occupied with Arlon's questioning of the late arrival, and his eyes peeled for Virchez's letter, Sarfael could only manage a slightly distracted nod at Elyne. Luckily, another man came up to her and began whispering in her ear. The redhead scowled at him and moved farther down the room.
"Are you sure?" Arlon asked the youth before him.
"I saw Karion very clearly."
"Did he see you?"
"No, no, I did as you said. I kept out of sight until he went into the house and then ran straight here."
"Good!" Arlon banged his fist upon the table again. "My friends, we have an opportunity here. Let the Nashers be bold where the other Sons of Alagondar have been timid. The Graycloaks—we should call them Graybeards for their constant refusal to act—have repeatedly ignored Karion's claims, but we must not be so foolish."
The shouted talk turned to "What about Karion?" and "What is Arlon babbling about?"
The news from Waterdeep, and the accusation of a spy nosing into Nasher business, seemed forgotten for the moment. Rucas Sarfael slid his half-drawn sword back into its sheath with a relieved sigh. He sidled next to Virchez and clapped the little man on the shoulder while setting his boot squarely on the dropped letter with the damning description of himself as the man who bankrupted his cousin and spoiled that source of funding for the rebellion.
"So, Virchez," he said with all the warmth of an old friend. "Who is this Karion they are all yelling about?"
"Oh, he's that batty old seer, the one constantly predicting some disaster or other," said Virchez, obviously a bit miffed to have been interrupted and eager to impress the friendly chap at his side with his knowledge. "He's been roaming around the city recently, claiming the crown will return to Neverwinter. That the heir will be found. That the dead will come out of the river to attack us. All the usual nonsense."
Across the room, Elyne pitched her voice to be heard over the dozen excitedly talking about Karion's predictions. "Karion has always said he knows secrets. He's spent years tunneling into the castle and searching among the ruins. There's nothing in his house but a remarkable pile of garbage."
"You sound very certain of that," Arlon said to her.
"He's a cousin, thrice removed, of my mother," she said. "I've listened to his tales all my life. Karion rarely knows the past from the present. He savages the city for treasures, but drags home every piece of trash that he finds. I very much doubt there is any truth to this story that he's found the crown."
"But we must learn more," said Arlon. "It seems we can expect no aid from Waterdeep. We counted on that gold to rally the populace to our cause. Finding the crown may be our best hope for dislodging Lord Neverember's grip on the city."
So Dhafiyand was right, Sarfael thought. There would be crown hunting in Neverwinter this spring.
Arlon continued to harp upon Elyne's connection with Karion. She continued to assert that the old man was a cracked pot well beyond repair, although the terms she used were more elegant than that.
"Do you know her?" Virchez asked Sarfael, who had been distracted momentarily by the argument between Elyne and Arlon.
"I have attended a few sessions in the lady's school for elegant fighting," said Sarfael. "She trains her students well."
"They are lucky to have her as a teacher," Virchez said with a sigh of admiration.
"Lucky?" Sarfael reached down as if he were scratching his ankle and snatched up the discarded letter lying on the floor. He stuffed it into the inner pocket of his cloak and turned his attention to Virchez.
"Everyone admires her. After her parents disappeared and that sister of hers ran away, she never wavered. Just formed the school for fighting, helping train our young recruits for the days of glory to come..."
Virchez nattered on about how the city would regain its place on the Sword Coast if only its own people could control it. Paying little heed to the rehash of the usual rebel rhetoric, Sarfael watched Elyne as she flung one hand up to acknowledge some point or other of Arlon's persistent argument.
"Very well," she said. "I will go. If only to end this rumor."
"Take some others with you. I've heard Karion can play vicious tricks on those he dislikes. That's why I told the boy to stay away from him and come to us here."
Elyne glanced around the room. "I'll take him," she said with a nod at Sarfael. "He keeps a cool head in a fight. And, also, Montimort."
"Your Luskar prodigy?" Arlon scowled.
"Montimort is a good lad," Elyne said.
"But he is not and never will be a child of Neverwinter," rejoined Arlon. "And a Luskar ally is no ally at all."
"Stop quoting dead men. It is you who counsel making new alliances with the Dead Rats," she said, "a dangerous idea, I think."
"We must use what tools we can for victory," he said. "But our safety lies with those of the true bloodlines, the children of the city who know and understand its glorious past. Let us look to the sons and daughters of Alagondar to lead us!"
The Nashers nearest Arlon banged their fists upon the table in agreement.
"To go off with the beautiful Elyne," said the plump Virchez with a sigh.
Sarfael watched the tall redhead deftly weave her way through the crowd. He would have to tell Dhafiyand there had been talk of a crown, but already a series of lies began weaving through his head—he didn't want the old man moving too quickly. He needed time to investigate the rumors properly.
In the back of his head, Mavreen snorted as she always did when he tried deceiving her or himself. "You simply want more time with the pretty Elyne," she whispered in his mind.
"Jealous, my darling?" he asked.
Mavreen's rippling laugh came back to him, that joyous beginning to so many of their adventures. "Go on," she whispered. "Forget about your ghosts and look to the living."
At least, that is what he believed that she would have said to him.
Sarfael slipped away from Virchez and intercepted Elyne by the door.
"Why did you want me with you?" he asked her as they stepped into the night.
"I want Montimort. His skills might be useful with Karion, and, more importantly, it gives him a chance to win Arlon's approval," she confessed. "And that means taking you."
"Because?"
"Besides myself, you are the only one who treats Montimort as an equal. Who I can trust to protect him as I would. And where we are going, he may need that."
"So there is talk of a crown?" Dhafiyand wiped the tip of the pen upon a flannel and set it deliberately upon his enameled brass penholder. It was nearly midnight, but the old man seemed as alert and awake as ever.
"There was talk of a man who claims to know of a crown," Sarfael reported. His own head ached from all of Arlon's shouting, and he looked forward to snatching a few hours of sleep before traipsing across the Blacklake District. "All rather vague. But we are being sent to investigate tomorrow."
"But this Arlon Bladeshaper is definitely seeking a crown?"
"He needs supporters. They love to hear themselves talk, these Nashers, but I think they are reluctant to do more than chatter. Arlon says this crown will help turn the mob against Neverember. I think he believes it will move the Nashers to greater feats." He wondered if Dhafiyand would notice how he kept Elyne's name out of the conversation. Probably, but with luck, all the talk of a crown would distract him.
The spymaster leaned forward and steepled his ink-stained fingers beneath his chin. The man might run the largest network of spies in Neverwinter, but he kept his books himself like any clerk. "He may well be right. A crown can be a potent symbol and these are a people desperate for signs and portents."
"Oh, I heard plenty of talk of that during the night." To shake the fog from his head, Sarfael circled the room, stopping at the display of trinkets upon Dhafiyand's mantel. The charming miniature of the moon elf caught his attention again. In the flickering light of the candles, the lady looked older than she had before and seemed to stare at him with displeasure.
Behind him, Dhafiyand went on, "Watch, listen, bring back any news that you hear about the crown or its location. If such a thing exists, we must make certain that it falls into Lord Neverember's hands first."
"So he can crown himself king of Neverwinter?"
Dhafiyand shook his head. "It might not be so simple. He might be well served by its disappearance."
"Then perhaps it would be best if I simply make sure that it is not found," Sarfael suggested.
Dhafiyand considered for a long moment. "No," he said finally, "better to gain the crown and silence the tongues of any who have seen it."
"I am no murderer," Sarfael reminded him, as he had more than once in the past.
And, as he had in the past, Dhafiyand gave him cold comfort in his reply. "It does not matter. There are others without your scruples."
"It was you who said that Lord Neverember had some ties to certain of these young nobles, however rebellious their nature, and he would not necessarily want them punished." Sarfael edged around the topic, still playing the game with a spy's caution and not mentioning Elyne by name.
"True," admitted Dhafiyand. "Especially the pretty redhead."
Sarfael kept his face blank. Better not to let the wily old man know that remark hit home.
"Still," Dhafiyand continued, "of all the remnants of nobility left in Neverwinter, one could say that she has even more right to a crown than any other, even Lord Neverember."
"But I do not know any in this room who would say or even think such a thing," Sarfael said bluntly. "For we are both loyal servants of Lord Neverember."
"Quite so," Dhafiyand said, returning to his papers. "Send me word as soon as you learn more."
As he left the room, Sarfael began to consider ways that he could deliver the crown to Dhafiyand and smuggle Elyne out of Neverwinter. For it seemed the pretty rebel's connections to Lord Neverember might not be enough to protect her.
They went to the Blacklake District at noon. The northwest part of the city held a quiet air of menace even in broad daylight. Sarfael noticed that Elyne looked carefully from side to side as they wove through the streets. She also shrugged back her cloak, despite the cold spring wind, clearly showing that she was armed with sword and dagger.
Montimort's gaze darted to every dark doorway and shadowed alley. His arms were wrapped around a large covered basket.
"We're being followed," Sarfael quietly observed to his companions. Three ruffians, all hooded—two lean men armed with swords and one orc-looking brute with a cudgel—made the same turns and twists they did.
"I know," Elyne said. "I was hoping they wouldn't spot us. Or that they would be reluctant to attack with so few."
"Do you know who they are?" Sarfael asked. "I always prefer to know the names of the men trying to cut my throat."
"Dead Rats," mumbled Montimort.
"Ah," said Sarfael. Luskan's infamous gang was growing in Neverwinter. He had heard the spymaster Dhafiyand complain more than once about the number of newly dead found floating in the river after one of the Dead Rats' territory expansions.
Elyne looked right and left, then led them in a succession of quick turns into a long, narrow street overshadowed by boarded-up buildings. No one else was out on the pavement.
The three Dead Rats hung back.
Sarfael glanced over his shoulder at them. "They don't seem too eager for a fight," he said.
"They know me," said Elyne quietly. She was obviously not boasting but making a simple statement of fact when she added, "It is not wise to challenge me. But there are a great many Dead Rats in this district, and they probably hope to encounter others soon."
Montimort bit his lip and threw many glances over his shoulder, but he kept pace with them and said nothing, although Sarfael could see that the boy was practically bursting with the effort of holding his tongue.
"I'm sorry," Elyne finally said to Montimort. "I shouldn't have exposed you here. But I wanted to show Arlon how much I trust you."
"No, it is all my fault," the boy started in a rush. "You should go on. I can hold them off."
"Nonsense," began Elyne.
Sarfael cut off what was obviously about to become an argument between the pair. The boy's eagerness to sacrifice himself for Elyne was indeed noble, as was Elyne's refusal to accept such a sacrifice. However, nobility lacked practicality in such situations.
"Where can we turn and fight?" he asked Elyne.
"Next alley," she said with admirable quickness. He did admire a woman who understood the practical at such times, another reminder of what he had lost when Mavreen was killed. "It's broad enough for two abreast, but difficult for three. Montimort, move behind us when the blades come out."
"I can defend myself," the young wizard retorted.
"I expect you to do so," she answered calmly. "But from a distance. They want you. If it's a grab-and-run they have in mind, let us make it as difficult as possible."
"Might I ask why they want him?" Sarfael inquired. "Not that you aren't lovable, my friend, but still..."
"They have as few wizards as the Nashers," Elyne answered. "They could use him."
"I won't go back, they know I won't," Montimort said as they entered the narrow alley. Elyne and Sarfael whirled as one to face the entrance, and Montimort slid with obvious reluctance behind them.
"No heroics," Elyne said.
"I'm rarely heroic," Sarfael said.
"I wasn't talking to you." She glanced over her shoulder at Montimort. "Stay back, let us handle them. Don't lose that basket!"
The three Dead Rats rounded the corner slowly, chatting to each other, but when they saw the drawn blades facing them, they gave up all pretense of other business. With a shout, the half-orc charged them, swinging his cudgel in a sweeping blow meant to bowl them over.
Elyne waited until the last possible second then drove her sword precisely under his flailing arm and down into his knee. She wrenched the point free as the brute swayed back with a howl of pain.
At the same time, Sarfael struck a calculated blow at the second man, so his opponent overbalanced in his attempt to block the thrust. Sarfael flowed back and then forward, using the edge and the point of his sword to deliver a flurry of rapid jabs that left his opponent bloodied and bewildered.
With another quick strike, Elyne killed the half-orc and drew back slightly, forcing the third and final Dead Rat to lunge over the body of his comrade to reach her.
Sarfael finished off his man, meaning to come to her aid, but Elyne's sword darted out, parrying the thrust of her attacker and driving straight through his padded vest to his heart. The man was dead before he hit the ground.
"Very neat," he said with one raised eyebrow. "You must teach me that trick."
Elyne stepped back from the corpses. "They were fools and died fools' deaths." She wiped her sword clean and sheathed it.
"Should we do something about the bodies?" he asked her.
Elyne glanced up and down the empty alley. All the windows overlooking it had remained tightly shuttered throughout the fight. The clash of steel, Sarfael noted, had brought no one running, arguing that the citizens of Blacklake were remarkably uncurious or perhaps more cautious than most.
"Safe enough to leave them here," Elyne decided. "The Rats will find them this evening. That's why I wanted to come so early. These streets become much more crowded after twilight. I want to get Montimort out of this district before nightfall."
Beside her, Montimort flushed. "You shouldn't have to protect me," he muttered. "I should be strong enough to keep them away."
"If your magic was greater," said Elyne, "they would send even more after you. For now, be glad that they misjudged us."
The boy still looked sulky, so Sarfael gave him a friendly rap on the head as he passed him. "Keep those brains between your ears, and not decorating the pavement, and your powers will grow every year. A fighter's strength is eaten away by time, but a wizard's only increases."
Montimort sighed and ran his fingers through his hair. "I know. But it is not fast enough. I owe so much to Elyne. I would take this city for her, if I had the spells to do it."
Elyne smiled at him. "Stay safe, that is all I ask. I'm not sure what I'd do with Neverwinter if you gave it to me."
"An odd sentiment for a rebel," said Sarfael.
"I'm a terrible Nasher," Elyne admitted. "But my father believed so passionately in the cause, and I cannot betray him."
"Is he dead then?" Sarfael remembered Virchez's idle chatter at the meeting.
"Lost, along with my mother. They left the city two winters ago and did not return. Like the others, they sought allies to aid us and were last seen entering the Neverwinter Wood."
"A dangerous place, if all the stories are to be believed."
"But one with a rich history. My father thought that a truce might be made with the powers there, or treasures bought with promises of future alliances with the new Neverwinter. But the eladrin who roam that forest guard their secrets and do not look kindly on outsiders. My mother, like myself, had considerable talent with the sword and went to protect him."
"And no word of their fate?"
Elyne shook her head. "My sister started hunting for them last year."
"I heard she ran off."
"Virchez?" At Sarfael's nod, she snorted. "That man can never get anything right. Much like that foolish cousin of his in Waterdeep. No, my sister is an adept in the magical arts. When a child, she trained with an eladrin friend and can walk safely in many places where I would be challenged."
"Such as?"
"Oh, it is not as it was in my grandmother's stories, when the fey folk and others were friendly in their dealings and travel was easy along the Sword Coast. Still, you cannot change the past. We decided that I would stay, for all our wealth is here and someone must manage our household and protect our servants, and she would go. So I remain, the last of our little family in Neverwinter."
"Did you form your school after your sister left?" His guess was rewarded with a nod from Elyne.
"I teach our old playmates how to protect themselves," she said.
So the lady taught sword play to help her friends? Where was the dangerous rebel Dhafiyand feared? Perhaps Lord Neverember's assessment of her had less to do with her looks and more with her character.
"Arlon Bladeshaper grows more violent in his plans every day," Elyne spoke in low tones and continued to scan the alleyways and walls with sharp eyes as they hurried away from the truly dead Rats and their ambush.
"I noticed your argument with Arlon at the meeting last night," Sarfael said. He walked as quickly but kept watch with less obvious turns of the head. She was good with the blade, but he could show her a few tricks of spycraft, such as how to saunter through dangerous streets.
"We often disagree. He thinks too much of bloodlines, and those who trace their lineage back to Alagondar and the Neverwinter Nine. At the same time, Arlon makes alliances left and right with any who he thinks can bring us an advantage. He justifies it by saying that he can keep them at a distance and not give them a place at the table when we meet."
"A tricky path to power, and dangerous to follow."
Elyne nodded. "If he takes complete control, and there are many who see him as their leader already, I fear that the Nashers soon will be openly attacking Lord Neverember's mercenaries. It would be bloody war in the streets."
"When that day comes," Montimort interjected, "we will prevail. I just need to find the right master, someone powerful who can teach me more quickly."
Sarfael looked at them and thought that Dhafiyand had been wrong when he named Elyne a pretty ruffian. She was indeed a noble lady, and Montimort, for all his pirate past, a chivalrous boy.
Karion's dark house was squeezed between two larger and heavily damaged buildings. Only wide enough to present a door and a single, boarded-up window on the ground floor, it rose four stories, each upper floor showing only two narrow windows, also shuttered against the sun.
It reminded Sarfael in shape and color of certain types of fungus that grew up through cracks in stones.
Elyne stared with distaste at the black door with its rusty iron knocker.
"Are you sure there is anything living in there?" Sarfael asked.
"A good question," she replied. "I never liked coming here as a child. But it looks as it always did, and Bottleburn seemed certain he'd seen Karion enter."
She reached forward and, not touching the knocker, banged the flat of her hand against the door three times.
"Karion, Karion," she shouted, "it's Elyne."
Silence responded. Elyne hammered on the door again, shouting her name.
The third time, they heard a muffled cry from inside: "Wait, wait."
Bolts screeched and chains rattled. The door swung back with a squeak of rusty hinges.
A tall old man peered blinking into the afternoon sunshine. Dressed in tattered velvets and silks of faded scarlet, cut in the style of forty years ago, he swayed in the doorway. "Iriardne?" he said.
"I am Iriardne's daughter, Elyne." She stepped closer and, to Sarfael's delight, neatly placed one booted foot across the threshold, keeping the skinny old man from slamming the door in their faces. Behind her back, she flapped her hand at them, motioning them forward.
"We've brought you supplies," she said. "Food for the month."
Montimort staggered forward with the wicker basket and Karion's eyes gleamed.
"Cheese?" Karion asked.
Elyne nodded. "Bread, wine, meat, and fruit as well."
Karion stepped back from the door, motioning them inside. "Don't dawdle, boy," he said to Montimort. "They'll sniff it out and come running. You can't keep a good cheese in this district, not for a minute, without the rats trying to steal it."
Once inside, Karion slammed the door shut, bolting and chaining it. "Can't keep a good cheese safe," he muttered. A single, guttering candle stood in a sconce by the door. Karion lifted it up and led them down the dark and narrow hallway.
Sarfael noted the portraits of men and women lining the wall from the floor to the shadowed ceiling. The painted eyes of the multitude seemed to track them as they passed.
They went down a narrow staircase, also lined with pictures, although some of them seemed to be landscapes and paintings of the city before the cataclysm. Karion led them into a kitchen lit by a fire sputtering in a cavernous fireplace.
Montimort fell back with a startled cry. An enormous striped cat crouched on the table facing the door, its lips drawn back in a snarl to reveal needle-sharp fangs.
"Not afraid of kitty, are you?" Karion smacked the immobile cat with one hand and a cloud of dust rose into the air. "Kitty has been dead for twenty years or more. I keep him here to scare off intruders, especially certain rodents."
Karion circled the room, pulling down various crockery pots and lidded boxes, muttering as he went. "No, no, still got a bit of bacon in that," he said as he peered into one. Another was hastily capped and replaced with "not sure what that is." Finally he found an empty pot to his satisfaction and brought it back to the table, shoving the stuffed cat aside with one impatient hand.
"Give me the basket," he said to Montimort.
Karion rooted through the basket that they had brought, unearthing a large slab of cheese with a delighted cry. He carefully packed the cheese away in the stoneware crock, fastening the lid tightly over it. Hugging the pot close to his chest, he left the kitchen.
"Are you certain he is sane?" Sarfael asked Elyne.
"Not at all," she replied. "We were terrified of him as children. He would have fits and begin to spout threats entangled with prophecies. But he does have some true talent. He once told me that I would stand alone in the city with only my sword for my companion."
A pair of dirty windows overlooked a tiny courtyard. Sarfael glanced outside. All types of rubbish, broken statues, old furniture, boxes, and crates filled the space. Another staircase, forged from iron, twisted up the far wall, apparently leading to the street above.
"That's quite a collection out there," Sarfael remarked.
"For as long as I remember, he's scoured the city for the items he sees in his visions," Elyne remarked. "Since that day of cataclysm, he's grown much worse."
Karion returned empty-handed. "What do you want?" he asked. "You must want something. Everyone wants something in Neverwinter. Everyone wants to be something in Neverwinter. Conquerors, looters, counterfeit kings."
"We've come about the crown, Cousin," said Elyne.
"Stashed away," Karion flitted around his kitchen, unloading the basket and storing the rest of the food both high and low on the shelves. "Keep it safe from goblin kin, rats, and undead things."
"Is he talking about the crown or his cheese?" Montimort asked.
Sarfael shook his head. Something skittered across the end of the room, lost in the gloom. If it was a rat, it was uncommonly large and very pale.
Elyne stopped Karion in his restless wanderings. "There are only friends and family here."
"Who knows who hears?" Karion whispered to her. He stopped by his stuffed cat, his restless hands stroking the dead fur and fondling the creature's pointed ears. He stared at Montimort. "Arklem Greeth's lover listens at keyholes, watches in mirrors, speaks through painted mouths."
"Arklem Greeth!" exclaimed Montimort. "That is a dark name out of Luskan's past. But the villain has been dead a century or more!"
"So should his beloved be, but the grave won't hold Valindra and she's pushing into the city, poking into the shadows, sending her spies to snatch my treasures," Karion crooned to no one in particular. "Pretty little moon elf, grasping with her cold dead hands. But she can't take it from me! My pets will protect me."
Whatever crawled along the edge of the room had acquired a companion. The crooked shadows cast up the wall looked like no creature that Sarfael knew.
"Cousin, we have come about the crown," Elyne said.
Karion's eyes narrowed and the faintest smile curled his thin lips. He beckoned to them all to come closer. Standing next to him, Sarfael became aware of a certain dank odor of decay, a grave-mold smell that evoked past adventures with Mavreen. A whiff of the necromancer hung around the old man.
"I don't have the crown," whispered Karion with exaggerated care. "I have the box."
"A box!" exclaimed Montimort. "What good is a box?"
Karion grinned with a distasteful display of yellowed teeth. "It hides a crown that is not there."
"What?" Elyne looked bewildered.
"Come, come," Karion's expression turned gleeful. Suddenly seeming delighted to have them in his home, the aged seer ushered them back upstairs, passing through the dark hallway with its dozens of painted portraits, all staring down with suspicious eyes.
Behind them, Sarfael heard a skittering sound. He glanced back more than once, but could not see what followed. Yet he was convinced that it was not rats.
Clutter filled the room upon the first floor. All the detritus of the city's past seemed to have washed into Karion's chamber: bits of old clockwork, elaborate sconces obviously ripped from some mansion's wall, ornate chairs missing their seats, and more.
"It looks like the Driftwood Tavern," exclaimed Montimort.
Sarfael raised an eyebrow at him, and Montimort explained that the remnants of Neverwinter's past decorated the inner rooms of the tavern for the patrons' delight.
Karion overheard him and scowled. "The proprietor, Madene Rosene, is a thief and cheat," he huffed. "Why, she's refused many a fine treasure from me, saying that it's not fitting for her place. But the woman uses doors for tables!"
Elyne shot a look at Sarfael and Montimort that was obviously meant to silence them both. Then she turned to Karion. "You wanted to show us a box," she reminded him.
"They made it in the dark days when Alagondar was wounded," Karion said. "When the Neverwinter Nine needed to send the crown from Highcliff to the castle, but they dared not risk it upon the road. The box appears empty, it is empty, and if captured by enemies, can do no harm. But with the right incantations, the crown appears within."
Karion dived into a pile of bric-a-brac, shoving aside a rolled-up carpet and sending two brass vases rolling with a clatter across the floor. With a grunt, he emerged with a carved wooden box clutched in his grimy hands.
"Can I see it?" Elyne said, reaching out with gentle hands.
With some reluctance, and no little urging from Elyne, Karion allowed her to take the box from him to show to the others. Painted red writing was scrawled across every side of the dark wood, words sloping up and down or twisting around themselves in concentric circles. In the center of the lid, a single emerald gleamed.
"It's Thayan," Sarfael said, and he could not keep the revulsion completely from his voice. He had no love for the handiwork of those necromancers. Clever as their artifacts might be, they all carried a trace of human blood and terrible suffering. The memory of Mavreen's face contorted in a final scream of dead rage still haunted him.
"It's a spell," said Montimort, twisting the box in his hands, "but it's a puzzle too. You need to know where to start and stop. There must be a key to this."
"I don't see a keyhole," Elyne said. The lid fit so tightly on the top that only the faintest line showed against the black wood.
Montimort shook his head. "No, a key word, the one that you begin with. Or it could be a letter or a symbol. These old puzzle boxes are highly prized and rare these days. They were made in pairs, one to go on a ship, one to stay back in Luskan. If you read out the ritual in the correct order, an item is transferred from one box to the other."
"A way for pirates to send treasure home," Sarfael guessed.
"Exactly. But it could only be a small prize. The boxes cost a treasure to build. Often they were the size of a ring or gem. And only one or two people would know the correct order of the spell. Usually the captain and somebody trusted on shore."
"This one is linked to the crown." Karion scowled at them with sour dislike, his mood having changed again. "The boy's a Luskar," he said to no one in particular. "A Luskar rooting among my treasures."
"How does it work?" Sarfael asked. Karion's intent stare at Montimort made him uneasy.
"Only box of its kind," Karion said. "He was clever, the Red Wizard who built it, clever enough to link it to the crown so it could call it from wherever it was hidden. But it was a trick too, a trick on those who hired him. He meant to use the box to steal the crown for himself. They caught him and killed him. So he never got a crown. Still he hid the box before he died. Nobody could find it, nobody but me and my little friends. The Luskar's right. You have to know the order of the words as much as the words themselves. Speak as you must, proper beginning to final ending, and the crown is yours."
Sarfael listened to his tangled explanation with scant attention. The scrabbling sound had grown louder. He turned to face the doorway. In the shadowed hallway, things scurried back out of the light.
Inside the room, Karion tried to snatch the box back from Montimort and the boy danced out of his reach. Elyne stepped between the two, trying to soothe the old man. "Cousin, we will take this to those who might unlock its secrets. I promise you that it will be used for the glory of Neverwinter. Your name will be remembered forever as the man who restored the crown to the city."
Karion shook her off and his eyes rolled back in his head. "No heir for the crown, no crown for the heir," he screamed, spittle flying everywhere. "Liars rise, true hearts fall. Look to the Wall, for the dead swim out of the river. The dragon's shadow falls across Neverwinter. She's greedy, grasping, intent on choking the life out of us all, that wicked Valindra!"
"Easy, easy," Elyne tried to maneuver Karion into the one intact chair in the room. "Montimort, run to the kitchen and fetch the wine. He's having one of his fits."
"Don't!" Sarfael stopped the boy. "Don't go out there."
"What?" Elyne turned.
A half-dozen disembodied hands launched themselves through the doorway, springing through the air to fasten upon Montimort and drag him down. The dead claws tore at the boy's clothing and hair as he twisted and shouted beneath them. Others tried to pull the box away from him.
"Drop it!" Sarfael commanded Montimort, but the boy clutched the box tighter and tried to roll away from his attackers.
Behind him, Elyne gave equally urgent commands to her mad relative, but the old man folded himself tight in his chair, muttering, "Boy's a Luskar. Pirate thief. My box, mine!"
Sarfael raced to Montimort's aid, skewering the hands and throwing them off with a flip of his sword. The crawling claws swarmed over the boy. As soon as Sarfael tossed one away, it came springing back. One managed to fasten its fingers tightly around Montimort's throat and began to choke him.
_A mysterious key, the walking dead, and a
grisly murder..._
**LOOK FOR PART III OF**
**COLD STEEL
AND SECRETS**
on sale at your favorite e-bookseller
on December 20!
# **CONTINUE YOUR ADVENTURE**
The Dungeons & Dragons® Fantasy
Roleplaying Game Starter Set has
everything you need for you and your
friends to start playing. Explore infinite
universes, create bold heroes and
prepare to begin—or rediscover—
the game that started it all.
Watch Videos
Read Sample Chapters
Get product previews
Learn more about D&D® products
at
**DungeonsandDragons.com**
| {
"redpajama_set_name": "RedPajamaBook"
} | 5,357 |
// AMD i18n resource bundle: Lithuanian labels for the zoom controls.
define({
  zoomIn: "Artinti",
  zoomOut: "Tolinti"
});
"redpajama_set_name": "RedPajamaGithub"
} | 8,013 |
\section{Introduction}
Variational regularization often leads to minimizing a sum of two convex functionals and discretization is usually performed by restricting minimization to a finite dimensional subspace.
For inverse problems in the context of large scale PDE models, adaptive refinement of the computational mesh is crucial for an efficient numerical solution.
Recent contributions to the topic of adaptive discretization of inverse problems can be found in, e.g.,
\cite{HaberHeldmannAscher07} on adaptive finite volume discretizations for
Tikhonov--TV regularization,
\cite{KindermannNeubauer03,Neubauer07} on moving mesh regularization and adaptive grid regularization,
\cite{BenAmeurChaventJaffre02,BenAmeurKaltenbacher02,ChaventBissel98} on
refinement and coarsening indicators,
and
\cite{BangerthJoshi08,
BeilinaClason06, BeilinaJohnson05,BeilinaKlibanov10,
GriesbaumKaltenbacherVexler08,KaltenbacherKirchnerVexler11,
KaltenbacherKirchnerVeljovic13,KaltenbacherKirchnerVexler13} on goal oriented error estimators.
A key step for adaptive discretization is reliable estimation of the discretization error using quantities available in the numerical computations, i.e., in an \emph{a posteriori} fashion.
The functional error estimators described in \cite{Repin00} allow for an exact estimate of the discretization error and appear to be particularly promising for Tikhonov regularized inverse problems since they have originally been developed in the context of minimization of a sum of two convex functionals.
Yet so far they have not been considered for inverse problems and only very recently for control problems in, e.g., \cite{GaevskajaHoppeRepin07,LangerRepinWolfmayr14,Wolfmayr14}.
Regarding nonsmooth problems, functional error estimates have been used to derive a posteriori error estimators for the finite-element discretization of total variation denoising in \cite{Bartels:2015}.
In this work, we are concerned with linear inverse problems for PDEs consisting of the forward model
\begin{align}
Ay&=Bu\label{model}
\intertext{together with the measurement equation}
Cy&=g\label{measurement}
\end{align}
where $u$ is the unknown parameter (e.g., source term, boundary data, or coefficient), $y$ is the corresponding state solving \eqref{model}, $g$ is the observable data, $A:\mathcal{Y}\to\mathcal{W}^*$, $B:\mathcal{U}\to\mathcal{W}^*$, and $C:\mathcal{Y}\to\mathcal{G}$ are linear operators, and
$\mathcal{G}$, $\mathcal{U}$, $\mathcal{W}$, and $\mathcal{Y}$ are Banach spaces.
As a simple motivating example, consider the inverse problem of electroencephalography \cite{ElBadiaHaDuongIP00}, which consists in recovering the current density distribution within the brain from potential measurements on the scalp.
This can be formulated (assuming constant conductivity for simplicity) as an inverse problem for the PDE
\begin{equation}\label{eq:ex2}
\left\{\begin{aligned}
-\Delta y &= \chi_{\omega_c} u&&\text{ in }\Omega,\\
\partial_\nu y&= f &&\text{ on } \partial\Omega,
\end{aligned} \right.
\end{equation}
where $u$ is the desired current density, $\omega_c\subset\Omega$ denotes the region of interest inside the skull $\Omega$, and $f$ is the given current flux on the scalp $\partial\Omega$. The measured data is $g=y|_\Gamma$, where $\Gamma\subset\partial\Omega$ denotes the location of the electrodes on the scalp.
Here, $A$ is the negative Laplace operator, $B$ is the extension operator from $\omega_c$ to $\Omega$, and $C$ is the Dirichlet trace operator on $\Gamma$.
In practice, only a noisy measurement $g^\delta$ will typically be available, where the noise level $\delta$ defined by
\begin{equation}
\|g-g^\delta\|_\mathcal{G}\leq\delta
\end{equation}
we here assume to be known. Since the solution of such an inverse problem is typically unstable, regularization needs to be employed; see, e.g., \cite{EnHaNe96,Scherzeretal2009} and the references therein. We will here consider the classical Tikhonov--Philips method in Banach spaces with Morozov's discrepancy principle as a regularization parameter choice strategy.
Using the parameter-to-state mapping
\begin{equation}
S:=A^{-1}B\in L(\mathcal{U},\mathcal{Y})
\end{equation}
and the reduced forward operator
\begin{equation}
K:=CS\in L(\mathcal{U},\mathcal{G}),
\end{equation}
we can write (\refeq{model}--\refeq{measurement}) as a single operator equation
\begin{equation}\label{Kug}
Ku=g.
\end{equation}
For this reduced formulation, Tikhonov's method is given by
\begin{equation}\label{eq:minred}
\begin{aligned}
&\min_{u\in \mathcal{U}} J_\alpha(u,Ku) \quad \text{ where }\quad
J_\alpha(u,g)=G(g)+\mathcal{R}_\alpha(u),
\end{aligned}
\end{equation}
where $\mathcal{R}_\alpha$ is an appropriate regularizing functional and $G$ a discrepancy term, which in this work will be assumed to have the form
\begin{equation}\label{eq:G}
G(g)=\frac12 \norm[]{G}{g-g^\delta}^2.
\end{equation}
The discrepancy principle (or rather its relaxed version) amounts to choosing $\alpha=\alpha(\delta)$ such that
\begin{equation}\label{eq:discrprinc_red}
\underline{\tau}\delta \leq \norm[]{G}{Ku_{\alpha}^\delta-g^\delta} \leq
\overline{\tau}\delta
\end{equation}
holds, where $u_{\alpha}^\delta$ is a minimizer of \eqref{eq:minred} and $\overline{\tau}\geq\underline{\tau}\geq1$ are fixed constants independent of $\delta$.
Convergence of this method has been extensively investigated in the literature; see, e.g., \cite{EnHaNe96} and the references therein for an analysis in Hilbert spaces and \cite{BurgerOsher04,Scherzeretal2009,SeidmanVogel89} for a more general setting similar to the one considered here. For actual numerical computations, the infinite-dimensional problem has to be discretized: Finite-dimensional spaces $\mathcal{U}_h\subset\mathcal{U}$, $\mathcal{Y}_h\subset\mathcal{Y}$, and $\mathcal{W}_h\subset\mathcal{W}$
are chosen, and the solution of $Ay=Bu$ is replaced by finding $y_h\in \mathcal{Y}_h$ such that
\begin{equation}\label{eq:weakdiscreteformulation}
\langle Ay_h - Bu, \ w_h\rangle = 0 \qquad\forall w_h \in \mathcal{W}_h.
\end{equation}
To carry the convergence results over from the infinite-dimensional to the discretized problem, the error due to discretization has to be assessed.
In particular, it is important to carefully balance discretization and regularization.
As it turns out, only errors in the functionals $G$ and $J_\alpha$ need to be controlled in order to obtain a convergent adaptive method.
This makes the theory of functional error estimators in \cite{Repin00} applicable.
As we will show, these estimators are applicable for different choices of regularization functionals.
These include the usual squared Hilbert-space norm, i.e. $\mathcal{R}_\alpha=\frac{\alpha}{2}\norm[]{U}{\cdot}^2$,
but also nonsmooth penalties of the form $\mathcal{R}_\alpha=\delta_{B_{1/\alpha}^{L^\infty(\omega_c)}}$ or $\mathcal{R}_\alpha=\alpha\norm[]{Mc}{\cdot}$,
where
$\mathcal{M}(\Omega)$
is the space of Radon measures. The latter penalty is useful for incorporating sparsity regularization, while the former penalty corresponds to Ivanov regularization (also called method of quasi-solutions, see \cite{Ivanov62, Ivanov63, IvanovVasinTanana02, LorenzWorliczek13, SeidmanVogel89},
as well as \cite{NeubauerRamlau14} in the context of Hilbert scales), where the regularization does not take the usual additive form with $\alpha$ as a multiplier.
In all these cases, the functional error estimators can be computed in terms of residuals in the optimality system.
This work is organized as follows. After fixing some common notation, we present in \cref{sec:setting} the basic results on convergence of adaptively discretized regularization methods and the functional error estimates our analysis relies on. These estimators are then applied to the classical Hilbert space regularization in \cref{sec:errest_red_Hil}, first in the general setting and then specifically for a model inverse source problem for the Poisson equation. Similarly, \cref{sec:errest_red_Ban1} and \cref{sec:errest_red_Ban2} treat the case of Banach space norm constraints and norm regularization, respectively, again both in the general setting and for model problems (Ivanov regularization resp.~sparsity). For the latter, numerical experiments given in \cref{sec:NumTests} demonstrate the efficiency of our approach.
\section{Notation and preliminary results}\label{sec:setting}
For some Banach space $X$ with dual $X^*$, we use the notation $\langle x^*,x\rangle_{X^*,X}$ for the canonical duality pairing.
In case of a Hilbert space $X$, $(x_1,x_2)_X$ denotes the inner product.
Moreover, $\delta_S$ denotes the indicator function of some set $S$ and $B_r^X$ the closed ball of radius $r$ around zero in the normed space $X$.
\subsection{Functional-analytic setting}\label{sec:spaces}
In the following, we assume that $\mathcal{U}$, $\mathcal{W}$, $\mathcal{X}$, $\mathcal{Y}$ are Banach spaces
with $\mathcal{W}$ and $\mathcal{Y}$ being reflexive,
and that $\mathcal{G}$ is a Hilbert space.
Furthermore, we suppose that either $\mathcal{X}=\mathcal{U}^*$ or $\mathcal{U}=\mathcal{X}^*$ holds, which allows us to use a consistent notation in the rest of the paper and to avoid cumbersome case distinctions.
For a convex functional $F:\mathcal{U}\to\bar{\mathbb{R}}$, we will denote by
\begin{equation}
F^*:\mathcal{X}\to\bar{\mathbb{R}},\qquad F^*(x) = \sup_{u\in \mathcal{U}} \langle u,x\rangle_{\mathcal{U},\mathcal{X}} - F(u)
\end{equation}
its Fenchel conjugate.
If $\mathcal{X} = \mathcal{U}^*$, this coincides with the usual definitions in the sense of convex analysis.
For $\mathcal{U} = \mathcal{X}^*$, it is common to define as here the Fenchel conjugate on $\mathcal{X}$ instead of $\mathcal{X}^{**}$ in the special case of $F=G^*$ (i.e., the biconjugate of $G$); the redefinition in the general case is less common but still consistent and coincides with the ``predual'' approach as in, e.g., \cite{Clason:2009}. This will allow working with spaces of continuous functions instead of the dual of measure spaces later on. In particular, the Fenchel conjugate of $F(u) = \alpha\norm[]{U}{u}$ is always given by
\begin{equation}
F^*(x) = \delta_{B^X_\alpha}(x) =
\begin{cases}
0 & \text{if }\norm[]{X}{x}\leq \alpha,\\
\infty & \text{if }\norm[]{X}{x} >\alpha.
\end{cases}
\end{equation}
In the case that $\mathcal{U}$ is a Hilbert space, we set $\mathcal{X}=\mathcal{U}$, in which case the duality pairing coincides with the standard inner product. In particular, for $F(u)=\frac12 \norm[]{U}{u-z}^2$ we have
\begin{equation}
F^*(u)=\frac12 \left(\norm[]{U}{ u-z}^2-\norm[]{U}{z}^2\right).
\end{equation}
We further denote by
\begin{equation}
\partial F(u) := \left\{x\in \mathcal{U}^*: \langle \tilde u - u,x\rangle_{\mathcal{U},\mathcal{X}} \leq F(\tilde u)- F(u) \quad\text{for all } \tilde u\in \mathcal{U}\right\}
\end{equation}
the convex subdifferential of $F:\mathcal{U}\to\bar{\mathbb{R}}$. Note that we always have the inclusion $\mathcal{X}\subset\mathcal{U}^*$, either by equality or by using the canonical injection from $\mathcal{X}$ to $\mathcal{X}^{**}$.
In the latter case, existence of the duality mapping $\mathcal{J}^\mathcal{U}:\mathcal{U}\to \mathcal{X}$, defined by
\begin{equation}
\norm[]{X}{\mathcal{J}^\mathcal{U}(u)}=1 \quad\text{ and }\quad
\langle u,\mathcal{J}^\mathcal{U}(u)\rangle_{\mathcal{U},\mathcal{X}} =\norm[]{U}{u}\qquad\text{for all }u\in\mathcal{U},
\end{equation}
i.e., $\mathcal{J}^\mathcal{U}(u) \in\partial(\norm[]{U}{\cdot})(u)$, becomes an additional assumption.
We further need the linear operators $A\in L(\mathcal{Y},\mathcal{W}^*)$, $B\in L(\mathcal{U},\mathcal{W}^*)$, and $C\in L(\mathcal{Y},\mathcal{G})$, and assume that $A$ is continuously invertible.
We will also make use of the adjoints
\begin{align}
A^*&\in L(\mathcal{W},\mathcal{Y}^*)&&\text{ with }&
\langle A y, w\rangle_{\mathcal{W}^*,\mathcal{W}}&=\langle y, A^* w\rangle_{\mathcal{Y},\mathcal{Y}^*}
&&\text{ for all } y\in \mathcal{Y}, \ w\in \mathcal{W},\\
B^*&\in L(\mathcal{W},\mathcal{X})&&\text{ with }&
\langle B u, w\rangle_{\mathcal{W}^*,\mathcal{W}}&=\langle u, B^* w\rangle_{\mathcal{U},\mathcal{X}}
&&\text{ for all } u\in \mathcal{U}, \ w\in \mathcal{W},\\
C^*&\in L(\mathcal{G},\mathcal{Y}^*)&&\text{ with }&
( C y, g)_\mathcal{G}&=\langle y, C^*g\rangle_{\mathcal{Y},\mathcal{Y}^*}
&&\text{ for all } y\in \mathcal{Y}, \ g\in \mathcal{G},\\
K^*&\in L(\mathcal{G},\mathcal{X})&&\text{ with }&
( K u, g)_\mathcal{G}&=\langle u, K^*g\rangle_{\mathcal{U},\mathcal{X}}
&&\text{ for all } u\in \mathcal{U}, \ g\in \mathcal{G},\\
S^*&\in L(\mathcal{Y}^*,\mathcal{X})&&\text{ with }&
\langle S u, y^*\rangle_{\mathcal{Y},\mathcal{Y}^*}&=\langle u, S^*y^*\rangle_{\mathcal{U},\mathcal{X}}
&&\text{ for all } u\in \mathcal{U}, \ y^*\in \mathcal{Y}^*.
\end{align}
Let us emphasize that the existence of $B^*$ with the mentioned properties is
an actual assumption in the case $\mathcal{U} = \mathcal{X}^*$, which is equivalent
to the assumption that $B$ is the adjoint operator of an operator ${}^*B$.
(With a slight abuse of notation in the first two cases, since these are actually the compositions of the standard adjoints with the canonical embeddings $W\to W^{**}$).
In addition, $\{\mathcal{R}_\alpha\}_{\alpha>0}$, $\mathcal{R}_\alpha:\mathcal{U}\to\bar\mathbb{R}$, is a family of proper, convex, lower semicontinuous functionals.
Finally, let $\mathcal{U}_h$, $\mathcal{Y}_h$, $\mathcal{W}_h$ be finite dimensional subspaces of $\mathcal{U}$, $\mathcal{Y}$, $\mathcal{W}$, respectively.
In the case that $\mathcal{U}$ is a Hilbert space, we will denote by $P_{\mathcal{U}_h}$ the orthogonal projection onto $\mathcal{U}_h$. Furthermore, $R_{\mathcal{W}_h}:\mathcal{W}^*\to\mathcal{W}_h^*$ and $R_{\mathcal{Y}_h}:\mathcal{Y}^*\to\mathcal{Y}_h^*$ denote the Ritz projectors defined by
\begin{equation}\label{RYhRWh}
\langle R_{\mathcal{W}_h}w^*,w_h\rangle_{\mathcal{W}_h^*,\mathcal{W}_h}=\langle w^*,w_h\rangle_{\mathcal{W}^*,\mathcal{W}}, \qquad
\langle R_{\mathcal{Y}_h}y^*,y_h\rangle_{\mathcal{Y}_h^*,\mathcal{Y}_h}=\langle y^*,y_h\rangle_{\mathcal{Y}^*,\mathcal{Y}}.
\end{equation}
\subsection{Convergence of adaptively discretized Tikhonov regularization} \label{subsec:Tikhonov}
We consider the Tikhonov regularization \eqref{eq:minred} equivalently written as a PDE-constrained minimization problem
\begin{equation}\label{eq:Tikh}
\begin{aligned}
&\min_{u\in \mathcal{U}, \ y\in \mathcal{Y}} J_\alpha(u,y):= \frac12 \norm[]{G}{Cy-g^\delta}^2 +\mathcal{R}_\alpha(u)
\quad\text{ s.t. }\quad
Ay=Bu \text{ in }\mathcal{W}^*.
\end{aligned}
\end{equation}
The discrete counterpart of \eqref{eq:Tikh} reads
\begin{equation}\label{eq:Tikh_h}
\begin{aligned}
&\min_{u\in \mathcal{U}_h, \ y\in \mathcal{Y}_h} J_\alpha (u,y)
\quad
\text{ s.t. }\quad
R_{\mathcal{W}_h} (Ay-Bu)=0.
\end{aligned}
\end{equation}
Let $(u_{\alpha}^\delta,y_{\alpha}^\delta)$ be the exact Tikhonov minimizer, i.e., a solution of \eqref{eq:Tikh},
and let $(u_h,y_h) \in \mathcal{U}_h\times\mathcal{Y}_h$ be some approximation, e.g., a solution of the discrete problem \eqref{eq:Tikh_h}.
In this abstract setting we just presume existence of minimizers of \eqref{eq:Tikh} and \eqref{eq:Tikh_h} and will verify this assumption for the applications in \cref{subsec:example1}, \cref{subsec:example2}, and \cref{subsec:example3}.
The question is now how the convergence of the discrete approximation $u_h$ to solutions of the equation $Ku=g$ can be guaranteed for $(h,\alpha,\delta)\searrow0$.
The following theorem shows (similarly as in \cite{KaltenbacherKirchnerVexler11,NeuSch90}) that it is enough to adapt the discretization
and the choice of the regularization parameter $\alpha(\delta,h)$
in such a way that the
difference in the functional values satisfies
\begin{equation}\label{etaJ}
J_\alpha(u_h,y_h)-J_\alpha(u^\delta_\alpha,y_{\alpha}^\delta)\leq\eta_J,
\end{equation}
and the difference in the discrepancy values satisfies
\begin{equation}\label{etaD}
\norm[]{G}{K_h{u_h}-g^\delta}^2-\norm[]{G}{K{u^\delta_\alpha}-g^\delta}^2
=\norm[]{G}{Cy_h-g^\delta}^2-\norm[]{G}{C{y^\delta_\alpha}-g^\delta}^2\leq \eta_D,
\end{equation}
where $\eta_J$ and $\eta_D$ can be controlled to be small enough relative to $\delta$.
\begin{proposition}\label{prop:conv}
Let $(u_\alpha^\delta,y_\alpha^\delta)$ be a minimizer of \eqref{eq:Tikh} and $(u_{\alpha,h}^\delta,y_{\alpha,h}^\delta)$ be a minimizer of \eqref{eq:Tikh_h}.
Let $\alpha(\delta)$ be chosen such that for some constants $c_1,c_2,\overline{\tau},\underline{\tau}>0$ independent of $\delta$ with $\overline{\tau}>\underline{\tau}\geq\max\{\sqrt{1+2c_2},1+c_1\}$, the estimates
\begin{align}
\underline{\tau}\delta \leq \norm[]{G}{Cy_{\alpha(\delta),h}^\delta-g^\delta} &\leq
\overline{\tau}\delta,\label{eq:discrprinc}\\
\left|\norm[]{G}{Cy_{\alpha(\delta),h}^\delta-g^\delta}-\norm[]{G}{Cy_{\alpha(\delta)}^\delta-g^\delta}\right|&\leq c_1\delta,\label{eq:accuracy_residual}\\
\intertext{and}
J_{\alpha(\delta)}(u_{\alpha(\delta),h}^\delta,y_{\alpha(\delta),h}^\delta)
-J_{\alpha(\delta)}(u_{\alpha(\delta)}^\delta,y_{\alpha(\delta)}^\delta)&\leq c_2\delta^2
\label{eq:accuracy_cost}
\end{align}
hold.
Then for any solution $u^\dagger$ to $Ku=g^\dagger$, we have
\begin{equation}\label{eq:Rbdd}
\mathcal{R}_{\alpha(\delta)}(u_{\alpha(\delta)}^\delta) \leq \mathcal{R}_{\alpha(\delta)}(u^\dagger)
\quad\text{ and }\quad
\mathcal{R}_{\alpha(\delta)}(u_{\alpha(\delta),h}^\delta) \leq \mathcal{R}_{\alpha(\delta)}(u^\dagger)
\quad \text{ for all } \delta>0.
\end{equation}
Moreover, we have
\begin{equation} \label{eq:resOdelta}
\norm[]{G}{Cy_{\alpha(\delta),h}^\delta-g^\delta} \leq \overline{\tau}\delta\to 0
\quad\text{ and }\quad
\norm[]{G}{Cy_{\alpha(\delta)}^\delta-g^\delta} \leq (\overline{\tau}+c_1)\delta\to 0.
\end{equation}
\end{proposition}
\begin{proof}
Set $\alpha_*:=\alpha(\delta)$.
By the assumptions (\refeq{eq:discrprinc}--\refeq{eq:accuracy_cost}) and minimality of
$(u_{\alpha_*}^\delta, y_{\alpha_*}^\delta)$, we have for any solution $u^\dagger$ to $Ku=g^\dagger$
\begin{equation}
\begin{aligned}
{\frac12\underline{\tau}^2\delta^2 + \mathcal{R}_{\alpha_*}(u_{\alpha_*,h}^\delta)-c_2\delta^2}
&\leq \frac12\norm[]{G}{Cy_{\alpha_*,h}^\delta-g^\delta}^2+\mathcal{R}_{\alpha_*}(u_{\alpha_*,h}^\delta)
-c_2\delta^2\\
&\leq \frac12\norm[]{G}{Cy_{\alpha_*}^\delta-g^\delta}^2+\mathcal{R}_{\alpha_*}(u_{\alpha_*}^\delta)
\leq \frac12\norm[]{G}{Ku^\dagger-g^\delta}^2+\mathcal{R}_{\alpha_*}(u^\dagger)
\\
&\leq \frac12\delta^2+\mathcal{R}_{\alpha_*}(u^\dagger)
\leq \frac12\frac{1}{(\underline{\tau}-c_1)^2} \norm[]{G}{Cy_{\alpha_*}^\delta-g^\delta}^2
+\mathcal{R}_{\alpha_*}(u^\dagger)
\end{aligned}
\end{equation}
(where we have used
$\norm[]{G}{Cy_{\alpha_*}^\delta-g^\delta}\geq (\underline{\tau}-c_1)\delta$ in the last estimate),
which by comparison of the third and the sixth as well as of the first and the fifth expression in this chain of inequalities together with $\underline{\tau}\geq\max\{\sqrt{1+2c_2},1+c_1\}$ yields
\eqref{eq:Rbdd}. The convergence \eqref{eq:resOdelta} follows directly from \eqref{eq:discrprinc} and \eqref{eq:accuracy_residual}.
\end{proof}
Note that no absolute value is required in the estimate \eqref{eq:accuracy_cost}.
From \eqref{eq:Rbdd} and \eqref{eq:resOdelta}, convergence and convergence rates for both the continuous and discrete sequence as $\delta \to 0$ follow under the usual assumptions on $\mathcal{R}$, see, e.g., \cite{EnHaNe96,Scherzeretal2009,SKHK12}.
\begin{remark}
Here we have taken into account the fact that in practical computations, the discrepancy principle \eqref{eq:discrprinc} can only be checked for the discrete residual
$\norm[]{G}{Cy_{\alpha_*(\delta),h}^\delta-g^\delta}=\norm[]{G}{K_hu_{\alpha_*(\delta),h}^\delta-g^\delta}$, not the exact residual $\norm[]{G}{Ku_{\alpha_*(\delta),h}^\delta-g^\delta}$ for which \eqref{etaD} can be employed. To bridge the gap between these two quantities, we will use the triangle inequality and an additional estimate of
$\norm[]{G}{K_hu_{\alpha_*(\delta),h}^\delta-Ku_{\alpha_*(\delta),h}^\delta}$.
\end{remark}
The accuracy requirements that will have to be met by an adaptive discretization are stated in assumptions
\eqref{eq:accuracy_residual} and \eqref{eq:accuracy_cost}. Note that for this purpose, the accuracy of $u$ need not be controlled directly,
but only via the residual norm and cost function values.
In the next section, we will derive
corresponding estimates based on the functional error estimates from \cite{Repin00}.
\subsection{Functional a posteriori estimators}\label{subsec:Repin}
Our approach is based on the following functional error estimate,
which is inspired by \cite{Repin00}.
We employ the strong convexity of the discrepancy term \eqref{eq:G}
to obtain a slightly improved estimate.
\begin{proposition}\label{th:Repin}
Let $(u_\alpha^\delta,y_\alpha^\delta)$ be a minimizer of \eqref{eq:Tikh}.
Assume that there is a family of functions $\{\phi_\alpha\}_{\alpha>0}$, $\phi_\alpha:\mathcal{U} \times \mathcal{U} \to\mathbb{R}_0^+$,
satisfying
\begin{equation}\label{eq:strongconvexity}
\lambda(1-\lambda)\phi_\alpha(u_1,u_2)\leq \lambda \mathcal{R}_\alpha(u_1)+(1-\lambda)\mathcal{R}_\alpha(u_2)- \mathcal{R}_\alpha\left(\lambda u_1+(1-\lambda)u_2\right)
\end{equation}
for all $u_1,u_2\in \mathcal{U}$, $\alpha>0$, and $\lambda\in(0,1)$.
Then, any $v\in \mathcal{U}$ and $g^*\in \mathcal{G}$ satisfy
\begin{equation}
\begin{aligned}[t]
\frac12\| K(u_\alpha^\delta - v)\|_\mathcal{G}^2 + \phi_\alpha(u_\alpha^\delta, v)
&\le J_\alpha(v,Kv) - J_\alpha(u_\alpha^\delta,Ku_\alpha^\delta)\\
&\le \mathcal{R}_\alpha(v)+\mathcal{R}_\alpha^*(K^*g^*)+G(Kv)+ G^*(-g^*).
\end{aligned}
\label{eq:estRepin}
\end{equation}
\end{proposition}
\begin{proof}
Due to the assumptions and the strong convexity of $G$, we have
for $v\in \mathcal{U}$ and $\lambda\in(0,1)$
\begin{equation}
\begin{aligned}
\lambda(1-\lambda)\left( \frac12 \|K(u_\alpha^\delta - v)\|_\mathcal{G}^2
+ \phi_\alpha(u_\alpha^\delta, v) \right)
&\leq \lambda J_\alpha(u_\alpha^\delta,Ku_\alpha^\delta)+(1-\lambda)J_\alpha(v,Kv)\\
\MoveEqLeft[-1]- J_\alpha\left(\lambda u_\alpha^\delta+(1-\lambda)v,K(\lambda u_\alpha^\delta+(1-\lambda)v)\right)\\
&\leq (1-\lambda)(J_\alpha(v,Kv)-J_\alpha(u_\alpha^\delta,Ku_\alpha^\delta)),
\end{aligned}
\end{equation}
where we have used optimality of $u_\alpha^\delta$ in the last step.
Dividing by $1-\lambda$ and letting $\lambda\nearrow 1$, we obtain the first inequality. The second inequality is a consequence of weak duality.
\end{proof}
Condition~\eqref{eq:strongconvexity} is satisfied, e.g., with
$\phi_\alpha(u_1,u_2)=\frac{\alpha}{2}\norm[]{U}{u_1-u_2}^2$
in the case of a quadratic Hilbert space penalty; see \cref{sec:errest_red_Hil}.
But we will see that \eqref{eq:estRepin} still provides valuable information on the error if \eqref{eq:strongconvexity} is only satisfied with $\phi_\alpha(u_1,u_2)=0$, as in the case of Banach space norm constraints and penalties; see \cref{sec:errest_red_Ban1} and \cref{sec:errest_red_Ban2}, respectively.
Here it is important to note that the right-hand side of estimate \eqref{eq:estRepin}
does not contain the unknown solution $u_\alpha^\delta$. We will use this estimate
with $v:=u_{\alpha,h}^\delta$, which is available in the numerical computations.
We also point out that the right-hand side corresponds to the duality gap between problem \eqref{eq:minred} and its dual problem in the sense of convex analysis; see, e.g., \cite{EkelandTemam}. Hence if $v$ and $g^*$ satisfy primal-dual extremality relations for \eqref{eq:minred}, then the right-hand side of \eqref{eq:estRepin} vanishes.
The sub- and superscripts $\alpha$, $\delta$ will be omitted in the following.
Instead, we will write $(\bar{u}, \bar{y})$, $(\bar{u}_h, \bar{y}_h)$ for the continuous and discrete minimizers $(u_\alpha^\delta,y_\alpha^\delta)$, $(u_{\alpha,h}^\delta,y_{\alpha,h}^\delta)$, respectively.
\subsection{Model problem}
To illustrate the derived estimates, we will apply them to the identification of the source term $u$ in
\begin{equation}\label{eq:ex1}
\left\{\begin{aligned}
-\Delta y &= \chi_{\omega_c}u &&\text{ in }\Omega,\\
y&= 0 &&\text{ on } \partial\Omega,
\end{aligned} \right.
\end{equation}
on a domain $\Omega\subseteq\mathbb{R}^n$, $n\in\{1,2,3\}$, from restricted observations $g^\delta$ of $y$ in $\omega_o$.
Hence,
\begin{equation}
\left\{\begin{aligned}
Ay&=-\Delta y, \quad &A^*&=A, \\
Bu&= \chi_{\omega_c}u, \quad &B^* w &= w\vert_{\omega_c},\\
Cy&=y\vert_{\omega_o}, \quad &C^* g&= \chi_{\omega_o} g,\\
\end{aligned}\right.
\end{equation}
and $\mathcal{G}=L^2(\omega_o)$.
In the sequel, we assume that $\Omega$ is polyhedral and convex. This enables us to employ $H^2$-regularity results for the elliptic equation \eqref{eq:ex1}.
In addition, we can avoid technicalities in the finite element setting on curved domains.
We define $\mathcal{Y}_h=\mathcal{W}_h$ by continuous piecewise linear finite elements on a shape regular triangulation ${\mathcal{T}}_h$
consisting of element domains $K$;
see, e.g., \cite{Braess07}.
The set of all faces of elements will be denoted by $\mathcal{E}_h$.
The associated nodal interpolation operator will be denoted by ${\mathcal{I}}^{{\mathcal{T}}}{}\!$, which is continuous from ${C}_b(\overline{\Omega})$ to $\mathcal{Y}_h$.
We will employ the standard interpolation estimates
\begin{equation} \label{eq:estint}
\left\{\begin{aligned}
&\forall K\in {\mathcal{T}}_h:&\norm[]{L2K}{v-{\mathcal{I}}^{\mathcal{T}} v}&\leq c_I h_K^2 |v|_{H^2(K)} \quad &&\forall v\in H^2(\Omega)\,,\\
&\forall F\in \mathcal{E}_h:&\norm[]{L2F}{v-{\mathcal{I}}^{\mathcal{T}} v}&\leq c_I h_K^{3/2} |v|_{H^2(K)} \quad &&\forall v\in H^2(\Omega)\,,
\end{aligned}\right.
\end{equation}
where $h_K$ is the element diameter, as well as the stability estimate
\begin{equation} \label{eq:eststab}
\norm[]{H2}{v} \leq c_S \norm[]{L2}{\Delta v}\qquad \forall v\in H^2(\Omega)\cap H_0^1(\Omega),
\end{equation}
cf. \cite[Thm.~II.6.4]{Braess07} and \cite[Thm.~3.3.7]{Ciarlet78}.
\section{Hilbert space regularization}\label{sec:errest_red_Hil}
In this section, we assume that $\mathcal{U}$ is a Hilbert space, identify $\mathcal{X}$ with $\mathcal{U}$, and consider as regularization term the squared norm, i.e.,
\begin{equation}
\begin{aligned}
\mathcal{R}_\alpha=\frac{\alpha}{2}\norm[]{U}{\cdot}^2, \qquad \text{and hence} \qquad
\mathcal{R}_\alpha^*=\frac{1}{2\alpha}\norm[]{U}{\cdot}^2.
\end{aligned}
\end{equation}
Since $\mathcal{J}_\alpha$ is differentiable, we obtain for \eqref{eq:Tikh} by standard Lagrangian calculus the optimality system
\begin{equation}\label{eq:optsys}
\left\{\begin{aligned}
C^*(C\bar{y}-g^\delta)+A^*\bar{w}&=0,\\
\alpha \bar{u}- B^*\bar{w}&=0,\\
A\bar{y}-B\bar{u}&=0.
\end{aligned}\right.
\end{equation}
The corresponding discrete system for \eqref{eq:Tikh_h} is\begin{equation}\label{eq:optsys_discr}
\left\{\begin{aligned}
R_{\mathcal{Y}_h} \left(C^*(C\bar{y}_h-g^\delta)+ A^*\bar{w}_h\right)&=0,\\
\alpha \bar{u}_h-P_{\mathcal{U}_h} B^*\bar{w}_h&=0,\\
R_{\mathcal{W}_h} \left(A\bar{y}_h-B\bar{u}_h\right)&=0,
\end{aligned}\right.
\end{equation}
with $R_{\mathcal{Y}_h}$, $R_{\mathcal{W}_h}$ as in \eqref{RYhRWh},
which corresponds to a finite element discretization of the state and adjoint equation.
The solution $(\bar{u}_h,\bar{y}_h,\bar{w}_h) \in \mathcal{U}_h\times \mathcal{Y}_h\times \mathcal{W}_h$ of \eqref{eq:optsys_discr} can be considered as an approximation to the solution $(\bar{u},\bar{y},\bar{w})\in\mathcal{U}\times\mathcal{Y}\times\mathcal{W}$ of \eqref{eq:optsys}.
\subsection{Error estimates}
Setting $\phi_\alpha(u_1,u_2)=\frac{\alpha}{2} \norm[]{U}{u_1-u_2}^2$,
we obtain from \cref{th:Repin} that the solution $\bar{u}$ to \eqref{eq:minred}
satisfies
\begin{equation}\label{eq:est_red}
\begin{aligned}[t]
\alpha \norm[]{U}{u-\bar{u}}^2+\norm[]{G}{Ku-K\bar{u}}^2
&\leq2\left(J_\alpha(u,Ku)-J_\alpha(\bar{u},K\bar{u})\right)\\
&\leq\alpha \norm[]{U}{u}^2
+\frac{1}{\alpha}\norm[]{U}{K^* g^*}^2
+\norm[]{G}{Ku-g^\delta}^2+\norm[]{G}{g^*-g^\delta}^2-\norm[]{G}{g^\delta}^2
\\
&= \alpha \norm[]{U}{u}^2
+\frac{1}{\alpha}\norm[]{U}{K^*(g-g^\delta)}^2
+\norm[]{G}{Ku-g^\delta}^2+\norm[]{G}{g}^2-\norm[]{G}{g^\delta}^2
\\
&=
\frac{1}{\alpha} \norm[]{U}{\alpha u + K^*(g-g^\delta)}^2
+\norm[]{G}{Ku-g}^2,
\end{aligned}
\end{equation}
for any $u\in \mathcal{U}$ and any $g\in \mathcal{G}$, where $g^*:=g^\delta -g \in \mathcal{G}$.
We now define
\begin{equation}\label{eq:yhat}
\hat{y}:=S\bar{u}_h=A^{-1}B\bar{u}_h.
\end{equation}
Inserting $u=\bar{u}_h$ and $g=C\bar{y}_h$ in \eqref{eq:est_red},
we arrive at
\begin{equation}\label{eq:est_red_h_aposteriori}
\begin{aligned}[t]
\alpha \norm[]{U}{\bar{u}_h-\bar{u}}^2+ \norm[]{G}{C\hat{y}-C\bar{y}}^2
&\leq 2 \left(J_\alpha(\bar{u}_h,\hat{y})-J_\alpha(\bar{u},\bar{y})\right)\\
&\leq \frac{1}{\alpha} \norm[]{U}{\alpha \bar{u}_h
+ S^* C^* (C\bar{y}_h-g^\delta)}^2
+\norm[]{G}{C(A^{-1}B\bar{u}_h-\bar{y}_h)}^2
\\
&=
\frac{1}{\alpha} \norm[]{U}{\alpha \bar{u}_h- B^*\bar{w}_h \
+\ S^*\left(C^*(C\bar{y}_h-g^\delta)+ A^*\bar{w}_h\right)}^2\\
\MoveEqLeft[-1]
+\norm[]{G}{CA^{-1}\left(A\bar{y}_h-B\bar{u}_h\right)}^2.
\end{aligned}
\end{equation}
Here, \eqref{eq:est_red_h_aposteriori} contains the residuals of the equations in the optimality system \eqref{eq:optsys}, which are given by
\begin{equation}\label{eq:res123}
\left\{\begin{aligned}
\rho_w&:=C^*(C\bar{y}_h-g^\delta)+A^*\bar{w}_h=A^*(\bar{w}_h-\hat{w}),\\
\rho_u&:=\alpha \bar{u}_h- B^*\bar{w}_h,\\
\rho_y&:=A\bar{y}_h-B\bar{u}_h=A(\bar{y}_h-\hat{y}),
\end{aligned}\right.
\end{equation}
where $(\hat{y},\hat{w})\in\mathcal{Y}\times\mathcal{W}$ and $(\bar{y}_h,\bar{w}_h)\in\mathcal{Y}_h\times\mathcal{W}_h$ satisfy
\begin{equation}\label{eq:yhatwhat}
\left\{\begin{aligned}
R_{\mathcal{Y}_h} \left(C^*(C\bar{y}_h-g^\delta)+ A^*\bar{w}_h\right)&=0,\\
R_{\mathcal{W}_h} \left(A\bar{y}_h-B\bar{u}_h\right)&=0,
\end{aligned}\right.
\qquad
\left\{\begin{aligned}
C^*(C\bar{y}_h-g^\delta)+ A^*\hat{w}&=0,\\
A\hat{y}-B\bar{u}_h&=0,
\end{aligned}\right.
\end{equation}
for the same $\bar{u}_h\in\mathcal{U}_h$ (note that the left system is coupled, as opposed to the right one).
Thus the inequality \eqref{eq:est_red_h_aposteriori} appears to be suited for a posteriori error estimation.
Although estimate \eqref{eq:est_red_h_aposteriori} only gives an estimate on $K\bar{u}_h-K\bar{u}=C\hat{y}-C\bar{y}$ and not on $K_h\bar{u}_h-K\bar{u}=C\bar{y}_h-C\bar{y}$ (which is needed for \eqref{eq:accuracy_residual}), we can use the identity $\bar{y}_h-\hat{y}=A^{-1}\rho_y$, i.e.,
\begin{equation}\label{CAres3}
C\bar{y}_h-C\hat{y}=CA^{-1}\left(A\bar{y}_h-B\bar{u}_h\right),
\end{equation}
the triangle inequality, and the fact that
\begin{equation}\label{abcd}
\forall a,b,c,d\geq0 : \ a+b^2\leq c+d^2 \ \Rightarrow \ a+(b+d)^2\leq \gamma c+\sigma d^2
\end{equation}
holds for
\begin{equation}\label{rhosigma}
\left(\sigma=4\text{ and }\gamma\geq2\right) \quad\text{ or }
\quad\left(\sigma>4\text{ and }\gamma>\frac{2\sigma}{\sigma+\sqrt{\sigma^2-4\sigma}}\right)
\end{equation}
(see the \nameref{app} for a proof)
as well as
\begin{equation}\label{estJ}
J_\alpha(\bar{u}_h,\bar{y}_h)-J_\alpha(\bar{u}_h,\hat{y})
= (C\bar{y}_h-g^\delta,C\bar{y}_h-C\hat{y})_\mathcal{G}-\frac12\norm[]{G}{C\bar{y}_h-C\hat{y}}^2
\end{equation}
to obtain from \eqref{eq:est_red_h_aposteriori} the following a posteriori estimate.
\begin{proposition}\label{aposteriori_hilbert}
Let $\mathcal{U}$ be a Hilbert space and $\mathcal{R}_\alpha=\frac\alpha2\norm[]{U}{\cdot}^2$. Then the minimizers $(\bar{u},\bar{y})$ of \eqref{eq:Tikh} and $(\bar{u}_h,\bar{y}_h)$ of \eqref{eq:Tikh_h} satisfy the estimates
\begin{align}
\qquad\alpha \norm[]{U}{\bar{u}_h-\bar{u}}^2+ \norm[]{G}{C\bar{y}_h-C\bar{y}}^2
&\leq\frac{\gamma}{\alpha} \norm[]{U}{ B^* (A^*)^{-1} \rho_w\ + \ \rho_u}^2
+\sigma\norm[]{G}{CA^{-1}\rho_y}^2,\label{eq:est_red_h_aposteriori_s}\\
J_\alpha(\bar{u}_h,\bar{y}_h)-J_\alpha(\bar{u},\bar{y})
&\leq\frac{1}{2\alpha} \norm[]{U}{ B^* (A^*)^{-1} \rho_w\ + \ \rho_u}^2
+
(C\bar{y}_h-g^\delta,CA^{-1}\rho_y)_\mathcal{G},
\label{eq:est_red_h_aposteriori_J}
\end{align}
with $\sigma$ and $\gamma$ as in \eqref{rhosigma} and $\rho_w$, $\rho_u$, and $\rho_y$ as in \eqref{eq:res123}.
\end{proposition}
Here the factors $\sigma$ and $\gamma$ may be used to minimize the right hand side of the estimate. In the following, we will fix $\sigma=4$, $\gamma=2$ for simplicity.
At a first glance, estimate \eqref{eq:est_red_h_aposteriori} requires solution of state and adjoint equation on a fine grid for applying
$S^*$
and $CA^{-1}$, but this can be avoided in some relevant examples; see, e.g., \cref{subsec:example1} below.
\subsection{Application to inverse source problem}
\label{subsec:example1}
We now apply the estimate from \cref{aposteriori_hilbert} to the model problem \eqref{eq:ex1}. In this case, we have $\mathcal{U}=L^2(\omega_c)$ as well as $\mathcal{Y}= H_0^1(\Omega)=\mathcal{W}$, and the Tikhonov problem is given by
\begin{equation}\label{eq:ex1min}
\left\{\begin{aligned}
&\min_{y,u} \frac12 \norm[]{L2o}{y-g^\delta}^2 +\frac{\alpha}{2}\norm[]{L2c}{u}^2\\
&\mbox{s.t. }-\Delta y = \chi_{\omega_c} u,\quad y|_{\partial\Omega}=0.
\end{aligned}\right.
\end{equation}
Hence, using
\begin{align}
\rho_w&=\chi_{\omega_o}(\bar{y}_h-g^\delta)-\Delta\bar{w}_h,\\
\rho_u&=\alpha \bar{u}_h-\bar{w}_h\vert_{\omega_c},\\
\rho_y&=-\Delta\bar{y}_h-\chi_{\omega_c}\bar{u}_h,
\end{align}
estimates
\eqref{eq:est_red_h_aposteriori_s} and \eqref{eq:est_red_h_aposteriori_J} become
\begin{align}
\qquad \alpha \norm[]{L2c}{\bar{u}_h-\bar{u}}^2
+\norm[]{L2o}{\bar{y}_h-\bar{y}}^2
&\leq
\frac{2}{\alpha} \norm[]{L2c}{
(-\Delta)^{-1}[\rho_w] + \rho_u}^2
+4\norm[]{L2o}{(-\Delta)^{-1}[\rho_y]}^2,\label{eq:est_red_h_aposteriori_ex1}\\
J_\alpha(\bar{u}_h,\bar{y}_h)-J_\alpha(\bar{u},\bar{y})
&\leq
\begin{multlined}[t]
\frac{1}{2\alpha} \norm[]{L2c}{(-\Delta)^{-1}[\rho_w] + \rho_u}^2 \\
+
(\bar{y}_h-g^\delta,(-\Delta)^{-1}[\rho_y])_{{L}^{2}(\omega_o)}
.
\label{eq:est_red_h_aposteriori_ex1_J}
\end{multlined}
\end{align}
It remains to describe how the right-hand sides can be evaluated for a given
discrete approximation $(\bar u_h,\bar y_h)$.
The residual $\rho_w$ can be estimated using a conventional error estimator:
Observing that $\bar{w}_h$ and $\hat{w}$ solve the discretized and continuous Poisson equation with the same right-hand side $C^*(C\bar{y}_h-g^\delta)$, we can write
\begin{equation}\label{Ares1}
(A^*)^{-1} \rho_w=(\bar{w}_h-\hat{w})
=((A^*_h)^{-1}-(A^*)^{-1})C^*(C\bar{y}_h-g^\delta).
\end{equation}
Hence, using duality-based error estimators, e.g., from \cite[Sec.~2.4]{ainsworthoden}, with $\phi=A^{-1}BB^*(\bar{w}_h-\hat{w})\in\mathcal{Y}$, we obtain
\begin{equation}
\begin{aligned}
\norm[]{U}{B^*(A^*)^{-1}\rho_w}^2&=\norm[]{U}{B^*(\bar{w}_h-\hat{w})}^2
=\langle A\phi,\bar{w}_h-\hat{w}\rangle_{\mathcal{W}^*,\mathcal{W}}\\
&=\langle \phi,A^*(\bar{w}_h-\hat{w})\rangle_{\mathcal{Y},\mathcal{Y}^*}
=\langle \phi-{\mathcal{I}}^{\mathcal{T}}\phi,A^*(\bar{w}_h-\hat{w})\rangle_{\mathcal{Y},\mathcal{Y}^*},
\end{aligned}
\end{equation}
where we have used Galerkin orthogonality in the last equality.
Since $\Omega$ is assumed to be convex and polyhedral,
we can apply \eqref{eq:estint} to $\phi \in H^2(\Omega)$ to obtain for all $K\in{\mathcal{T}}_h$ the estimate
\begin{equation}
\|\phi-{\mathcal{I}}^{\mathcal{T}}\phi\|_{L^2(K)} +
h_K^{1/2} \norm[]{L2pK}{\phi-{\mathcal{I}}^{\mathcal{T}} \phi} \leq c_I h_K^2 |\phi|_{H^2(K)}.
\end{equation}
Due to $H^2$-regularity, we can also apply \eqref{eq:eststab} to further estimate
$|\phi|_{H^2(\Omega)} \leq c_S \norm[]{L2c}{\bar{w}_h-\hat{w}}$.
From \eqref{eq:yhatwhat} and integration by parts, we thus obtain
\begin{equation}
\begin{aligned}[t]
\norm[]{L2c}{\bar{w}_h-\hat{w}}^2
&= \int_\Omega\nabla (\phi-{\mathcal{I}}^{\mathcal{T}}\phi)\cdot\nabla(\bar{w}_h-\hat{w})\, dx\\
&= \sum_{K\in {\mathcal{T}}_h} \left(\int_K(\phi -{\mathcal{I}}^{\mathcal{T}}\phi)\rho_w\, dx
+\int_{\partial K} (\phi -{\mathcal{I}}^{\mathcal{T}}\phi)\nabla\bar{w}_h\cdot \nu\, ds \right)\\
&
\le c_I\sum_{K\in {\mathcal{T}}_h} \left( h_K^2 \norm[]{L2K}{\rho_w}
+ \frac12 h_K^{3/2} \norm[]{L2pK}{\llbracket\nabla \bar{w}_h\cdot \nu\rrbracket}
\right)|\phi|_{H^2(K)}
\\
&\leq
c_{\mathcal{T}} \left( \sum_{K\in {\mathcal{T}}_h} h_K^4 \norm[]{L2K}{\rho_w}^2
+ \frac12 h_K^{3} \norm[]{L2pK}{\llbracket\nabla \bar{w}_h\cdot \nu\rrbracket}^2 \right)^{1/2}\norm[]{L2c}{\bar{w}_h-\hat{w}},
\end{aligned}
\end{equation}
where $c_{\mathcal{T}}:=c_Ic_S$, and
$\llbracket\cdot\rrbracket$ denotes the jump over the element boundary $\partial K$ with normal $\nu$.
Canceling the norm on both sides then yields
\begin{equation}\label{eq:Hm2}
\begin{aligned}[t]
\norm[]{L2c}{(-\Delta)^{-1}[\rho_w]}
&=\norm[]{L2c}{\bar{w}_h-\hat{w}}\\
&\leq
c_{\mathcal{T}} \left( \sum_{K\in {\mathcal{T}}_h} h_K^4 \norm[]{L2K}{\rho_w}^2
+ \frac12 h_K^{3} \norm[]{L2pK}{\llbracket\nabla \bar{w}_h\cdot \nu\rrbracket}^2 \right)^{1/2}\\
&=:c_{\mathcal{T}} \ \eta_{w}.
\end{aligned}
\end{equation}
Note that although $\rho_w$ is globally only an element of $H^{-1}(\Omega)$, we may take its elementwise $L^2(K)$ norm, since $\bar{w}_h$ is piecewise polynomial and therefore $\Delta (\bar{w}_h\vert_K)\in L^2(K)$. In case of piecewise linear finite elements, we just have $\norm[]{L2K}{\chi_{\omega_o}(\bar{y}_h-g^\delta)}^2$ in place of $\norm[]{L2K}{\rho_w}^2$.
The term containing $\rho_u$ is straightforward to evaluate as a sum of elementwise contributions.
%
Analogously to \eqref{Ares1}, we have a similar representation for $\rho_y$ in \eqref{CAres3}. As in \eqref{eq:Hm2},
we can thus estimate
\begin{equation}\label{eq:Hm2_res3}
\begin{aligned}[t]
\norm[]{L2o}{\bar{y}_h-\hat{y}}
&\leq c_{\mathcal{T}} \left(
\sum_{K\in {\mathcal{T}}_h} h_K^4 \norm[]{L2K}{\rho_y}^2
+ \frac12 h_K^{3} \norm[]{L2pK}{\llbracket\nabla \bar{y}_h\cdot \nu\rrbracket}^2\right)^{1/2}\\
&=:c_{\mathcal{T}} \eta_{y}.
\end{aligned}
\end{equation}
Combining \eqref{eq:est_red_h_aposteriori_ex1} and \eqref{eq:est_red_h_aposteriori_ex1_J} with \eqref{eq:Hm2}, and \eqref{eq:Hm2_res3}, we thus obtain the explicit a posteriori estimates
\begin{equation}
\begin{aligned}
\alpha \norm[]{L2c}{\bar{u}_h-\bar{u}}^2
+\norm[]{L2o}{\bar{y}_h-\bar{y}}^2
&\leq
\frac{2}{\alpha} \norm[]{L2c}{(-\Delta)^{-1}[\rho_w] + \rho_u}^2
+4\norm[]{L2o}{\bar{y}_h-\hat{y}}^2\\
&\leq
\frac{2}{\alpha}
\left(c_{\mathcal{T}} \eta_w + \|\rho_u\|_{L^2(\omega_c)}\right)^2
+ 4 \left(c_{\mathcal{T}} \eta_y\right)^2,\\
J_\alpha(\bar{u}_h,\bar{y}_h)-J_\alpha(\bar{u},\bar{y})
&\leq
\frac{1}{2\alpha}
\left(c_{\mathcal{T}} \eta_w + \|\rho_u\|_{L^2(\omega_c)}\right)^2
+ c_{\mathcal{T}} \eta_y \norm[]{L2o}{\bar{y}_h-g^\delta}.
\end{aligned}
\end{equation}
\begin{remark}
The $L^2$ inner product term in \eqref{eq:est_red_h_aposteriori_ex1_J} could in principle lead to a negative estimate of $J_\alpha(\bar{u}_h,\bar{y}_h)-J_\alpha(\bar{u},\bar{y})$, which by \eqref{eq:accuracy_cost} would mean that no refinement is required from the point of view of cost functional accuracy.
However, so far we have not found a means to reasonably evaluate this term as a possibly negative inner product (approximating $(-\Delta)^{-1}$ by its discretized version would just make the term vanish) and thus to estimate it by the Cauchy--Schwarz inequality.
Estimates~\eqref{eq:Hm2} and \eqref{eq:Hm2_res3} give bounds on quantities defined on the possibly restricted subdomains $\omega_c$ and $\omega_o$, respectively. However, the estimators are sums of contributions on the whole domain $\Omega$, and the dependence on the subdomains $\omega_c$, $\omega_o$ only enters indirectly via the definition of $\rho_w$, $\bar{w}_h$, $\rho_y$, and $\bar{y}_h$. Still, this makes sense, since these estimators are supposed to indicate local refinement of the finite element mesh for $\bar{w}_h$ and $\bar{y}_h$ defined on all of $\Omega$.
\end{remark}
\begin{remark}
Related results can be found in the literature on a posteriori error estimates for optimal control problems.
We mention \cite{kohlssiebertroesch14,liuyan01}, where $H^1$-error estimates are used in contrast to the $L^2$-estimators employed above.
Goal-oriented error estimators of dual-weighted-residual type are investigated in, e.g.,
\cite{beckervexler04,GriesbaumKaltenbacherVexler08,KaltenbacherKirchnerVexler11,KaltenbacherKirchnerVexler13}.
\end{remark}
\section{Banach space norm constraint}\label{sec:errest_red_Ban1}
In this section, we consider as regularization term
\begin{equation}
\mathcal{R}_\alpha=\delta_{B_{1/\alpha}^{\mathcal{U}}}, \qquad\text{and hence}
\qquad \mathcal{R}_\alpha^*=\frac{1}{\alpha}\norm[]{X}{\cdot}.
\end{equation}
This setting is of particular interest for incorporating pointwise almost everywhere bounds on $u$ via $\mathcal{U}=L^\infty(\omega_c)$; see \cref{subsec:example2} below.
Let us recall that in the setting $\cal U=\mathcal{X}^*$, the operator $B$ is explicitly assumed to be an adjoint operator, which is the case in the example considered in \cref{subsec:example2}.
Using the definitions of \cref{sec:spaces} and standard arguments from convex analysis, we obtain for \eqref{eq:Tikh} the optimality conditions
\begin{equation}\label{eq:optsys_B}
\left\{\begin{aligned}
C^*(C\bar{y}-g^\delta)+A^*\bar{w}&=0,\\
\bar{u}\in B_{1/\alpha}^{\mathcal{U}}\quad\text{and}\quad\langle u-\bar{u}, B^*\bar{w}\rangle_{\mathcal{U},\mathcal{X}}&\leq0 \quad \forall u\in B_{1/\alpha}^{\mathcal{U}},\\
A\bar{y}-B\bar{u}&=0.
\end{aligned}\right.
\end{equation}
The corresponding discrete optimality conditions are
\begin{equation}\label{eq:optsys_discr_B}
\left\{\begin{aligned}
R_{\mathcal{Y}_h} \left(C^*(C\bar{y}_h-g^\delta)+ A^*\bar{w}_h\right)&=0,\\
\bar{u}_h\in B_{1/\alpha}^{\mathcal{U}_h}\quad\text{and}\quad\langle u_h-\bar{u}_h, B^*\bar{w}_h\rangle_{\mathcal{U},\mathcal{X}}&\leq0 \quad \forall u_h\in B_{1/\alpha}^{\mathcal{U}_h},\\
R_{\mathcal{W}_h}\left(A\bar{y}_h-B\bar{u}_h\right)&=0.
\end{aligned}\right.
\end{equation}
\subsection{Error estimates}
Setting
$\phi_\alpha(u_1,u_2)=0$,
we obtain from \cref{th:Repin} that the solution $\bar{u}$ to \eqref{eq:minred} satisfies
\begin{equation}\label{eq:est_red_B}
\begin{aligned}
\norm[]{G}{Ku-K\bar{u}}^2
&\leq2\left(J_\alpha(u,Ku)-J_\alpha(\bar{u},K\bar{u})\right)\\
&\leq\frac{2}{\alpha}\norm[]{X}{K^*g^*}
+\norm[]{G}{Ku-g^\delta}^2+\norm[]{G}{ g^*-g^\delta}^2-\norm[]{G}{g^\delta}^2
\\
&=
\frac{2}{\alpha}\norm[]{X}{K^* (g-g^\delta)}
+2\langle u, K^* (g-g^\delta)\rangle_{\mathcal{U},\mathcal{X}}
+\norm[]{G}{Ku-g}^2
\end{aligned}
\end{equation}
for any $u\in \mathcal{U}$ and any $g\in \mathcal{G}$, where $g^*:=g^\delta -g \in \mathcal{G}$.
Inserting $u=\bar{u}_h$ and $g=C\bar{y}_h$ with $(\bar{u}_h,\bar{y}_h,\bar{w}_h) \in \mathcal{U}_h\times \mathcal{Y}_h\times \mathcal{W}_h$ satisfying \eqref{eq:optsys_discr_B}, we obtain
\begin{equation}\label{eq:est_red_Bh}
\begin{aligned}[t]
{\norm[]{G}{C\hat{y}-C\bar{y}}^2}
&\leq2\left(J_\alpha(\bar{u}_h,\hat{y})-J_\alpha(\bar{u},\bar{y})\right)\\
&\leq
\frac{2}{\alpha} \norm[]{X}{B^* \hat{w}}
- 2\langle \bar{u}_h, B^* \hat{w}\rangle_{\mathcal{U},\mathcal{X}}
+\norm[]{G}{CA^{-1}(A\bar{y}_h-B\bar{u}_h)}^2,
\end{aligned}
\end{equation}
with $\hat{y}$ and $\hat w$ defined as in \eqref{eq:yhat} and \eqref{eq:yhatwhat}, respectively.
Note that by $\norm[]{U}{\bar{u}_h}\leq \frac{1}{\alpha}$, the term $\frac{1}{\alpha} \norm[]{X}{B^* \hat{w}} - \langle \bar{u}_h, B^* \hat{w}\rangle_{\mathcal{U},\mathcal{X}}$ is indeed nonnegative.
For the first and last relation in \eqref{eq:optsys_discr_B}, we can define the residuals $\rho_w$ and $\rho_y$ as in \eqref{eq:res123} and, taking into account
\eqref{CAres3}--\eqref{estJ},
obtain a first a posteriori estimate.
\begin{proposition}\label{prop40}
Let $\mathcal{R}_\alpha = \delta_{B_{1/\alpha}^{\mathcal{U}}}$. Then the minimizers $(\bar{u},\bar{y})$ of \eqref{eq:Tikh} and $(\bar{u}_h,\bar{y}_h)$ of \eqref{eq:Tikh_h} satisfy the estimates
\begin{align}
{\norm[]{G}{C\bar{y}_h-C\bar{y}}^2}
&\leq
\frac{4}{\alpha} \norm[]{X}{B^* \hat{w}}
- 4\langle \bar{u}_h, B^* \hat{w}\rangle_{\mathcal{U},\mathcal{X}}
+4\norm[]{G}{CA^{-1}\rho_y}^2,\\
J_\alpha(\bar{u}_h,\bar{y}_h)-J_\alpha(\bar{u},\bar{y})
&\leq
\frac{1}{\alpha} \norm[]{X}{B^* \hat{w}}
- \langle \bar{u}_h, B^* \hat{w}\rangle_{\mathcal{U},\mathcal{X}}
+\norm[]{G}{CA^{-1}\rho_y}\norm[]{G}{C\bar{y}_h-g^\delta}.
\end{align}
with $\rho_y$ as in \eqref{eq:res123}.
\end{proposition}
If a duality mapping ${\mathcal{J}}^{\mathcal{X}}(x)\in \partial\norm[]{X}{\cdot}(x)$ exists (e.g., if $\mathcal{U}=\mathcal{X}^*$), we can also define a residual for the second relation in \eqref{eq:Tikh_h} by
\begin{equation}\label{eq:res2til}
{\rho}_u:=\alpha \bar{u}_h - {\mathcal{J}}^{\mathcal{X}}(B^* \bar{w}_h).
\end{equation}
From $\langle{\mathcal{J}}^{\mathcal{X}} (B^* \bar{w}_h),B^* \bar w_h\rangle_{\mathcal{U},\mathcal{X}}=\norm[]{X}{B^* \bar w_h}$ it follows that
$\langle {\rho}_u,B^* \bar w_h\rangle_{\mathcal{U},\mathcal{X}}\le0$.
Then we can estimate
\begin{equation}
\norm[]{X}{B^* \hat{w}}
- \alpha\langle \bar{u}_h, B^* \hat{w}\rangle_{\mathcal{U},\mathcal{X}}
=\langle -{\rho}_u - ({\mathcal{J}}^{\mathcal{X}} (B^* \bar{w}_h) -{\mathcal{J}}^{\mathcal{X}}( B^* \hat w)) ,B^* \hat{w}\rangle_{\mathcal{U},\mathcal{X}}.
\end{equation}
By construction we have that
\begin{equation}
\langle {\mathcal{J}}^{\mathcal{X}} (B^* \bar{w}_h) -{\mathcal{J}}^{\mathcal{X}}( B^* \hat w) ,B^* \bar w_h\rangle_{\mathcal{U},\mathcal{X}}
= \|B^* \bar{w}_h\|_\mathcal{X} - \langle {\mathcal{J}}^{\mathcal{X}}( B^* \hat w) ,B^* \bar w_h\rangle_{\mathcal{U},\mathcal{X}} \ge 0.
Hence it follows that
\begin{equation}
\langle {\mathcal{J}}^{\mathcal{X}} (B^* \hat w) -{\mathcal{J}}^{\mathcal{X}}( B^* \bar w_h) ,B^* \hat{w}\rangle_{\mathcal{U},\mathcal{X}}
\le \langle {\mathcal{J}}^{\mathcal{X}} (B^* \hat w) -{\mathcal{J}}^{\mathcal{X}}( B^* \bar w_h) ,B^* (\hat{w}-\bar w_h)\rangle_{\mathcal{U},\mathcal{X}}.
\end{equation}
Introducing the symmetric Bregman distance of $\|\cdot\|_\mathcal{X}$ defined as
\begin{equation}\label{dsymmnx}
\mathrm{D^{sym}_{\|\cdot\|_\calX}}(B^* \hat w, B^* \bar w_h) :=\langle {\mathcal{J}}^{\mathcal{X}} (B^* \hat w) -{\mathcal{J}}^{\mathcal{X}}( B^* \bar w_h) ,B^* (\hat{w}-\bar w_h)\rangle_{\mathcal{U},\mathcal{X}},
\end{equation}
we obtain the estimate
\begin{equation}
\norm[]{X}{B^* \hat{w}}
- \alpha\langle \bar{u}_h, B^* \hat{w}\rangle_{\mathcal{U},\mathcal{X}}
\le \langle -{\rho}_u,B^* \hat{w}\rangle_{\mathcal{U},\mathcal{X}} + \mathrm{D^{sym}_{\|\cdot\|_\calX}}(B^* \hat w, B^* \bar w_h).
\end{equation}
Using \eqref{CAres3} in \eqref{eq:est_red_Bh} together with
the definitions of $ \rho_u$ and $\mathrm{D^{sym}_{\|\cdot\|_\calX}}$ yields the following estimates.
\begin{proposition}\label{prop41}
Let $\mathcal{U}=\mathcal{X}^*$ and $\mathcal{R}_\alpha = \delta_{B_{1/\alpha}^{\mathcal{U}}}$. Then the minimizers $(\bar{u},\bar{y})$ of \eqref{eq:Tikh} and $(\bar{u}_h,\bar{y}_h)$ of \eqref{eq:Tikh_h} satisfy the estimates
\begin{align}
\norm[]{G}{C\bar{y}_h-C\bar{y}}^2
&\leq
\frac4\alpha \langle -{\rho}_u,B^* \hat{w}\rangle_{\mathcal{U},\mathcal{X}} + \frac4\alpha\mathrm{D^{sym}_{\|\cdot\|_\calX}}(B^* \hat w, B^* \bar w_h) +4\norm[]{G}{CA^{-1}\rho_y}^2,\label{eq:est_red_Bh_s}\\
\qquad J_\alpha(\bar{u}_h,\bar{y}_h)-J_\alpha(\bar{u},\bar{y})
&\leq
\frac1\alpha \langle -{\rho}_u,B^* \hat{w}\rangle_{\mathcal{U},\mathcal{X}} + \frac1\alpha\mathrm{D^{sym}_{\|\cdot\|_\calX}}(B^* \hat w, B^* \bar w_h) \label{eq:est_red_Bh_s_J}\\
\MoveEqLeft[-1] +\norm[]{G}{CA^{-1}\rho_y}\norm[]{G}{C\bar{y}_h-g^\delta},
\end{align}
with
$\rho_y$ as in \eqref{eq:res123} and $\rho_u$ as in \eqref{eq:res2til}.
\end{proposition}
Let us remark that due to \eqref{eq:res123}, the unknown $\hat w$ can be replaced by $\bar w_h -(A^{*})^{-1}\rho_w$.
Hence, the components of the error estimate are fully available in numerical implementations,
as we will show in more detail in \cref{subsec:example2}.
If a variational discretization, i.e., $\mathcal{U}_h=\mathcal{U}$, is used, then
from \eqref{eq:optsys_discr_B} we obtain $B^* \bar w_h \in \partial \delta_{B_{1/\alpha}^{\mathcal{U}}}(\bar u_h)$,
which is equivalent to $\bar u_h \in \partial \|\cdot\|_\mathcal{X}(B^*\bar w_h)$.
This implies that $\rho_u=0$, and hence \eqref{eq:est_red_Bh_s} and \eqref{eq:est_red_Bh_s_J} reduce to
\begin{align}
\norm[]{G}{C\bar{y}_h-C\bar{y}}^2
&\leq \frac4\alpha\mathrm{D^{sym}_{\|\cdot\|_\calX}}(B^* \hat w, B^* \bar w_h)+4\norm[]{G}{CA^{-1}\rho_y}^2,\label{eq:est_red_Bh_s_var}\\
J_\alpha(\bar{u}_h,\bar{y}_h)-J_\alpha(\bar{u},\bar{y})
&\leq \frac1\alpha\mathrm{D^{sym}_{\|\cdot\|_\calX}}(B^* \hat w, B^* \bar w_h)+\norm[]{G}{CA^{-1}\rho_y}\norm[]{G}{C\bar{y}_h-g^\delta}.\label{eq:est_red_Bh_s_var_J}
\end{align}
\subsection{Application to inverse source problem}\label{subsec:example2}
We now apply the estimate from \cref{prop41} to the model problem \eqref{eq:ex1} for the case of Ivanov regularization. In this case, we have $\mathcal{U}=L^\infty(\omega_c)$ and $\mathcal{X}=L^1(\omega_c)$, i.e., $\mathcal{U}=\mathcal{X}^*$, and hence the duality mapping is given by
\begin{equation}
{\mathcal{J}}^{\mathcal{X}} (B^* w)=\operatorname{sign}(w\vert_{\omega_c}).
\end{equation}
As before, we take $\mathcal{Y}= H_0^1(\Omega)=\mathcal{W}$. The Ivanov problem is then given by
\begin{equation}\label{eq:ex1bmin}
\left\{\begin{aligned}
&\min_{y,u} \frac12 \norm[]{L2o}{y-g^\delta}^2
\quad \text{ s.t. }\quad|u(x)|\leq\frac{1}{\alpha}\quad\text{for a.e. } x\in\omega_c\\
&\text{and }-\Delta y = \chi_{\omega_c} u,\quad y|_{\partial\Omega}=0.
\end{aligned}\right.
\end{equation}
The residuals used in \cref{prop41} are now given by
\begin{align}
\rho_w&:=\chi_{\omega_o}(\bar{y}_h-g^\delta)-\Delta\bar{w}_h,\\
{\rho}_u&:=\alpha \bar{u}_h-\operatorname{sign}(\bar w_h\vert_{\omega_c}),\\
\rho_y&:=-\Delta\bar{y}_h-\chi_{\omega_c}\bar{u}_h.
\end{align}
We will consider the case of variational discretization for simplicity,
where we can make use of the estimate \eqref{eq:est_red_Bh_s_var}.
Since the term containing $\rho_y$ in \eqref{eq:est_red_Bh_s_var} can be estimated by \eqref{eq:Hm2_res3}, it
only remains to consider the term containing $\mathrm{D^{sym}_{\|\cdot\|_\calX}}$, which in this setting can be estimated by
\begin{equation}
\mathrm{D^{sym}_{\|\cdot\|_\calX}}(B^* \hat w, B^* \bar w_h)=
\frac{1}{\alpha}\scalprod{LinftycL1c}{\operatorname{sign}(\hat{w})-\operatorname{sign}(\bar{w}_h)}{\hat{w}-\bar{w}_h}
\leq \frac{2}{\alpha}\norm[]{L1c}{\hat{w}-\bar{w}_h}.
\end{equation}
(Note that we cannot expect smallness of $\norm[]{Linftyc}{\operatorname{sign}(\hat{w})-\operatorname{sign}(\bar{w}_h)}$ directly, since continuity of the $\operatorname{sign}$ operator cannot be quantified on $\mathcal{X}=L^1(\omega_c)$.)
In order to estimate the $\m{L1c}$-norm of $\hat{w}-\bar{w}_h$, we introduce
\begin{equation}
z := (-\Delta)^{-1}[\chi_{\omega_c} \operatorname{sign}(\hat{w}-\bar{w}_h) ]\ \in\ W^{2,p}(\Omega)\cap H^1_0(\Omega) \quad \forall p<\infty.
\end{equation}
We assume from here on that $\Omega\subset \mathbb{R}^2$ is polygonal with interior angles of at most $\frac\pi2$.
In this case, we obtain from \cite[Thm.~1]{dipliniotemam2015} that
\begin{equation}\label{eq:schatzwahlbin}
\|z\|_{W^{2,p}(\Omega)} \le c_S \, p \, \norm[]{Linf}{\operatorname{sign}(\hat{w}-\bar{w}_h)} \leq c_S\,p
\end{equation}
holds for all $p\ge2$ with a constant $c_S>0$ independent of $p$.
In case that $\Omega$ does not allow for such a regularity result, and \eqref{eq:schatzwahlbin} only holds for $p=2$,
we can use the $L^2$-error estimate of \cref{subsec:example1}.
Let ${\mathcal{I}}^{\mathcal{T}} z$ be the piecewise linear interpolant of $z$.
Then we have from \cite[Thm 3.1.6]{Ciarlet78} together with \eqref{eq:schatzwahlbin} for all $p>d$ the estimate
\begin{equation}\label{eq42_interp}
\norm[]{LinftyK}{z - {\mathcal{I}}^{\mathcal{T}} z} + \norm[]{LinftyFF}{z - {\mathcal{I}}^{\mathcal{T}} z} \le c_I \, h_K^{2-d/p} \norm[]{W2pK}{z} \leq c_Ic_S\,p\, h_K^{2-d/p}
\end{equation}
with a constant $c_I>0$ depending only on the chosen finite element family.
Using the definition of $z$, we obtain
\begin{equation}
\begin{aligned}
\norm[]{L1c}{\hat{w}-\bar{w}_h}
&=(\nabla z,\nabla (\hat{w}-\bar{w}_h))_{\m{L2}}\\
&=(\nabla (z-{\mathcal{I}}^{\mathcal{T}} z),\nabla (\hat{w}-\bar{w}_h))_{\m{L2}}
\\
&=-( z-{\mathcal{I}}^{\mathcal{T}} z,\bar{y}_h-g^\delta)_{\m{L2o}} -(\nabla (z-{\mathcal{I}}^{\mathcal{T}} z),\nabla\bar{w}_h)_{\m{L2}},
\end{aligned}
\end{equation}
where
we have used Galerkin orthogonality and the fact that the interpolation operator ${\mathcal{I}}^{\mathcal{T}}:{C}_b(\overline{\Omega})\to\mathcal{Y}_h$ can indeed be applied to
$z\in W^{2,p}(\Omega)\cap H_0^1(\Omega)\hookrightarrow {C}_b(\overline{\Omega})$.
Here and below, $(\cdot,\cdot)_{L^2}$ denotes the $L^2$ inner product.
Now we integrate by parts on each element to obtain
\begin{equation}\label{etaw_normcons}
\begin{aligned}[t]
\norm[]{L1c}{\hat{w}-\bar{w}_h}
&=-\sum_{K\in {\mathcal{T}}_h}
\Bigl((z-{\mathcal{I}}^{\mathcal{T}} z,
-\Delta \bar{w}_h+\chi_{\omega_o}(\bar{y}_h-g^\delta))_{\m{L2K}}\\
\MoveEqLeft[-1] -\int_{\partial K} \nabla \bar{w}_h\cdot \nu (z-{\mathcal{I}}^{\mathcal{T}} z)\,ds
\Bigr)\\
&\leq \sum_{K\in {\mathcal{T}}_h} \Big(\norm[]{LinftyK}{z-{\mathcal{I}}^{\mathcal{T}} z}
\ \norm[]{L1K}{-\Delta \bar{w}_h+\chi_{\omega_o}(\bar{y}_h-g^\delta)}
\\
\MoveEqLeft[-1]
+\norm[]{LinftypK}{z-{\mathcal{I}}^{\mathcal{T}} z}
\norm[]{L1pK}{\llbracket\nabla \bar{w}_h\cdot \nu\rrbracket} \Big)
\\
&\leq c_Ic_S
\sum_{K\in {\mathcal{T}}_h}
p_K \,
h_K^{2-\frac d{p_K}}
\ \left(\norm[]{L1K}{-\Delta \bar{w}_h+\chi_{\omega_o}(\bar{y}_h-g^\delta)}
+
\norm[]{L1pK}{\llbracket\nabla \bar{w}_h\cdot \nu\rrbracket}
\right)
\end{aligned}
\end{equation}
where we have used \eqref{eq42_interp} with $p_K\ge d$ individually for each element $K\in{\mathcal{T}}_h$.
(As in \eqref{eq:Hm2}, the term $\Delta \bar{w}_h$ vanishes in case of piecewise linear finite elements.)
Choosing now $p_K \sim d\,|\log(h_K)|$ yields
\begin{equation}
\norm[]{L1c}{\hat{w}-\bar{w}_h}\le c_Ic_S
\sum_{K\in {\mathcal{T}}_h}
|\log h_K| \,
h_K^{2}
\left(\norm[]{L1K}{\rho_w}
+
\norm[]{L1pK}{\llbracket\nabla \bar{w}_h\cdot \nu\rrbracket}
\right)=:c_{\mathcal{T}} \eta_w.
\end{equation}
With the help of this residual-based error estimate and of \eqref{eq:Hm2_res3}, the error estimates \eqref{eq:est_red_Bh_s_var} and \eqref{eq:est_red_Bh_s_var_J} can be computed.
\section{Banach space norm regularization}\label{sec:errest_red_Ban2}
In this section, we consider as regularization term
\begin{equation}
\mathcal{R}_\alpha=\alpha \norm[]{U}{\cdot}, \qquad\text{and hence}\qquad
\mathcal{R}_\alpha^*=\delta_{B_\alpha^\mathcal{X}}(\cdot).
\end{equation}
This setting is of particular interest for promoting sparsity of $u$ via $\mathcal{U}=\mathcal{M}(\Omega)$; see \cref{subsec:example3}.
Again, in case $\mathcal{U}=\mathcal{X}^*$ we explicitly assume that $B$ is an adjoint operator.
As above, we obtain for \eqref{eq:Tikh} the optimality conditions
\begin{equation}\label{eq:optsys_C}
\left\{\begin{aligned}
C^*(C\bar{y}-g^\delta)+A^*\bar{w}&=0,\\
B^*\bar{w}\in B_\alpha^{\mathcal{X}}\quad\text{and}\quad
\langle\bar{u},u^*- B^*\bar{w}\rangle_{\mathcal{U},\mathcal{X}}&\leq0 \quad \forall u^*\in B_\alpha^{\mathcal{X}},
\\
A\bar{y}-B\bar{u}&=0.
\end{aligned}\right.
\end{equation}
We again consider a discretization of this system.
In the following, let $(\bar y_h,\bar u_h,\bar w_h)$ be a discrete approximation of the solution of \eqref{eq:optsys_C} given by
\begin{equation}\label{eq:optsys_discr_C}
\left\{\begin{aligned}
R_{\mathcal{Y}_h} \left(C^*(C\bar{y}_h-g^\delta)+ A^*\bar{w}_h\right)&=0,\\
R_{\mathcal{W}_h}\left(A\bar{y}_h-B\bar{u}_h\right)&=0,
\end{aligned}\right.
\end{equation}
together with a discretization of the second relation of \eqref{eq:optsys_C}, which however is intimately
linked to the choice of the space $\mathcal{U}_h$ and the discrete approximation
of $B^* w\in B_\alpha^{\mathcal{X}}$.
We refer to \cref{subsec:example3} concerning details for the specific choice $\mathcal{U}=\mathcal{M}(\Omega)$.
\subsection{Error estimates}\label{sec:errest_red_Ban2_est}
Setting again $\phi_\alpha(u_1,u_2)=0$, we obtain from \cref{th:Repin} that the solution $\bar{u}$ to \eqref{eq:minred} satisfies
\begin{equation}\label{eq:est_red_C}
\begin{aligned}
{\norm[]{G}{Ku-K\bar{u}}^2}&\leq2\left(J_\alpha(u,Ku)-J_\alpha(\bar{u},K\bar{u})\right)\\
&\leq 2\alpha \norm[]{U}{u}
+\norm[]{G}{Ku-g^\delta}^2+\norm[]{G}{ g^*-g^\delta}^2-\norm[]{G}{g^\delta}^2
\\
&= 2\alpha \norm[]{U}{u}
+2\langle u, K^* (g-g^\delta)\rangle_{\mathcal{U},\mathcal{X}}
+\norm[]{G}{Ku-g}^2.
\end{aligned}
\end{equation}
for any $u\in \mathcal{U}$ and $g^*:=g^\delta -g \in \mathcal{G}$ for any $g\in \mathcal{G}$ such that $K^*g^*\in B_{\alpha}^{\mathcal{X}}$.
Similarly as before, we set $u=\bar u_h$.
However, the choice $g=C\bar{y}_h$ is not possible, as $K^*(C\bar{y}_h-g^\delta)\notin B_{\alpha}^{\mathcal{X}}$ in general.
Hence, we introduce a scaling factor $\kappa>0$ such that
$g- g^\delta = \kappa(C\bar{y}_h-g^\delta)$
satisfies
\begin{equation}
\kappa K^*(C\bar{y}_h- g^\delta) = \kappa B^*(A^*)^{-1}C^*(C\bar{y}_h-g^\delta)
= \kappa B^* \hat w \in B_{\alpha}^{\mathcal{X}}
\end{equation}
with $\hat w$ as in \eqref{eq:yhatwhat}.
It thus suffices to choose
\begin{equation}\label{eq:kappa}
\kappa=\min\left\{\frac{\alpha}{\norm[]{X}{B^* \hat{w}}},\ 1\right\}.
\end{equation}
The estimation of $\kappa$ will be discussed below; see \eqref{eq:est_kappam1_wh} and \eqref{eq:etakappa}.
Inserting $u=\bar{u}_h$ and $g=\kappa C\bar{y}_h+(1-\kappa)g^\delta$ with $(\bar{u}_h,\bar{y}_h,\bar{w}_h) \in \mathcal{U}_h\times \mathcal{Y}_h\times \mathcal{W}_h$ satisfying \eqref{eq:optsys_discr_C}, we obtain
\begin{equation}\label{eq:est_red_Ch}
\begin{aligned}
\norm[]{G}{C\hat{y}-C\bar{y}}^2
&\leq2\left(J_\alpha(\bar{u}_h,\hat{y})-J_\alpha(\bar{u},\bar{y})\right)\\
&\leq
2\alpha \norm[]{U}{\bar{u}_h}
- 2\langle \bar{u}_h, \kappa B^* \hat{w}\rangle_{\mathcal{U},\mathcal{X}}
+\norm[]{G}{CA^{-1}(A\bar{y}_h-B\bar{u}_h)+(\kappa-1)(C\bar{y}_h-g^\delta)}^2\\
& =
2(\alpha \norm[]{U}{\bar{u}_h}
- \langle \bar{u}_h, B^* \bar w_h\rangle_{\mathcal{U},\mathcal{X}}
+ \kappa\langle \bar{u}_h, B^*(\bar w_h -\hat{w})\rangle_{\mathcal{U},\mathcal{X}}\\
\MoveEqLeft[-1] + (1-\kappa) \langle \bar{u}_h, B^* \bar w_h\rangle_{\mathcal{U},\mathcal{X}})
+\norm[]{G}{CA^{-1}\rho_y+(\kappa-1)(C\bar{y}_h-g^\delta)}^2.
\end{aligned}
\end{equation}
Note that by dual feasibility of $\kappa\hat{w}$, the term $\alpha \norm[]{U}{\bar{u}_h}-\langle \bar{u}_h, \kappa B^* \hat{w}\rangle_{\mathcal{U},\mathcal{X}}$ is nonnegative.
Estimating again the terms on the right-hand side using \eqref{CAres3} and \eqref{abcd} with $\sigma=4$ and $\gamma=2$, we obtain the following a posteriori estimate.
\begin{proposition}\label{prop:est_normpenalty}
Let $\mathcal{R}_\alpha = \alpha\norm[]{U}{\cdot}$. Then the minimizers $(\bar{u},\bar{y})$ of \eqref{eq:Tikh} and $(\bar{u}_h,\bar{y}_h)$ of \eqref{eq:Tikh_h} satisfy the estimate
\begin{align}
\norm[]{G}{C\bar{y}_h-C\bar{y}}^2
&\leq
4(\alpha \norm[]{U}{\bar{u}_h}
- \langle \bar{u}_h, B^* \bar w_h\rangle_{\mathcal{U},\mathcal{X}})
+ 4\kappa\langle \bar{u}_h, B^*(\bar w_h -\hat{w})\rangle_{\mathcal{U},\mathcal{X}}\label{eq:est_red_Ch_1}\\
\MoveEqLeft[-1] + 4(1-\kappa) \langle \bar{u}_h, B^* \bar w_h\rangle_{\mathcal{U},\mathcal{X}}
+4\norm[]{G}{CA^{-1}\rho_y+(\kappa-1)(C\bar{y}_h-g^\delta)}^2,\\
\qquad J_\alpha(\bar{u}_h,\hat{y})-J_\alpha(\bar{u},\bar{y})
&\leq
(\alpha \norm[]{U}{\bar{u}_h}
- \langle \bar{u}_h, B^* \bar w_h\rangle_{\mathcal{U},\mathcal{X}})
+ \kappa\langle \bar{u}_h, B^*(\bar w_h -\hat{w})\rangle_{\mathcal{U},\mathcal{X}}\label{eq:est_red_Ch_1_J}\\
\MoveEqLeft[-1] + (1-\kappa) \langle \bar{u}_h, B^* \bar w_h\rangle_{\mathcal{U},\mathcal{X}}
\\
\MoveEqLeft[-1]+\norm[]{G}{CA^{-1}\rho_y+(\kappa-1)(C\bar{y}_h-g^\delta)}\norm[]{G}{C\bar{y}_h-g^\delta},
\end{align}
with $\rho_y$ as in \eqref{eq:res123} and $\kappa$ satisfying \eqref{eq:kappa}.
\end{proposition}
If a duality mapping ${\mathcal{J}}^\mathcal{U}(u)\in\partial\norm[]{U}{\cdot}(u)$ exists (e.g., if $\mathcal{X}=\mathcal{U}^*$),
we could again define a residual for the discrete version of the second relation in \eqref{eq:optsys_C} via
${\rho}_u:=\alpha{\mathcal{J}}^\mathcal{U}(\bar{u}_h)- B^*\bar{w}_h$
and proceed similarly as in \cref{sec:errest_red_Ban2_est}. Since this will not be the case in the example below, we do not do so here.
The quantity $1-\kappa$ can be estimated by
\begin{equation}\label{eq:est_kappam1_wh}
1-\kappa \leq
\max \left(1- \frac{\alpha}{\norm[]{X}{B^* \hat{w}}}, \ 0\right)
\le
\max \left(\frac{\norm[]{X}{B^* \bar w_h}-\alpha+\norm[]{X}{B^* (\bar w_h-\hat{w})}}{\norm[]{X}{B^* \bar w_h}+\norm[]{X}{B^* (\bar w_h-\hat{w})}}, \ 0\right).
\end{equation}
This bound can be written in terms of the residual $\rho_w$ as
\begin{equation}\label{estkappa}
1-\kappa \le
\max \left(\frac{\norm[]{X}{B^* \bar w_h}-\alpha+\norm[]{X}{B^*(A^*)^{-1}\rho_w}}{\norm[]{X}{B^* \bar w_h}+\norm[]{X}{B^*(A^*)^{-1}\rho_w}}, \ 0\right),
\end{equation}
which implies that the quantity $1-\kappa$ is a combination of the violation of the dual constraint $\norm[]{X}{B^* \bar w_h}\le \alpha$
and the residual $\rho_w$. Thus we can expect $1-\kappa$ to be small for a
sufficiently fine discretization.
We refer to \cite{RoeschWachsmuth} for a related error estimate for state-constrained optimal control problems.
\subsection{Application to inverse source problem}\label{subsec:example3}
We now apply the estimate from \cref{prop:est_normpenalty} to the model problem \eqref{eq:ex1} for the case of sparsity regularization. In this case, we have
$\mathcal{U}=\mathcal{M}(\omega_c)$ and $\mathcal{X}={C}_b(\omega_c)$, i.e., $\mathcal{U}=\mathcal{X}^*$. Due to the low regularity of the source term, we here set $\mathcal{Y}= W_0^{1,q'}(\Omega)$ and $\mathcal{W}= W_0^{1,q}(\Omega)$, where $q'=\frac{q}{q-1}$ with $n<q\leq\frac{2n}{n-2}$ to guarantee $W_0^{1,q'}(\Omega)\subseteq L^2(\Omega)$.
The operator $B:\mathcal{M}(\omega_c) \to W^{-1,q'}(\Omega)$ is defined as
\begin{equation}
\langle Bu, v\rangle_{W^{-1,q'},W_0^{1,q}} = \int_{\omega_c} v\,\mathrm{d} u,
\end{equation}
with $B^* w = w\vert_{\omega_c}$.
The Tikhonov problem is then given by
\begin{equation}\label{eq:ex1cmin}
\left\{\begin{aligned}
&\min_{y,u} \frac12 \norm[]{L2o}{y-g^\delta}^2 +\alpha\norm[]{Mc}{u}\\
&\mbox{s.t. }-\Delta y = \chi_{\omega_c} u,\quad y|_{\partial\Omega}=0.
\end{aligned}\right.
\end{equation}
From \cite{Clason:2012}, we have existence of a minimizer $\bar u\in\mathcal{M}(\omega_c)$ as well as an optimal state $\bar y\in W_0^{1,q'}(\Omega)$ and an adjoint state $\bar w\in W_0^{1,q}(\Omega)$ satisfying the optimality conditions
\begin{equation}\label{eq:optsys_C_ex1}
\left\{\begin{aligned}
&-\Delta\bar{w}+\chi_{\omega_o}(\bar{y}-g^\delta)=0,\quad \bar{w}|_{\partial\Omega}=0\\
&\norm[]{Cc}{\bar{w}}\leq\alpha \ \text{ and } \
\scalprod{McCc}{\bar{u}}{\tilde w- \bar{w}}\leq0
\quad \forall \norm[]{Cc}{\tilde{w}}\leq\alpha,\\
&-\Delta \bar{y}-\chi_{\omega_c}\bar{u}=0,\quad \bar{y}|_{\partial\Omega}=0.
\end{aligned}\right.
\end{equation}
As $\Omega$ is convex and polyhedral, we can employ $H^2$-regularity results.
We take here as well $\mathcal{Y}_h\subset\mathcal{Y}$ and $\mathcal{W}_h\subset\mathcal{W}$ as piecewise linear finite elements, and thus the residuals in the first and third relation are once more given by
\begin{align}\label{eq:res123_ex1c}
\rho_w&=\chi_{\omega_o}(\bar{y}_h-g^\delta)-\Delta\bar{w}_h,\\
\rho_y&=-\Delta\bar{y}_h-\chi_{\omega_c}\bar{u}_h.
\end{align}
We again use a variational discretization $\mathcal{U}_h=\mathcal{U}$. It was shown in \cite{CCK12} that the corresponding semi-discretization of \eqref{eq:ex1cmin} admits a unique minimizer of the form $\bar{u}_h=\sum_{j=1}^{N_c} u_j \delta_{x_j}$, where $\delta_x$ denotes the Dirac measure concentrated on $x\in\Omega$ and $\{x_j\}_{j=1}^{N_c}$ are the interior vertices of ${\mathcal{T}}_h$ lying in $\omega_c$. Hence, we have that
\begin{equation}
\alpha\norm[]{Mc}{\bar{u}_h}=\scalprod{McCc}{\bar{u}_h}{\bar w_h},
\end{equation}
so that the first term on the right-hand sides of \eqref{eq:est_red_Ch_1} and \eqref{eq:est_red_Ch_1_J} vanishes. Furthermore, from \cite{CCK12} we have that
\begin{equation}
\scalprod{McCc}{\bar {u}_h}{w_h} = \sum_{j=1}^{N_c}u_j w_j,
\end{equation}
for any $w_h = \sum_{j=1}^{N_c} w_j e_j$, where $e_j$ is the piecewise linear finite element basis function corresponding to the vertex $x_j$.
To estimate the term $A^{-1}\rho_y$, we use the residual error estimator for Dirac measure data from \cite{arayabr06} (note that here $\rho_y\vert_K\notin L^2(K)$):
There exists a constant $c_2>0$ independent of $h$ such that
\begin{equation}\label{eq:etay2}
\norm[]{L2o}{\bar{y}_h-\hat{y}}
\leq
c_2 \left( \sum_{K\in {\mathcal{T}}_h} h_K^{3} \norm[]{L2pK}{\llbracket\nabla \bar{y}_h\cdot \nu\rrbracket}^2\right)^{1/2}
=:c_2\eta_y.
\end{equation}
The term $\scalprod{McCc}{\bar{u}_h}{\bar w_h - \hat w}$ from the right-hand side of \eqref{eq:est_red_Ch_1} can be estimated as
\begin{equation}\label{eq:etaw}
\begin{aligned}[t]
\allowdisplaybreaks
\scalprod{McCc}{\bar{u}_h}{\bar w_h - \hat w}
&= \sum_{j=1}^{N_c} u_j (\bar{w}_h(x_j)-\hat{w}(x_j))
= \sum_{j=1}^{N_c} u_j (\bar{w}_h(x_j)-{\mathcal{I}}^{\mathcal{T}}\hat{w}(x_j))\\
&= \int_\Omega \nabla \bar{y}_h \nabla(\bar{w}_h-{\mathcal{I}}^{\mathcal{T}}\hat{w})\,\mathrm{d}x
= \int_\Omega \nabla \bar{y}_h \nabla(\hat{w}-{\mathcal{I}}^{\mathcal{T}}\hat{w}) \,\mathrm{d}x \\
&= \sum_{K\in {\mathcal{T}}_h} \int_{\partial K} \nabla \bar{y}_h\cdot \nu (\hat{w}-{\mathcal{I}}^{\mathcal{T}}\hat{w})\,\mathrm{d}s\\
&\leq c_2c_I \left( \sum_{K\in {\mathcal{T}}_h} h_K^{3} \norm[]{L2pK}{\llbracket\nabla \bar{y}_h\cdot \nu\rrbracket}^2 \right)^{1/2}
|\hat{w}|_{H^2(\Omega)}\\
&\leq c_2c_Ic_S \left( \sum_{K\in {\mathcal{T}}_h} h_K^{3} \norm[]{L2pK}{\llbracket\nabla \bar{y}_h\cdot \nu\rrbracket}^2\right)^{1/2}
\norm[]{L2o}{\bar{y}_h-g^\delta}\\
&=:c_3\eta_w,
\end{aligned}
\end{equation}
where we have used the definition of $\bar{y}_h$ in the third equality, the definition of $\bar{w}_h$ and $\hat{w}$ in the fourth equality, and elementwise integration by parts, elementwise linearity of $\bar{y}_h$ in the fifth equality, as well as \eqref{eq:estint} and \eqref{eq:eststab}.
In order to estimate $1-\kappa$, we apply the $L^\infty(\Omega)$ residual error estimator of \cite{nssv06}; see also \cite{RoeschWachsmuth},
which is valid even for nonconvex polyhedral domains.
It was proven in \cite{nssv06} that there exists a constant
$c>0$ depending on $\Omega$ and the shape regularity of the triangulation such that
\begin{equation}\label{eq:est_w_linfty}
\begin{aligned}[t]
\norm[]{Linftyc}{\bar w_h - \hat w}&\le c|\log h_\mathrm{min}|^2
\max_{K\in {\mathcal{T}}_h} \Big( h_K^2 \norm[]{LinftyK}{{-}\Delta\bar w_h+\chi_{\omega_o}(\bar y_h-g^\delta)}\\
\MoveEqLeft[-11] + h_K \norm[]{LinftypK}{\llbracket\nabla \bar{w}_h\cdot \nu\rrbracket}\Big) =: c\, \eta_{w}^\infty,
\end{aligned}
\end{equation}
where $h_\mathrm{min}:=\min_{K\in {\mathcal{T}}_h}h_K$. Inserting this into \eqref{eq:est_kappam1_wh}, we obtain
\begin{equation}\label{eq:etakappa}
1-\kappa \leq
\max \left(\frac{\norm[]{X}{B^* \bar w_h}-\alpha+c\eta_w^\infty}{\norm[]{X}{B^* \bar w_h}+c\eta_w^\infty}, \ 0\right)=:\eta_\kappa.
\end{equation}
Collecting all the results, we obtain from \cref{prop:est_normpenalty} the a posteriori estimates
\begin{align}
\norm[]{L2o}{\bar{y}_h-\bar{y}}^2
&\le 4 c_3\eta_w
+4\eta_\kappa\scalprod{McCc}{\bar{u}_h}{\bar w_h}
\label{etaex3} \\
\MoveEqLeft[-1]+4 \Bigl(c_2\eta_y +\eta_\kappa\norm[]{L2o}{\bar{y}_h-g^\delta}\Bigr)^2,\\
J_\alpha(\bar{u}_h,\bar{y}_h)-J_\alpha(\bar{u},\bar{y})
&\le c_3\eta_w
+\eta_\kappa\scalprod{McCc}{\bar{u}_h}{\bar w_h}\label{etaex3_J}\\
\MoveEqLeft[-1]+4 \Bigl(c_2 \eta_y +\eta_\kappa\norm[]{L2o}{\bar{y}_h-g^\delta}\Bigr)\norm[]{L2o}{\bar{y}_h-g^\delta}.
\end{align}
\begin{remark}
A posteriori estimators for a state-constrained control problem can also be found in \cite{roeschssiebertsteinig}.
This control problem is related to the dual problem to \eqref{eq:ex1cmin}, which takes the form of a state-constrained problem without a discrepancy term. (Conversely, the dual to the problem in \cite{roeschssiebertsteinig} involves a Huber norm in place of the measure-space norm in \eqref{eq:ex1cmin}.)
Furthermore, in \cite{roeschssiebertsteinig} the state constraint is penalized, which manifests in an additional $L^2$ penalty in the dual problem.
The resulting error estimator then gives combined bounds on the regularization and the discretization error.
\end{remark}
\section{Numerical example}\label{sec:NumTests}
We illustrate our error estimators with numerical results for the example from \cref{subsec:example3}. In order to have available an exact analytical solution, we use the example from \cite[Section 8.1]{Pieper:2013}: Setting $\Omega=\omega_c=\omega_o=B_1(0)\subseteq \mathbb{R}^2$, we have that $-\Delta y^\dagger = u^\dagger$ for
\begin{equation}
y^\dagger(x)=-\frac{1}{2\pi}\ln\left(\max\left\{\rho,|x|_2\right\}\right), \qquad
u^\dagger=-\frac{1}{2\pi\rho}\mathcal{H}^1\vert_{\partial B_\rho(0)},
\end{equation}
where $\rho\in(0,1)$ is arbitrary and $\mathcal{H}^1$ denotes the one-dimensional Hausdorff measure. Furthermore, $\bar u = u_{\alpha}^\delta = u^\dagger$ is the minimizer of \eqref{eq:ex1cmin} for given $\alpha>0$ if the data is chosen as
\begin{equation}
g^\delta(x)=-\frac{1}{2\pi}\ln\left(\max\left\{\rho,|x|_2\right\}\right)+\alpha\phi\left(|x|_2\right)
\end{equation}
with
\begin{equation}
\phi(r)=\begin{cases}
\frac{6(3r-2\rho)}{\rho^3} &\text{ for }r<\rho\\
\frac{6(3r^2-2r\rho-2r+\rho)}{(\rho-1)^3r} &\text{ for }r\geq\rho.
\end{cases}
\end{equation}
In the following, we set $\rho=0.5$ and $\alpha=10^{-2}$ unless specified otherwise. The corresponding discrete approximations $\bar u_h$ are computed using the approach from \cite{CCK12}.
We first illustrate \cref{prop:est_normpenalty} by comparing in \cref{fig:funcerr-pieper-vexler} the errors in residual and functional value to the terms in \eqref{etaex3} and \eqref{etaex3_J} for a sequence of adaptively refined meshes for uniform refinement (\cref{fig:funcerr-pieper-vexler:uniform}) as well as for adaptive refinement using the procedure described in \cite{RoeschWachsmuth} (\cref{fig:funcerr-pieper-vexler:adaptive}).
We also show the rate $\mathcal{O}(h^2)$, which up to a logarithmic factor is known to hold for the residual and Tikhonov functional error; see \cite[Thm.~6.2]{Pieper:2013}. This rate also seems to be satisfied for our estimator.
\pgfplotsset{cycle list/Dark2-6}
\begin{figure}
\begin{subfigure}[t]{\textwidth}
\centering
\input{conv_h.tikz}
\caption{uniform refinement}
\label{fig:funcerr-pieper-vexler:uniform}
\end{subfigure}
\begin{subfigure}[t]{\textwidth}
\centering
\input{conv_h_adapt.tikz}
\caption{adaptive refinement}
\label{fig:funcerr-pieper-vexler:adaptive}
\end{subfigure}
\caption{Comparison of true error and estimator}
\label{fig:funcerr-pieper-vexler}
\end{figure}
To illustrate \cref{prop:conv}, we consider $g:=y^\dagger$ as exact data, add Gaussian noise at different levels $\delta$, and adaptively compute the corresponding minimizer $u_{\alpha(\delta),h(\delta)}^\delta$. Specifically, we
start from a relatively large $\alpha_0=10^{-2}$ and coarse uniform mesh. In an outer loop, we then reduce the regularization parameter $\alpha_k=\alpha_0\theta^k$ for $\theta=0.6$ until the discrepancy principle \eqref{eq:discrprinc} with $\overline{\tau}=2$ is satisfied. In an inner loop, we adaptively refine the discretization according to the error estimator from \cref{prop:est_normpenalty} until the precision requirements \eqref{eq:accuracy_residual} and \eqref{eq:accuracy_cost} from \cref{prop:conv} are satisfied. The resulting residuals, regularization parameters and functional values for different noise levels are plotted in \cref{fig:plot_delta} and show a convergence rate of $\mathcal{O}(\delta)$.
\pgfplotsset{cycle list/Dark2-3}
\begin{figure}
\centering
\input{conv_delta.tikz}
\caption{Illustration of convergence rates as $\delta\to0$}
\label{fig:plot_delta}
\end{figure}
\section{Conclusion}
Reliable estimators for the discretization error in Tikhonov regularization can be computed using the approach from \cite{Repin00}.
Combining this with a general result on convergence of discrete approximations and an appropriate adaptive mesh refinement strategy yields convergence of these approximations to a solution of the inverse problem.
The approach can in particular be applied to the Banach-space setting required for sparsity enhancement or Ivanov regularization.
These error estimators can be incorporated into a local refinement strategy for mesh adaptation. As shown in the examples, the estimators can be written in terms of sums over the element domains (or their interfaces) of a triangulation. Thus it makes sense to subdivide elements with relatively large contribution to the error estimator.
Note that using variational discretizations according to \cite{CCK12,Hinze05}, we do not refine independently for parameter, state, and adjoint, but use a common mesh for all three quantities.
Future research will be devoted to transferring this approach to nonlinear inverse problems via iterative linearization similarly to \cite{KaltenbacherKirchnerVeljovic13} as well as to all-at-once approaches based on the model-and-measurement formulation (\refeq{model}--\refeq{measurement}).
| {
"redpajama_set_name": "RedPajamaArXiv"
} | 2,997 |
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "blimp/engine/app/ui/blimp_screen.h"
#include "ui/aura/window.h"
#include "ui/aura/window_tree_host.h"
#include "ui/display/display_observer.h"
#include "ui/gfx/geometry/size.h"
namespace blimp {
namespace engine {
namespace {

// Identifier used for the single display that BlimpScreen exposes.
const int64_t kDisplayId = 1;

}  // namespace
// Constructs the screen and registers a single display with id kDisplayId
// as the primary display.
BlimpScreen::BlimpScreen() {
  display::Display display(kDisplayId);
  ProcessDisplayChanged(display, true /* is_primary */);
}

BlimpScreen::~BlimpScreen() {}
// Applies a new device scale factor and pixel size to the primary display.
// No-ops when neither value differs from the current display state.
void BlimpScreen::UpdateDisplayScaleAndSize(float scale,
                                            const gfx::Size& size) {
  display::Display updated(GetPrimaryDisplay());
  const bool unchanged = scale == updated.device_scale_factor() &&
                         size == updated.GetSizeInPixel();
  if (unchanged)
    return;
  updated.SetScaleAndBounds(scale, gfx::Rect(size));
  display_list().UpdateDisplay(updated);
}
// Always returns the default point (0, 0); no cursor position is tracked
// here. NOTE(review): confirm callers tolerate this fixed value.
gfx::Point BlimpScreen::GetCursorScreenPoint() {
  return gfx::Point();
}
// Not implemented; always reports that |window| is not under the cursor.
bool BlimpScreen::IsWindowUnderCursor(gfx::NativeWindow window) {
  NOTIMPLEMENTED();
  return false;
}
// Returns the topmost window containing |point|, or a null window when no
// window tree host has been attached yet.
gfx::NativeWindow BlimpScreen::GetWindowAtScreenPoint(const gfx::Point& point) {
  if (!window_tree_host_)
    return gfx::NativeWindow(nullptr);
  return window_tree_host_->window()->GetTopWindowContainingPoint(point);
}
// There is only a single display, so every window maps to the primary one.
display::Display BlimpScreen::GetDisplayNearestWindow(
    gfx::NativeWindow window) const {
  return GetPrimaryDisplay();
}
} // namespace engine
} // namespace blimp
| {
"redpajama_set_name": "RedPajamaGithub"
} | 4,598 |
Q: Can we configure tomcat/servlet/java to select windows root drive whenever we refer "/" in paths I have been working with Eclipse on windows with Java Servlet frameworks And Tomcat in combination for almost Six years now. Whenever I configured tomcat in eclipse to run any Servlet based framework (mostly Struts2/Struts1) and I tried to access paths on disks like for example to set log file path, from the servlet, I assumed that "/" means C: or the drive on which tomcat is running (I always use external installation of tomcat in Eclipse).
However recently my tomcat selected D: as root drive for path(/var/data/logs). And now it is again selecting C: as root drive for "/" even if my Tomcat and Eclipse installations are running on D: Not sure how root drive is selected whenever we refer to "/" in these scenarios.
I would like to always select D: as my root drive whenever i refer to "/" in my paths.
Of course, I don't want to write code to check the current OS and then decide which path to use (like "/" or C:/D:, etc.)
A: It works just as in the command line.
If in your command line, you're in the directory c:\foo\bla and type cd \, you'll go to c:\. If you're in the directory d:\foo\bla and type cd \, you'll go to d:\. So, the path depends on where you are when the java command starting tomcat is executed.
If you always want to be on d:\, then make sure the .bat file used to start tomcat goes to the d: drive before executing its java command.
It would be simpler and more reliable to have your build system generate an appropriate absolute path depending on the target deployment platform though: use d:\var\data\logs when generating a war for windows, and use /var/data/logs when generating a war for another OS.
| {
"redpajama_set_name": "RedPajamaStackExchange"
} | 6,922 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.